Skip to content

relevance_extractor_agent

langroid/agent/special/relevance_extractor_agent.py

Agent to retrieve segments from a body of text that are relevant to a query.

RelevanceExtractorAgent(config)

Bases: ChatAgent

Agent for extracting segments from text that are relevant to a given query.

Source code in langroid/agent/special/relevance_extractor_agent.py
def __init__(self, config: RelevanceExtractorAgentConfig):
    """Initialize the agent and enable the segment-extraction tool.

    Args:
        config: agent configuration; its `query` and `segment_length`
            fields drive the extraction prompts.
    """
    super().__init__(config)
    # Narrow the config attribute's type to this agent's specific config class.
    self.config: RelevanceExtractorAgentConfig = config
    # Allow the LLM to respond with a SegmentExtractTool structured message.
    self.enable_message(SegmentExtractTool)
    # Set by llm_response/llm_response_async; holds the passage annotated
    # with segment numbers, later consumed by extract_segments.
    self.numbered_passage: Optional[str] = None

llm_response(message=None)

Compose a prompt asking to extract relevant segments from a passage. Steps: (1) number the segments in the passage, (2) compose the prompt, (3) send it to the LLM.

Source code in langroid/agent/special/relevance_extractor_agent.py
@no_type_check
def llm_response(
    self, message: Optional[str | ChatDocument] = None
) -> Optional[ChatDocument]:
    """Ask the LLM to extract query-relevant segments from a passage.

    The passage in `message` is annotated with segment numbers, wrapped
    in a PASSAGE/QUERY prompt, and forwarded to the parent ChatAgent's
    LLM response method.
    """
    assert self.config.query is not None, "No query specified"
    assert message is not None, "No message specified"
    # Accept either a plain string or a ChatDocument wrapper.
    if isinstance(message, ChatDocument):
        passage = message.content
    else:
        passage = message
    # Annotate the passage with segment numbers so the LLM can cite them.
    self.numbered_passage = number_segments(passage, self.config.segment_length)
    # Build the prompt pairing the numbered passage with the query.
    prompt = f"""
    PASSAGE:
    {self.numbered_passage}

    QUERY: {self.config.query}
    """
    # Delegate the actual LLM call to the parent class.
    return super().llm_response(prompt)

llm_response_async(message=None) async

Compose a prompt asking to extract relevant segments from a passage. Steps: (1) number the segments in the passage, (2) compose the prompt, (3) send it to the LLM. The LLM is expected to generate a structured message conforming to the SegmentExtractTool schema, i.e. it should contain a `segment_list` field whose value is a list of segment numbers or ranges, like "10,12,14-17".

Source code in langroid/agent/special/relevance_extractor_agent.py
@no_type_check
async def llm_response_async(
    self, message: Optional[str | ChatDocument] = None
) -> Optional[ChatDocument]:
    """Async version: ask the LLM to extract query-relevant segments.

    The passage in `message` is annotated with segment numbers, wrapped
    in a PASSAGE/QUERY prompt, and forwarded to the parent ChatAgent's
    async LLM response method. The LLM is expected to reply with a
    structured SegmentExtractTool message whose `segment_list` field is
    a list of segment numbers or ranges, like "10,12,14-17".
    """
    assert self.config.query is not None, "No query specified"
    assert message is not None, "No message specified"
    # Accept either a plain string or a ChatDocument wrapper.
    if isinstance(message, ChatDocument):
        passage = message.content
    else:
        passage = message
    # Annotate the passage with segment numbers so the LLM can cite them.
    self.numbered_passage = number_segments(passage, self.config.segment_length)
    # Build the prompt pairing the numbered passage with the query.
    prompt = f"""
    PASSAGE:
    {self.numbered_passage}

    QUERY: {self.config.query}
    """
    # Delegate the actual LLM call to the parent class.
    return await super().llm_response_async(prompt)

extract_segments(msg)

Method to handle a SegmentExtractTool message from the LLM

Source code in langroid/agent/special/relevance_extractor_agent.py
def extract_segments(self, msg: SegmentExtractTool) -> str:
    """Handle a SegmentExtractTool message from the LLM.

    Looks up the segment spec (e.g. "10,12,14-17") in the previously
    numbered passage and returns the extracted text prefixed with DONE
    to end the task. Any failure yields DONE + NO_ANSWER instead.
    """
    no_answer = DONE + " " + NO_ANSWER
    # With no message history there is nothing to extract from.
    if not self.message_history:
        return no_answer
    spec = msg.segment_list
    # An empty spec or an explicit NO_ANSWER means nothing was relevant.
    if spec is None or spec.strip() in ["", NO_ANSWER]:
        return no_answer
    assert self.numbered_passage is not None, "No numbered passage"
    try:
        # The passage was numbered in llm_response/llm_response_async.
        extracts = extract_numbered_segments(self.numbered_passage, spec)
    except Exception:
        # Malformed spec (bad numbers/ranges) — treat as no answer.
        return no_answer
    # The DONE prefix ends the task, carrying the extracted segments.
    return DONE + " " + extracts

handle_message_fallback(msg)

Handle the case where the LLM forgets to use SegmentExtractTool

Source code in langroid/agent/special/relevance_extractor_agent.py
def handle_message_fallback(
    self, msg: str | ChatDocument
) -> str | ChatDocument | None:
    """Handle the case where the LLM forgets to use SegmentExtractTool.

    If the non-tool message came from the LLM, end the task with
    NO_ANSWER; messages from any other entity are ignored (None).
    """
    came_from_llm = (
        isinstance(msg, ChatDocument) and msg.metadata.sender == Entity.LLM
    )
    return DONE + " " + NO_ANSWER if came_from_llm else None