# 📋 Standard Operating Procedure (SOP) System

### Brief Introduction

A **Standard Operating Procedure (SOP)** is a reasoning graph that consists of a set of step-by-step instructions outlining how to execute a specific task or process. Overall, the SOP System enables users to communicate with different agents simultaneously or create virtual cases, allowing agents to interact with each other.

### SOPConfig

The SOPConfig class is the configuration class for the SOP system. It initializes the required fields of the SOP config, such as nodes, edges, root, and end. The SOPConfig class also generates the SOP config automatically based on the query and task description.

```python
class SOPConfig(Config):
    """Configuration for an SOP reasoning graph (nodes, edges, root, end)."""

    # "end" is optional in the raw config; it defaults to "end_node" below.
    required_fields = ["nodes", "edges", "root"]

    def __init__(self, config_path_or_dict: Optional[Union[str, dict]] = None) -> None:
        """
        Load and validate an SOP config.

        Args:
            config_path_or_dict: path to a config file, or the config dict
                itself; forwarded to the ``Config`` base class.
        """
        super().__init__(config_path_or_dict)
        self._validate_config()

        # node name -> node config dict
        self.nodes: Dict[str, dict] = self.config_dict["nodes"]
        # node name -> list of successor node names
        self.edges: Dict[str, List[str]] = self.config_dict["edges"]
        self.root: str = self.config_dict["root"]
        self.end: str = self.config_dict.get("end", "end_node")
        # Optional knowledge base. The original annotation `Any[list, str]`
        # was invalid typing syntax (``Any`` cannot be subscripted) and
        # raised TypeError when the attribute annotation was evaluated.
        # NOTE(review): consumers call ``retrieve_from_file`` on this value,
        # so a KB object rather than a plain dict may be expected — confirm.
        self.global_kb: Optional[Any] = self.config_dict.get("kb", None)

#### Generate config

The generate\_config method automatically generates the SOP config from the query and task description, using an OpenAI LLM to draft the graph structure.

```python
@classmethod
def generate_config(cls, query, task_description):
    """
    Generate an SOP config from a user query and a task description.

    An OpenAI model is prompted in JSON mode to draft the graph structure;
    the draft is validated with ``check_config``, and every node except the
    end node is expanded into a full node config via
    ``NodeConfig.generate_config``.

    Args:
        query: the user's original request.
        task_description: natural-language description of the task.

    Returns:
        SOPConfig: a fully populated config instance.
    """
    llm_config = {
        "LLM_type": "OpenAI",
        "model": "gpt-4-turbo-2024-04-09",
        "temperature": 0.3,
        "log_path": "logs/generate_config/sop",
        "ACTIVE_MODE": True,
        "SAVE_LOGS": True,
    }
    llm = OpenAILLM(LLMConfig(llm_config))
    system_prompt = "You are a helpful assistant designed to output JSON."
    last_prompt = SOP_CONFIG_GENERATION_PROMPT_TEMPLATE.format(
        query=query, task_description=task_description
    )

    response, content = llm.get_response(
        chat_messages=None,
        system_prompt=system_prompt,
        last_prompt=last_prompt,
        response_format={"type": "json_object"},
    )

    # Strip an optional Markdown code fence before parsing. The previous
    # chain ``strip("`").strip("json")`` removed *characters* from both
    # ends rather than a prefix/suffix, which could corrupt valid JSON.
    raw = content.strip()
    if raw.startswith("```"):
        raw = raw.removeprefix("```json").removeprefix("```")
        raw = raw.removesuffix("```").strip()
    json_config = json.loads(raw)
    checked_config = cls.check_config(json_config)

    # Expand every node except the end node into a full node config.
    nodes_dict = {}
    for node_name, node_description in checked_config["nodes"].items():
        if node_name == checked_config["end"]:
            continue
        node_config = NodeConfig.generate_config(
            task_description,
            node_name,
            node_description,
            checked_config["edges"][node_name],
        )
        nodes_dict[node_name] = node_config.to_dict()

    return cls(
        config_path_or_dict={
            "nodes": nodes_dict,
            "edges": checked_config["edges"],
            "root": checked_config["root"],
            "end": checked_config["end"],
        }
    )
```

#### Check config

The check\_config method validates the SOP config. It verifies that all required fields are present, that every node referenced in the edges is declared in the nodes, and that the root and end nodes are declared as well.

```python
@staticmethod
def check_config(config: dict):
    """
    Validate an SOP config dict and normalize its edges in place.

    Ensures the four required fields are present, that every node referenced
    in ``edges`` (and by ``root``/``end``) is declared in ``nodes``, and that
    every node except ``end`` appears as an edge source. As a side effect, a
    self-loop is prepended to each successor list so a node may transition to
    itself.

    Args:
        config: candidate SOP config with "nodes", "edges", "root", "end".

    Returns:
        dict: the same config object, with edges normalized in place.

    Raises:
        ValueError: if a required field is missing or any reference points
            at an undeclared node.
    """
    # Consolidates four copy-pasted checks; the messages are unchanged.
    for field in ("nodes", "edges", "root", "end"):
        if field not in config:
            raise ValueError(f"The '{field}' field is required in the SOP config.")

    validate_nodes_name_set = set(config["nodes"].keys())
    visited_nodes_name_set = set()
    for node_name, next_nodes in config["edges"].items():
        if node_name not in validate_nodes_name_set:
            # Fixed a truncated message ("... is not a validate").
            raise ValueError(
                f"The node name '{node_name}' in the edges is not a valid node."
            )
        else:
            visited_nodes_name_set.add(node_name)

        for next_node in next_nodes:
            if next_node not in validate_nodes_name_set:
                # Fixed a missing closing quote around the node name.
                raise ValueError(
                    f"The next node name '{next_node}' of '{node_name}' in the edges is not in the nodes."
                )

        # If the node is not in the list of next_nodes, insert it into the
        # first position of the list (allows a node to loop on itself).
        if node_name not in next_nodes:
            next_nodes.insert(0, node_name)

    if config["root"] not in validate_nodes_name_set:
        raise ValueError(f"The root node '{config['root']}' is not in the nodes.")
    if config["end"] not in validate_nodes_name_set:
        raise ValueError(f"The end node '{config['end']}' is not in the nodes.")

    # The end node never acts as an edge source, so count it as visited
    # before comparing coverage.
    visited_nodes_name_set.add(config["end"])
    if visited_nodes_name_set != validate_nodes_name_set:
        raise ValueError(
            f"The nodes in the edges are not the same as the nodes.\n Validate nodes: {validate_nodes_name_set}\n Visited nodes: {visited_nodes_name_set}"
        )

    return config
```

#### Next

The next method determines the next node and the agent that needs to act based on the current situation. Detailed comments are included in the code below.

```python
    def next(self, environment: Environment):
    """
    Determine the next node and the agent that needs action based on the current situation
    Return :
    next_node(node) : the next node
    next_agent_name(str) : the name of the next act agent
    """

    # Check if it is the first time to enter the current node
    if self.current_node.is_begin:
        # Get the agent according to the beginning node
        agent_name = self.current_node.name_role_hash.inverse[
            self.current_node.begin_role
        ]
        return self.current_node, agent_name

    # Get relevant memory
    shared_short_term_memory: ShortTermMemory = environment.shared_memory[
        "short_term_memory"
    ]
    shared_long_term_memory: LongTermMemory = environment.shared_memory[
        "long_term_memory"
    ]
    if len(shared_short_term_memory) > 0 and self.global_kb:
        relevant_memory = self.global_kb.retrieve_from_file(
            query=shared_short_term_memory.get_memory()[-1]["content"],
            file_path=shared_long_term_memory.json_path,
        )
        if "no suitable information retrieved" in relevant_memory.lower():
            relevant_memory = ""
    else:
        relevant_memory = ""

    if shared_short_term_memory:
        history_messages = shared_short_term_memory.get_memory()
    else:
        history_messages = []

    # Transit to the next node
    next_node: Node = self.transit(
        history_messages=history_messages,
        relevant_memory=relevant_memory,
        environment_summary=environment.shared_memory["summary"],
    )
    self.current_node = next_node
    # If the next node is the end node, finish the process directly
    if not next_node or next_node.node_name == self.end:
        self.finished = True
        return None, None

    # Route to get the next agent name
    next_agent_name = self.route(
        history_messages=history_messages,
        relevant_memory=relevant_memory,
    )

    return next_node, next_agent_name
```
