
Architects

AgentInput

Bases: BaseModel

The input to the architect agent.

Attributes:

Name Type Description
problem_description str

The description of the problem that the generated graph should solve.

node_types list[Literal[tuple(node_types_names)]]

The types of the nodes that can be used in the generated graph.

max_llm_nodes int

The maximum number of LLM nodes that can be used in the generated graph.

authorized_placeholders Sequence[str]

The placeholders that must be used in the shared context prompt of the LLM nodes.

node_types_description str

A string describing each node type.

Source code in ebiose/architects/architect_agent.py
class AgentInput(BaseModel):
    """The input to the architect agent.

    Attributes:
        problem_description: The description of the problem that the generated graph should solve.
        node_types: The types of the nodes that can be used in the generated graph.
        max_llm_nodes: The maximum number of LLM nodes that can be used in the generated graph.
        authorized_placeholders: The placeholders that must be used in the shared context prompt of the LLM nodes.
        node_types_description: A string describing each node type.
    """

    problem_description: str = Field(
        ...,
        description="The description of the problem that the generated graph should solve.",
    )
    node_types: list[Literal[tuple(node_types_names)]] = Field(
        node_types_names,
        description="The types of the nodes that can be used in the generated graph.",
    )
    max_llm_nodes: int = Field(
        ...,
        description="The maximum number of LLM nodes that can be used in the generated graph.",
    )
    authorized_placeholders: Sequence[str] = Field(
        ...,
        description="The placeholders that must be used in the shared context prompt of the LLM nodes.",
    )

    node_types_description: str = Field(
        default_factory=lambda: get_node_types_docstrings(
            node_types_names,
        ),
        description="A string describing each node type.",
    )
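
A minimal construction sketch. The problem description and placeholder name below are illustrative, and node_types is left at its default (all available node types):

from ebiose.architects.architect_agent import AgentInput

architect_input = AgentInput(
    problem_description="Summarize a support ticket and draft a reply.",  # illustrative
    max_llm_nodes=3,
    authorized_placeholders=["ticket_text"],  # should match the fields of your agent's input model
)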

AgentOutput

Bases: BaseModel

The expected final output of the architect agent.

Attributes:

Name Type Description
graph Graph

The final generated graph.

Source code in ebiose/architects/architect_agent.py
class AgentOutput(BaseModel):
    """The expected final output.

    graph: The final generated graph.
    """

    graph: Graph = Field(..., description="The final generated graph.")

build_architect_agent(model)

Build an architect agent that generates a graph to solve a problem.

Source code in ebiose/architects/architect_agent.py
def build_architect_agent(model: str) -> LangGraph.Agent:
    """Build an architect agent that generates a graph to solve a problem."""
    architect_graph = build_architect_graph(model)

    return LangGraph.Agent(
        graph=architect_graph,
        input_model=AgentInput,
        output_model=AgentOutput,
        model=model,
    )
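
A usage sketch, assuming "gpt-4o" as a model identifier accepted by the backend:

from ebiose.architects.architect_agent import AgentInput, build_architect_agent

architect_agent = build_architect_agent(model="gpt-4o")  # assumed model name

architect_output = architect_agent.run(  # run() is how generate_agent invokes the architect
    AgentInput(
        problem_description="Classify incoming emails by urgency.",  # illustrative
        max_llm_nodes=2,
        authorized_placeholders=["email_body"],  # illustrative placeholder
    ),
)

generated_graph = architect_output.graph  # AgentOutput wraps the generated Graph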

build_architect_graph(model)

Build a graph that goes through three steps:

  • Generate the architecture of the graph
  • Generate the prompts of each LLMNode
  • Generate the final structured graph

Validation happens in the last node, which retries (up to 3 times) on validation errors.

Source code in ebiose/architects/architect_agent.py
def build_architect_graph(
    model: str,
) -> Graph:
    """Build a graph that will go trough 2 steps.

    - Generate the architecture of the graph
    - Generate the prompts of each LLMNode

    Validation lies within the last node of the graph.
    """
    architect_graph = Graph(
        description="Architect AI",
        shared_context_prompt=SHARED_CONTEXT_PROMPT,
    )

    # Create and add the start and end nodes
    start_node = StartNode()
    end_node = EndNode()

    architect_graph.add_node(start_node)
    architect_graph.add_node(end_node)

    # Create, add and connect the architecture generation node to the graph
    architecture_node = LangGraph.LLMNode(
        id="graph_outline_generation",
        name="Graph Outline Generation",
        purpose="Step 1: Generate the outline of the graph",
        prompt=STEP1_PROMPT,
        model=model,
    )

    architect_graph.add_node(architecture_node)
    architect_graph.add_edge(
        Edge(start_node_id=start_node.id, end_node_id=architecture_node.id),
    )

    # Create, add and connect the prompt generation node to the graph
    prompt_node = LangGraph.LLMNode(
        id="llm_nodes_prompt_generation",
        name="LLM Nodes Prompt Generation",
        purpose="Step 2: Generate the prompts of each LLMNode",
        prompt=STEP2_PROMPT,
        model=model,
    )

    architect_graph.add_node(prompt_node)
    architect_graph.add_edge(
        Edge(start_node_id=architecture_node.id, end_node_id=prompt_node.id),
    )

    # Create, add and connect the final graph generation node to the graph
    graph_node = LangGraph.LLMNode(
        id="structured_graph_generation",
        name="Structured Graph Generation",
        purpose="Step 3: Generate the final graph",
        prompt=STEP3_PROMPT,
        model=model,
        retry_after_validation_errors=True,
        max_retries=3,
        temperature=0,
    )

    architect_graph.add_node(graph_node)
    architect_graph.add_edge(
        Edge(start_node_id=prompt_node.id, end_node_id=graph_node.id),
    )
    architect_graph.add_edge(Edge(start_node_id=graph_node.id, end_node_id=end_node.id))

    return architect_graph
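
The resulting graph is a linear pipeline. A short sketch, with an assumed model identifier:

from ebiose.architects.architect_agent import build_architect_graph

# StartNode -> graph_outline_generation -> llm_nodes_prompt_generation
#   -> structured_graph_generation (temperature 0, up to 3 retries on validation errors) -> EndNode
architect_graph = build_architect_graph(model="gpt-4o")  # assumed model name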

generate_agent(problem_description, agent_input, agent_output, model, node_types=None, max_llm_nodes=5, backend=LangGraph)

Generates an agent that can solve a problem given its description; the agent takes input of type agent_input and returns output of type agent_output.

Parameters:

Name Type Description Default
problem_description str

The description of the problem that the generated graph should solve.

required
agent_input type[BaseModel]

The input model of the agent.

required
agent_output type[BaseModel]

The output model of the agent.

required
model str

The model to use for generating the agent.

required
node_types set[Literal[tuple(node_types_names)]] | None

The types of the nodes that can be used in the generated graph.

None
max_llm_nodes int

The maximum number of LLM nodes that can be used in the generated graph.

5
backend Backend

The backend used to run the architect agent.

LangGraph
Source code in ebiose/architects/architect_agent.py
def generate_agent(  # noqa:PLR0913
    problem_description: str,
    agent_input: type[BaseModel],
    agent_output: type[BaseModel],
    model: str,
    node_types: set[Literal[tuple(node_types_names)]] | None = None,
    max_llm_nodes: int = 5,
    backend: Backend = LangGraph,
) -> LangGraph.Agent:
    """Generates an agent that can solve a problem given the problem description, takes input of type agent_input and returns output of type agent_output.

    Args:
        problem_description: The description of the problem that the generated graph should solve.
        agent_input: The input model of the agent.
        agent_output: The output model of the agent.
        model: The model to use for generating the agent.
        node_types: The types of the nodes that can be used in the generated graph.
        max_llm_nodes: The maximum number of LLM nodes that can be used in the generated graph.
        backend: The backend used to run the architect agent.
    """
    if node_types is None:
        node_types = set(node_types_names)
    node_types = set(node_types)  # ensure node_types is a set

    architect_agent = build_architect_agent(model)

    architect_input = AgentInput(
        problem_description=problem_description,
        node_types=node_types,
        max_llm_nodes=max_llm_nodes,
        authorized_placeholders=list(agent_input.model_fields.keys()),
    )

    architect_output = architect_agent.run(architect_input)

    return backend.Agent(
        graph=architect_output.graph,
        input_model=agent_input,
        output_model=agent_output,
        model=model,
    )
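
An end-to-end sketch with hypothetical input and output models and an assumed model identifier; the generated agent is run the same way the architect agent is:

from pydantic import BaseModel, Field

from ebiose.architects.architect_agent import generate_agent

class TicketInput(BaseModel):  # hypothetical input model
    ticket_text: str = Field(..., description="The raw support ticket.")

class TicketOutput(BaseModel):  # hypothetical output model
    reply: str = Field(..., description="A suggested reply to the ticket.")

agent = generate_agent(
    problem_description="Draft a helpful reply to a support ticket.",
    agent_input=TicketInput,
    agent_output=TicketOutput,
    model="gpt-4o",  # assumed model name
    max_llm_nodes=3,
)

result = agent.run(TicketInput(ticket_text="My invoice shows the wrong amount."))
print(result.reply)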

generate_agents(problem_description, agent_input, agent_output, model, node_types=None, max_llm_nodes=5, batch_number=2, backend=LangGraph)

Generates a batch of agents that can solve a problem given its description; each agent takes input of type agent_input and returns output of type agent_output.

Parameters:

Name Type Description Default
problem_description str

The description of the problem that the generated graph should solve.

required
agent_input type[BaseModel]

The input model of the agent.

required
agent_output type[BaseModel]

The output model of the agent.

required
model str

The model to use for generating the agent.

required
node_types set[Literal[tuple(node_types_names)]] | None

The types of the nodes that can be used in the generated graph.

None
max_llm_nodes int

The maximum number of LLM nodes that can be used in the generated graph.

5
backend Backend

The backend used to run the architect agent.

LangGraph
batch_number int

The number of agents to be created.

2
Source code in ebiose/architects/architect_agent.py
def generate_agents(  # noqa:PLR0913
    problem_description: str,
    agent_input: type[BaseModel],
    agent_output: type[BaseModel],
    model: str,
    node_types: set[Literal[tuple(node_types_names)]] | None = None,
    max_llm_nodes: int = 5,
    batch_number: int = 2,
    backend: Backend = LangGraph,
) -> list[LangGraph.Agent]:
    """Generates a batch of agents that can solve a problem given its description; each agent takes input of type agent_input and returns output of type agent_output.

    Args:
        problem_description: The description of the problem that the generated graph should solve.
        agent_input: The input model of the agent.
        agent_output: The output model of the agent.
        model: The model to use for generating the agent.
        node_types: The types of the nodes that can be used in the generated graph.
        max_llm_nodes: The maximum number of LLM nodes that can be used in the generated graph.
        backend: The backend used to run the architect agent.
        batch_number: The number of agents to be created.
    """
    if node_types is None:
        node_types = set(node_types_names)
    node_types = set(node_types)  # ensure node_types is a set

    architect_agent = build_architect_agent(model)
    agent_inputs = [
        AgentInput(
            problem_description=problem_description,
            node_types=node_types,
            max_llm_nodes=max_llm_nodes,
            authorized_placeholders=list(agent_input.model_fields.keys()),
        )
        for _ in range(batch_number)
    ]

    architect_outputs = architect_agent.run_in_batch(agent_inputs, batch_number)

    return [
        backend.Agent(
            graph=architect_output.graph,
            input_model=agent_input,
            output_model=agent_output,
            model=model,
        )
        for architect_output in architect_outputs
    ]
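
A sketch of generating several candidate agents in one call, again with hypothetical models and an assumed model identifier:

from pydantic import BaseModel, Field

from ebiose.architects.architect_agent import generate_agents

class TicketInput(BaseModel):  # hypothetical input model
    ticket_text: str = Field(..., description="The raw support ticket.")

class TicketOutput(BaseModel):  # hypothetical output model
    reply: str = Field(..., description="A suggested reply to the ticket.")

agents = generate_agents(
    problem_description="Draft a helpful reply to a support ticket.",
    agent_input=TicketInput,
    agent_output=TicketOutput,
    model="gpt-4o",   # assumed model name
    batch_number=3,   # three independently generated agents
)

for agent in agents:
    result = agent.run(TicketInput(ticket_text="My invoice shows the wrong amount."))
    print(result.reply)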