Getting started

Structured outputs

You can force your Assistant to reply using structured outputs. This can be beneficial when you want the Assistant to return data in the form of JSON.

Step-by-step guide

  1. Start by creating an LLM, a Tool and an Agent. Note that you usually only need to create the llm object once and re-use it for subsequent agents.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
import os
from superagent.client import Superagent

# Requires SUPERAGENT_API_KEY in the environment; points at the beta API host.
client = Superagent(
    base_url="https://api.beta.superagent.sh",
    token=os.environ["SUPERAGENT_API_KEY"]
)

# We recommend querying for existing LLMs prior to creating.
# Requires OPENAI_API_KEY in the environment.
llm = client.llm.create(request={
    "provider": "OPENAI",
    "apiKey": os.environ["OPENAI_API_KEY"]
})

# Create the Assistant itself; `llmModel` selects the model used for replies.
agent = client.agent.create(request={
    "name": "Structured Assistant",
    "description": "An Assistant that returns responses in json",
    "avatar": "https://mylogo.com/logo.png", # Replace with a real image
    "isActive": True,
    "llmModel": "GPT_4_1106_PREVIEW",
    "initialMessage": "Hi there, how can I help you?",
    "prompt": "Use the Browser to answer the users question."
})

# A BROWSER-type tool the agent can use to fetch and summarize web pages.
tool = client.tool.create(request={
    "name": "Browser",
    "description": "useful for analyzing and summarizing websites and urls.",
    "type": "BROWSER"
})

# Attach the tool and the LLM to the agent so it can use them at invoke time.
client.agent.add_tool(agent_id=agent.data.id, tool_id=tool.data.id)
client.agent.add_llm(agent_id=agent.data.id, llm_id=llm.data.id)
  1. Invoke your Agent with the output_schema parameter. This parameter should hold the desired schema.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
# Invoke the agent. Passing `output_schema` instructs the Assistant to reply
# with JSON matching the schema instead of free-form text.
prediction = client.agent.invoke(
    agent_id=agent.data.id,
    input="List the top 5 articles on https://news.ycombinator.com.",  # fixed: missing comma was a SyntaxError
    enable_streaming=False,
    session_id="my_session_id",
    output_schema="[{title: string, points: number, url: string}]" # Your desired output schema
)

print(prediction.data.get("output"))

# [{
#     "title": "...",
#     "points": "...",
#     "url": "..."
# }, {
#     ...
# }]

By passing the output_schema we make sure the Assistant returns a JSON response in our desired output schema.

Full code

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
import os
from superagent.client import Superagent

# Requires SUPERAGENT_API_KEY in the environment; points at the beta API host.
client = Superagent(
    base_url="https://api.beta.superagent.sh",
    token=os.environ["SUPERAGENT_API_KEY"]
)

# We recommend querying for existing LLMs prior to creating.
# Requires OPENAI_API_KEY in the environment.
llm = client.llm.create(request={
    "provider": "OPENAI",
    "apiKey": os.environ["OPENAI_API_KEY"]
})

# Create the Assistant itself; `llmModel` selects the model used for replies.
agent = client.agent.create(request={
    "name": "Structured Assistant",
    "description": "An Assistant that returns responses in json",
    "avatar": "https://mylogo.com/logo.png", # Replace with a real image
    "isActive": True,
    "llmModel": "GPT_4_1106_PREVIEW",
    "initialMessage": "Hi there, how can I help you?",
    "prompt": "Use the Browser to answer the users question."
})

# A BROWSER-type tool the agent can use to fetch and summarize web pages.
tool = client.tool.create(request={
    "name": "Browser",
    "description": "useful for analyzing and summarizing websites and urls.",
    "type": "BROWSER"
})

# Attach the tool and the LLM to the agent so it can use them at invoke time.
client.agent.add_tool(agent_id=agent.data.id, tool_id=tool.data.id)
client.agent.add_llm(agent_id=agent.data.id, llm_id=llm.data.id)

# Invoke the agent. Passing `output_schema` instructs the Assistant to reply
# with JSON matching the schema instead of free-form text.
prediction = client.agent.invoke(
    agent_id=agent.data.id,
    input="List the top 5 articles on https://news.ycombinator.com.",  # fixed: missing comma was a SyntaxError
    enable_streaming=False,
    session_id="my_session_id",
    output_schema="[{title: string, points: number, url: string}]" # Your desired output schema
)

print(prediction.data.get("output"))

# [{
#     "title": "...",
#     "points": "...",
#     "url": "..."
# }, {
#     ...
# }]