"""Factory for an Ollama-backed assistant Agent with stdout lifecycle hooks."""
from agents import Agent, AgentHooks
|
|
from agents import OpenAIChatCompletionsModel, AsyncOpenAI, ModelSettings
|
|
from agents import set_tracing_disabled
|
|
|
|
from core.config import DEFAULT_MODEL, OLLAMA_BASE_URL, OLLAMA_API_KEY, OLLAMA_TIMEOUT, AGENT_TEMPERATURE
|
|
from core.system_prompt import get_system_prompt
|
|
from api.tools import ALL_TOOLS
|
|
|
|
set_tracing_disabled(True)
|
|
|
|
class MyAgentHooks(AgentHooks):
    """Lifecycle hooks that log agent start/end events to stdout."""

    async def on_start(self, context, agent):
        """Log that *agent* has begun a run."""
        message = f"\n🏃♂️➡️ [AgentHooks] {agent.name} started."
        print(message)

    async def on_end(self, context, agent, output):
        """Log that *agent* has finished a run; *output* is unused here."""
        message = f"🏁 [AgentHooks] {agent.name} ended."
        print(message)
|
def assistant_agent(model_name: str = DEFAULT_MODEL) -> Agent:
    """Build the "Assistant" agent backed by a local Ollama endpoint.

    Args:
        model_name: Chat model identifier; defaults to ``DEFAULT_MODEL``.

    Returns:
        An ``Agent`` wired with the full tool set, model settings, and
        stdout logging hooks.
    """
    # NOTE(review): the endpoint is presumably OpenAI-compatible (Ollama's
    # chat-completions API), which is why a stock AsyncOpenAI client works.
    openai_client = AsyncOpenAI(
        base_url=OLLAMA_BASE_URL,
        api_key=OLLAMA_API_KEY,
        timeout=OLLAMA_TIMEOUT,
        max_retries=0,  # fail fast rather than retry the local server
    )
    chat_model = OpenAIChatCompletionsModel(
        model=model_name,
        openai_client=openai_client,
    )

    settings = ModelSettings(
        temperature=AGENT_TEMPERATURE,
        tool_choice="auto",
        parallel_tool_calls=False,  # one tool call at a time
    )
    return Agent(
        name="Assistant",
        instructions=get_system_prompt(model_name),
        model=chat_model,
        model_settings=settings,
        tools=ALL_TOOLS,
        tool_use_behavior="run_llm_again",  # feed tool results back to the LLM
        reset_tool_choice=True,
        hooks=MyAgentHooks(),
    )