from agents import Agent, AgentHooks
from agents import OpenAIChatCompletionsModel, AsyncOpenAI, ModelSettings
from agents import set_tracing_disabled, enable_verbose_stdout_logging

from core.system_prompt import get_system_prompt
from api.tools.judges import JudgesAPI
from api.tools.courts import CourtsAPI
from api.tools.contracts import ContractsAPI
from api.tools.decisions import DecisionsAPI
from api.tools.admin_proceedings import AdminProceedingsAPI
from api.tools.civil_proceedings import CivilProceedingsAPI

set_tracing_disabled(True)
enable_verbose_stdout_logging()


class BaseAgent(Agent[None]):
    """Agent subclass with no run context (Agent[None])."""

    pass


class MyAgentHooks(AgentHooks):
    """Lifecycle hooks that log when the agent starts and finishes a run."""

    async def on_start(self, context, agent):
        print(f"\nšŸƒā€ā™‚ļøā€āž”ļø[AgentHooks] {agent.name} started.")

    async def on_end(self, context, agent, output):
        print(f"\nšŸ[AgentHooks] {agent.name} ended.")


def assistant_agent(model_name: str = "qwen3-next:80b-cloud") -> BaseAgent:
    """Build the legal-research assistant agent wired to all domain tool APIs."""
    judge_api = JudgesAPI()
    court_api = CourtsAPI()
    contract_api = ContractsAPI()
    decision_api = DecisionsAPI()
    admin_proceedings_api = AdminProceedingsAPI()
    civil_proceedings_api = CivilProceedingsAPI()

    # Local Ollama server exposing an OpenAI-compatible Chat Completions endpoint.
    client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="ollama")
    model = OpenAIChatCompletionsModel(model=model_name, openai_client=client)

    agent = BaseAgent(
        name="Assistant",
        instructions=get_system_prompt(model_name),
        model=model,
        model_settings=ModelSettings(
            temperature=0.7,
            tool_choice="auto",
            parallel_tool_calls=False,
        ),
        tools=[
            judge_api.judge,
            judge_api.judge_id,
            judge_api.judge_autocomplete,
            court_api.court,
            court_api.court_id,
            court_api.court_autocomplete,
            contract_api.contract,
            contract_api.contract_id,
            contract_api.contract_autocomplete,
            decision_api.decision,
            decision_api.decision_id,
            decision_api.decision_autocomplete,
            admin_proceedings_api.admin_proceedings,
            admin_proceedings_api.admin_proceedings_id,
            admin_proceedings_api.admin_proceedings_autocomplete,
            civil_proceedings_api.civil_proceedings,
            civil_proceedings_api.civil_proceedings_id,
            civil_proceedings_api.civil_proceedings_autocomplete,
        ],
        # Feed tool results back to the model so it can keep reasoning after each call.
        tool_use_behavior="run_llm_again",
        reset_tool_choice=True,
        hooks=MyAgentHooks(),
    )
    return agent
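

# --- Usage sketch (assumption, not part of the original module) ---
# A minimal way to exercise assistant_agent() with the Agents SDK Runner.
# It assumes an Ollama server is reachable at http://localhost:11434 and
# already serves the configured model; the prompt string is purely illustrative.
if __name__ == "__main__":
    import asyncio

    from agents import Runner

    async def _demo() -> None:
        agent = assistant_agent()
        # Runner.run drives the tool-calling loop until the model returns a final answer.
        result = await Runner.run(agent, "Which courts have names starting with 'District'?")
        print(result.final_output)

    asyncio.run(_demo())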