"""Assistant agent assembly: wires legal-domain API tools into a local LLM agent."""
from agents import Agent
|
|
from agents import OpenAIChatCompletionsModel, AsyncOpenAI, ModelSettings
|
|
from agents import set_tracing_disabled, enable_verbose_stdout_logging
|
|
|
|
from core.system_prompt import SYSTEM_PROMPT
|
|
from core.stream_response import stream_response
|
|
|
|
from core.api.tools.judges import JudgesAPI
|
|
from core.api.tools.coutrs import CourtsAPI
|
|
from core.api.tools.contracts import ContractsAPI
|
|
from core.api.tools.decisions import DecisionsAPI
|
|
from core.api.tools.admin_proceedings import AdminProceedingsAPI
|
|
from core.api.tools.civil_proceedings import CivilProceedingsAPI
|
|
|
|
# Disable the Agents SDK tracing exporter for every run in this module
# (no telemetry is sent for agent executions).
set_tracing_disabled(True)

# Uncomment to dump verbose SDK logs to stdout when debugging agent runs.
#enable_verbose_stdout_logging()
|
|
|
|
class BaseAgent(Agent[None]):
    """Agent specialization with no shared run-context object (``None`` context)."""
|
|
|
|
def assistant_agent(prompt: str, session):
    """Build a legal-research assistant agent and stream its reply.

    Args:
        prompt: The user's question, forwarded verbatim to the agent run.
        session: Conversation session object handed through to ``stream_response``.

    Returns:
        Whatever ``stream_response`` produces for this agent/prompt/session.
    """
    # Each tool API exposes three callables named <base>, <base>_id and
    # <base>_autocomplete; build the flat tool list from this table so the
    # registration order matters in exactly one place.
    tool_apis = (
        (JudgesAPI(), "judge"),
        (CourtsAPI(), "court"),
        (ContractsAPI(), "contract"),
        (DecisionsAPI(), "decision"),
        (AdminProceedingsAPI(), "admin_proceedings"),
        (CivilProceedingsAPI(), "civil_proceedings"),
    )
    toolbox = [
        getattr(api, attr)
        for api, base in tool_apis
        for attr in (base, f"{base}_id", f"{base}_autocomplete")
    ]

    # Local Ollama endpoint speaking the OpenAI chat-completions protocol.
    # NOTE(review): a fresh AsyncOpenAI client is created on every call and
    # never closed — consider reusing a single client; confirm lifecycle.
    llm = OpenAIChatCompletionsModel(
        model="gpt-oss:20b-cloud",
        openai_client=AsyncOpenAI(
            base_url="http://localhost:11434/v1",
            api_key="ollama",  # Ollama ignores the key, but the client requires one.
        ),
    )

    agent = BaseAgent(
        name="Assistant",
        instructions=SYSTEM_PROMPT,
        model=llm,
        model_settings=ModelSettings(
            temperature=0.1,            # keep answers close to deterministic
            tool_choice="auto",
            parallel_tool_calls=False,  # at most one tool call per model turn
            max_tokens=512,
        ),
        tools=toolbox,
        tool_use_behavior="run_llm_again",  # feed tool output back for a final answer
        reset_tool_choice=True,
    )

    return stream_response(agent, prompt, session)