from agents import Agent, OpenAIChatCompletionsModel, AsyncOpenAI, ModelSettings, set_tracing_disabled
from agents.mcp import MCPServerStreamableHttp

from backend.core.config import LITELLM_BASE_URL, LITELLM_API_KEY, AGENT_TEMPERATURE, LLM_TIMEOUT, DEFAULT_MODEL, MCP_SERVER_URL
from backend.core.system_prompt import get_system_prompt

#from api.tools import ALL_TOOLS

def _make_client() -> AsyncOpenAI:
|
|
return AsyncOpenAI (
|
|
base_url=LITELLM_BASE_URL,
|
|
api_key=LITELLM_API_KEY,
|
|
timeout=LLM_TIMEOUT,
|
|
max_retries=0
|
|
)
|
|
|
|
def get_mcp_server() -> MCPServerStreamableHttp:
|
|
return MCPServerStreamableHttp(
|
|
name="Slovak Justice API",
|
|
params={"url": MCP_SERVER_URL},
|
|
cache_tools_list=True
|
|
)
|
|
|
|
def assistant_agent(model_name: str = DEFAULT_MODEL) -> Agent:
|
|
"""Initialize the assistant agent for legal work"""
|
|
|
|
client = _make_client()
|
|
model = OpenAIChatCompletionsModel(
|
|
model=model_name,
|
|
openai_client=client
|
|
)
|
|
|
|
return Agent(
|
|
name="AI Lawyer Assistant",
|
|
instructions=get_system_prompt(model_name),
|
|
model=model,
|
|
model_settings=ModelSettings(
|
|
temperature=AGENT_TEMPERATURE,
|
|
tool_choice="auto",
|
|
parallel_tool_calls=False
|
|
),
|
|
tool_use_behavior="run_llm_again",
|
|
reset_tool_choice=True,
|
|
mcp_servers=[get_mcp_server()],
|
|
)
|