diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..707a9ee --- /dev/null +++ b/.dockerignore @@ -0,0 +1,16 @@ +.git +.gitignore +.venv +venv +env/ +__pycache__/ +*.pyc +*.pyo +*.pyd +.Python +test/ +README.md +docker-compose.yml +Dockerfile +.dockerignore +.files/ \ No newline at end of file diff --git a/.gitignore b/.gitignore index f967714..1b5c3a2 100644 Binary files a/.gitignore and b/.gitignore differ diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..5af32eb --- /dev/null +++ b/Dockerfile @@ -0,0 +1,18 @@ +FROM python:3.11-slim +WORKDIR /main + +RUN apt-get update && apt-get install -y \ + gcc \ + g++ \ + make \ + pkg-config \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +COPY requirements.txt . + +RUN pip install --no-cache-dir -r requirements.txt + +COPY . . + +ENTRYPOINT ["chainlit", "run", "app.py", "--host", "0.0.0.0", "--port", "8000"] \ No newline at end of file diff --git a/core/__init__.py b/README.md similarity index 100% rename from core/__init__.py rename to README.md diff --git a/api/tools/admin_proceedings.py b/api/tools/admin_proceedings.py index 2b5ef81..8bde97a 100644 --- a/api/tools/admin_proceedings.py +++ b/api/tools/admin_proceedings.py @@ -1,4 +1,5 @@ from api.fetch_api_data import fetch_api_data, docstring_from_model + from api.schemas import (AdministrativeProceedings, AdministrativeProceedingsID, AdministrativeProceedingsAutocomplete) diff --git a/app.py b/app.py index de748f1..cd0671d 100644 --- a/app.py +++ b/app.py @@ -1,8 +1,12 @@ +import os import chainlit as cl from core.initialize_agent import assistant_agent from core.stream_response import stream_response +def clear_screen(): + os.system('cls' if os.name == 'nt' else 'clear') + @cl.set_starters async def set_starters(): return [ @@ -47,6 +51,8 @@ async def chat_profile(): @cl.on_chat_start async def start(): + clear_screen() + cl.user_session.set("message_history", []) profile = cl.user_session.get("chat_profile") 
diff --git a/core/__init__.py b/core/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/initialize_agent.py b/core/initialize_agent.py index 16d2167..ee29c04 100644 --- a/core/initialize_agent.py +++ b/core/initialize_agent.py @@ -1,4 +1,4 @@ -from agents import Agent +from agents import Agent, AgentHooks from agents import OpenAIChatCompletionsModel, AsyncOpenAI, ModelSettings from agents import set_tracing_disabled, enable_verbose_stdout_logging @@ -16,6 +16,10 @@ enable_verbose_stdout_logging() class BaseAgent(Agent[None]): pass +class MyAgentHooks(AgentHooks): + async def on_start(self, context, agent): print(f"\n🏃‍♂️‍➡️[AgentHooks] {agent.name} started.") + async def on_end(self, context, agent, output): print(f"\n🏁[AgentHooks] {agent.name} ended.") + def assistant_agent(model_name: str = "qwen3-next:80b-cloud") -> BaseAgent: judge_api = JudgesAPI() @@ -50,6 +54,7 @@ def assistant_agent(model_name: str = "qwen3-next:80b-cloud") -> BaseAgent: ], tool_use_behavior="run_llm_again", reset_tool_choice=True, + hooks=MyAgentHooks(), ) return agent \ No newline at end of file diff --git a/core/stream_response.py b/core/stream_response.py index f08bfd3..1ceeda9 100644 --- a/core/stream_response.py +++ b/core/stream_response.py @@ -1,26 +1,23 @@ -from typing import Any +from typing import Any, AsyncGenerator from agents import Agent, Runner -from agents import RunHooks, RunContextWrapper +from agents import RunHooks, ModelResponse from openai.types.responses import ResponseTextDeltaEvent -class MyHooks(RunHooks): - """Custom hooks to track usage after each model request.""" - async def on_model_response(self, context: RunContextWrapper, agent: Agent, output: Any) -> None: - ctx_usage = context.usage - if ctx_usage: - print(f"\n🪙{agent.name} → {ctx_usage.requests} requests") - print(f"\n🪙Input tokens: {ctx_usage.input_tokens}") - print(f"\n🪙Output tokens: {ctx_usage.output_tokens}") - print(f"\n🪙Total tokens: {ctx_usage.total_tokens}") +class 
MyRunHooks(RunHooks): + """RunHooks for logging the lifecycle of an agent.""" + async def on_agent_start(self, context, agent) -> None: + print(f"\n 🐾[RunHooks] {agent.name} started.\n") + async def on_agent_end(self, context, agent, output: Any) -> None: + print(f"\n 🚪[RunHooks] {agent.name} ended.\n") -async def stream_response(agent: Agent, prompt: str): +async def stream_response(agent: Agent, prompt: str) -> AsyncGenerator[str, None]: """Stream agent response and update the UI.""" - hooks = MyHooks() try: - result = Runner.run_streamed(agent, input=prompt, hooks=hooks) + result = Runner.run_streamed(agent, input=prompt, hooks=MyRunHooks()) async for event in result.stream_events(): if event.type == "raw_response_event" and isinstance(event.data, ResponseTextDeltaEvent): yield event.data.delta # <-- sends the next piece of response text + except Exception as e: yield f"⚠️🖨️ Error: {e}" diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..bd7ca55 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,13 @@ +version: "3.8" + +services: + legal-ai-assistant: + build: . + ports: + - "8000:8000" + volumes: + - .:/main + environment: + - PYTHONPATH=/main + - PYTHONUNBUFFERED=1 + restart: unless-stopped \ No newline at end of file