add Docker files and README.md, change .gitignore

G0DSEND016 2025-12-19 06:27:29 +01:00
parent 76c10668a2
commit b7d735697f
10 changed files with 71 additions and 15 deletions

.dockerignore (new file, +16)

@@ -0,0 +1,16 @@
.git
.gitignore
.venv
venv
env/
__pycache__/
*.pyc
*.pyo
*.pyd
.Python
test/
README.md
docker-compose.yml
Dockerfile
.dockerignore
.files/

.gitignore (vendored, binary)

Binary file not shown.
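
Note: a text file like .gitignore is shown as binary when Git detects non-text bytes, most often a UTF-16 or other non-UTF-8 encoding written by a Windows editor; re-saving the file as plain UTF-8 would make future diffs readable. This is an inference from the viewer output, not something the commit states.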

Dockerfile (new file, +18)

@@ -0,0 +1,18 @@
FROM python:3.11-slim
WORKDIR /main
RUN apt-get update && apt-get install -y \
    gcc \
    g++ \
    make \
    pkg-config \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
ENTRYPOINT ["chainlit", "run", "app.py", "--host", "0.0.0.0", "--port", "8000"]
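
With this Dockerfile the image can also be tried without Compose, e.g. docker build -t legal-ai-assistant . followed by docker run --rm -p 8000:8000 legal-ai-assistant; the image tag here is illustrative, and the Compose file below maps the same port.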

(file name not shown)

@@ -1,4 +1,5 @@
from api.fetch_api_data import fetch_api_data, docstring_from_model
from api.schemas import (AdministrativeProceedings,
                         AdministrativeProceedingsID,
                         AdministrativeProceedingsAutocomplete)

app.py (+6)

@@ -1,8 +1,12 @@
+import os
import chainlit as cl
from core.initialize_agent import assistant_agent
from core.stream_response import stream_response
+def clear_screen():
+    os.system('cls' if os.name == 'nt' else 'clear')
@cl.set_starters
async def set_starters():
    return [
@@ -47,6 +51,8 @@ async def chat_profile():
@cl.on_chat_start
async def start():
+    clear_screen()
    cl.user_session.set("message_history", [])
    profile = cl.user_session.get("chat_profile")

core/__init__ (new file, empty)

core/initialize_agent.py

@@ -1,4 +1,4 @@
-from agents import Agent
+from agents import Agent, AgentHooks
from agents import OpenAIChatCompletionsModel, AsyncOpenAI, ModelSettings
from agents import set_tracing_disabled, enable_verbose_stdout_logging
@@ -16,6 +16,10 @@ enable_verbose_stdout_logging()
class BaseAgent(Agent[None]): pass
+class MyAgentHooks(AgentHooks):
+    async def on_start(self, context, agent): print(f"\n🏃‍♂️‍➡️[AgentHooks] {agent.name} started.")
+    async def on_end(self, context, agent, output): print(f"\n🏁[AgentHooks] {agent.name} ended.")
def assistant_agent(model_name: str = "qwen3-next:80b-cloud") -> BaseAgent:
    judge_api = JudgesAPI()
@@ -50,6 +54,7 @@ def assistant_agent(model_name: str = "qwen3-next:80b-cloud") -> BaseAgent:
        ],
        tool_use_behavior="run_llm_again",
        reset_tool_choice=True,
+        hooks=MyAgentHooks(),
    )
    return agent
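
For orientation, a minimal sketch of when these agent-level hooks fire, assuming the same agents SDK imported above; the script and prompt string are illustrative, not part of the commit:

import asyncio
from agents import Runner
from core.initialize_agent import assistant_agent

async def main() -> None:
    agent = assistant_agent()
    # on_start prints as the agent begins the turn;
    # on_end prints once it has produced its final output.
    result = await Runner.run(agent, input="Example question about a case")
    print(result.final_output)

asyncio.run(main())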

core/stream_response.py

@@ -1,26 +1,23 @@
-from typing import Any
+from typing import Any, AsyncGenerator
from agents import Agent, Runner
-from agents import RunHooks, RunContextWrapper
+from agents import RunHooks, ModelResponse
from openai.types.responses import ResponseTextDeltaEvent
-class MyHooks(RunHooks):
-    """Custom hooks to track usage after each model request."""
-    async def on_model_response(self, context: RunContextWrapper, agent: Agent, output: Any) -> None:
-        ctx_usage = context.usage
-        if ctx_usage:
-            print(f"\n🪙{agent.name}{ctx_usage.requests} requests")
-            print(f"\n🪙Input tokens: {ctx_usage.input_tokens}")
-            print(f"\n🪙Output tokens: {ctx_usage.output_tokens}")
-            print(f"\n🪙Total tokens: {ctx_usage.total_tokens}")
+class MyRunHooks(RunHooks):
+    """RunHooks for logging the lifecycle of an agent."""
+    async def on_agent_start(self, context, agent) -> None:
+        print(f"\n 🐾[RunHooks] {agent.name} started.\n")
+    async def on_agent_end(self, context, agent, output: Any) -> None:
+        print(f"\n 🚪[RunHooks] {agent.name} ended.\n")
-async def stream_response(agent: Agent, prompt: str):
+async def stream_response(agent: Agent, prompt: str) -> AsyncGenerator[str, None]:
    """Stream agent response and update the UI."""
-    hooks = MyHooks()
    try:
-        result = Runner.run_streamed(agent, input=prompt, hooks=hooks)
+        result = Runner.run_streamed(agent, input=prompt, hooks=MyRunHooks())
        async for event in result.stream_events():
            if event.type == "raw_response_event" and isinstance(event.data, ResponseTextDeltaEvent):
                yield event.data.delta  # <-- sends the next piece of response text
    except Exception as e:
        yield f"⚠️🖨️ Error: {e}"
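
For context, a hedged sketch of how this generator would be consumed from a Chainlit handler; the handler below is illustrative, while stream_token and send are standard cl.Message methods:

import chainlit as cl
from core.initialize_agent import assistant_agent
from core.stream_response import stream_response

@cl.on_message
async def on_message(message: cl.Message) -> None:
    msg = cl.Message(content="")
    # relay each text delta from the agent to the chat UI as it arrives
    async for token in stream_response(assistant_agent(), message.content):
        await msg.stream_token(token)
    await msg.send()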

docker-compose.yml (new file, +13)

@@ -0,0 +1,13 @@
version: "3.8"
services:
  legal-ai-assistant:
    build: .
    ports:
      - "8000:8000"
    volumes:
      - .:/main
    environment:
      - PYTHONPATH=/main
      - PYTHONUNBUFFERED=1
    restart: unless-stopped
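
With this file in place, docker compose up --build builds the image and serves the app on http://localhost:8000. The bind mount .:/main makes host-side edits visible inside the container, and newer Compose releases ignore the top-level version key, so it is informational only.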