15.3.2026 change configuration

This commit is contained in:
G0DSEND016 2026-03-15 20:11:01 +01:00
parent 4197ba38ae
commit 7ded74b65e
12 changed files with 54 additions and 193 deletions

View File

@ -13,4 +13,6 @@ README.md
docker-compose.yml
Dockerfile
.dockerignore
.chainlit/translations/*
!.chainlit/translations/en-US.json
.files/

5
.gitignore vendored
View File

@ -181,5 +181,10 @@ fabric.properties
# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser
# Chainlit correction
.chainlit/translations/*
!.chainlit/translations/en-US.json
.files/
# idea folder, uncomment if you don't need it
.idea

109
README.md
View File

@ -1,109 +0,0 @@
# Právny AI Asistent integrácia s API
## POPIS
Právny AI Asistent je inteligentný asistent integrovaný s oficiálnymi verejnými API Ministerstva spravodlivosti
Slovenskej republiky. Systém poskytuje užívateľom prístup k štruktúrovaným právnym informáciám prostredníctvom
prirodzeného jazyka.
![PROFILE](public/img/project.png)
**Hlavné funkcie:**
- Interpretácia užívateľských otázok v prirodzenom jazyku
- Automatická extrakcia a validácia parametrov
- Načítavanie dát výlučne cez oficiálne API
- Prezentácia výsledkov v zrozumiteľnom formáte v slovenskom jazyku
## POUŽITÉ KNIŽNICE A TECHNOLÓGIE
- **Python** hlavný programovací jazyk
- **openai-agent** tvorba AI agentov od OpenAI
- **httpx** asynchrónna komunikácia s API
- **Chainlit** framework pre chat rozhranie
- **Ollama** lokálne používanie AI modelov
- **Docker** kontajnerizácia aplikácie
- a **iné**
## ŠTRUKTÚRA PROJEKTU
```
ai-lawyer-agent/
├── api/ # API logika
│ └── tools/ # API nástroje
│ ├── __init__.py
│ ├── fetch_api_data.py # Spracovanie API
│ └── schemas.py # Pydantic schémy
├── core/
│ ├── __init__.py
│ ├── initialize_agent.py # Inicializácia AI agenta
│ ├── stream_response.py # Streamovanie odpovedí
│ └── system_prompt.py # Systémový prompt
├── public/ # Verejné zdroje
│ ├── icon/ # Ikony a logá
│ │ ├── favicon.png
│ │ ├── logo_dark.png
│ │ └── logo_light.png
│ ├── img/
│ ├── styles.css # Štýly rozhrania
│ └── theme.json # Nastavenia témy
├── .chainlit/ # Markdown pre Chainlit
└── app.py # Hlavný súbor aplikácie (Chainlit + spracovanie API)
```
## INŠTRUKCIA PRE INŠTALÁCIU
### **Inštalácia cez Git**
### 1. **Naklonovanie repozitára**
```bash
git clone git@git.kemt.fei.tuke.sk:od059jr/ai-lawyer-agent.git
```
### 2. **Vytvorenie virtuálneho prostredia**
#### Pre Windows
```bash
python -m venv venv
venv\Scripts\activate
```
#### Pre macOS/Linux
```bash
python3 -m venv venv
source venv/bin/activate
```
### 3. **Inštalácia závislostí**
```bash
pip install -r requirements.txt
```
### 4. **Sťahovanie inštalátora Ollama**
https://ollama.com/download
### 5. **Sťahovanie modelov**
```bash
ollama pull qwen3-next:80b-cloud
```
```bash
ollama pull gpt-oss:20b-cloud
```
```bash
ollama pull deepseek-v3.1:671b-cloud
```
### **Inštalácia cez DOCKER**
### 1. Inštalácia Docker a Docker Desktop
### 2. Nainštalujte a spustite aplikáciu
https://hub.docker.com/r/alexgott0616/ai-lawyer-agent

View File

@ -1,6 +1,7 @@
import httpx
import logging
from pydantic import BaseModel
from tenacity import retry, stop_after_attempt, wait_exponential
logging.basicConfig(
level=logging.INFO,
@ -20,11 +21,12 @@ def docstring_from_model(model: type[BaseModel]):
return func
return decorator
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=5))
async def fetch_api_data(icon: str, url: str, params: dict, remove_keys: list = None) -> dict:
try:
logger.info(f"\n🔨{icon}Input parameters: {params}\n")
async with httpx.AsyncClient() as client:
response = await client.get(url, params=params, timeout=10.0)
async with httpx.AsyncClient(timeout=httpx.Timeout(10.0)) as client:
response = await client.get(url, params=params)
response.raise_for_status()
logger.debug(f"\n🖇️{icon}Request URL: {response.url}\n")
@ -32,14 +34,12 @@ async def fetch_api_data(icon: str, url: str, params: dict, remove_keys: list =
if remove_keys and isinstance(data, dict):
for key in remove_keys:
data.pop(key, None)
logger.info(f"\n🚮{icon}Successfully removed key: {key}\n")
logger.info(f"\n{icon}Successfully fetched data from {url}\n")
logger.info(f"\n🚮{icon}Successfully removed key: {key}\n")
return data
except httpx.HTTPStatusError as e:
logger.error(f"\n{icon}HTTP error: {e.response.status_code} - {e.response.text}\n")
return {"error": "http_error", "status_code": e.response.status_code, "detail": e.response.text}
return {"error": "http_error", "status_code": e.response.status_code}
except httpx.RequestError as e:
logger.error(f"\n{icon}Request error: {str(e)}\n")
return {"error": "request_error", "status_code": str(e)}

74
app.py
View File

@ -1,76 +1,54 @@
import os
import chainlit as cl
from core.initialize_agent import assistant_agent
from core.init_agent import assistant_agent
from core.stream_response import stream_response
def clear_screen():
os.system('cls' if os.name == 'nt' else 'clear')
DEFAULT_MODEL = "qwen3.5:cloud"
STARTERS = [
("What legal data can the agent find?","magnifying_glass"),
("What is the agent not allowed to do or use?","ban"),
("What are the details of your AI model?","hexagon"),
("What data sources does the agent rely on?","database"),
]
PROFILES = [
("qwen3.5:cloud","Qwen 3.5 CLOUD"),
("gpt-oss:20b-cloud","GPT-OSS 20B CLOUD"),
]
@cl.set_starters
async def set_starters():
return [
cl.Starter(
label="What legal data can the agent find?",
message="What legal data can the agent find?",
icon="/public/icon/magnifying_glass.svg",
),
cl.Starter(
label="What is the agent not allowed to do or use?",
message="What is the agent not allowed to do or use?",
icon="/public/icon/ban.svg",
),
cl.Starter(
label="What are the details of your AI model?",
message="What are the details of your AI model?",
icon="/public/icon/hexagon.svg",
),
cl.Starter(
label="What data sources does the agent rely on?",
message="What data sources does the agent rely on?",
icon="/public/icon/database.svg",
)
cl.Starter(label=label, message=label, icon=f"/public/icon/{icon}.svg")
for label, icon in STARTERS
]
@cl.set_chat_profiles
async def chat_profile():
return [
cl.ChatProfile(
name="qwen3-next:80b-cloud",
markdown_description="Uses **Qwen 3 Next 80B**",
),
cl.ChatProfile(
name="gpt-oss:20b-cloud",
markdown_description="Uses **GPT-OSS 20B CLOUD**",
),
cl.ChatProfile(
name="deepseek-v3.1:671b-cloud",
markdown_description="Uses **DEEPSEEK v3.1 671B CLOUD**",
),
cl.ChatProfile(name=name, markdown_description=f"Uses **{desc}**")
for name, desc in PROFILES
]
@cl.on_chat_start
async def start():
clear_screen()
model_name = cl.user_session.get("chat_profile") or DEFAULT_MODEL
cl.user_session.set("agent", assistant_agent(model_name))
cl.user_session.set("message_history", [])
profile = cl.user_session.get("chat_profile")
model_name = profile or "qwen3-next:80b-cloud"
agent = assistant_agent(model_name)
cl.user_session.set("agent", agent)
@cl.on_message
async def main(message: cl.Message):
agent = cl.user_session.get("agent")
message_history = cl.user_session.get("message_history")
history = cl.user_session.get("message_history")
message_history.append({"role": "user", "content": message.content})
history.append({"role": "user", "content": message.content})
msg = cl.Message(content="")
async for token in stream_response(agent, message_history):
async for token in stream_response(agent, history):
await msg.stream_token(token)
await msg.update()
message_history.append({"role": "assistant", "content": msg.content})
await msg.update()
if msg.content:
history.append({"role": "assistant", "content": msg.content})

View File

@ -1,10 +1,9 @@
import os
from agents import Agent, AgentHooks
from agents import OpenAIChatCompletionsModel, AsyncOpenAI, ModelSettings
from agents import set_tracing_disabled, enable_verbose_stdout_logging
from agents import set_tracing_disabled
from core.system_prompt import get_system_prompt
from api.tools.judges import JudgesAPI
from api.tools.coutrs import CourtsAPI
from api.tools.contracts import ContractsAPI
@ -12,18 +11,16 @@ from api.tools.decisions import DecisionsAPI
from api.tools.admin_proceedings import AdminProceedingsAPI
from api.tools.civil_proceedings import CivilProceedingsAPI
ollama_host = os.getenv("OLLAMA_HOST", "host.docker.internal:11434")
set_tracing_disabled(True)
enable_verbose_stdout_logging()
class BaseAgent(Agent[None]): pass
class MyAgentHooks(AgentHooks):
async def on_start(self, context, agent): print(f"\n🏃‍♂️‍➡️[AgentHooks] {agent.name} started.")
async def on_end(self, context, agent, output): print(f"\n🏁[AgentHooks] {agent.name} ended.")
async def on_start(self, context, agent):
print(f"\n🏃‍♂️‍➡️ [AgentHooks] {agent.name} started.")
def assistant_agent(model_name: str = "qwen3-next:80b-cloud") -> BaseAgent:
async def on_end(self, context, agent, output):
print(f"🏁 [AgentHooks] {agent.name} ended.")
def assistant_agent(model_name: str = "qwen3.5:cloud") -> Agent:
judge_api = JudgesAPI()
court_api = CourtsAPI()
@ -32,19 +29,14 @@ def assistant_agent(model_name: str = "qwen3-next:80b-cloud") -> BaseAgent:
admin_proceedings_api = AdminProceedingsAPI()
civil_proceedings_api = CivilProceedingsAPI()
client = AsyncOpenAI(base_url=f"http://{ollama_host}/v1", api_key="ollama")
client = AsyncOpenAI(base_url=f"http://localhost:11434/v1", api_key="ollama", timeout=120.0, max_retries=0)
model = OpenAIChatCompletionsModel(model=model_name, openai_client=client)
agent = BaseAgent(
agent = Agent(
name="Assistant",
instructions=get_system_prompt(model_name),
model=model,
model_settings=ModelSettings(
temperature=0.7,
tool_choice="auto",
parallel_tool_calls=False,
),
model_settings=ModelSettings(temperature=0.7, tool_choice="auto", parallel_tool_calls=False),
tools=[
judge_api.judge, judge_api.judge_id, judge_api.judge_autocomplete,
court_api.court, court_api.court_id, court_api.court_autocomplete,

View File

@ -1,23 +1,13 @@
from typing import Any, AsyncGenerator
from typing import AsyncGenerator
from agents import Agent, Runner
from agents import RunHooks, ModelResponse
from openai.types.responses import ResponseTextDeltaEvent
class MyRunHooks(RunHooks):
"""RunHooks for logging the lifecycle of an agent."""
async def on_agent_start(self, context, agent) -> None:
print(f"\n 🐾[RunHooks] {agent.name} started.\n")
async def on_agent_end(self, context, agent, output: Any) -> None:
print(f"\n 🚪[RunHooks] {agent.name} ended.\n")
async def stream_response(agent: Agent, prompt: str) -> AsyncGenerator[str, None]:
"""Stream agent response and update the UI."""
try:
result = Runner.run_streamed(agent, input=prompt, hooks=MyRunHooks())
result = Runner.run_streamed(agent, input=prompt)
async for event in result.stream_events():
if event.type == "raw_response_event" and isinstance(event.data, ResponseTextDeltaEvent):
yield event.data.delta # <-- sends the next piece of response text
except Exception as e:
yield f"⚠️🖨️ Error: {e}"

View File

@ -10,4 +10,7 @@ services:
environment:
- PYTHONPATH=/main
- PYTHONUNBUFFERED=1
- OLLAMA_HOST=ollama:11434
depends_on:
- ollama
restart: unless-stopped

Binary file not shown.

Before

Width:  |  Height:  |  Size: 97 KiB

Binary file not shown.

Binary file not shown.