76 lines
2.3 KiB
Python
76 lines
2.3 KiB
Python
import os
|
|
import chainlit as cl
|
|
|
|
from core.initialize_agent import assistant_agent
|
|
from core.stream_response import stream_response
|
|
|
|
def clear_screen():
    """Clear the host terminal, choosing the command appropriate for the OS."""
    # Windows uses 'cls'; POSIX shells use 'clear'.
    command = "cls" if os.name == "nt" else "clear"
    os.system(command)
|
|
|
|
@cl.set_starters
async def set_starters():
    """Return the clickable starter prompts shown on the chat welcome screen.

    Each starter uses the same text for its label and its message, paired
    with an icon served from the public assets directory.
    """
    prompts = [
        ("What legal data can the agent find?", "/public/icon/magnifying_glass.svg"),
        ("What is the agent not allowed to do or use?", "/public/icon/ban.svg"),
        ("What are the details of your AI model?", "/public/icon/hexagon.svg"),
        ("What data sources does the agent rely on?", "/public/icon/database.svg"),
    ]
    return [
        cl.Starter(label=text, message=text, icon=icon)
        for text, icon in prompts
    ]
|
|
|
|
@cl.set_chat_profiles
async def chat_profile():
    """Expose the selectable model profiles in the chat UI.

    The profile name is the model identifier later passed to
    ``assistant_agent`` when a session starts.
    """
    models = [
        ("qwen3-next:80b-cloud", "Uses **Qwen 3 Next 80B**"),
        ("gpt-oss:20b-cloud", "Uses **GPT-OSS 20B CLOUD**"),
        ("deepseek-v3.1:671b-cloud", "Uses **DEEPSEEK v3.1 671B CLOUD**"),
    ]
    return [
        cl.ChatProfile(name=name, markdown_description=description)
        for name, description in models
    ]
|
|
|
|
@cl.on_chat_start
async def start():
    """Initialize a new chat session.

    Clears the server terminal, resets the message history, and builds an
    agent for the model chosen in the chat-profile selector (falling back
    to the default model when no profile is set).
    """
    clear_screen()

    # Fresh conversation state for this session.
    cl.user_session.set("message_history", [])

    selected = cl.user_session.get("chat_profile")
    if not selected:
        selected = "qwen3-next:80b-cloud"

    cl.user_session.set("agent", assistant_agent(selected))
|
|
|
|
|
|
@cl.on_message
async def main(message: cl.Message):
    """Handle one incoming user message.

    Appends the user's message to the session history, streams the agent's
    reply token-by-token into a Chainlit message, then records the full
    assistant reply back into the history.
    """
    agent = cl.user_session.get("agent")
    message_history = cl.user_session.get("message_history")
    if message_history is None:
        # Defensive: if the session state was lost (or on_chat_start never
        # ran), start a fresh history instead of crashing on None.append.
        message_history = []
        cl.user_session.set("message_history", message_history)

    message_history.append({"role": "user", "content": message.content})

    # Stream the response incrementally so the user sees tokens as they arrive.
    msg = cl.Message(content="")
    async for token in stream_response(agent, message_history):
        await msg.stream_token(token)

    message_history.append({"role": "assistant", "content": msg.content})

    # Finalize the streamed message in the UI.
    await msg.update()