import chainlit as cl

from core.init_agent import assistant_agent
from core.stream_response import stream_response

# Fallback model used when the user has not selected a chat profile.
DEFAULT_MODEL = "qwen3.5:cloud"

# (label, icon-name) pairs rendered as clickable conversation starters.
STARTERS = [
    ("What legal data can the agent find?", "magnifying_glass"),
    ("What is the agent not allowed to do or use?", "ban"),
    ("What are the details of your AI model?", "hexagon"),
    ("What data sources does the agent rely on?", "database"),
]

# (model-name, display-description) pairs exposed as selectable chat profiles.
# The profile name doubles as the model identifier passed to assistant_agent.
PROFILES = [
    ("qwen3.5:cloud", "Qwen 3.5 CLOUD"),
    ("gpt-oss:20b-cloud", "GPT-OSS 20B CLOUD"),
]


@cl.set_starters
async def set_starters():
    """Return the starter prompts shown on the Chainlit welcome screen."""
    return [
        cl.Starter(label=label, message=label, icon=f"/public/icon/{icon}.svg")
        for label, icon in STARTERS
    ]


@cl.set_chat_profiles
async def chat_profile():
    """Return the selectable model profiles for the profile picker."""
    return [
        cl.ChatProfile(name=name, markdown_description=f"Uses **{desc}**")
        for name, desc in PROFILES
    ]


@cl.on_chat_start
async def start():
    """Initialise the per-session agent and an empty message history."""
    # The chosen chat-profile name is used directly as the model name;
    # fall back to the default when no profile was selected.
    model_name = cl.user_session.get("chat_profile") or DEFAULT_MODEL
    cl.user_session.set("agent", assistant_agent(model_name))
    cl.user_session.set("message_history", [])


@cl.on_message
async def main(message: cl.Message):
    """Stream the agent's answer to an incoming user message.

    Appends the user turn to the session history, streams the agent's
    reply token-by-token into a single Chainlit message, and records the
    assistant turn only if the stream produced any content.
    """
    agent = cl.user_session.get("agent")
    history = cl.user_session.get("message_history")
    history.append({"role": "user", "content": message.content})

    msg = cl.Message(content="")
    async for token in stream_response(agent, history):
        await msg.stream_token(token)
    # Finalise the streamed message. Chainlit's streaming API expects
    # send() (not update()) after the last stream_token() call; update()
    # is for editing a message that was already sent.
    await msg.send()

    # Only record an assistant turn if the stream actually produced text,
    # so empty/failed responses don't pollute the history.
    if msg.content:
        history.append({"role": "assistant", "content": msg.content})