ai-lawyer-agent/core/stream_response.py

from typing import Any

from agents import Agent, Runner, RunHooks, RunContextWrapper
from openai.types.responses import ResponseTextDeltaEvent

class MyHooks(RunHooks):
    """Custom hooks that report token usage after each model response."""

    async def on_model_response(self, context: RunContextWrapper, agent: Agent, output: Any) -> None:
        ctx_usage = context.usage
        if ctx_usage:
            print(f"\n🪙 {agent.name}: {ctx_usage.requests} requests")
            print(f"\n🪙 Input tokens: {ctx_usage.input_tokens}")
            print(f"\n🪙 Output tokens: {ctx_usage.output_tokens}")
            print(f"\n🪙 Total tokens: {ctx_usage.total_tokens}")

async def stream_response(agent: Agent, prompt: str):
    """Stream the agent's response, yielding text deltas for the caller's UI."""
    hooks = MyHooks()
    try:
        result = Runner.run_streamed(agent, input=prompt, hooks=hooks)
        async for event in result.stream_events():
            # Raw response events carry incremental text deltas from the model.
            if event.type == "raw_response_event" and isinstance(event.data, ResponseTextDeltaEvent):
                yield event.data.delta  # next chunk of response text
    except Exception as e:
        yield f"⚠️🖨️ Error: {e}"
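
# --- Usage sketch (illustrative only, not part of the module) ----------------
# A minimal example of how stream_response() might be consumed from an async
# entry point. The agent name and instructions below are hypothetical
# placeholders, not values taken from this repository.
#
# import asyncio
#
# async def main() -> None:
#     agent = Agent(name="Lawyer", instructions="Answer legal questions concisely.")
#     async for chunk in stream_response(agent, "What is a non-disclosure agreement?"):
#         print(chunk, end="", flush=True)
#
# if __name__ == "__main__":
#     asyncio.run(main())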