This file contains the logic that handles the LLM call. In a production app, this would likely be a much bigger module with classes, routing, and so on. For now, we'll just have a single function that makes an OpenAI call.
llm.py
```python
from dotenv import load_dotenv
from lmnr import observe
from openai import OpenAI
from schemas import Ticket, TicketCategory, TicketClassification
import os

load_dotenv(override=True)

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))


@observe()
def model_classify_ticket(ticket: Ticket) -> TicketClassification:
    response = client.beta.chat.completions.parse(
        model="gpt-4.1-nano",
        response_format=TicketClassification,
        messages=[
            {
                "role": "system",
                "content": """You are a support ticket classifier.
Classify the ticket that a user sends you""",
            },
            {
                "role": "user",
                "content": f"""Title: {ticket.title}
Description: {ticket.description}
Customer Email: {ticket.customer_email}""",
            },
        ],
    )
    # Fall back to OTHER if the model output could not be parsed into the schema.
    return response.choices[0].message.parsed or TicketClassification(
        category=TicketCategory.OTHER,
        reasoning=response.choices[0].message.content,
    )
```
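The imports of `Ticket`, `TicketCategory`, and `TicketClassification` assume a `schemas.py` defined elsewhere in the project. For reference, here is a minimal sketch consistent with how `llm.py` uses those models; only `OTHER` appears in the code above, so the other enum members are assumptions:

```python
# schemas.py (sketch) -- field names follow how llm.py uses these models;
# the real file may define different or additional categories and fields.
from enum import Enum
from pydantic import BaseModel


class TicketCategory(str, Enum):
    ACCOUNT = "account"      # assumption
    BILLING = "billing"      # assumption
    TECHNICAL = "technical"  # assumption
    OTHER = "other"          # referenced by llm.py as the fallback category


class Ticket(BaseModel):
    title: str
    description: str
    customer_email: str


class TicketClassification(BaseModel):
    category: TicketCategory
    reasoning: str
```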
You can test the endpoint with a request like this:

```bash
curl --location 'localhost:8011/api/v1/tickets/classify' \
--header 'Content-Type: application/json' \
--data-raw '{
    "title": "Can'\''t access my account",
    "description": "I'\''ve been trying to log in for the past hour but keep getting an error message",
    "customer_email": "user@example.com"
}'
```
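The curl request assumes the classify endpoint is already wired up. If you haven't built it yet, a minimal sketch of the FastAPI route might look like this; the file name `main.py` and the exact app setup are assumptions, not the tutorial's code, though the path and port match the request above:

```python
# main.py (sketch) -- wires model_classify_ticket to the route the curl call targets.
from fastapi import FastAPI
from llm import model_classify_ticket
from schemas import Ticket, TicketClassification

app = FastAPI()


@app.post("/api/v1/tickets/classify")
def classify_ticket(ticket: Ticket) -> TicketClassification:
    # Delegates to the observed LLM call, so Laminar traces it via @observe().
    return model_classify_ticket(ticket)
```

Run it with `uvicorn main:app --port 8011` so the port matches the curl command.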