import { LaminarClient, Laminar } from "@lmnr-ai/laminar";
// Initialize the Laminar client used by all calls below.
// SECURITY: do not hard-code project API keys in source. Read the key from
// the environment; the string literal is only a placeholder fallback so the
// example still runs as written.
const laminarClient = new LaminarClient({
  apiKey: process.env.LMNR_PROJECT_API_KEY ?? "your-project-api-key",
});
// First, capture your LLM calls.
// Fix: the original `messages: [...]` is a SyntaxError in JavaScript (spread
// with no operand) — use a concrete example message list instead.
// NOTE(review): assumes `observe` records an input/output span on the client —
// confirm against the @lmnr-ai/laminar API docs.
await laminarClient.observe({
  name: "chat_completion",
  input: { messages: [{ role: "user", content: "Hello" }] },
  output: { content: "AI response" },
});

// IMPORTANT: Flush data to ensure it reaches the backend
await laminarClient.flush();
// Score by trace ID (attaches the score to the trace's root span).
const activeTraceId = Laminar.getTraceId();

// Fail fast when no trace is active — scoring requires a trace to attach to.
if (!activeTraceId) {
  throw new Error("No active trace found");
}

// Build the score payload separately for readability, then submit it.
const qualityScore = {
  name: "quality",
  traceId: activeTraceId,
  score: 0.95,
  metadata: { model: "gpt-4" },
};
await laminarClient.evaluators.score(qualityScore);
// Score by span ID.
// IMPORTANT: this must run after the span has already been recorded.
// In production, schedule this later or keep a delay so the span has time to
// reach the backend before the score references it.
const SPAN_EXPORT_DELAY_MS = 1000;
await new Promise((resolve) => {
  setTimeout(resolve, SPAN_EXPORT_DELAY_MS);
});

const ctx = Laminar.getLaminarSpanContext();
if (!ctx) {
  throw new Error("No active span found");
}

await laminarClient.evaluators.score({
  name: "relevance",
  spanId: ctx.spanId,
  score: 0.87,
});