import { LaminarClient, Laminar, observe } from "@lmnr-ai/lmnr";
// Client used to submit evaluator scores to the Laminar backend.
const laminarClient = new LaminarClient({
  apiKey: "your-project-api-key",
});

// Trace/span identifiers are only available while a span is open, i.e.
// *inside* the observed function. Capture them within the callback so they
// can be used for scoring after `observe` has completed. (Calling
// `Laminar.getTraceId()` after `observe` resolves returns null and the
// original code always threw here.)
let traceId;
let spanId;

// First, capture your LLM calls
await observe(
  {
    name: "chat_completion",
    input: { messages: [{ role: "user", content: "Hello!" }] },
  },
  () => {
    // Read the active context while this span is still open.
    traceId = Laminar.getTraceId();
    spanId = Laminar.getLaminarSpanContext()?.spanId;

    // Your LLM call here
    return {
      output: { content: "AI response" },
    };
  }
);

// IMPORTANT: Flush data to ensure it reaches the backend
await Laminar.flush();

// Score by trace ID (attaches to root span)
if (!traceId) {
  throw new Error("No active trace found");
}
await laminarClient.evaluators.score({
  name: "quality",
  traceId,
  score: 0.95,
  metadata: { model: "gpt-4" },
});

// Score by span ID
// IMPORTANT: the span must already be recorded server-side before scoring;
// the flush() above plus this short delay gives the backend time to ingest.
await new Promise((resolve) => setTimeout(resolve, 1000));
if (!spanId) {
  throw new Error("No active span found");
}
await laminarClient.evaluators.score({
  name: "relevance",
  spanId,
  score: 0.87,
});