When running browser agent tasks through the Index API, you can receive real-time updates as the agent executes rather than waiting for the entire task to complete. This is particularly useful for:
- Providing immediate feedback to users
- Showing progress indicators for long-running tasks
To receive real-time updates as the browser agent executes:
import { LaminarClient } from '@lmnr-ai/lmnr';

const client = new LaminarClient({
  projectApiKey: "YOUR_PROJECT_API_KEY",
});

/**
 * Runs a browser-agent task in streaming mode and logs each chunk
 * (step, final output, timeout, or error) as it arrives.
 */
async function runStreamingBrowserAgent() {
  const response = await client.agent.run({
    prompt: "Lookup the current weather for New York, " +
      "London, and Tokyo and compare them.",
    stream: true,
    modelProvider: "gemini",
    model: "gemini-2.5-pro-preview-03-25",
  });

  for await (const chunk of response) {
    switch (chunk.chunkType) {
      case 'step':
        // A step-by-step action, reported as it happens
        console.log(`Step: ${chunk.summary}`);
        break;
      case 'finalOutput':
        // The agent's final result
        console.log(`Final output: ${chunk.content.result.content}`);
        break;
      case 'timeout':
        // The last step the agent completed before timing out
        console.log("Final step before timeout:");
        console.log(chunk.summary);
        break;
      case 'error':
        console.log(`Error: ${chunk.error}`);
        break;
    }
  }
}

runStreamingBrowserAgent();
To receive real-time updates as the browser agent executes:
import { LaminarClient } from '@lmnr-ai/lmnr';

const client = new LaminarClient({
  projectApiKey: "YOUR_PROJECT_API_KEY",
});

// One handler per chunk type; chunks with an unrecognized type are ignored,
// matching the original if/else chain's behavior.
const chunkHandlers = {
  step: (chunk) => {
    // A step-by-step action, reported as it happens
    console.log(`Step: ${chunk.summary}`);
  },
  finalOutput: (chunk) => {
    // The agent's final result
    console.log(`Final output: ${chunk.content.result.content}`);
  },
  timeout: (chunk) => {
    // The last step the agent completed before timing out
    console.log("Final step before timeout:");
    console.log(chunk.summary);
  },
  error: (chunk) => {
    console.log(`Error: ${chunk.error}`);
  },
};

/**
 * Runs a browser-agent task in streaming mode, dispatching each
 * incoming chunk to the handler for its type.
 */
async function runStreamingBrowserAgent() {
  const response = await client.agent.run({
    prompt: "Lookup the current weather for New York, " +
      "London, and Tokyo and compare them.",
    stream: true,
    modelProvider: "gemini",
    model: "gemini-2.5-pro-preview-03-25",
  });

  for await (const chunk of response) {
    chunkHandlers[chunk.chunkType]?.(chunk);
  }
}

runStreamingBrowserAgent();
from lmnr import LaminarClient

client = LaminarClient(project_api_key="YOUR_PROJECT_API_KEY")

# Stream the browser agent's execution, handling each chunk as it arrives.
for chunk in client.agent.run(
    prompt="Lookup the current weather for New York, "
    + "London, and Tokyo and compare them.",
    stream=True,
    model_provider="gemini",
    model="gemini-2.5-pro-preview-03-25",
):
    kind = chunk.chunk_type
    if kind == 'step':
        # A step-by-step action, reported as it happens
        print(f"Step: {chunk.summary}")
    elif kind == 'finalOutput':
        # The agent's final result
        print(f"Final output: {chunk.content.result.content}")
    elif kind == 'timeout':
        # The last step the agent completed before timing out
        print("Final step before timeout:")
        print(chunk.summary)
    elif kind == 'error':
        print(f"Error: {chunk.error}")
import asyncio

from lmnr import AsyncLaminarClient


async def run_streaming_agent():
    """Stream a browser-agent run asynchronously, printing each chunk."""
    client = AsyncLaminarClient(project_api_key="YOUR_PROJECT_API_KEY")

    # `run(stream=True)` returns an awaitable that yields an async iterator.
    stream = await client.agent.run(
        prompt="Lookup the current weather for New York, "
        + "London, and Tokyo and compare them.",
        stream=True,
        model_provider="gemini",
        model="gemini-2.5-pro-preview-03-25",
    )

    async for chunk in stream:
        chunk_kind = chunk.chunk_type
        if chunk_kind == 'step':
            # A step-by-step action, reported as it happens
            print(f"Step: {chunk.summary}")
        elif chunk_kind == 'finalOutput':
            # The agent's final result
            print(f"Final output: {chunk.content.result.content}")
        elif chunk_kind == 'timeout':
            # The last step the agent completed before timing out
            print("Final step before timeout:")
            print(chunk.summary)
        elif chunk_kind == 'error':
            print(f"Error: {chunk.error}")


# Run the async function
asyncio.run(run_streaming_agent())