Laminar SDKs are available for JavaScript/TypeScript and Python. The Index SDK is the SDK for
calling the Index API. All Index calls made from the SDK can be traced as
described in the Index tracing docs.
SDKs call the streaming API, regardless of the stream parameter in the SDK request.
This is because the non-streaming API has a timeout limitation. SDKs will transform the streaming
response into a non-streaming response.
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', }); console.log(result);};main().then(() => { console.log('Done');});
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', }); console.log(result);};main().then(() => { console.log('Done');});
from lmnr import LaminarClientclient = LaminarClient(project_api_key='lmnr-project-api-key')result = client.agent.run( prompt='Go to www.lmnr.ai and summarize their homepage.')print(result)
# Async example: run the agent and print the final result.
import asyncio

from lmnr import AsyncLaminarClient

client = AsyncLaminarClient(project_api_key='lmnr-project-api-key')

async def main():
    result = await client.agent.run(
        prompt='Go to www.lmnr.ai and summarize their homepage.'
    )
    print(result)

asyncio.run(main())
Whether to stream the response from the agent. This affects the response shape.
Type
Required
Default
boolean
No
false
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', stream: true, }); for await (const chunk of result) { console.log(chunk); }};main().then(() => { console.log('Done');});
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', stream: true, }); for await (const chunk of result) { console.log(chunk); }};main().then(() => { console.log('Done');});
from lmnr import LaminarClientclient = LaminarClient(project_api_key='lmnr-project-api-key')result = client.agent.run( prompt='Go to www.lmnr.ai and summarize their homepage.', stream=True,)for chunk in result: print(chunk)
# Async streaming example: iterate over chunks as the agent produces them.
import asyncio

from lmnr import AsyncLaminarClient

client = AsyncLaminarClient(project_api_key='lmnr-project-api-key')

async def main():
    # Note this await, similar to major LLM SDKs.
    result = await client.agent.run(
        prompt='Go to www.lmnr.ai and summarize their homepage.',
        stream=True,
    )
    async for chunk in result:
        print(chunk)

asyncio.run(main())
The stringified Laminar span context that can be used to place the trace in an existing trace.
Type
Required
string
No
Read more about LaminarSpanContext in our tracing docs.
import { LaminarClient, Laminar } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', // This will serialize the current span context, assuming there is an active // span. parentSpanContext: Laminar.serializeLaminarSpanContext(), }); console.log(result);};main().then(() => { console.log('Done');});
import { LaminarClient, Laminar } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', // This will serialize the current span context, assuming there is an active // span. parentSpanContext: Laminar.serializeLaminarSpanContext(), }); console.log(result);};main().then(() => { console.log('Done');});
from lmnr import LaminarClient, Laminarclient = LaminarClient(project_api_key='lmnr-project-api-key')result = client.agent.run( prompt='Go to www.lmnr.ai and summarize their homepage.', # This will serialize the current span context, assuming there is an active # span. parent_span_context=Laminar.serialize_span_context(),)print(result)
# Async example: attach the agent trace to an existing span context.
import asyncio

from lmnr import AsyncLaminarClient, Laminar

client = AsyncLaminarClient(project_api_key='lmnr-project-api-key')

async def main():
    result = await client.agent.run(
        prompt='Go to www.lmnr.ai and summarize their homepage.',
        # This will serialize the current span context, assuming there is an active
        # span.
        parent_span_context=Laminar.serialize_span_context(),
    )
    print(result)

asyncio.run(main())
It is required to set the modelProvider if you set the model.
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', modelProvider: 'anthropic', model: 'claude-3-7-sonnet-20250219', }); console.log(result);};main().then(() => { console.log('Done');});
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', modelProvider: 'anthropic', model: 'claude-3-7-sonnet-20250219', }); console.log(result);};main().then(() => { console.log('Done');});
from lmnr import LaminarClientclient = LaminarClient(project_api_key='lmnr-project-api-key')result = client.agent.run( prompt='Go to www.lmnr.ai and summarize their homepage.', model_provider='anthropic', model='claude-3-7-sonnet-20250219',)print(result)
# Async example: pin the model provider and model.
import asyncio

from lmnr import AsyncLaminarClient

client = AsyncLaminarClient(project_api_key='lmnr-project-api-key')

async def main():
    result = await client.agent.run(
        prompt='Go to www.lmnr.ai and summarize their homepage.',
        model_provider='anthropic',
        model='claude-3-7-sonnet-20250219',
    )
    print(result)

asyncio.run(main())
It is required to set the modelProvider if you set the model.
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', modelProvider: 'anthropic', model: 'claude-3-7-sonnet-20250219', }); console.log(result);};main().then(() => { console.log('Done');});
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', modelProvider: 'anthropic', model: 'claude-3-7-sonnet-20250219', }); console.log(result);};main().then(() => { console.log('Done');});
from lmnr import LaminarClientclient = LaminarClient(project_api_key='lmnr-project-api-key')result = client.agent.run( prompt='Go to www.lmnr.ai and summarize their homepage.', model_provider='anthropic', model='claude-3-7-sonnet-20250219',)print(result)
# Async example: pin the model provider and model.
import asyncio

from lmnr import AsyncLaminarClient

client = AsyncLaminarClient(project_api_key='lmnr-project-api-key')

async def main():
    result = await client.agent.run(
        prompt='Go to www.lmnr.ai and summarize their homepage.',
        model_provider='anthropic',
        model='claude-3-7-sonnet-20250219',
    )
    print(result)

asyncio.run(main())
The param is passed to the underlying LLM provider. Only used for Anthropic.
Type
Required
Default
boolean
No
false
Gemini models always set this to true.
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', modelProvider: 'anthropic', model: 'claude-3-7-sonnet-20250219', enableThinking: true, }); console.log(result);};main().then(() => { console.log('Done');});
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', modelProvider: 'anthropic', model: 'claude-3-7-sonnet-20250219', enableThinking: true, }); console.log(result);};main().then(() => { console.log('Done');});
from lmnr import LaminarClientclient = LaminarClient(project_api_key='lmnr-project-api-key')result = client.agent.run( prompt='Go to www.lmnr.ai and summarize their homepage.', model_provider='anthropic', model='claude-3-7-sonnet-20250219', enable_thinking=True,)print(result)
# Async example: enable extended thinking (Anthropic only).
import asyncio

from lmnr import AsyncLaminarClient

client = AsyncLaminarClient(project_api_key='lmnr-project-api-key')

async def main():
    result = await client.agent.run(
        prompt='Go to www.lmnr.ai and summarize their homepage.',
        model_provider='anthropic',
        model='claude-3-7-sonnet-20250219',
        enable_thinking=True,
    )
    print(result)

asyncio.run(main())
Timeout in seconds. This is a soft timeout, the agent will continue its current step until completion after the timeout. Also, initialization of the agent is not included in the timeout.
Type
Required
integer
No
In non-streaming mode, agent timing out will throw an error.
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', timeout: 600, // 10 minutes }); console.log(result);};main().then(() => { console.log('Done');});
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', timeout: 600, // 10 minutes }); console.log(result);};main().then(() => { console.log('Done');});
from lmnr import LaminarClientclient = LaminarClient(project_api_key='lmnr-project-api-key')result = client.agent.run( prompt='Go to www.lmnr.ai and summarize their homepage.', timeout=600, # 10 minutes)print(result)
# Async example: set a soft timeout for the run.
import asyncio

from lmnr import AsyncLaminarClient

client = AsyncLaminarClient(project_api_key='lmnr-project-api-key')

async def main():
    result = await client.agent.run(
        prompt='Go to www.lmnr.ai and summarize their homepage.',
        timeout=600,  # 10 minutes
    )
    print(result)

asyncio.run(main())
// Example: cap the number of agent steps.
import { LaminarClient } from '@lmnr-ai/lmnr';

const client = new LaminarClient({
  projectApiKey: 'lmnr-project-api-key',
});

const main = async () => {
  const result = await client.agent.run({
    prompt: 'Go to www.lmnr.ai and summarize their homepage.',
    // camelCase, consistent with the other JS SDK parameters.
    maxSteps: 10,
  });
  console.log(result);
};

main().then(() => {
  console.log('Done');
});
// Example: cap the number of agent steps.
import { LaminarClient } from '@lmnr-ai/lmnr';

const client = new LaminarClient({
  projectApiKey: 'lmnr-project-api-key',
});

const main = async () => {
  const result = await client.agent.run({
    prompt: 'Go to www.lmnr.ai and summarize their homepage.',
    // camelCase, consistent with the other JS SDK parameters.
    maxSteps: 10,
  });
  console.log(result);
};

main().then(() => {
  console.log('Done');
});
from lmnr import LaminarClientclient = LaminarClient(project_api_key='lmnr-project-api-key')result = client.agent.run( prompt='Go to www.lmnr.ai and summarize their homepage.', max_steps=10,)print(result)
# Async example: cap the number of agent steps.
import asyncio

from lmnr import AsyncLaminarClient

client = AsyncLaminarClient(project_api_key='lmnr-project-api-key')

async def main():
    result = await client.agent.run(
        prompt='Go to www.lmnr.ai and summarize their homepage.',
        max_steps=10,
    )
    print(result)

asyncio.run(main())
If you have a running browser with CDP, you can pass the URL to the browser here. By default, Laminar will start and manage its own browser instance.
Type
Required
string
No
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', cdpUrl: 'wss://localhost:9222', }); console.log(result);};main().then(() => { console.log('Done');});
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', cdpUrl: 'wss://localhost:9222', }); console.log(result);};main().then(() => { console.log('Done');});
from lmnr import LaminarClientclient = LaminarClient(project_api_key='lmnr-project-api-key')result = client.agent.run( prompt='Go to www.lmnr.ai and summarize their homepage.', cdp_url='wss://localhost:9222',)print(result)
# Async example: connect to an existing browser over CDP.
import asyncio

from lmnr import AsyncLaminarClient

client = AsyncLaminarClient(project_api_key='lmnr-project-api-key')

async def main():
    result = await client.agent.run(
        prompt='Go to www.lmnr.ai and summarize their homepage.',
        cdp_url='wss://localhost:9222',
    )
    print(result)

asyncio.run(main())
The maximum number of tokens the agent will use for thinking. Passed to the underlying LLM provider.
Currently, there is a heuristic that converts the token budget to a reasoning effort parameter for OpenAI.
Type
Required
integer
No
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', thinkingTokenBudget: 2048, }); console.log(result);};main().then(() => { console.log('Done');});
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', thinkingTokenBudget: 2048, }); console.log(result);};main().then(() => { console.log('Done');});
from lmnr import LaminarClientclient = LaminarClient(project_api_key='lmnr-project-api-key')result = client.agent.run( prompt='Go to www.lmnr.ai and summarize their homepage.', thinking_token_budget=2048,)print(result)
# Async example: limit the thinking token budget.
import asyncio

from lmnr import AsyncLaminarClient

client = AsyncLaminarClient(project_api_key='lmnr-project-api-key')

async def main():
    result = await client.agent.run(
        prompt='Go to www.lmnr.ai and summarize their homepage.',
        thinking_token_budget=2048,
    )
    print(result)

asyncio.run(main())
The stringified agent state as returned by a previous agent run. This is useful for continuing the agent run in a subsequent call.
Type
Required
string
No
Agent state is a very large object.
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', agentState: '...', }); console.log(result);};main().then(() => { console.log('Done');});
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', agentState: '...', }); console.log(result);};main().then(() => { console.log('Done');});
from lmnr import LaminarClientclient = LaminarClient(project_api_key='lmnr-project-api-key')result = client.agent.run( prompt='Go to www.lmnr.ai and summarize their homepage.', agent_state='...',)print(result)
# Async example: continue a previous run from its agent state.
import asyncio

from lmnr import AsyncLaminarClient

client = AsyncLaminarClient(project_api_key='lmnr-project-api-key')

async def main():
    result = await client.agent.run(
        prompt='Go to www.lmnr.ai and summarize their homepage.',
        agent_state='...',
    )
    print(result)

asyncio.run(main())
The stringified browser storage state (auth, cookies, etc.) as returned by a previous agent run. This is useful for continuing the agent run in a subsequent call.
Type
Required
string
No
This may be a very large object.
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', storageState: '...', }); console.log(result);};main().then(() => { console.log('Done');});
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', storageState: '...', }); console.log(result);};main().then(() => { console.log('Done');});
from lmnr import LaminarClientclient = LaminarClient(project_api_key='lmnr-project-api-key')result = client.agent.run( prompt='Go to www.lmnr.ai and summarize their homepage.', storage_state='...',)print(result)
# Async example: reuse browser storage state (auth, cookies, etc.).
import asyncio

from lmnr import AsyncLaminarClient

client = AsyncLaminarClient(project_api_key='lmnr-project-api-key')

async def main():
    result = await client.agent.run(
        prompt='Go to www.lmnr.ai and summarize their homepage.',
        storage_state='...',
    )
    print(result)

asyncio.run(main())
Whether to return a base64 encoded screenshot of the page with each step.
Type
Required
Default
boolean
No
false
This will not have any effect on non-streaming runs.
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', stream: true, returnScreenshots: true, }); for await (const chunk of result) { if (chunk.chunkType === 'step') { if (chunk.screenshot) { console.log("Screenshot in base64: ", chunk.screenshot); } } }};main().then(() => { console.log('Done');});
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', stream: true, returnScreenshots: true, }); for await (const chunk of result) { if (chunk.chunkType === 'step') { if (chunk.screenshot) { console.log("Screenshot in base64: ", chunk.screenshot); } } }};main().then(() => { console.log('Done');});
from lmnr import LaminarClientclient = LaminarClient(project_api_key='lmnr-project-api-key')result = client.agent.run( prompt='Go to www.lmnr.ai and summarize their homepage.', stream=True, return_screenshots=True,)for chunk in result: if chunk.chunk_type == 'step': if chunk.screenshot: print("Screenshot in base64: ", chunk.screenshot)
# Async streaming example: receive a base64 screenshot with each step.
import asyncio

from lmnr import AsyncLaminarClient

client = AsyncLaminarClient(project_api_key='lmnr-project-api-key')

async def main():
    # Note this await, similar to major LLM SDKs.
    result = await client.agent.run(
        prompt='Go to www.lmnr.ai and summarize their homepage.',
        stream=True,
        return_screenshots=True,
    )
    async for chunk in result:
        if chunk.chunk_type == 'step':
            if chunk.screenshot:
                print("Screenshot in base64: ", chunk.screenshot)

asyncio.run(main())
Whether to return the agent state after the run. This is useful for continuing the agent run in a subsequent call.
Type
Required
Default
boolean
No
false
Agent state is a very large object.
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', returnAgentState: true, }); console.log(result);};main().then(() => { console.log('Done');});
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', returnAgentState: true, }); console.log(result);};main().then(() => { console.log('Done');});
# Sync example: ask for the agent state back after the run.
from lmnr import LaminarClient

client = LaminarClient(project_api_key='lmnr-project-api-key')

result = client.agent.run(
    prompt='Go to www.lmnr.ai and summarize their homepage.',
    # snake_case, consistent with the other Python SDK parameters.
    return_agent_state=True,
)
print(result)
# Async example: ask for the agent state back after the run.
import asyncio

from lmnr import AsyncLaminarClient

client = AsyncLaminarClient(project_api_key='lmnr-project-api-key')

async def main():
    result = await client.agent.run(
        prompt='Go to www.lmnr.ai and summarize their homepage.',
        return_agent_state=True,
    )
    print(result)

asyncio.run(main())
Whether to return the browser storage state (auth, cookies, etc.) after the run. This is useful for continuing the agent run in a subsequent call.
Type
Required
Default
boolean
No
false
Storage state may be a very large object.
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', returnStorageState: true, }); console.log(result);};main().then(() => { console.log('Done');});
import { LaminarClient } from '@lmnr-ai/lmnr';const client = new LaminarClient({ projectApiKey: 'lmnr-project-api-key',});const main = async () => { const result = await client.agent.run({ prompt: 'Go to www.lmnr.ai and summarize their homepage.', returnStorageState: true, }); console.log(result);};main().then(() => { console.log('Done');});
from lmnr import LaminarClientclient = LaminarClient(project_api_key='lmnr-project-api-key')result = client.agent.run( prompt='Go to www.lmnr.ai and summarize their homepage.', return_storage_state=True,)print(result)
# Async example: ask for the browser storage state back after the run.
import asyncio

from lmnr import AsyncLaminarClient

client = AsyncLaminarClient(project_api_key='lmnr-project-api-key')

async def main():
    result = await client.agent.run(
        prompt='Go to www.lmnr.ai and summarize their homepage.',
        return_storage_state=True,
    )
    print(result)

asyncio.run(main())