Langfuse JS/TS SDKs
    Preparing search index...

    Function startActiveObservation

    Starts an active observation and executes a function within its context with automatic lifecycle management.

    This function creates an observation, sets it as the active span in the OpenTelemetry context, executes your function with the observation instance, and automatically handles cleanup. It supports all observation types with full TypeScript type inference based on asType.

    • Automatic Context Management: Sets the observation as active in the current execution context
    • Lifecycle Automation: Creates, activates, and ends observations automatically
    • Type Safety: Function parameter is strongly typed based on asType
    • Promise Support: Handles both synchronous and asynchronous functions seamlessly
    • Error Handling: Automatically sets error status and ends observations on exceptions
    • Nested Observations: Child observations created within the function inherit the context
    When to use this function:
    • When you want automatic observation lifecycle management
    • For function-scoped operations where the observation maps to the function's execution
    • When you need the observation to be active for child operations
    • For async operations where manual .end() calls are error-prone

    name — Descriptive name for the observation

    fn — Function to execute within the observation context (receives typed observation instance)

    options — Configuration options including observation type and lifecycle settings

    Returns — The exact return value of the executed function (preserves type and async behavior)

    import { startActiveObservation } from '@langfuse/tracing';

    // Span for general operations (default)
    const result = startActiveObservation('user-checkout', (span) => {
    span.update({ input: { userId: '123', cart: items } });

    // Any child observations created here inherit this span's context
    const validation = processPayment(paymentData);

    span.update({ output: { orderId: 'ord_456', success: true } });
    return validation;
    });

    // Generation for LLM interactions with automatic error handling
    const response = await startActiveObservation(
    'openai-completion',
    async (generation) => {
    generation.update({
    input: { messages: [{ role: 'user', content: 'Explain AI ethics' }] },
    model: 'gpt-4-turbo',
    modelParameters: { temperature: 0.7, maxTokens: 500 }
    });

    try {
    const result = await openai.chat.completions.create({
    model: 'gpt-4-turbo',
    messages: [{ role: 'user', content: 'Explain AI ethics' }],
    temperature: 0.7,
    max_tokens: 500
    });

    generation.update({
    output: result.choices[0].message,
    usageDetails: {
    promptTokens: result.usage?.prompt_tokens,
    completionTokens: result.usage?.completion_tokens,
    totalTokens: result.usage?.total_tokens
    },
    costDetails: { totalCost: 0.002, currency: 'USD' }
    });

    return result.choices[0].message.content;
    } catch (error) {
    generation.update({
    level: 'ERROR',
    statusMessage: error.message,
    output: { error: error.message }
    });
    throw error;
    }
    },
    { asType: 'generation' }
    );

    // Agent workflow with nested tool calls
    const agentResult = await startActiveObservation(
    'research-agent',
    async (agent) => {
    agent.update({
    input: { query: 'Latest climate change research' },
    metadata: { tools: ['web-search', 'arxiv-search'], model: 'gpt-4' }
    });

    // Tool calls inherit the agent context automatically
    const webResults = await startActiveObservation(
    'web-search-tool',
    async (tool) => {
    tool.update({ input: { query: 'climate change 2024' } });
    const results = await searchWeb('climate change 2024');
    tool.update({ output: results });
    return results;
    },
    { asType: 'tool' }
    );

    const analysis = await analyzeResults(webResults);

    agent.update({
    output: { analysis, sources: webResults.length },
    metadata: { processingTime: Date.now() }
    });

    return analysis;
    },
    { asType: 'agent' }
    );

    // RAG Chain with retriever and generation steps
    const answer = await startActiveObservation(
    'rag-qa-chain',
    async (chain) => {
    chain.update({
    input: { question: 'How does photosynthesis work?' },
    metadata: { vectorDb: 'pinecone', model: 'gpt-4' }
    });

    // Retrieval step
    const docs = await startActiveObservation(
    'vector-retrieval',
    async (retriever) => {
    retriever.update({
    input: { query: 'photosynthesis mechanism', topK: 5 },
    metadata: { similarity: 'cosine' }
    });
    const results = await vectorSearch('photosynthesis mechanism');
    retriever.update({ output: { documents: results } });
    return results;
    },
    { asType: 'retriever' }
    );

    // Generation step
    const response = await startActiveObservation(
    'answer-generation',
    async (generation) => {
    const context = docs.map(d => d.content).join('\n');
    generation.update({
    input: { question: 'How does photosynthesis work?', context },
    model: 'gpt-4'
    });

    const answer = await generateAnswer(context);
    generation.update({ output: { answer } });
    return answer;
    },
    { asType: 'generation' }
    );

    chain.update({
    output: { answer: response, sources: docs.length }
    });

    return response;
    },
    { asType: 'chain' }
    );

    // Quality evaluation with automatic metrics
    const evaluation = startActiveObservation(
    'response-evaluator',
    (evaluator) => {
    const response = 'Paris is the capital of France.';
    const reference = 'The capital city of France is Paris.';

    evaluator.update({
    input: { response, reference },
    metadata: { metric: 'semantic-similarity' }
    });

    const score = calculateSimilarity(response, reference);
    const passed = score > 0.8;

    evaluator.update({
    output: { score, passed, grade: passed ? 'excellent' : 'needs_improvement' }
    });

    return { score, passed };
    },
    { asType: 'evaluator' }
    );

    // Content filtering with guardrails
    const safetyCheck = startActiveObservation(
    'content-guardrail',
    (guardrail) => {
    guardrail.update({
    input: { text: userMessage, policies: ['no-profanity', 'no-pii'] },
    metadata: { strictMode: true }
    });

    const violations = checkContent(userMessage);
    const allowed = violations.length === 0;

    guardrail.update({
    output: { allowed, violations, confidence: 0.95 }
    });

    return { allowed, violations };
    },
    { asType: 'guardrail' }
    );

    // Text embedding generation
    const embeddings = await startActiveObservation(
    'text-embeddings',
    async (embedding) => {
    const texts = ['Hello world', 'Machine learning'];
    embedding.update({
    input: { texts },
    model: 'text-embedding-ada-002',
    metadata: { dimensions: 1536 }
    });

    const vectors = await generateEmbeddings(texts);

    embedding.update({
    output: { embeddings: vectors },
    usageDetails: { totalTokens: texts.join(' ').split(' ').length }
    });

    return vectors;
    },
    { asType: 'embedding' }
    );

    // Disable automatic ending (advanced use case)
    const longRunningSpan = await startActiveObservation(
    'background-process',
    async (span) => {
    span.update({ input: { taskId: '123' } });

    // Process continues after function returns
    startBackgroundTask(span);

    return 'process-started';
    },
    { asType: 'span', endOnExit: false } // Manual ending required
    );
    • Type Parameters

      Parameters

      • name: string
      • fn: F
      • options: StartObservationOptions & { endOnExit?: boolean } & {
            asType?: LangfuseObservationType;
        } & { asType: "generation" }
        • Optional endOnExit?: boolean

          Whether to automatically end the observation when exiting the context. Default is true

        • asType: "generation"

      Returns ReturnType<F>

    • Type Parameters

      Parameters

      • name: string
      • fn: F
      • options: StartObservationOptions & { endOnExit?: boolean } & {
            asType?: LangfuseObservationType;
        } & { asType: "embedding" }
        • Optional endOnExit?: boolean

          Whether to automatically end the observation when exiting the context. Default is true

        • asType: "embedding"

      Returns ReturnType<F>

    • Type Parameters

      Parameters

      • name: string
      • fn: F
      • options: StartObservationOptions & { endOnExit?: boolean } & {
            asType?: LangfuseObservationType;
        } & { asType: "agent" }
        • Optional endOnExit?: boolean

          Whether to automatically end the observation when exiting the context. Default is true

        • asType: "agent"

      Returns ReturnType<F>

    • Type Parameters

      Parameters

      • name: string
      • fn: F
      • options: StartObservationOptions & { endOnExit?: boolean } & {
            asType?: LangfuseObservationType;
        } & { asType: "tool" }
        • Optional endOnExit?: boolean

          Whether to automatically end the observation when exiting the context. Default is true

        • asType: "tool"

      Returns ReturnType<F>

    • Type Parameters

      Parameters

      • name: string
      • fn: F
      • options: StartObservationOptions & { endOnExit?: boolean } & {
            asType?: LangfuseObservationType;
        } & { asType: "chain" }
        • Optional endOnExit?: boolean

          Whether to automatically end the observation when exiting the context. Default is true

        • asType: "chain"

      Returns ReturnType<F>

    • Type Parameters

      Parameters

      • name: string
      • fn: F
      • options: StartObservationOptions & { endOnExit?: boolean } & {
            asType?: LangfuseObservationType;
        } & { asType: "retriever" }
        • Optional endOnExit?: boolean

          Whether to automatically end the observation when exiting the context. Default is true

        • asType: "retriever"

      Returns ReturnType<F>

    • Type Parameters

      Parameters

      • name: string
      • fn: F
      • options: StartObservationOptions & { endOnExit?: boolean } & {
            asType?: LangfuseObservationType;
        } & { asType: "evaluator" }
        • Optional endOnExit?: boolean

          Whether to automatically end the observation when exiting the context. Default is true

        • asType: "evaluator"

      Returns ReturnType<F>

    • Type Parameters

      Parameters

      • name: string
      • fn: F
      • options: StartObservationOptions & { endOnExit?: boolean } & {
            asType?: LangfuseObservationType;
        } & { asType: "guardrail" }
        • Optional endOnExit?: boolean

          Whether to automatically end the observation when exiting the context. Default is true

        • asType: "guardrail"

      Returns ReturnType<F>

    • Type Parameters

      Parameters

      • name: string
      • fn: F
      • Optional options: StartObservationOptions & { endOnExit?: boolean } & {
            asType?: LangfuseObservationType;
        } & { asType?: "span" }
        • Optional endOnExit?: boolean

          Whether to automatically end the observation when exiting the context. Default is true

        • Optional asType?: "span"

      Returns ReturnType<F>