Parameter: the function to wrap with observability (the original signature is preserved)
Parameter: configuration for observation behavior and capture settings
Returns: an instrumented version of the function with identical behavior plus tracing
import { observe } from '@langfuse/tracing';
// Basic function wrapping with automatic I/O capture
const processOrder = observe(
  async (orderId: string, items: CartItem[]) => {
    const validation = await validateOrder(orderId, items);
    const payment = await processPayment(validation);
    const shipping = await scheduleShipping(payment);
    return { orderId, status: 'confirmed', trackingId: shipping.id };
  },
  {
    name: 'process-order',
    asType: 'span',
    captureInput: true,
    captureOutput: true
  }
);
// LLM function with generation tracking
const generateSummary = observe(
  async (document: string, maxWords: number = 100) => {
    // Build the prompt separately so the request call stays readable.
    const messages = [
      { role: 'system', content: `Summarize in ${maxWords} words or less` },
      { role: 'user', content: document }
    ];
    const completion = await openai.chat.completions.create({
      model: 'gpt-4-turbo',
      messages,
      // Rough token budget: ~2 tokens per requested word.
      max_tokens: maxWords * 2
    });
    return completion.choices[0].message.content;
  },
  {
    name: 'document-summarizer',
    asType: 'generation',
    captureInput: true,
    captureOutput: true
  }
);
// Database query with automatic error tracking
const fetchUserProfile = observe(
  async (userId: string) => {
    const user = await db.users.findUnique({ where: { id: userId } });
    // Guard clause: a missing user is reported as a traced error.
    if (user == null) {
      throw new Error(`User ${userId} not found`);
    }
    const preferences = await db.preferences.findMany({ where: { userId } });
    return { ...user, preferences };
  },
  {
    name: 'fetch-user-profile',
    asType: 'span',
    captureInput: false, // Don't capture sensitive user IDs
    captureOutput: true
  }
);
// Vector search with retriever semantics
const searchDocuments = observe(
  async (query: string, topK: number = 5) => {
    const queryVector = await embedText(query);
    const hits = await vectorDb.search(queryVector, topK);
    // Project raw hits down to the fields callers actually need.
    return hits.map((hit) => {
      const { content, source } = hit.metadata;
      return { content, score: hit.score, source };
    });
  },
  {
    name: 'document-search',
    asType: 'retriever',
    captureInput: true,
    captureOutput: true
  }
);
// Quality evaluation function
const evaluateResponse = observe(
  (response: string, reference: string, metric: string = 'similarity') => {
    // Dispatch on the requested metric; unknown metrics are an error.
    let score: number;
    if (metric === 'similarity') {
      score = calculateCosineSimilarity(response, reference);
    } else if (metric === 'bleu') {
      score = calculateBleuScore(response, reference);
    } else {
      throw new Error(`Unknown metric: ${metric}`);
    }
    const grade =
      score > 0.9 ? 'excellent' : score > 0.7 ? 'good' : 'needs_improvement';
    return {
      score,
      passed: score > 0.8,
      metric,
      grade
    };
  },
  {
    name: 'response-evaluator',
    asType: 'evaluator',
    captureInput: true,
    captureOutput: true
  }
);
// Content moderation with guardrails
const moderateContent = observe(
  async (text: string, policies: string[] = ['profanity', 'spam']) => {
    // Policy checks are independent of each other, so run them in
    // parallel instead of awaiting one per loop iteration.
    const checks = await Promise.all(
      policies.map(async (policy) => ({
        policy,
        result: await checkPolicy(text, policy)
      }))
    );
    // Explicit element type: an untyped `[]` would infer `any[]` and fail
    // under `noImplicitAny`. severity's shape comes from checkPolicy —
    // assumed string-like; verify against its declaration.
    const violations: { policy: string; severity: string }[] = [];
    for (const { policy, result } of checks) {
      if (result.violation) {
        violations.push({ policy, severity: result.severity });
      }
    }
    return {
      allowed: violations.length === 0,
      violations,
      confidence: 0.95
    };
  },
  {
    name: 'content-moderator',
    asType: 'guardrail',
    captureInput: true,
    captureOutput: true
  }
);
// AI agent function with tool usage
const researchAgent = observe(
  async (query: string, maxSources: number = 3) => {
    // Over-fetch candidates, then keep only high-confidence sources.
    const candidates = await searchDocuments(query, maxSources * 2);
    const selected = candidates
      .filter((doc) => doc.score > 0.7)
      .slice(0, maxSources);
    // Synthesize an answer grounded in the selected context.
    const combinedContext = selected.map((doc) => doc.content).join('\n\n');
    const answer = await generateSummary(
      `Based on: ${combinedContext}\n\nQuestion: ${query}`,
      200
    );
    return {
      answer,
      sources: selected.map((doc) => doc.source),
      // Overall confidence is bounded by the weakest source.
      confidence: Math.min(...selected.map((doc) => doc.score))
    };
  },
  {
    name: 'research-agent',
    asType: 'agent',
    captureInput: true,
    captureOutput: true
  }
);
// Class method decoration
class UserService {
  private readonly db: Database;
  constructor(database: Database) {
    this.db = database;
    // Bind the prototype method to this instance, wrap it with tracing,
    // and shadow the prototype method with the instrumented version.
    const tracedCreateUser = observe(this.createUser.bind(this), {
      name: 'create-user',
      asType: 'span',
      captureInput: false, // Sensitive data
      captureOutput: true
    });
    this.createUser = tracedCreateUser;
  }
  async createUser(userData: UserData) {
    // Implementation automatically traced
    return await this.db.users.create(userData);
  }
}
// Chain composition - functions remain composable
const processDocument = observe(
  async (document: string) => {
    // Summarize, then gate the summary through moderation and scoring.
    const summary = await generateSummary(document, 150);
    const moderation = await moderateContent(summary);
    const evaluation = evaluateResponse(summary, document, 'similarity');
    const isSafe = moderation.allowed;
    return {
      summary: isSafe ? summary : '[Content Filtered]',
      safe: isSafe,
      quality: evaluation.score
    };
  },
  {
    name: 'document-processor',
    asType: 'chain',
    captureInput: true,
    captureOutput: true
  }
);
// Usage - functions work exactly as before, just with observability
// NOTE(review): `cartItems` and `documentText` are assumed to be defined
// elsewhere in the surrounding module — not visible in this excerpt.
const order = await processOrder('ord_123', cartItems);
const profile = await fetchUserProfile('user_456');
const research = await researchAgent('What is quantum computing?');
const processed = await processDocument(documentText);
Decorator function that automatically wraps any function with Langfuse observability.
This higher-order function creates a traced version of your function that automatically handles observation lifecycle, input/output capture, and error tracking. It's perfect for instrumenting existing functions without modifying their internal logic.
Key Features
Use Cases