Documentation

Providers & Reference

Built-in wrappers for popular LLM providers, Langfuse observability, error types, utility functions, and TypeScript type definitions.

LLM Provider Wrappers

Drop-in wrappers that add automatic RAIL scoring to your existing LLM calls. Each wrapper takes your RAIL client instance (`client`) as its first argument, and all wrappers return { response, content, railScore, evaluation }.

OpenAI

import { RAILOpenAI } from '@responsible-ai-labs/rail-score';
import OpenAI from 'openai';

const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
const railOpenAI = new RAILOpenAI(client, openai, {
  thresholds: { safety: 7.0 },
});

const result = await railOpenAI.chat({
  model: "gpt-4o",
  messages: [{ role: "user", content: "Explain quantum computing simply." }],
});

console.log(result.content);            // LLM response text
console.log(result.railScore.score);    // RAIL score
console.log(result.evaluation);         // Full EvalResult

Anthropic

import { RAILAnthropic } from '@responsible-ai-labs/rail-score';
import Anthropic from '@anthropic-ai/sdk';

const anthropic = new Anthropic();
const railAnthropic = new RAILAnthropic(client, anthropic, {
  thresholds: { safety: 7.0 },
});

const result = await railAnthropic.message({
  model: "claude-sonnet-4-6",
  max_tokens: 1024,
  messages: [{ role: "user", content: "Explain quantum computing simply." }],
});

console.log(result.content);
console.log(result.railScore.score);

Google Gemini

import { RAILGemini } from '@responsible-ai-labs/rail-score';
import { GoogleGenerativeAI } from '@google/generative-ai';

const genAI = new GoogleGenerativeAI(process.env.GOOGLE_API_KEY);
const model = genAI.getGenerativeModel({ model: "gemini-2.0-flash" });
const railGemini = new RAILGemini(client, model, {
  thresholds: { safety: 7.0 },
});

const result = await railGemini.generate("Explain quantum computing simply.");
console.log(result.content);
console.log(result.railScore.score);

Observability

Langfuse

import { RAILLangfuse } from '@responsible-ai-labs/rail-score';
import { Langfuse } from 'langfuse';

const langfuse = new Langfuse({ publicKey: "...", secretKey: "..." });
const railLangfuse = new RAILLangfuse(client, langfuse);

// Evaluate content and push scores to a Langfuse trace
const result = await railLangfuse.traceEvaluation("trace-id", "Content to evaluate");

// Push an existing evaluation result to a trace
await railLangfuse.scoreTrace("trace-id", existingResult);

Guardrail Handler

import { RAILGuardrail } from '@responsible-ai-labs/rail-score';

const guardrail = new RAILGuardrail(client, {
  inputThresholds:  { safety: 7.0 },
  outputThresholds: { safety: 7.0, fairness: 7.0 },
});

const preResult = await guardrail.preCall("User message");
if (!preResult.allowed) {
  console.log("Input blocked:", preResult.failedDimensions);
}

const postResult = await guardrail.postCall("LLM response");
if (!postResult.allowed) {
  console.log("Output blocked:", postResult.failedDimensions);
}

Error Handling

import {
  AuthenticationError,
  InsufficientCreditsError,
  InsufficientTierError,
  ValidationError,
  ContentTooLongError,
  SessionExpiredError,
  ContentTooHarmfulError,
  RateLimitError,
  RAILBlockedError
} from '@responsible-ai-labs/rail-score';

try {
  const result = await client.eval({ content: "Content to evaluate" });
} catch (error) {
  if (error instanceof AuthenticationError) {
    console.error("Invalid API key");
  } else if (error instanceof InsufficientCreditsError) {
    console.error(`Need ${error.required} credits, have ${error.balance}`);
  } else if (error instanceof RateLimitError) {
    console.error(`Rate limited. Retry after ${error.retryAfter}s`);
  } else if (error instanceof ContentTooHarmfulError) {
    console.error("Content too harmful to regenerate (avg score < 3.0)");
  } else if (error instanceof SessionExpiredError) {
    console.error("Safe-regenerate session expired (15 min TTL)");
  } else if (error instanceof RAILBlockedError) {
    console.error(`Blocked by policy: ${error.policyMode}`);
  }
}
| Error | Status | When |
| --- | --- | --- |
| AuthenticationError | 401 | Invalid or missing API key |
| InsufficientCreditsError | 402 | Not enough credits |
| InsufficientTierError | 403 | Feature requires higher plan |
| ValidationError | 400 | Invalid parameters |
| ContentTooLongError | 400 | Content exceeds max length |
| SessionExpiredError | 410 | Safe-regenerate session expired |
| ContentTooHarmfulError | 422 | Content avg score < 3.0 |
| RateLimitError | 429 | Rate limit exceeded |
| RAILBlockedError | — | Content blocked by policy engine |

Utility Functions

import {
  getScoreLabel, getScoreColor, getScoreGrade, formatScore,
  formatDimensionName, normalizeDimensionName, resolveFrameworkAlias,
  validateWeights, normalizeWeights, calculateWeightedScore,
  isPassing, getDimensionsBelowThreshold, getLowestScoringDimension,
  getHighestScoringDimension, aggregateScores
} from '@responsible-ai-labs/rail-score';

getScoreLabel(8.5);                      // "Excellent"
getScoreColor(8.5);                      // "green"
getScoreGrade(8.5);                      // "A-"
formatScore(8.567, 2);                   // "8.57"
formatDimensionName("user_impact");      // "User Impact"
normalizeDimensionName("legal_compliance"); // "inclusivity"
resolveFrameworkAlias("ai_act");         // "eu_ai_act"

const weakAreas = getDimensionsBelowThreshold(result, 7.0);
const lowest = getLowestScoringDimension(result);
const stats = aggregateScores([result1, result2, result3]);
console.log(stats.averageScore, stats.minScore, stats.maxScore);

TypeScript Types

import type {
  // Client
  RailScoreConfig,

  // Evaluation
  EvalParams, EvalResult, EvalIssue, DimensionScore,
  Dimension, EvaluationMode, ContentDomain, ScoreLabel,

  // Safe Regeneration
  SafeRegenerateParams, SafeRegenerateResult, SafeRegenerateContinueParams,

  // Compliance
  ComplianceCheckSingleParams, ComplianceCheckMultiParams,
  ComplianceResult, MultiComplianceResult, ComplianceFramework,

  // Session & Policy
  SessionConfig, SessionMetrics, PolicyMode, PolicyConfig, MiddlewareConfig,

  // Observability
  GuardResult, RAILGuardrailConfig,
} from '@responsible-ai-labs/rail-score';

Next Steps