interface RunOnDatasetParams {
    client?: Client;
    evaluationConfig?: RunEvalConfig;
    evaluators?: RunEvalType<"criteria" | "labeled_criteria" | "embedding_distance", RunEvaluatorLike | RunEvaluator>[];
    formatEvaluatorInputs?: EvaluatorInputFormatter;
    maxConcurrency?: number;
    projectMetadata?: Record<string, unknown>;
    projectName?: string;
}

Properties

client?: Client

Client instance for LangSmith service interaction.
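A minimal sketch of constructing a default client with the LangSmith SDK (it assumes the standard LANGCHAIN_API_KEY / LANGCHAIN_ENDPOINT environment variables are set, which this page does not state):

import { Client } from "langsmith";

// Reads its API key and endpoint from the environment by default.
const client = new Client();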

evaluationConfig?: RunEvalConfig

Deprecated. Pass the evaluation configuration keys directly on RunOnDatasetParams instead of nesting them here.

evaluators?: RunEvalType<"criteria" | "labeled_criteria" | "embedding_distance", RunEvaluatorLike | RunEvaluator>[]

Evaluators to apply to a dataset run. You can optionally specify these by name, or by configuring them with an EvalConfig object.
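A minimal sketch of both styles (the evaluatorType and criteria fields are assumptions about the EvalConfig object shape, not confirmed by this page):

const evaluators = [
    // By name:
    "criteria",
    // By EvalConfig object (field names are assumed, not confirmed here):
    {
        evaluatorType: "labeled_criteria",
        criteria: "correctness",
    },
];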

formatEvaluatorInputs?: EvaluatorInputFormatter

Converts the evaluation data into the format consumed by the evaluator, most commonly strings. The parameters are the raw input from the run, the raw run output, the raw reference output, and the raw run itself.

// Chain input: { input: "some string" }
// Chain output: { output: "some output" }
// Reference example output format: { output: "some reference output" }
const formatEvaluatorInputs = ({
    rawInput,
    rawPrediction,
    rawReferenceOutput,
}) => {
    return {
        input: rawInput.input,
        prediction: rawPrediction.output,
        reference: rawReferenceOutput.output,
    };
};

Returns the prepared evaluation data.

maxConcurrency?: number

Maximum concurrency level for dataset processing.

projectMetadata?: Record<string, unknown>

Additional metadata for the project.

projectName?: string

Name of the project for logging and tracking.
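Putting the fields together, an end-to-end sketch might look like the following. The runOnDataset call, the dataset name, the project name, and the target function are assumptions for illustration; only the RunOnDatasetParams fields themselves come from this interface.

import { Client } from "langsmith";
import { runOnDataset } from "langchain/smith";

// Placeholder target under evaluation; any chain or function accepted by
// runOnDataset would do.
const target = async (input: { input: string }) => ({
    output: `echo: ${input.input}`,
});

await runOnDataset(target, "my-dataset", {
    client: new Client(),
    evaluators: ["criteria"],
    formatEvaluatorInputs: ({ rawInput, rawPrediction, rawReferenceOutput }) => ({
        input: rawInput.input,
        prediction: rawPrediction.output,
        reference: rawReferenceOutput.output,
    }),
    maxConcurrency: 5,
    projectName: "my-eval-project",
    projectMetadata: { revision: "abc123" },
});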