/**
 * Configuration for building an evaluator to run against logged runs.
 * Only `evaluatorType` and `formatEvaluatorInputs` are required; the
 * remaining fields apply to specific evaluator kinds.
 */
interface EvalConfig {
    /** A list of tools available to the agent, for TrajectoryEvalChain. */
    agentTools?: StructuredToolInterface<ZodObjectAny>[];
    /**
     * Extra options forwarded to the underlying eval chain. `llm` is omitted
     * from this type, so the model is supplied via the `llm` field instead.
     */
    chainOptions?: Partial<Omit<LLMEvalChainInput<EvalOutputType, BaseLanguageModelInterface<any, BaseLanguageModelCallOptions>>, "llm">>;
    /** The criteria to use for the evaluator. */
    criteria?: CriteriaLike;
    /** The distance metric to use for comparing the embeddings. */
    distanceMetric?: EmbeddingDistanceType;
    /** The embeddings object used to vectorize the outputs. */
    embedding?: EmbeddingsInterface;
    /** The name of the evaluator to use, e.g. `labeled_criteria`, `criteria`. */
    evaluatorType: keyof EvaluatorType;
    /**
     * The feedback (or metric) name to use for the logged evaluation results.
     * If none provided, defaults to the evaluationName.
     */
    feedbackKey?: string;
    /**
     * Converts the raw run input, raw output, raw reference output, and raw
     * run into inputs usable by the evaluator — most commonly strings.
     */
    formatEvaluatorInputs: EvaluatorInputFormatter;
    /**
     * Language model for LLM-based evaluators; provided here rather than
     * through `chainOptions` (which omits `llm`).
     */
    llm?: BaseLanguageModelInterface<any, BaseLanguageModelCallOptions>;
}

Hierarchy

Properties

agentTools?: StructuredToolInterface<ZodObjectAny>[]

A list of tools available to the agent, for TrajectoryEvalChain.

chainOptions?: Partial<Omit<LLMEvalChainInput<EvalOutputType, BaseLanguageModelInterface<any, BaseLanguageModelCallOptions>>, "llm">>
criteria?: CriteriaLike

The criteria to use for the evaluator.

distanceMetric?: EmbeddingDistanceType

The distance metric to use for comparing the embeddings.

embedding?: EmbeddingsInterface

The embeddings object used to vectorize the outputs.

evaluatorType: keyof EvaluatorType

The name of the evaluator to use. Example: labeled_criteria, criteria, etc.

feedbackKey?: string

The feedback (or metric) name to use for the logged evaluation results. If none provided, we default to the evaluationName.

formatEvaluatorInputs: EvaluatorInputFormatter

Convert the evaluation data into formats that can be used by the evaluator. The result should most commonly be a string. The parameters are the raw input from the run, the raw output, the raw reference output, and the raw run itself.

// Chain input: { input: "some string" }
// Chain output: { output: "some output" }
// Reference example output format: { output: "some reference output" }
//
// Maps the raw run payloads onto the { input, prediction, reference }
// keys the evaluator expects. The inline parameter type documents the
// expected payload shapes and keeps the example valid under noImplicitAny.
const formatEvaluatorInputs = ({
  rawInput,
  rawPrediction,
  rawReferenceOutput,
}: {
  rawInput: { input: string };
  rawPrediction: { output: string };
  rawReferenceOutput: { output: string };
}) => {
  return {
    input: rawInput.input,
    prediction: rawPrediction.output,
    reference: rawReferenceOutput.output,
  };
};

Returns the prepared data for the evaluator.

llm?: BaseLanguageModelInterface<any, BaseLanguageModelCallOptions>