LemurSummaryParams: {
    answer_format?: undefined | string;
    context?: undefined | string | {
        [key: string]: unknown;
    };
    final_model?: undefined | LiteralUnion<LemurModel, string>;
    input_text?: undefined | string;
    max_output_size?: undefined | number;
    temperature?: undefined | number;
    transcript_ids?: undefined | string[];
}

Type declaration

  • Optional answer_format?: undefined | string

    How you want the summary to be returned. This can be any text. Examples: "TLDR", "bullet points"

  • Optional context?: undefined | string | {
        [key: string]: unknown;
    }

    Context to provide the model. This can be a string or a free-form JSON value.

  • Optional final_model?: undefined | LiteralUnion<LemurModel, string>

    The model that is used for the final prompt after compression is performed. Defaults to "default".

  • Optional input_text?: undefined | string

    Custom formatted transcript data. Maximum size is the context limit of the selected model, which defaults to 100000. Use either transcript_ids or input_text as input into LeMUR.

  • Optional max_output_size?: undefined | number

    Max output size in tokens, up to 4000

  • Optional temperature?: undefined | number

    The temperature to use for the model. Higher values result in answers that are more creative, lower values are more conservative. Can be any value between 0.0 and 1.0 inclusive.

  • Optional transcript_ids?: undefined | string[]

    A list of completed transcripts with text. Up to a maximum of 100 files or 100 hours, whichever is lower. Use either transcript_ids or input_text as input into LeMUR.

{
"transcript_ids": [
"47b95ba5-8889-44d8-bc80-5de38306e582"
],
"context": "This is an interview about wildfires.",
"final_model": "default",
"temperature": 0,
"max_output_size": 3000
}