// https://platform.openai.com/docs/models
export type OpenAIChatModelId =
  | 'gpt-4-turbo'
  | 'gpt-4-turbo-2024-04-09'
  | 'gpt-4-turbo-preview'
  | 'gpt-4-0125-preview'
  | 'gpt-4-1106-preview'
  | 'gpt-4-vision-preview'
  | 'gpt-4'
  | 'gpt-4-0613'
  | 'gpt-4-32k'
  | 'gpt-4-32k-0613'
  | 'gpt-3.5-turbo-0125'
  | 'gpt-3.5-turbo'
  | 'gpt-3.5-turbo-1106'
  | 'gpt-3.5-turbo-16k'
  | 'gpt-3.5-turbo-0613'
  | 'gpt-3.5-turbo-16k-0613'
  | (string & {});

export interface OpenAIChatSettings {
  /**
   * Modify the likelihood of specified tokens appearing in the completion.
   *
   * Accepts a JSON object that maps tokens (specified by their token ID in
   * the GPT tokenizer) to an associated bias value from -100 to 100. You
   * can use this tokenizer tool to convert text to token IDs. Mathematically,
   * the bias is added to the logits generated by the model prior to sampling.
   * The exact effect will vary per model, but values between -1 and 1 should
   * decrease or increase likelihood of selection; values like -100 or 100
   * should result in a ban or exclusive selection of the relevant token.
   *
   * As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
   * token from being generated.
   */
  logitBias?: Record<number, number>;

  /**
   * A unique identifier representing your end-user, which can help OpenAI to
   * monitor and detect abuse. Learn more.
   */
  user?: string;
}
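
// Illustrative usage sketch (not part of the original file): the values below
// mirror the JSDoc above, strongly discouraging the <|endoftext|> token
// (id 50256) via logitBias and tagging requests with an end-user identifier.
// The variable names are examples only, not an SDK API.
const exampleModelId: OpenAIChatModelId = 'gpt-4-turbo';

const exampleSettings: OpenAIChatSettings = {
  logitBias: { 50256: -100 }, // -100 effectively bans token 50256
  user: 'user-1234',          // opaque end-user id for abuse monitoring
};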