mirror of https://github.com/ikechan8370/chatgpt-plugin.git
synced 2025-12-17 05:47:11 +00:00

fix: 2.7 dev start

parent fbe8953667
commit 4a4dceec18

7 changed files with 5459 additions and 6 deletions

README.md

@@ -1,5 +1,5 @@
-<div align=center> <h1>ChatGPT plugin for the Yunzai QQ bot</h1> </div>
+<div align=center> <h1>ChatGPT plugin for the Yunzai QQ bot (development branch, do not use)</h1> </div>
 
 <div align=center>
 
 <img src ="https://img.shields.io/github/issues/ikechan8370/chatgpt-plugin?logo=github"/>

package.json (13 changed lines)

@@ -7,11 +7,12 @@
     "@fastify/cors": "^8.2.0",
     "@fastify/static": "^6.9.0",
     "@slack/bolt": "^3.13.0",
-    "@waylaidwanderer/chatgpt-api": "^1.33.2",
+    "@waylaidwanderer/chatgpt-api": "^1.36.0",
     "asn1.js": "^5.0.0",
-    "chatgpt": "^5.1.1",
+    "chatgpt": "^5.2.4",
     "delay": "^5.0.0",
     "diff": "^5.1.0",
+    "emoji-strip": "^1.0.1",
     "eventsource": "^2.0.2",
     "eventsource-parser": "^1.0.0",
     "fastify": "^4.13.0",
@@ -20,13 +21,13 @@
     "keyv": "^4.5.2",
     "keyv-file": "^0.2.0",
     "microsoft-cognitiveservices-speech-sdk": "^1.27.0",
-    "emoji-strip": "^1.0.1",
     "node-fetch": "^3.3.1",
     "openai": "^3.2.1",
     "random": "^4.1.0",
     "undici": "^5.21.0",
     "uuid": "^9.0.0",
-    "ws": "^8.13.0"
+    "ws": "^8.13.0",
+    "js-tiktoken": "^1.0.5"
   },
   "optionalDependencies": {
     "@node-rs/jieba": "^1.6.2",
@@ -36,5 +37,9 @@
     "puppeteer-extra-plugin-recaptcha": "^3.6.8",
     "puppeteer-extra-plugin-stealth": "^2.11.2",
     "sharp": "^0.31.3"
+  },
+  "devDependencies": {
+    "ts-node": "^10.9.1",
+    "ts-node-register": "^1.0.0"
   }
 }

utils/openai/chatgpt-api.ts (new file, 470 lines)

@@ -0,0 +1,470 @@
import Keyv from 'keyv'
import pTimeout from 'p-timeout'
import QuickLRU from 'quick-lru'
import { v4 as uuidv4 } from 'uuid'

import * as tokenizer from './tokenizer'
import * as types from './types'
import globalFetch from 'node-fetch'
import { fetchSSE } from './fetch-sse'

const CHATGPT_MODEL = 'gpt-3.5-turbo'

const USER_LABEL_DEFAULT = 'User'
const ASSISTANT_LABEL_DEFAULT = 'ChatGPT'

export class ChatGPTAPI {
  protected _apiKey: string
  protected _apiBaseUrl: string
  protected _apiOrg?: string
  protected _debug: boolean

  protected _systemMessage: string
  protected _completionParams: Omit<
    types.openai.CreateChatCompletionRequest,
    'messages' | 'n'
  >
  protected _maxModelTokens: number
  protected _maxResponseTokens: number
  protected _fetch: types.FetchFn

  protected _getMessageById: types.GetMessageByIdFunction
  protected _upsertMessage: types.UpsertMessageFunction

  protected _messageStore: Keyv<types.ChatMessage>

  /**
   * Creates a new client wrapper around OpenAI's chat completion API, mimicking the official ChatGPT webapp's functionality as closely as possible.
   *
   * @param apiKey - OpenAI API key (required).
   * @param apiOrg - Optional OpenAI API organization.
   * @param apiBaseUrl - Optional override for the OpenAI API base URL.
   * @param debug - Optional; enables logging debugging info to stdout.
   * @param completionParams - Param overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
   * @param maxModelTokens - Optional override for the maximum number of tokens allowed by the model's context. Defaults to 4000.
   * @param maxResponseTokens - Optional override for the maximum number of tokens reserved for the model's response. Defaults to 1000.
   * @param messageStore - Optional [Keyv](https://github.com/jaredwray/keyv) store to persist chat messages to. If not provided, messages will be lost when the process exits.
   * @param getMessageById - Optional function to retrieve a message by its ID. If not provided, the default implementation will be used (using an in-memory `messageStore`).
   * @param upsertMessage - Optional function to insert or update a message. If not provided, the default implementation will be used (using an in-memory `messageStore`).
   * @param fetch - Optional override for the `fetch` implementation to use. Defaults to the global `fetch` function.
   */
  constructor(opts: types.ChatGPTAPIOptions) {
    const {
      apiKey,
      apiOrg,
      apiBaseUrl = 'https://api.openai.com/v1',
      debug = false,
      messageStore,
      completionParams,
      systemMessage,
      maxModelTokens = 4000,
      maxResponseTokens = 1000,
      getMessageById,
      upsertMessage,
      fetch = globalFetch
    } = opts

    this._apiKey = apiKey
    this._apiOrg = apiOrg
    this._apiBaseUrl = apiBaseUrl
    this._debug = !!debug
    this._fetch = fetch

    this._completionParams = {
      model: CHATGPT_MODEL,
      temperature: 0.8,
      top_p: 1.0,
      presence_penalty: 1.0,
      ...completionParams
    }

    this._systemMessage = systemMessage

    if (this._systemMessage === undefined) {
      const currentDate = new Date().toISOString().split('T')[0]
      this._systemMessage = `You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible.\nKnowledge cutoff: 2021-09-01\nCurrent date: ${currentDate}`
    }

    this._maxModelTokens = maxModelTokens
    this._maxResponseTokens = maxResponseTokens

    this._getMessageById = getMessageById ?? this._defaultGetMessageById
    this._upsertMessage = upsertMessage ?? this._defaultUpsertMessage

    if (messageStore) {
      this._messageStore = messageStore
    } else {
      this._messageStore = new Keyv<types.ChatMessage, any>({
        store: new QuickLRU<string, types.ChatMessage>({ maxSize: 10000 })
      })
    }

    if (!this._apiKey) {
      throw new Error('OpenAI missing required apiKey')
    }

    if (!this._fetch) {
      throw new Error('Invalid environment; fetch is not defined')
    }

    if (typeof this._fetch !== 'function') {
      throw new Error('Invalid "fetch" is not a function')
    }
  }

  /**
   * Sends a message to the OpenAI chat completions endpoint, waits for the response
   * to resolve, and returns the response.
   *
   * If you want your response to have historical context, you must provide a valid `parentMessageId`.
   *
   * If you want to receive a stream of partial responses, use `opts.onProgress`.
   *
   * Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the OpenAI chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
   *
   * @param text - The prompt message to send
   * @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
   * @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)
   * @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
   * @param opts.systemMessage - Optional override for the chat "system message" which acts as instructions to the model (defaults to the ChatGPT system message)
   * @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
   * @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
   * @param opts.abortSignal - Optional signal used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
   * @param opts.completionParams - Optional overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
   *
   * @returns The response from ChatGPT
   */
  async sendMessage(
    text: string,
    opts: types.SendMessageOptions = {}
  ): Promise<types.ChatMessage> {
    const {
      parentMessageId,
      messageId = uuidv4(),
      timeoutMs,
      onProgress,
      stream = onProgress ? true : false,
      completionParams,
      conversationId
    } = opts

    let { abortSignal } = opts

    let abortController: AbortController = null
    if (timeoutMs && !abortSignal) {
      abortController = new AbortController()
      abortSignal = abortController.signal
    }

    const message: types.ChatMessage = {
      role: 'user',
      id: messageId,
      conversationId,
      parentMessageId,
      text
    }

    const latestQuestion = message

    const { messages, maxTokens, numTokens } = await this._buildMessages(
      text,
      opts
    )

    const result: types.ChatMessage = {
      role: 'assistant',
      id: uuidv4(),
      conversationId,
      parentMessageId: messageId,
      text: ''
    }

    const responseP = new Promise<types.ChatMessage>(
      async (resolve, reject) => {
        const url = `${this._apiBaseUrl}/chat/completions`
        const headers = {
          'Content-Type': 'application/json',
          Authorization: `Bearer ${this._apiKey}`
        }
        const body = {
          max_tokens: maxTokens,
          ...this._completionParams,
          ...completionParams,
          messages,
          stream
        }

        // Support multiple organizations
        // See https://platform.openai.com/docs/api-reference/authentication
        if (this._apiOrg) {
          headers['OpenAI-Organization'] = this._apiOrg
        }

        if (this._debug) {
          console.log(`sendMessage (${numTokens} tokens)`, body)
        }

        if (stream) {
          fetchSSE(
            url,
            {
              method: 'POST',
              headers,
              body: JSON.stringify(body),
              signal: abortSignal,
              onMessage: (data: string) => {
                if (data === '[DONE]') {
                  result.text = result.text.trim()
                  return resolve(result)
                }

                try {
                  const response: types.openai.CreateChatCompletionDeltaResponse =
                    JSON.parse(data)

                  if (response.id) {
                    result.id = response.id
                  }

                  if (response.choices?.length) {
                    const delta = response.choices[0].delta
                    result.delta = delta.content
                    if (delta?.content) result.text += delta.content

                    if (delta.role) {
                      result.role = delta.role
                    }

                    result.detail = response
                    onProgress?.(result)
                  }
                } catch (err) {
                  console.warn('OpenAI stream SSE event unexpected error', err)
                  return reject(err)
                }
              }
            },
            this._fetch
          ).catch(reject)
        } else {
          try {
            const res = await this._fetch(url, {
              method: 'POST',
              headers,
              body: JSON.stringify(body),
              signal: abortSignal
            })

            if (!res.ok) {
              const reason = await res.text()
              const msg = `OpenAI error ${
                res.status || res.statusText
              }: ${reason}`
              const error = new types.ChatGPTError(msg, { cause: res })
              error.statusCode = res.status
              error.statusText = res.statusText
              return reject(error)
            }

            const response: types.openai.CreateChatCompletionResponse =
              await res.json()
            if (this._debug) {
              console.log(response)
            }

            if (response?.id) {
              result.id = response.id
            }

            if (response?.choices?.length) {
              const message = response.choices[0].message
              result.text = message.content
              if (message.role) {
                result.role = message.role
              }
            } else {
              const res = response as any
              return reject(
                new Error(
                  `OpenAI error: ${
                    res?.detail?.message || res?.detail || 'unknown'
                  }`
                )
              )
            }

            result.detail = response

            return resolve(result)
          } catch (err) {
            return reject(err)
          }
        }
      }
    ).then(async (message) => {
      if (message.detail && !message.detail.usage) {
        try {
          const promptTokens = numTokens
          const completionTokens = await this._getTokenCount(message.text)
          message.detail.usage = {
            prompt_tokens: promptTokens,
            completion_tokens: completionTokens,
            total_tokens: promptTokens + completionTokens,
            estimated: true
          }
        } catch (err) {
          // TODO: this should really never happen, but if it does,
          // we should handle it and notify the user gracefully
        }
      }

      return Promise.all([
        this._upsertMessage(latestQuestion),
        this._upsertMessage(message)
      ]).then(() => message)
    })

    if (timeoutMs) {
      if (abortController) {
        // This will be called when a timeout occurs in order for us to forcibly
        // ensure that the underlying HTTP request is aborted.
        ;(responseP as any).cancel = () => {
          abortController.abort()
        }
      }

      return pTimeout(responseP, {
        milliseconds: timeoutMs,
        message: 'OpenAI timed out waiting for response'
      })
    } else {
      return responseP
    }
  }

  get apiKey(): string {
    return this._apiKey
  }

  set apiKey(apiKey: string) {
    this._apiKey = apiKey
  }

  get apiOrg(): string {
    return this._apiOrg
  }

  set apiOrg(apiOrg: string) {
    this._apiOrg = apiOrg
  }

  protected async _buildMessages(text: string, opts: types.SendMessageOptions) {
    const { systemMessage = this._systemMessage } = opts
    let { parentMessageId } = opts

    const userLabel = USER_LABEL_DEFAULT
    const assistantLabel = ASSISTANT_LABEL_DEFAULT

    const maxNumTokens = this._maxModelTokens - this._maxResponseTokens
    let messages: types.openai.ChatCompletionRequestMessage[] = []

    if (systemMessage) {
      messages.push({
        role: 'system',
        content: systemMessage
      })
    }

    const systemMessageOffset = messages.length
    let nextMessages = text
      ? messages.concat([
          {
            role: 'user',
            content: text,
            name: opts.name
          }
        ])
      : messages
    let numTokens = 0

    do {
      const prompt = nextMessages
        .reduce((prompt, message) => {
          switch (message.role) {
            case 'system':
              return prompt.concat([`Instructions:\n${message.content}`])
            case 'user':
              return prompt.concat([`${userLabel}:\n${message.content}`])
            default:
              return prompt.concat([`${assistantLabel}:\n${message.content}`])
          }
        }, [] as string[])
        .join('\n\n')

      const nextNumTokensEstimate = await this._getTokenCount(prompt)
      const isValidPrompt = nextNumTokensEstimate <= maxNumTokens

      if (prompt && !isValidPrompt) {
        break
      }

      messages = nextMessages
      numTokens = nextNumTokensEstimate

      if (!isValidPrompt) {
        break
      }

      if (!parentMessageId) {
        break
      }

      const parentMessage = await this._getMessageById(parentMessageId)
      if (!parentMessage) {
        break
      }

      const parentMessageRole = parentMessage.role || 'user'

      nextMessages = nextMessages.slice(0, systemMessageOffset).concat([
        {
          role: parentMessageRole,
          content: parentMessage.text,
          name: parentMessage.name
        },
        ...nextMessages.slice(systemMessageOffset)
      ])

      parentMessageId = parentMessage.parentMessageId
    } while (true)

    // Use up to `_maxModelTokens` tokens (prompt + response), but try to leave
    // `_maxResponseTokens` for the response.
    const maxTokens = Math.max(
      1,
      Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens)
    )

    return { messages, maxTokens, numTokens }
  }

  protected async _getTokenCount(text: string) {
    // TODO: use a better fix in the tokenizer
    text = text.replace(/<\|endoftext\|>/g, '')

    return tokenizer.encode(text).length
  }

  protected async _defaultGetMessageById(
    id: string
  ): Promise<types.ChatMessage> {
    const res = await this._messageStore.get(id)
    return res
  }

  protected async _defaultUpsertMessage(
    message: types.ChatMessage
  ): Promise<void> {
    await this._messageStore.set(message.id, message)
  }
}
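
A minimal usage sketch of the client above (the prompts and the environment-variable key are illustrative; the plugin's actual wiring is not part of this commit):

import { ChatGPTAPI } from './utils/openai/chatgpt-api'

const api = new ChatGPTAPI({ apiKey: process.env.OPENAI_API_KEY })

// First turn. Supplying onProgress switches sendMessage into SSE streaming mode.
const res = await api.sendMessage('Write a haiku about QQ bots.', {
  onProgress: (partial) => console.log(partial.text)
})

// Follow-up turn: parentMessageId lets _buildMessages walk prior turns back
// out of the message store until the token budget is exhausted.
const res2 = await api.sendMessage('Now translate it into Chinese.', {
  parentMessageId: res.id
})
console.log(res2.text)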

utils/openai/fetch-sse.ts (new file, 89 lines)

@@ -0,0 +1,89 @@
import { createParser } from 'eventsource-parser'

import * as types from './types'
// node-fetch v3 exposes fetch as its default export (there is no named `fetch` export)
import nodefetch from 'node-fetch'
import { streamAsyncIterable } from './stream-async-iterable'

export async function fetchSSE(
  url: string,
  options: Parameters<typeof fetch>[1] & {
    onMessage: (data: string) => void
    onError?: (error: any) => void
  },
  fetch: types.FetchFn = nodefetch
) {
  const { onMessage, onError, ...fetchOptions } = options
  const res = await fetch(url, fetchOptions)
  if (!res.ok) {
    let reason: string

    try {
      reason = await res.text()
    } catch (err) {
      reason = res.statusText
    }

    const msg = `ChatGPT error ${res.status}: ${reason}`
    const error = new types.ChatGPTError(msg, { cause: res })
    error.statusCode = res.status
    error.statusText = res.statusText
    throw error
  }

  const parser = createParser((event) => {
    if (event.type === 'event') {
      onMessage(event.data)
    }
  })

  // handle special response errors
  const feed = (chunk: string) => {
    let response = null

    try {
      response = JSON.parse(chunk)
    } catch {
      // ignore
    }

    if (response?.detail?.type === 'invalid_request_error') {
      const msg = `ChatGPT error ${response.detail.message}: ${response.detail.code} (${response.detail.type})`
      const error = new types.ChatGPTError(msg, { cause: response })
      error.statusCode = response.detail.code
      error.statusText = response.detail.message

      if (onError) {
        onError(error)
      } else {
        console.error(error)
      }

      // don't feed to the event parser
      return
    }

    parser.feed(chunk)
  }

  if (!res.body.getReader) {
    // Vercel polyfills `fetch` with `node-fetch`, which doesn't conform to
    // web standards, so this is a workaround...
    const body: NodeJS.ReadableStream = res.body as any

    if (!body.on || !body.read) {
      throw new types.ChatGPTError('unsupported "fetch" implementation')
    }

    body.on('readable', () => {
      let chunk: string | Buffer
      while (null !== (chunk = body.read())) {
        feed(chunk.toString())
      }
    })
  } else {
    for await (const chunk of streamAsyncIterable(res.body)) {
      const str = new TextDecoder().decode(chunk)
      feed(str)
    }
  }
}
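
For reference, a standalone sketch of driving fetchSSE the same way sendMessage's streaming branch in chatgpt-api.ts does (the endpoint and payload mirror that file; the environment-variable key is illustrative):

import { fetchSSE } from './utils/openai/fetch-sse'

await fetchSSE('https://api.openai.com/v1/chat/completions', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${process.env.OPENAI_API_KEY}`
  },
  body: JSON.stringify({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'ping' }],
    stream: true
  }),
  // Each SSE event carries one JSON-encoded completion chunk, until '[DONE]'.
  onMessage: (data) => {
    if (data === '[DONE]') return
    const chunk = JSON.parse(data)
    process.stdout.write(chunk.choices?.[0]?.delta?.content ?? '')
  }
})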

utils/openai/tokenizer.ts (new file, 8 lines)

@@ -0,0 +1,8 @@
import { getEncoding } from 'js-tiktoken'

// TODO: make this configurable
const tokenizer = getEncoding('cl100k_base')

export function encode(input: string): Uint32Array {
  return new Uint32Array(tokenizer.encode(input))
}
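
A short sketch of how _getTokenCount in chatgpt-api.ts consumes this module; cl100k_base is the encoding used by gpt-3.5-turbo, so the length of the encoded array approximates a prompt's token cost:

import * as tokenizer from './utils/openai/tokenizer'

// Count the tokens a prompt would occupy in the model's context window.
const numTokens = tokenizer.encode('How many tokens is this?').length
console.log(numTokens)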

utils/openai/types.ts (new file, 446 lines)

@@ -0,0 +1,446 @@
import Keyv from 'keyv'

export type Role = 'user' | 'assistant' | 'system'

export type FetchFn = typeof fetch

export type ChatGPTAPIOptions = {
  apiKey: string

  /** @defaultValue `'https://api.openai.com/v1'` **/
  apiBaseUrl?: string

  apiOrg?: string

  /** @defaultValue `false` **/
  debug?: boolean

  completionParams?: Partial<
    Omit<openai.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
  >

  systemMessage?: string

  /** @defaultValue `4000` **/
  maxModelTokens?: number

  /** @defaultValue `1000` **/
  maxResponseTokens?: number

  messageStore?: Keyv
  getMessageById?: GetMessageByIdFunction
  upsertMessage?: UpsertMessageFunction

  fetch?: FetchFn
}

export type SendMessageOptions = {
  /** The name of a user in a multi-user chat. */
  name?: string
  parentMessageId?: string
  conversationId?: string
  messageId?: string
  stream?: boolean
  systemMessage?: string
  timeoutMs?: number
  onProgress?: (partialResponse: ChatMessage) => void
  abortSignal?: AbortSignal
  completionParams?: Partial<
    Omit<openai.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
  >
}

export type MessageActionType = 'next' | 'variant'

export type SendMessageBrowserOptions = {
  conversationId?: string
  parentMessageId?: string
  messageId?: string
  action?: MessageActionType
  timeoutMs?: number
  onProgress?: (partialResponse: ChatMessage) => void
  abortSignal?: AbortSignal
}

export interface ChatMessage {
  id: string
  text: string
  role: Role
  name?: string
  delta?: string
  detail?:
    | openai.CreateChatCompletionResponse
    | CreateChatCompletionStreamResponse

  // relevant for both ChatGPTAPI and ChatGPTUnofficialProxyAPI
  parentMessageId?: string

  // only relevant for ChatGPTUnofficialProxyAPI (optional for ChatGPTAPI)
  conversationId?: string
}

export class ChatGPTError extends Error {
  statusCode?: number
  statusText?: string
  isFinal?: boolean
  accountId?: string
}

/** Returns a chat message from a store by its ID (or null if not found). */
export type GetMessageByIdFunction = (id: string) => Promise<ChatMessage>

/** Upserts a chat message to a store. */
export type UpsertMessageFunction = (message: ChatMessage) => Promise<void>

export interface CreateChatCompletionStreamResponse
  extends openai.CreateChatCompletionDeltaResponse {
  usage: CreateCompletionStreamResponseUsage
}

export interface CreateCompletionStreamResponseUsage
  extends openai.CreateCompletionResponseUsage {
  estimated: true
}

/**
 * https://chat.openai.com/backend-api/conversation
 */
export type ConversationJSONBody = {
  /**
   * The action to take
   */
  action: string

  /**
   * The ID of the conversation
   */
  conversation_id?: string

  /**
   * Prompts to provide
   */
  messages: Prompt[]

  /**
   * The model to use
   */
  model: string

  /**
   * The parent message ID
   */
  parent_message_id: string
}

export type Prompt = {
  /**
   * The content of the prompt
   */
  content: PromptContent

  /**
   * The ID of the prompt
   */
  id: string

  /**
   * The role played in the prompt
   */
  role: Role
}

export type ContentType = 'text'

export type PromptContent = {
  /**
   * The content type of the prompt
   */
  content_type: ContentType

  /**
   * The parts to the prompt
   */
  parts: string[]
}

export type ConversationResponseEvent = {
  message?: Message
  conversation_id?: string
  error?: string | null
}

export type Message = {
  id: string
  content: MessageContent
  role: Role
  user: string | null
  create_time: string | null
  update_time: string | null
  end_turn: null
  weight: number
  recipient: string
  metadata: MessageMetadata
}

export type MessageContent = {
  content_type: string
  parts: string[]
}

export type MessageMetadata = any

export namespace openai {
  export interface CreateChatCompletionDeltaResponse {
    id: string
    object: 'chat.completion.chunk'
    created: number
    model: string
    choices: [
      {
        delta: {
          role: Role
          content?: string
        }
        index: number
        finish_reason: string | null
      }
    ]
  }

  /**
   *
   * @export
   * @interface ChatCompletionRequestMessage
   */
  export interface ChatCompletionRequestMessage {
    /**
     * The role of the author of this message.
     * @type {string}
     * @memberof ChatCompletionRequestMessage
     */
    role: ChatCompletionRequestMessageRoleEnum
    /**
     * The contents of the message
     * @type {string}
     * @memberof ChatCompletionRequestMessage
     */
    content: string
    /**
     * The name of the user in a multi-user chat
     * @type {string}
     * @memberof ChatCompletionRequestMessage
     */
    name?: string
  }
  export declare const ChatCompletionRequestMessageRoleEnum: {
    readonly System: 'system'
    readonly User: 'user'
    readonly Assistant: 'assistant'
  }
  export declare type ChatCompletionRequestMessageRoleEnum =
    (typeof ChatCompletionRequestMessageRoleEnum)[keyof typeof ChatCompletionRequestMessageRoleEnum]
  /**
   *
   * @export
   * @interface ChatCompletionResponseMessage
   */
  export interface ChatCompletionResponseMessage {
    /**
     * The role of the author of this message.
     * @type {string}
     * @memberof ChatCompletionResponseMessage
     */
    role: ChatCompletionResponseMessageRoleEnum
    /**
     * The contents of the message
     * @type {string}
     * @memberof ChatCompletionResponseMessage
     */
    content: string
  }
  export declare const ChatCompletionResponseMessageRoleEnum: {
    readonly System: 'system'
    readonly User: 'user'
    readonly Assistant: 'assistant'
  }
  export declare type ChatCompletionResponseMessageRoleEnum =
    (typeof ChatCompletionResponseMessageRoleEnum)[keyof typeof ChatCompletionResponseMessageRoleEnum]
  /**
   *
   * @export
   * @interface CreateChatCompletionRequest
   */
  export interface CreateChatCompletionRequest {
    /**
     * ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported.
     * @type {string}
     * @memberof CreateChatCompletionRequest
     */
    model: string
    /**
     * The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction).
     * @type {Array<ChatCompletionRequestMessage>}
     * @memberof CreateChatCompletionRequest
     */
    messages: Array<ChatCompletionRequestMessage>
    /**
     * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    temperature?: number | null
    /**
     * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    top_p?: number | null
    /**
     * How many chat completion choices to generate for each input message.
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    n?: number | null
    /**
     * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
     * @type {boolean}
     * @memberof CreateChatCompletionRequest
     */
    stream?: boolean | null
    /**
     *
     * @type {CreateChatCompletionRequestStop}
     * @memberof CreateChatCompletionRequest
     */
    stop?: CreateChatCompletionRequestStop
    /**
     * The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    max_tokens?: number
    /**
     * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    presence_penalty?: number | null
    /**
     * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    frequency_penalty?: number | null
    /**
     * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
     * @type {object}
     * @memberof CreateChatCompletionRequest
     */
    logit_bias?: object | null
    /**
     * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
     * @type {string}
     * @memberof CreateChatCompletionRequest
     */
    user?: string
  }
  /**
   * @type CreateChatCompletionRequestStop
   * Up to 4 sequences where the API will stop generating further tokens.
   * @export
   */
  export declare type CreateChatCompletionRequestStop = Array<string> | string
  /**
   *
   * @export
   * @interface CreateChatCompletionResponse
   */
  export interface CreateChatCompletionResponse {
    /**
     *
     * @type {string}
     * @memberof CreateChatCompletionResponse
     */
    id: string
    /**
     *
     * @type {string}
     * @memberof CreateChatCompletionResponse
     */
    object: string
    /**
     *
     * @type {number}
     * @memberof CreateChatCompletionResponse
     */
    created: number
    /**
     *
     * @type {string}
     * @memberof CreateChatCompletionResponse
     */
    model: string
    /**
     *
     * @type {Array<CreateChatCompletionResponseChoicesInner>}
     * @memberof CreateChatCompletionResponse
     */
    choices: Array<CreateChatCompletionResponseChoicesInner>
    /**
     *
     * @type {CreateCompletionResponseUsage}
     * @memberof CreateChatCompletionResponse
     */
    usage?: CreateCompletionResponseUsage
  }
  /**
   *
   * @export
   * @interface CreateChatCompletionResponseChoicesInner
   */
  export interface CreateChatCompletionResponseChoicesInner {
    /**
     *
     * @type {number}
     * @memberof CreateChatCompletionResponseChoicesInner
     */
    index?: number
    /**
     *
     * @type {ChatCompletionResponseMessage}
     * @memberof CreateChatCompletionResponseChoicesInner
     */
    message?: ChatCompletionResponseMessage
    /**
     *
     * @type {string}
     * @memberof CreateChatCompletionResponseChoicesInner
     */
    finish_reason?: string
  }
  /**
   *
   * @export
   * @interface CreateCompletionResponseUsage
   */
  export interface CreateCompletionResponseUsage {
    /**
     *
     * @type {number}
     * @memberof CreateCompletionResponseUsage
     */
    prompt_tokens: number
    /**
     *
     * @type {number}
     * @memberof CreateCompletionResponseUsage
     */
    completion_tokens: number
    /**
     *
     * @type {number}
     * @memberof CreateCompletionResponseUsage
     */
    total_tokens: number
  }
}
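
GetMessageByIdFunction and UpsertMessageFunction above let callers swap out the client's default in-memory QuickLRU store. A minimal sketch backed by a plain Map, mirroring the default implementation (any persistent store with the same get/set shape would work):

import { ChatGPTAPI } from './utils/openai/chatgpt-api'
import type { ChatMessage } from './utils/openai/types'

const store = new Map<string, ChatMessage>()

const api = new ChatGPTAPI({
  apiKey: process.env.OPENAI_API_KEY,
  // Retrieve a prior turn so _buildMessages can rebuild the conversation.
  getMessageById: async (id) => store.get(id),
  // Persist both the user question and the assistant reply after each turn.
  upsertMessage: async (message) => {
    store.set(message.id, message)
  }
})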