feat: 增加通义千问支持

This commit is contained in:
ikechan8370 2023-10-25 21:50:44 +08:00
parent d528840297
commit c0936e6e2a
13 changed files with 1380 additions and 72 deletions

View file

@ -42,6 +42,7 @@
* 2023-07 支持智能模式,机器人可以实现禁言、群名片/头衔(需给机器人管理员/群主、分享音乐视频、主动发音频、对接ap,sr和喵喵等插件、联网搜索等需api模式0613系列模型。智能模式所需的额外api和搜索api分别可以参考[chatgpt-plugin-extras](https://github.com/ikechan8370/chatgpt-plugin-extras) 和 [search-api](https://github.com/ikechan8370/search-api) 自行搭建,其中后者提供了一个公益版本,前者可使用[huggingface](https://huggingface.co/spaces/ikechan8370/cp-extra)部署
* 2023-09-10 支持来自claude.ai的claude-2模型
* 2023-10-19 支持读取文件,目前适配必应模式和Claude2模式
* 2023-10-25 增加支持通义千问官方API
### 如果觉得这个插件有趣或者对你有帮助请点一个star吧
## 版本要求

View file

@ -1,85 +1,82 @@
import plugin from '../../../lib/plugins/plugin.js'
import _ from 'lodash'
import { Config, defaultOpenAIAPI } from '../utils/config.js'
import { v4 as uuid } from 'uuid'
import {Config, defaultOpenAIAPI} from '../utils/config.js'
import {v4 as uuid} from 'uuid'
import delay from 'delay'
import { ChatGPTAPI } from '../utils/openai/chatgpt-api.js'
import { BingAIClient } from '@waylaidwanderer/chatgpt-api'
import {ChatGPTAPI} from '../utils/openai/chatgpt-api.js'
import {BingAIClient} from '@waylaidwanderer/chatgpt-api'
import SydneyAIClient from '../utils/SydneyAIClient.js'
import { PoeClient } from '../utils/poe/index.js'
import {PoeClient} from '../utils/poe/index.js'
import AzureTTS from '../utils/tts/microsoft-azure.js'
import VoiceVoxTTS from '../utils/tts/voicevox.js'
import Version from '../utils/version.js'
import {
render,
renderUrl,
getMessageById,
makeForwardMsg,
upsertMessage,
randomString,
completeJSON,
isImage,
getUserData,
extractContentFromFile,
formatDate,
formatDate2,
generateAudio,
getDefaultReplySetting,
isCN,
getMasterQQ,
getUserReplySetting,
getImageOcrText,
getImg,
getMasterQQ,
getMaxModelTokens,
formatDate,
generateAudio,
formatDate2,
mkdirs,
getMessageById,
getUin,
downloadFile,
isPureText,
extractContentFromFile
getUserData,
getUserReplySetting,
isCN,
isImage,
makeForwardMsg,
randomString,
render,
renderUrl,
upsertMessage
} from '../utils/common.js'
import { ChatGPTPuppeteer } from '../utils/browser.js'
import { KeyvFile } from 'keyv-file'
import { OfficialChatGPTClient } from '../utils/message.js'
import {ChatGPTPuppeteer} from '../utils/browser.js'
import {KeyvFile} from 'keyv-file'
import {OfficialChatGPTClient} from '../utils/message.js'
import fetch from 'node-fetch'
import { deleteConversation, getConversations, getLatestMessageIdByConversationId } from '../utils/conversation.js'
import { convertSpeaker, speakers } from '../utils/tts.js'
import {deleteConversation, getConversations, getLatestMessageIdByConversationId} from '../utils/conversation.js'
import {convertSpeaker, speakers} from '../utils/tts.js'
import ChatGLMClient from '../utils/chatglm.js'
import { convertFaces } from '../utils/face.js'
import { SlackClaudeClient } from '../utils/slack/slackClient.js'
import { getPromptByName } from '../utils/prompts.js'
import {convertFaces} from '../utils/face.js'
import {SlackClaudeClient} from '../utils/slack/slackClient.js'
import {getPromptByName} from '../utils/prompts.js'
import BingDrawClient from '../utils/BingDraw.js'
import XinghuoClient from '../utils/xinghuo/xinghuo.js'
import Bard from '../utils/bard.js'
import { JinyanTool } from '../utils/tools/JinyanTool.js'
import { SendVideoTool } from '../utils/tools/SendBilibiliTool.js'
import { KickOutTool } from '../utils/tools/KickOutTool.js'
import { EditCardTool } from '../utils/tools/EditCardTool.js'
import { SearchVideoTool } from '../utils/tools/SearchBilibiliTool.js'
import { SearchMusicTool } from '../utils/tools/SearchMusicTool.js'
import { QueryStarRailTool } from '../utils/tools/QueryStarRailTool.js'
import { WebsiteTool } from '../utils/tools/WebsiteTool.js'
import { WeatherTool } from '../utils/tools/WeatherTool.js'
import { SerpTool } from '../utils/tools/SerpTool.js'
import { SerpIkechan8370Tool } from '../utils/tools/SerpIkechan8370Tool.js'
import { SendPictureTool } from '../utils/tools/SendPictureTool.js'
import { SerpImageTool } from '../utils/tools/SearchImageTool.js'
import { ImageCaptionTool } from '../utils/tools/ImageCaptionTool.js'
import { SendAudioMessageTool } from '../utils/tools/SendAudioMessageTool.js'
import { ProcessPictureTool } from '../utils/tools/ProcessPictureTool.js'
import { APTool } from '../utils/tools/APTool.js'
import { QueryGenshinTool } from '../utils/tools/QueryGenshinTool.js'
import { HandleMessageMsgTool } from '../utils/tools/HandleMessageMsgTool.js'
import { QueryUserinfoTool } from '../utils/tools/QueryUserinfoTool.js'
import { EliMovieTool } from '../utils/tools/EliMovieTool.js'
import { EliMusicTool } from '../utils/tools/EliMusicTool.js'
import { SendMusicTool } from '../utils/tools/SendMusicTool.js'
import { SendDiceTool } from '../utils/tools/SendDiceTool.js'
import { SendAvatarTool } from '../utils/tools/SendAvatarTool.js'
import { SendMessageToSpecificGroupOrUserTool } from '../utils/tools/SendMessageToSpecificGroupOrUserTool.js'
import { SetTitleTool } from '../utils/tools/SetTitleTool.js'
import { solveCaptchaOneShot } from '../utils/bingCaptcha.js'
import { ClaudeAIClient } from '../utils/claude.ai/index.js'
import fs from 'fs'
import { getProxy } from '../utils/proxy.js'
import {JinyanTool} from '../utils/tools/JinyanTool.js'
import {SendVideoTool} from '../utils/tools/SendBilibiliTool.js'
import {KickOutTool} from '../utils/tools/KickOutTool.js'
import {EditCardTool} from '../utils/tools/EditCardTool.js'
import {SearchVideoTool} from '../utils/tools/SearchBilibiliTool.js'
import {SearchMusicTool} from '../utils/tools/SearchMusicTool.js'
import {QueryStarRailTool} from '../utils/tools/QueryStarRailTool.js'
import {WebsiteTool} from '../utils/tools/WebsiteTool.js'
import {WeatherTool} from '../utils/tools/WeatherTool.js'
import {SerpTool} from '../utils/tools/SerpTool.js'
import {SerpIkechan8370Tool} from '../utils/tools/SerpIkechan8370Tool.js'
import {SendPictureTool} from '../utils/tools/SendPictureTool.js'
import {SerpImageTool} from '../utils/tools/SearchImageTool.js'
import {ImageCaptionTool} from '../utils/tools/ImageCaptionTool.js'
import {SendAudioMessageTool} from '../utils/tools/SendAudioMessageTool.js'
import {ProcessPictureTool} from '../utils/tools/ProcessPictureTool.js'
import {APTool} from '../utils/tools/APTool.js'
import {QueryGenshinTool} from '../utils/tools/QueryGenshinTool.js'
import {HandleMessageMsgTool} from '../utils/tools/HandleMessageMsgTool.js'
import {QueryUserinfoTool} from '../utils/tools/QueryUserinfoTool.js'
import {EliMovieTool} from '../utils/tools/EliMovieTool.js'
import {EliMusicTool} from '../utils/tools/EliMusicTool.js'
import {SendMusicTool} from '../utils/tools/SendMusicTool.js'
import {SendDiceTool} from '../utils/tools/SendDiceTool.js'
import {SendAvatarTool} from '../utils/tools/SendAvatarTool.js'
import {SendMessageToSpecificGroupOrUserTool} from '../utils/tools/SendMessageToSpecificGroupOrUserTool.js'
import {SetTitleTool} from '../utils/tools/SetTitleTool.js'
import {solveCaptchaOneShot} from '../utils/bingCaptcha.js'
import {ClaudeAIClient} from '../utils/claude.ai/index.js'
import {getProxy} from '../utils/proxy.js'
import {QwenApi} from '../utils/alibaba/qwen-api.js'
try {
await import('@azure/openai')
@ -187,6 +184,12 @@ export class chatgpt extends plugin {
reg: '^#星火(搜索|查找)助手',
fnc: 'searchxhBot'
},
{
/** 命令正则匹配 */
reg: '^#qwen[sS]*',
/** 执行方法 */
fnc: 'qwen'
},
{
/** 命令正则匹配 */
reg: toggleMode === 'at' ? '^[^#][sS]*' : '^#chat[^gpt][sS]*',
@ -374,6 +377,14 @@ export class chatgpt extends plugin {
await redis.del(`CHATGPT:CONVERSATIONS:${e.sender.user_id}`)
await this.reply('已结束当前对话,请@我进行聊天以开启新的对话', true)
}
} else if (use === 'qwen') {
let c = await redis.get(`CHATGPT:CONVERSATIONS_QWEN:${e.sender.user_id}`)
if (!c) {
await this.reply('当前没有开启对话', true)
} else {
await redis.del(`CHATGPT:CONVERSATIONS_QWEN:${e.sender.user_id}`)
await this.reply('已结束当前对话,请@我进行聊天以开启新的对话', true)
}
} else if (use === 'bing') {
let c = await redis.get(`CHATGPT:CONVERSATIONS_BING:${e.sender.user_id}`)
if (!c) {
@ -435,6 +446,14 @@ export class chatgpt extends plugin {
await redis.del(`CHATGPT:CONVERSATIONS:${qq}`)
await this.reply(`已结束${atUser}的对话TA仍可以@我进行聊天以开启新的对话`, true)
}
} else if (use === 'qwen') {
let c = await redis.get(`CHATGPT:CONVERSATIONS_QWEN:${qq}`)
if (!c) {
await this.reply(`当前${atUser}没有开启对话`, true)
} else {
await redis.del(`CHATGPT:CONVERSATIONS_QWEN:${qq}`)
await this.reply(`已结束${atUser}的对话TA仍可以@我进行聊天以开启新的对话`, true)
}
} else if (use === 'bing') {
let c = await redis.get(`CHATGPT:CONVERSATIONS_BING:${qq}`)
if (!c) {
@ -1037,6 +1056,10 @@ export class chatgpt extends plugin {
key = `CHATGPT:CONVERSATIONS_AZURE:${e.sender.user_id}`
break
}
case 'qwen': {
key = `CHATGPT:CONVERSATIONS_QWEN:${(e.isGroup && Config.groupMerge) ? e.group_id.toString() : e.sender.user_id}`
break
}
}
let ctime = new Date()
previousConversation = (key ? await redis.get(key) : null) || JSON.stringify({
@ -1447,6 +1470,25 @@ export class chatgpt extends plugin {
return true
}
async qwen (e) {
if (!Config.allowOtherMode) {
return false
}
let ats = e.message.filter(m => m.type === 'at')
if (!(e.atme || e.atBot) && ats.length > 0) {
if (Config.debug) {
logger.mark('艾特别人了,没艾特我,忽略#xh')
}
return false
}
let prompt = _.replace(e.raw_message.trimStart(), '#qwen', '').trim()
if (prompt.length === 0) {
return false
}
await this.abstractChat(e, prompt, 'qwen')
return true
}
async xh (e) {
if (!Config.allowOtherMode) {
return false
@ -1988,6 +2030,57 @@ export class chatgpt extends plugin {
let completion = choices[0].message
return { text: completion.content, message: completion }
}
case 'qwen': {
let completionParams = {
parameters: {
top_p: Config.qwenTopP || 0.5,
top_k: Config.qwenTopK || 50,
seed: Config.qwenSeed > 0 ? Config.qwenSeed : Math.floor(Math.random() * 114514),
temperature: Config.qwenTemperature || 1,
enable_search: !!Config.qwenEnableSearch
}
}
if (Config.qwenModel) {
completionParams.model = Config.qwenModel
}
const currentDate = new Date().toISOString().split('T')[0]
async function um (message) {
return await upsertMessage(message, 'QWEN')
}
async function gm (id) {
return await getMessageById(id, 'QWEN')
}
let opts = {
apiKey: Config.qwenApiKey,
debug: false,
upsertMessage: um,
getMessageById: gm,
systemMessage: `You are ${Config.assistantLabel} ${useCast?.api || Config.promptPrefixOverride || defaultPropmtPrefix}
Current date: ${currentDate}`,
completionParams,
assistantLabel: Config.assistantLabel,
fetch: newFetch
}
this.qwenApi = new QwenApi(opts)
let option = {
timeoutMs: 600000,
completionParams
}
if (conversation) {
if (!conversation.conversationId) {
conversation.conversationId = uuid()
}
option = Object.assign(option, conversation)
}
let msg
try {
msg = await this.qwenApi.sendMessage(prompt, option)
} catch (err) {
logger.error(err)
throw new Error(err)
}
return msg
}
case 'bard': {
// 处理cookie
const matchesPSID = /__Secure-1PSID=([^;]+)/.exec(Config.bardPsid)
@ -2146,6 +2239,9 @@ export class chatgpt extends plugin {
}
option.systemMessage = system
if (conversation) {
if (!conversation.conversationId) {
conversation.conversationId = uuid()
}
option = Object.assign(option, conversation)
}
if (Config.smartMode) {

View file

@ -124,6 +124,11 @@ export class ChatgptManagement extends plugin {
fnc: 'useBardBasedSolution',
permission: 'master'
},
{
reg: '^#chatgpt切换(通义千问|qwen|千问)$',
fnc: 'useQwenSolution',
permission: 'master'
},
{
reg: '^#chatgpt(必应|Bing)切换',
fnc: 'changeBingTone',
@ -289,9 +294,9 @@ ${userSetting.useTTS === true ? '当前语音模式为' + Config.ttsMode : ''}`
const matchCommand = e.msg.match(/^#(chatgpt)?(vits|azure|vox)?语音(服务|角色列表)/)
if (matchCommand[3] === '服务') {
await this.reply(`当前支持vox、vits、azure语音服务可使用'#(vox|azure|vits)语音角色列表'查看支持的语音角色。
vits语音主要有赛马娘原神中文原神日语崩坏 3 的音色结果有随机性语调可能很奇怪
vox语音Voicevox 是一款由日本 DeNA 开发的语音合成软件它可以将文本转换为自然流畅的语音Voicevox 支持多种语言和声音可以用于制作各种语音内容如动画游戏广告等Voicevox 还提供了丰富的调整选项可以调整声音的音调速度音量等参数以满足不同需求除了桌面版软件外Voicevox 还提供了 Web 版本和 API 接口方便开发者在各种平台上使用
azure语音Azure 语音是微软 Azure 平台提供的一项语音服务它可以帮助开发者将语音转换为文本将文本转换为语音实现自然语言理解和对话等功能Azure 语音支持多种语言和声音可以用于构建各种语音应用程序如智能客服语音助手自动化电话系统等Azure 语音还提供了丰富的 API SDK方便开发者在各种平台上集成使用
@ -864,6 +869,7 @@ azure语音Azure 语音是微软 Azure 平台提供的一项语音服务,
await this.reply('当前已经是星火模式了')
}
}
async useAzureBasedSolution () {
let use = await redis.get('CHATGPT:USE')
if (use !== 'azure') {
@ -884,6 +890,16 @@ azure语音Azure 语音是微软 Azure 平台提供的一项语音服务,
}
}
async useQwenSolution () {
let use = await redis.get('CHATGPT:USE')
if (use !== 'qwen') {
await redis.set('CHATGPT:USE', 'qwen')
await this.reply('已切换到基于通义千问的解决方案')
} else {
await this.reply('当前已经是通义千问模式了')
}
}
async changeBingTone (e) {
let tongStyle = e.msg.replace(/^#chatgpt(必应|Bing)切换/, '')
if (!tongStyle) {
@ -1275,9 +1291,9 @@ Poe 模式会调用 Poe 中的 Claude-instant 进行对话。需要提供 Cookie
const viewHost = Config.serverHost ? `http://${Config.serverHost}/` : `http://${await getPublicIP()}:${Config.serverPort || 3321}/`
const otp = randomString(6)
await redis.set(
`CHATGPT:SERVER_QUICK`,
otp,
{ EX: 60000 }
'CHATGPT:SERVER_QUICK',
otp,
{ EX: 60000 }
)
await this.reply(`请登录http://tools.alcedogroup.com/login?server=${viewHost}&otp=${otp}`, true)
}

View file

@ -741,6 +741,53 @@ export function supportGuoba () {
bottomHelpMessage: '开启后将通过反代访问bard',
component: 'Switch'
},
{
label: '以下为通义千问API方式的配置',
component: 'Divider'
},
{
field: 'qwenApiKey',
label: '通义千问API Key',
component: 'InputPassword'
},
{
field: 'qwenModel',
label: '通义千问模型',
bottomHelpMessage: '指明需要调用的模型,目前可选 qwen-turbo 和 qwen-plus',
component: 'Input'
},
{
field: 'qwenTopP',
label: '通义千问topP',
bottomHelpMessage: '生成时核采样方法的概率阈值。例如取值为0.8时仅保留累计概率之和大于等于0.8的概率分布中的token作为随机采样的候选集。取值范围为0,1.0),取值越大,生成的随机性越高;取值越低,生成的随机性越低。默认值 0.5。注意取值不要大于等于1',
component: 'InputNumber'
},
{
field: 'qwenTopK',
label: '通义千问topK',
bottomHelpMessage: '生成时采样候选集的大小。例如取值为50时仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大生成的随机性越高取值越小生成的确定性越高。注意如果top_k的值大于100top_k将采用默认值0表示不启用top_k策略此时仅有top_p策略生效。',
component: 'InputNumber'
},
{
field: 'qwenSeed',
label: '通义千问Seed',
bottomHelpMessage: '生成时随机数的种子用于控制模型生成的随机性。如果使用相同的种子每次运行生成的结果都将相同当需要复现模型的生成结果时可以使用相同的种子。seed参数支持无符号64位整数类型。默认值 0, 表示每次随机生成',
component: 'InputNumber'
},
{
field: 'qwenTemperature',
label: '通义千问温度',
bottomHelpMessage: '用于控制随机性和多样性的程度。具体来说temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值使得更多的低概率词被选择生成结果更加多样化而较低的temperature值则会增强概率分布的峰值使得高概率词更容易被选择生成结果更加确定。\n' +
'\n' +
'取值范围: (0, 2),系统默认值1.0',
component: 'InputNumber'
},
{
field: 'qwenEnableSearch',
label: '通义千问允许搜索',
bottomHelpMessage: '生成时是否参考夸克搜索的结果。注意打开搜索并不意味着一定会使用搜索结果如果打开搜索模型会将搜索结果作为prompt进而“自行判断”是否生成结合搜索结果的文本默认为false',
component: 'Switch'
},
{
label: '以下为杂七杂八的配置',
component: 'Divider'

394
utils/alibaba/qwen-api.js Normal file
View file

@ -0,0 +1,394 @@
// ---------------------------------------------------------------------------
// tsc-emitted downlevel helpers (compiled from qwen-api.ts). Do not hand-edit;
// regenerate by recompiling the TypeScript source.
// ---------------------------------------------------------------------------

// Object-spread helper: shallow-merges own enumerable properties
// (Object.assign shim for older runtimes).
var __assign = (this && this.__assign) || function () {
    __assign = Object.assign || function(t) {
        for (var s, i = 1, n = arguments.length; i < n; i++) {
            s = arguments[i];
            for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
                t[p] = s[p];
        }
        return t;
    };
    return __assign.apply(this, arguments);
};
// async/await helper: drives a compiled generator to completion as a Promise.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
// Generator state-machine helper: the `case N` labels inside compiled method
// bodies correspond to the positions between await points of the original
// TypeScript; [4, x] yields, [2, x] returns, [3, n] jumps to label n.
var __generator = (this && this.__generator) || function (thisArg, body) {
    var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
    return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
    function verb(n) { return function (v) { return step([n, v]); }; }
    function step(op) {
        if (f) throw new TypeError("Generator is already executing.");
        while (g && (g = 0, op[0] && (_ = 0)), _) try {
            if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
            if (y = 0, t) op = [op[0] & 2, t.value];
            switch (op[0]) {
                case 0: case 1: t = op; break;
                case 4: _.label++; return { value: op[1], done: false };
                case 5: _.label++; y = op[1]; op = [0]; continue;
                case 7: op = _.ops.pop(); _.trys.pop(); continue;
                default:
                    if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
                    if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
                    if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
                    if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
                    if (t[2]) _.ops.pop();
                    _.trys.pop(); continue;
            }
            op = body.call(thisArg, _);
        } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
        if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
    }
};
// Array-spread helper used by _buildMessages when prepending history messages.
var __spreadArray = (this && this.__spreadArray) || function (to, from, pack) {
    if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
        if (ar || !(i in from)) {
            if (!ar) ar = Array.prototype.slice.call(from, 0, i);
            ar[i] = from[i];
        }
    }
    return to.concat(ar || Array.prototype.slice.call(from));
};
import Keyv from 'keyv';
import pTimeout from 'p-timeout';
import QuickLRU from 'quick-lru';
import { v4 as uuidv4 } from 'uuid';
import * as tokenizer from './tokenizer.js';
import * as types from './types.js';
import globalFetch from 'node-fetch';
// Default DashScope model; 'qwen-plus' is the larger alternative.
var CHATGPT_MODEL = 'qwen-turbo'; // qwen-plus
var USER_LABEL_DEFAULT = 'User';
// Fix: the product name is 通义千问 (Tongyi Qianwen); original had the typo 同义千问.
// This label appears in the text-format prompt built by _buildMessages.
var ASSISTANT_LABEL_DEFAULT = '通义千问';
var QwenApi = /** @class */ (function () {
    /**
     * Creates a new client wrapper around Qwen's chat completion API, mimicing the official ChatGPT webapp's functionality as closely as possible.
     *
     * Compiled from qwen-api.ts. Validates the API key and fetch implementation,
     * installs default DashScope sampling parameters, and wires message
     * persistence callbacks (defaulting to an in-memory Keyv/QuickLRU store).
     *
     * @param opts - see types.QWenAPIOptions (apiKey required)
     */
    function QwenApi(opts) {
        var apiKey = opts.apiKey, _a = opts.apiBaseUrl, apiBaseUrl = _a === void 0 ? 'https://dashscope.aliyuncs.com/api/v1' : _a, _b = opts.debug, debug = _b === void 0 ? false : _b, messageStore = opts.messageStore, completionParams = opts.completionParams, parameters = opts.parameters, systemMessage = opts.systemMessage, getMessageById = opts.getMessageById, upsertMessage = opts.upsertMessage, _c = opts.fetch, fetch = _c === void 0 ? globalFetch : _c;
        this._apiKey = apiKey;
        this._apiBaseUrl = apiBaseUrl;
        this._debug = !!debug;
        this._fetch = fetch;
        // Defaults mirror the DashScope text-generation parameters; caller-supplied
        // `parameters` / `completionParams` override them key-by-key.
        this._completionParams = __assign({ model: CHATGPT_MODEL, parameters: __assign({ top_p: 0.5, top_k: 50, temperature: 1.0, seed: 114514, enable_search: true, result_format: "text", incremental_output: false }, parameters) }, completionParams);
        this._systemMessage = systemMessage;
        if (this._systemMessage === undefined) {
            var currentDate = new Date().toISOString().split('T')[0];
            this._systemMessage = "You are ChatGPT, a large language model trained by Qwen. Answer as concisely as possible.\nKnowledge cutoff: 2021-09-01\nCurrent date: ".concat(currentDate);
        }
        this._getMessageById = getMessageById !== null && getMessageById !== void 0 ? getMessageById : this._defaultGetMessageById;
        this._upsertMessage = upsertMessage !== null && upsertMessage !== void 0 ? upsertMessage : this._defaultUpsertMessage;
        if (messageStore) {
            this._messageStore = messageStore;
        }
        else {
            // In-memory LRU-backed fallback store for conversation messages.
            this._messageStore = new Keyv({
                store: new QuickLRU({ maxSize: 10000 })
            });
        }
        // NOTE(review): _maxModelTokens / _maxResponseTokens are read by
        // _buildMessages but never assigned anywhere — maxTokens computes to NaN.
        // Confirm whether defaults were intended.
        if (!this._apiKey) {
            throw new Error('Qwen missing required apiKey');
        }
        if (!this._fetch) {
            throw new Error('Invalid environment; fetch is not defined');
        }
        if (typeof this._fetch !== 'function') {
            throw new Error('Invalid "fetch" is not a function');
        }
    }
/**
* Sends a message to the Qwen chat completions endpoint, waits for the response
* to resolve, and returns the response.
*
* If you want your response to have historical context, you must provide a valid `parentMessageId`.
*
* If you want to receive a stream of partial responses, use `opts.onProgress`.
*
* Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the Qwen chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
*
* @param message - The prompt message to send
* @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
* @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)
* @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
* @param opts.systemMessage - Optional override for the chat "system message" which acts as instructions to the model (defaults to the ChatGPT system message)
* @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
* @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
* @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
* @param completionParams - Optional overrides to send to the [Qwen chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
*
* @returns The response from ChatGPT
*/
// Compiled async method; the __generator `case` labels below mark the await
// points of the original TypeScript sendMessage (see JSDoc above).
QwenApi.prototype.sendMessage = function (text, opts, role) {
    if (opts === void 0) { opts = {}; }
    if (role === void 0) { role = 'user'; }
    return __awaiter(this, void 0, void 0, function () {
        var parentMessageId, _a, messageId, timeoutMs, completionParams, conversationId, abortSignal, abortController, message, latestQuestion, _b, messages, maxTokens, numTokens, result, responseP;
        var _this = this;
        return __generator(this, function (_c) {
            switch (_c.label) {
                case 0:
                    parentMessageId = opts.parentMessageId, _a = opts.messageId, messageId = _a === void 0 ? uuidv4() : _a, timeoutMs = opts.timeoutMs, completionParams = opts.completionParams, conversationId = opts.conversationId;
                    abortSignal = opts.abortSignal;
                    abortController = null;
                    // A timeout without a caller-supplied signal gets its own
                    // AbortController so the HTTP request can be force-cancelled.
                    if (timeoutMs && !abortSignal) {
                        abortController = new AbortController();
                        abortSignal = abortController.signal;
                    }
                    message = {
                        role: role,
                        id: messageId,
                        conversationId: conversationId,
                        parentMessageId: parentMessageId,
                        text: text,
                    };
                    latestQuestion = message;
                    return [4 /*yield*/, this._buildMessages(text, role, opts, completionParams)];
                case 1:
                    _b = _c.sent(), messages = _b.messages, maxTokens = _b.maxTokens, numTokens = _b.numTokens;
                    console.log("maxTokens: ".concat(maxTokens, ", numTokens: ").concat(numTokens));
                    // Placeholder for the assistant reply; text/detail are filled in
                    // once the HTTP response arrives.
                    result = {
                        role: 'assistant',
                        id: uuidv4(),
                        conversationId: conversationId,
                        parentMessageId: messageId,
                        text: undefined,
                    };
                    // NOTE(review): mutates the client-wide completion params; concurrent
                    // sendMessage calls on one instance could interleave inputs — confirm.
                    this._completionParams.input = { messages: messages };
                    responseP = new Promise(function (resolve, reject) { return __awaiter(_this, void 0, void 0, function () {
                        var url, headers, body, res, reason, msg, error, response, err_1;
                        return __generator(this, function (_a) {
                            switch (_a.label) {
                                case 0:
                                    url = "".concat(this._apiBaseUrl, "/services/aigc/text-generation/generation");
                                    headers = {
                                        'Content-Type': 'application/json',
                                        Authorization: "Bearer ".concat(this._apiKey)
                                    };
                                    // Per-call overrides win over the instance-level params.
                                    body = __assign(__assign({}, this._completionParams), completionParams);
                                    if (this._debug) {
                                        console.log(JSON.stringify(body));
                                    }
                                    if (this._debug) {
                                        console.log("sendMessage (".concat(numTokens, " tokens)"), body);
                                    }
                                    _a.label = 1;
                                case 1:
                                    _a.trys.push([1, 6, , 7]);
                                    return [4 /*yield*/, this._fetch(url, {
                                            method: 'POST',
                                            headers: headers,
                                            body: JSON.stringify(body),
                                            signal: abortSignal
                                        })];
                                case 2:
                                    res = _a.sent();
                                    // Non-2xx: surface the response body in a ChatGPTError.
                                    if (!!res.ok) return [3 /*break*/, 4];
                                    return [4 /*yield*/, res.text()];
                                case 3:
                                    reason = _a.sent();
                                    msg = "Qwen error ".concat(res.status || res.statusText, ": ").concat(reason);
                                    error = new types.ChatGPTError(msg, { cause: res });
                                    error.statusCode = res.status;
                                    error.statusText = res.statusText;
                                    return [2 /*return*/, reject(error)];
                                case 4: return [4 /*yield*/, res.json()];
                                case 5:
                                    response = _a.sent();
                                    if (this._debug) {
                                        console.log(response);
                                    }
                                    // DashScope echoes a request_id; reuse it as the message id.
                                    if (response === null || response === void 0 ? void 0 : response.request_id) {
                                        result.id = response.request_id;
                                    }
                                    result.detail = response;
                                    result.text = response.output.text;
                                    return [2 /*return*/, resolve(result)];
                                case 6:
                                    err_1 = _a.sent();
                                    return [2 /*return*/, reject(err_1)];
                                case 7: return [2 /*return*/];
                            }
                        });
                    }); }).then(function (message) { return __awaiter(_this, void 0, void 0, function () {
                        return __generator(this, function (_a) {
                            // Persist both the question and the reply before resolving.
                            return [2 /*return*/, Promise.all([
                                    this._upsertMessage(latestQuestion),
                                    this._upsertMessage(message)
                                ]).then(function () { return message; })];
                        });
                    }); });
                    if (timeoutMs) {
                        if (abortController) {
                            // This will be called when a timeout occurs in order for us to forcibly
                            // ensure that the underlying HTTP request is aborted.
                            ;
                            responseP.cancel = function () {
                                abortController.abort();
                            };
                        }
                        return [2 /*return*/, pTimeout(responseP, {
                                milliseconds: timeoutMs,
                                message: 'Qwen timed out waiting for response'
                            })];
                    }
                    else {
                        return [2 /*return*/, responseP];
                    }
                    return [2 /*return*/];
            }
        });
    });
};
// Accessor pair for the DashScope API key; the setter permits key rotation
// at runtime without rebuilding the client.
Object.defineProperty(QwenApi.prototype, "apiKey", {
    get: function () {
        return this._apiKey;
    },
    set: function (apiKey) {
        this._apiKey = apiKey;
    },
    enumerable: false,
    configurable: true
});
// Builds the chat `messages` array sent to DashScope: system message first,
// then conversation history (walked backwards via parentMessageId from the
// message store), then the new user message. History is prepended one parent
// at a time until the token estimate would exceed maxNumTokens (6000).
// Returns { messages, maxTokens, numTokens }.
QwenApi.prototype._buildMessages = function (text, role, opts, completionParams) {
    return __awaiter(this, void 0, void 0, function () {
        var _a, systemMessage, parentMessageId, userLabel, assistantLabel, maxNumTokens, messages, systemMessageOffset, nextMessages, functionToken, numTokens, prompt_1, nextNumTokensEstimate, _i, nextMessages_1, m1, _b, isValidPrompt, parentMessage, parentMessageRole, maxTokens;
        return __generator(this, function (_c) {
            switch (_c.label) {
                case 0:
                    _a = opts.systemMessage, systemMessage = _a === void 0 ? this._systemMessage : _a;
                    parentMessageId = opts.parentMessageId;
                    userLabel = USER_LABEL_DEFAULT;
                    assistantLabel = ASSISTANT_LABEL_DEFAULT;
                    maxNumTokens = 6000;
                    messages = [];
                    if (systemMessage) {
                        messages.push({
                            role: 'system',
                            content: systemMessage
                        });
                    }
                    systemMessageOffset = messages.length;
                    nextMessages = text
                        ? messages.concat([
                            {
                                role: role,
                                content: text
                            }
                        ])
                        : messages;
                    functionToken = 0;
                    numTokens = functionToken;
                    _c.label = 1;
                case 1:
                    // Flatten candidate messages into a labelled text prompt purely
                    // to estimate its token count.
                    prompt_1 = nextMessages
                        .reduce(function (prompt, message) {
                        switch (message.role) {
                            case 'system':
                                return prompt.concat(["Instructions:\n".concat(message.content)]);
                            case 'user':
                                return prompt.concat(["".concat(userLabel, ":\n").concat(message.content)]);
                            default:
                                return message.content ? prompt.concat(["".concat(assistantLabel, ":\n").concat(message.content)]) : prompt;
                        }
                    }, [])
                        .join('\n\n');
                    return [4 /*yield*/, this._getTokenCount(prompt_1)];
                case 2:
                    nextNumTokensEstimate = _c.sent();
                    _i = 0, nextMessages_1 = nextMessages;
                    _c.label = 3;
                case 3:
                    // NOTE(review): this loop adds _getTokenCount('') (always 0) per
                    // message — likely vestigial per-message overhead accounting
                    // carried over from chatgpt-api; confirm intent.
                    if (!(_i < nextMessages_1.length)) return [3 /*break*/, 6];
                    m1 = nextMessages_1[_i];
                    _b = nextNumTokensEstimate;
                    return [4 /*yield*/, this._getTokenCount('')];
                case 4:
                    nextNumTokensEstimate = _b + _c.sent();
                    _c.label = 5;
                case 5:
                    _i++;
                    return [3 /*break*/, 3];
                case 6:
                    isValidPrompt = nextNumTokensEstimate + functionToken <= maxNumTokens;
                    // Stop growing history once the budget is exceeded; only commit
                    // `nextMessages` while it still fits.
                    if (prompt_1 && !isValidPrompt) {
                        return [3 /*break*/, 9];
                    }
                    messages = nextMessages;
                    numTokens = nextNumTokensEstimate + functionToken;
                    if (!isValidPrompt) {
                        return [3 /*break*/, 9];
                    }
                    if (!parentMessageId) {
                        return [3 /*break*/, 9];
                    }
                    return [4 /*yield*/, this._getMessageById(parentMessageId)];
                case 7:
                    parentMessage = _c.sent();
                    if (!parentMessage) {
                        return [3 /*break*/, 9];
                    }
                    parentMessageRole = parentMessage.role || 'user';
                    // Insert the parent message right after the system message,
                    // before the already-collected newer history.
                    nextMessages = nextMessages.slice(0, systemMessageOffset).concat(__spreadArray([
                        {
                            role: parentMessageRole,
                            content: parentMessage.text
                        }
                    ], nextMessages.slice(systemMessageOffset), true));
                    parentMessageId = parentMessage.parentMessageId;
                    _c.label = 8;
                case 8:
                    // Compiled do/while(true): jump back to the token-estimation pass.
                    if (true) return [3 /*break*/, 1];
                    _c.label = 9;
                case 9:
                    // NOTE(review): _maxModelTokens and _maxResponseTokens are never
                    // assigned on this class, so maxTokens evaluates to NaN (it is
                    // only logged by sendMessage) — confirm intended defaults.
                    maxTokens = Math.max(1, Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens));
                    return [2 /*return*/, { messages: messages, maxTokens: maxTokens, numTokens: numTokens }];
            }
        });
    });
};
// Counts tokens in `text` via the bundled tokenizer; empty/undefined input
// counts as zero. The end-of-text marker is stripped before encoding.
QwenApi.prototype._getTokenCount = function (text) {
    return __awaiter(this, void 0, void 0, function () {
        return __generator(this, function (_a) {
            if (!text) {
                return [2 /*return*/, 0];
            }
            // TODO: use a better fix in the tokenizer
            text = text.replace(/<\|endoftext\|>/g, '');
            return [2 /*return*/, tokenizer.encode(text).length];
        });
    });
};
// Default message loader used when the caller supplies no getMessageById:
// looks the id up in the in-memory Keyv store.
QwenApi.prototype._defaultGetMessageById = function (id) {
    return __awaiter(this, void 0, void 0, function () {
        var res;
        return __generator(this, function (_a) {
            switch (_a.label) {
                case 0: return [4 /*yield*/, this._messageStore.get(id)];
                case 1:
                    res = _a.sent();
                    return [2 /*return*/, res];
            }
        });
    });
};
/**
 * Default message persister used when the caller supplies no upsertMessage:
 * stores the message in the in-memory Keyv store keyed by its id, so that
 * _defaultGetMessageById(parentMessageId) can reload conversation history.
 *
 * Fix: the original keyed the store by `message.request_id`, which is never
 * set on the ChatMessage objects built in sendMessage (they carry `id`; the
 * DashScope request_id is copied into `result.id`). Every message was
 * therefore stored under `undefined` and history lookups always missed.
 */
QwenApi.prototype._defaultUpsertMessage = async function (message) {
    await this._messageStore.set(message.id, message);
};
return QwenApi;
}());
export { QwenApi };

382
utils/alibaba/qwen-api.ts Normal file
View file

@ -0,0 +1,382 @@
import Keyv from 'keyv'
import pTimeout from 'p-timeout'
import QuickLRU from 'quick-lru'
import { v4 as uuidv4 } from 'uuid'
import * as tokenizer from './tokenizer'
import * as types from './types'
import globalFetch from 'node-fetch'
import {qwen, Role} from "./types";
// Default DashScope model; 'qwen-plus' is the larger alternative.
const CHATGPT_MODEL = 'qwen-turbo' // qwen-plus
const USER_LABEL_DEFAULT = 'User'
// Fix: the product name is 通义千问 (Tongyi Qianwen); original had the typo 同义千问.
// This label appears in the text-format prompt built by _buildMessages.
const ASSISTANT_LABEL_DEFAULT = '通义千问'
export class QwenApi {
protected _apiKey: string
protected _apiBaseUrl: string
protected _debug: boolean
protected _systemMessage: string
protected _completionParams: Omit<
types.qwen.CreateChatCompletionRequest,
'messages' | 'n'
>
protected _maxModelTokens: number
protected _maxResponseTokens: number
protected _fetch: types.FetchFn
protected _getMessageById: types.GetMessageByIdFunction
protected _upsertMessage: types.UpsertMessageFunction
protected _messageStore: Keyv<types.ChatMessage>
/**
* Creates a new client wrapper around Qwen's chat completion API, mimicing the official ChatGPT webapp's functionality as closely as possible.
*
* @param opts
*/
/**
 * Builds a Qwen (DashScope) client: validates the API key and fetch
 * implementation, installs default sampling parameters, and wires message
 * persistence callbacks (defaulting to an in-memory Keyv/QuickLRU store).
 *
 * NOTE(review): _maxModelTokens / _maxResponseTokens are declared on the
 * class but never assigned here, so _buildMessages' maxTokens computes to
 * NaN — confirm whether defaults were intended.
 *
 * @param opts - client options; `apiKey` is required
 */
constructor(opts: types.QWenAPIOptions) {
  const {
    apiKey,
    apiBaseUrl = 'https://dashscope.aliyuncs.com/api/v1',
    debug = false,
    messageStore,
    completionParams,
    parameters,
    systemMessage,
    getMessageById,
    upsertMessage,
    fetch = globalFetch
  } = opts
  this._apiKey = apiKey
  this._apiBaseUrl = apiBaseUrl
  this._debug = !!debug
  this._fetch = fetch
  // Defaults mirror the DashScope text-generation parameters; caller-supplied
  // `parameters` / `completionParams` override them key-by-key.
  this._completionParams = {
    model: CHATGPT_MODEL,
    parameters: {
      top_p: 0.5,
      top_k: 50,
      temperature: 1.0,
      seed: 114514,
      enable_search: true,
      result_format: "text",
      incremental_output: false,
      ...parameters
    },
    ...completionParams
  }
  this._systemMessage = systemMessage
  if (this._systemMessage === undefined) {
    const currentDate = new Date().toISOString().split('T')[0]
    this._systemMessage = `You are ChatGPT, a large language model trained by Qwen. Answer as concisely as possible.\nKnowledge cutoff: 2021-09-01\nCurrent date: ${currentDate}`
  }
  this._getMessageById = getMessageById ?? this._defaultGetMessageById
  this._upsertMessage = upsertMessage ?? this._defaultUpsertMessage
  if (messageStore) {
    this._messageStore = messageStore
  } else {
    // In-memory LRU-backed fallback store for conversation messages.
    this._messageStore = new Keyv<types.ChatMessage, any>({
      store: new QuickLRU<string, types.ChatMessage>({ maxSize: 10000 })
    })
  }
  if (!this._apiKey) {
    throw new Error('Qwen missing required apiKey')
  }
  if (!this._fetch) {
    throw new Error('Invalid environment; fetch is not defined')
  }
  if (typeof this._fetch !== 'function') {
    throw new Error('Invalid "fetch" is not a function')
  }
}
/**
* Sends a message to the Qwen chat completions endpoint, waits for the response
* to resolve, and returns the response.
*
* If you want your response to have historical context, you must provide a valid `parentMessageId`.
*
* If you want to receive a stream of partial responses, use `opts.onProgress`.
*
* Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the Qwen chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
*
* @param message - The prompt message to send
* @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
* @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)
* @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
* @param opts.systemMessage - Optional override for the chat "system message" which acts as instructions to the model (defaults to the ChatGPT system message)
* @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
* @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
* @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
* @param completionParams - Optional overrides to send to the [Qwen chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
*
* @returns The response from ChatGPT
*/
async sendMessage(
text: string,
opts: types.SendMessageOptions = {},
role: Role = 'user',
): Promise<types.ChatMessage> {
const {
parentMessageId,
messageId = uuidv4(),
timeoutMs,
completionParams,
conversationId
} = opts
let { abortSignal } = opts
let abortController: AbortController = null
if (timeoutMs && !abortSignal) {
abortController = new AbortController()
abortSignal = abortController.signal
}
const message: types.ChatMessage = {
role,
id: messageId,
conversationId,
parentMessageId,
text,
}
const latestQuestion = message
const { messages, maxTokens, numTokens } = await this._buildMessages(
text,
role,
opts,
completionParams
)
console.log(`maxTokens: ${maxTokens}, numTokens: ${numTokens}`)
const result: types.ChatMessage = {
role: 'assistant',
id: uuidv4(),
conversationId,
parentMessageId: messageId,
text: undefined,
}
this._completionParams.input = { messages }
const responseP = new Promise<types.ChatMessage>(
async (resolve, reject) => {
const url = `${this._apiBaseUrl}/services/aigc/text-generation/generation`
const headers = {
'Content-Type': 'application/json',
Authorization: `Bearer ${this._apiKey}`
}
const body = {
...this._completionParams,
...completionParams
}
if (this._debug) {
console.log(JSON.stringify(body))
}
if (this._debug) {
console.log(`sendMessage (${numTokens} tokens)`, body)
}
try {
const res = await this._fetch(url, {
method: 'POST',
headers,
body: JSON.stringify(body),
signal: abortSignal
})
if (!res.ok) {
const reason = await res.text()
const msg = `Qwen error ${
res.status || res.statusText
}: ${reason}`
const error = new types.ChatGPTError(msg, { cause: res })
error.statusCode = res.status
error.statusText = res.statusText
return reject(error)
}
const response: types.qwen.CreateChatCompletionResponse =
await res.json()
if (this._debug) {
console.log(response)
}
if (response?.request_id) {
result.id = response.request_id
}
result.detail = response
result.text = response.output.text
return resolve(result)
} catch (err) {
return reject(err)
}
}
).then(async (message) => {
return Promise.all([
this._upsertMessage(latestQuestion),
this._upsertMessage(message)
]).then(() => message)
})
if (timeoutMs) {
if (abortController) {
// This will be called when a timeout occurs in order for us to forcibly
// ensure that the underlying HTTP request is aborted.
;(responseP as any).cancel = () => {
abortController.abort()
}
}
return pTimeout(responseP, {
milliseconds: timeoutMs,
message: 'Qwen timed out waiting for response'
})
} else {
return responseP
}
}
get apiKey(): string {
return this._apiKey
}
set apiKey(apiKey: string) {
this._apiKey = apiKey
}
protected async _buildMessages(text: string, role: Role, opts: types.SendMessageOptions, completionParams: Partial<
Omit<qwen.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
>) {
const { systemMessage = this._systemMessage } = opts
let { parentMessageId } = opts
const userLabel = USER_LABEL_DEFAULT
const assistantLabel = ASSISTANT_LABEL_DEFAULT
// fix number of qwen
const maxNumTokens = 6000
let messages: types.qwen.ChatCompletionRequestMessage[] = []
if (systemMessage) {
messages.push({
role: 'system',
content: systemMessage
})
}
const systemMessageOffset = messages.length
let nextMessages = text
? messages.concat([
{
role,
content: text
}
])
: messages
let functionToken = 0
let numTokens = functionToken
do {
const prompt = nextMessages
.reduce((prompt, message) => {
switch (message.role) {
case 'system':
return prompt.concat([`Instructions:\n${message.content}`])
case 'user':
return prompt.concat([`${userLabel}:\n${message.content}`])
default:
return message.content ? prompt.concat([`${assistantLabel}:\n${message.content}`]) : prompt
}
}, [] as string[])
.join('\n\n')
let nextNumTokensEstimate = await this._getTokenCount(prompt)
for (const m1 of nextMessages) {
nextNumTokensEstimate += await this._getTokenCount('')
}
const isValidPrompt = nextNumTokensEstimate + functionToken <= maxNumTokens
if (prompt && !isValidPrompt) {
break
}
messages = nextMessages
numTokens = nextNumTokensEstimate + functionToken
if (!isValidPrompt) {
break
}
if (!parentMessageId) {
break
}
const parentMessage = await this._getMessageById(parentMessageId)
if (!parentMessage) {
break
}
const parentMessageRole = parentMessage.role || 'user'
nextMessages = nextMessages.slice(0, systemMessageOffset).concat([
{
role: parentMessageRole,
content: parentMessage.text
},
...nextMessages.slice(systemMessageOffset)
])
parentMessageId = parentMessage.parentMessageId
} while (true)
// Use up to 4096 tokens (prompt + response), but try to leave 1000 tokens
// for the response.
const maxTokens = Math.max(
1,
Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens)
)
return { messages, maxTokens, numTokens }
}
protected async _getTokenCount(text: string) {
if (!text) {
return 0
}
// TODO: use a better fix in the tokenizer
text = text.replace(/<\|endoftext\|>/g, '')
return tokenizer.encode(text).length
}
protected async _defaultGetMessageById(
id: string
): Promise<types.ChatMessage> {
const res = await this._messageStore.get(id)
return res
}
protected async _defaultUpsertMessage(
message: types.ChatMessage
): Promise<void> {
await this._messageStore.set(message.request_id, message)
}
}

View file

@ -0,0 +1,6 @@
import { getEncoding } from 'js-tiktoken';

// Shared module-level encoder instance.
// TODO: make this configurable
const cl100kEncoder = getEncoding('cl100k_base');

/** Tokenize `input` and return the token ids as a Uint32Array. */
export function encode(input) {
    const ids = cl100kEncoder.encode(input);
    return new Uint32Array(ids);
}

View file

@ -0,0 +1,8 @@
import { getEncoding } from 'js-tiktoken'

// Shared module-level encoder instance.
// TODO: make this configurable
const cl100kEncoder = getEncoding('cl100k_base')

/** Tokenize `input` and return the token ids as a Uint32Array. */
export function encode (input: string): Uint32Array {
  const ids = cl100kEncoder.encode(input)
  return new Uint32Array(ids)
}

View file

@ -0,0 +1,5 @@
{
"compilerOptions": {
"module": "es2020"
}
}

26
utils/alibaba/types.js Normal file
View file

@ -0,0 +1,26 @@
// NOTE(review): this file appears to be compiler output (ES5 downlevel) of
// types.ts — prefer editing types.ts and regenerating over hand-editing.

// TypeScript-emitted `__extends` helper: wires up prototype-chain
// inheritance for down-leveled `class ... extends ...` declarations.
var __extends = (this && this.__extends) || (function () {
    var extendStatics = function (d, b) {
        extendStatics = Object.setPrototypeOf ||
            ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
            function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
        return extendStatics(d, b);
    };
    return function (d, b) {
        if (typeof b !== "function" && b !== null)
            throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
        extendStatics(d, b);
        function __() { this.constructor = d; }
        d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
    };
})();
// Error subclass used for non-2xx API responses; status fields are attached
// by the caller (see qwen-api).
var ChatGPTError = /** @class */ (function (_super) {
    __extends(ChatGPTError, _super);
    function ChatGPTError() {
        return _super !== null && _super.apply(this, arguments) || this;
    }
    return ChatGPTError;
}(Error));
export { ChatGPTError };
// Runtime placeholder for the `qwen` namespace — its members in types.ts are
// type-only and erased at compile time.
export var qwen;
(function (qwen) {
})(qwen || (qwen = {}));

313
utils/alibaba/types.ts Normal file
View file

@ -0,0 +1,313 @@
import Keyv from 'keyv'
// Chat participant roles accepted by the Qwen API.
export type Role = 'user' | 'assistant' | 'system'

// Any fetch-compatible implementation (node-fetch is used by default).
export type FetchFn = typeof fetch

/** Constructor options for QwenApi. */
export type QWenAPIOptions = {
  // DashScope API key (required).
  apiKey: string

  /** @defaultValue `'https://dashscope.aliyuncs.com/api/v1'` **/
  apiBaseUrl?: string

  // NOTE(review): appears unused by QwenApi — presumably carried over from
  // the OpenAI client options; confirm before relying on it.
  apiOrg?: string

  /** @defaultValue `false` **/
  debug?: boolean

  // Request-body defaults merged into every completion request.
  completionParams?: Partial<
    Omit<qwen.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
  >

  // Sampling/generation parameters merged into the request `parameters`.
  parameters?: qwen.QWenParameters,

  // Override for the default system prompt.
  systemMessage?: string

  // Backing store for conversation messages (defaults to an in-memory LRU).
  messageStore?: Keyv

  getMessageById?: GetMessageByIdFunction
  upsertMessage?: UpsertMessageFunction
  fetch?: FetchFn
}
/** Options accepted by QwenApi.sendMessage. */
export type SendMessageOptions = {
  /**
   * function role name
   */
  name?: string
  messageId?: string
  // NOTE(review): streaming does not appear to be implemented by
  // QwenApi.sendMessage — confirm before setting.
  stream?: boolean
  systemMessage?: string
  // ID of the previous message; required for conversational context.
  parentMessageId?: string
  conversationId?: string
  timeoutMs?: number
  onProgress?: (partialResponse: ChatMessage) => void
  abortSignal?: AbortSignal
  // Per-call overrides for the completion request body.
  completionParams?: Partial<
    Omit<qwen.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
  >
}

export type MessageActionType = 'next' | 'variant'

// Options for the browser-backed client variant.
export type SendMessageBrowserOptions = {
  conversationId?: string
  parentMessageId?: string
  messageId?: string
  action?: MessageActionType
  timeoutMs?: number
  onProgress?: (partialResponse: ChatMessage) => void
  abortSignal?: AbortSignal
}
/** A single message in a conversation, as stored in the message store. */
export interface ChatMessage {
  id: string
  text: string
  role: Role
  parentMessageId?: string
  conversationId?: string
  // Raw API response attached by sendMessage for callers that need it.
  detail?:
    | qwen.CreateChatCompletionResponse
    | CreateChatCompletionStreamResponse
}

// Error raised for non-2xx API responses; HTTP status details are attached
// by the caller.
export class ChatGPTError extends Error {
  statusCode?: number
  statusText?: string
  isFinal?: boolean
  accountId?: string
}

/** Returns a chat message from a store by its ID (or null if not found). */
export type GetMessageByIdFunction = (id: string) => Promise<ChatMessage>

/** Upserts a chat message to a store. */
export type UpsertMessageFunction = (message: ChatMessage) => Promise<void>
export interface CreateChatCompletionStreamResponse
extends openai.CreateChatCompletionDeltaResponse {
usage: CreateCompletionStreamResponseUsage
}
export interface CreateCompletionStreamResponseUsage
extends openai.CreateCompletionResponseUsage {
estimated: true
}
/**
 * https://chat.openapi.com/backend-api/conversation
 *
 * NOTE(review): the types from here down to MessageMetadata describe the
 * ChatGPT webapp conversation protocol and do not appear to be used by the
 * Qwen client in this module — presumably copied from the upstream
 * chatgpt-api types; confirm before removing.
 */
export type ConversationJSONBody = {
  /**
   * The action to take
   */
  action: string

  /**
   * The ID of the conversation
   */
  conversation_id?: string

  /**
   * Prompts to provide
   */
  messages: Prompt[]

  /**
   * The model to use
   */
  model: string

  /**
   * The parent message ID
   */
  parent_message_id: string
}

export type Prompt = {
  /**
   * The content of the prompt
   */
  content: PromptContent

  /**
   * The ID of the prompt
   */
  id: string

  /**
   * The role played in the prompt
   */
  role: Role
}

export type ContentType = 'text'

export type PromptContent = {
  /**
   * The content type of the prompt
   */
  content_type: ContentType

  /**
   * The parts to the prompt
   */
  parts: string[]
}

// One server-sent event of a conversation response.
export type ConversationResponseEvent = {
  message?: Message
  conversation_id?: string
  error?: string | null
}

export type Message = {
  id: string
  content: MessageContent
  role: Role
  user: string | null
  create_time: string | null
  update_time: string | null
  end_turn: null
  weight: number
  recipient: string
  metadata: MessageMetadata
}

export type MessageContent = {
  content_type: string
  parts: string[]
}

export type MessageMetadata = any
export namespace qwen {
  // Shape of an (OpenAI-compatible) streaming delta chunk. NOTE(review):
  // streaming does not appear to be implemented by the Qwen client here.
  export interface CreateChatCompletionDeltaResponse {
    id: string
    object: 'chat.completion.chunk'
    created: number
    model: string
    choices: [
      {
        delta: {
          role: Role
          content?: string,
          function_call?: {name: string, arguments: string}
        }
        index: number
        finish_reason: string | null
      }
    ]
  }

  /**
   * One entry of the conversation history sent to the model.
   * @export
   * @interface ChatCompletionRequestMessage
   */
  export interface ChatCompletionRequestMessage {
    /**
     * The role of the author of this message.
     * @type {string}
     * @memberof ChatCompletionRequestMessage
     */
    role: ChatCompletionRequestMessageRoleEnum

    /**
     * The contents of the message
     * @type {string}
     * @memberof ChatCompletionRequestMessage
     */
    content: string
  }

  export declare const ChatCompletionRequestMessageRoleEnum: {
    readonly System: 'system'
    readonly User: 'user'
    readonly Assistant: 'assistant'
  }

  export declare type ChatCompletionRequestMessageRoleEnum =
    (typeof ChatCompletionRequestMessageRoleEnum)[keyof typeof ChatCompletionRequestMessageRoleEnum]

  // Request `input` payload: the conversation history.
  export interface QWenInput {
    messages: Array<ChatCompletionRequestMessage>
  }

  // DashScope sampling / generation parameters.
  export interface QWenParameters {
    result_format: string
    top_p: number
    top_k: number
    seed: number
    temperature: number
    enable_search: boolean
    incremental_output: boolean
  }

  /**
   * Body of a DashScope text-generation request.
   * @export
   * @interface CreateChatCompletionRequest
   */
  export interface CreateChatCompletionRequest {
    /**
     * ID of the model to use, e.g. `qwen-turbo` or `qwen-plus`.
     * (The original comment claimed only `gpt-3.5-turbo` was supported —
     * that was copied from the OpenAI client and was wrong here.)
     * @type {string}
     * @memberof CreateChatCompletionRequest
     */
    model: string

    /**
     * The messages to generate chat completions for.
     * @type {Array<ChatCompletionRequestMessage>}
     * @memberof CreateChatCompletionRequest
     */
    input?: QWenInput

    parameters: QWenParameters
  }

  /**
   * Body of a DashScope text-generation response.
   * @export
   * @interface CreateChatCompletionResponse
   */
  export interface CreateChatCompletionResponse {
    /**
     * Unique id assigned to the request by the server.
     * @type {string}
     * @memberof CreateChatCompletionResponse
     */
    request_id: string

    /**
     * Generated text plus the reason generation stopped.
     * @type {QWenOutput}
     * @memberof CreateChatCompletionResponse
     */
    output: QWenOutput

    /**
     * Token usage accounting, when provided by the server.
     * @type {CreateCompletionResponseUsage}
     * @memberof CreateChatCompletionResponse
     */
    usage?: CreateCompletionResponseUsage
  }

  export interface QWenOutput {
    finish_reason: string
    text: string
  }

  /**
   * Token usage accounting for one completion.
   * @export
   * @interface CreateCompletionResponseUsage
   */
  export interface CreateCompletionResponseUsage {
    /**
     * Tokens consumed by the prompt.
     * @type {number}
     * @memberof CreateCompletionResponseUsage
     */
    input_tokens: number

    /**
     * Tokens produced in the response.
     * @type {number}
     * @memberof CreateCompletionResponseUsage
     */
    output_tokens: number
  }
}

View file

@ -73,12 +73,18 @@ export function randomString (length = 5) {
return str.substr(0, length)
}
export async function upsertMessage (message) {
await redis.set(`CHATGPT:MESSAGE:${message.id}`, JSON.stringify(message))
// Persist a chat message in redis, optionally scoped by a suffix so that
// different backends keep separate message namespaces.
export async function upsertMessage (message, suffix = '') {
  const scope = suffix ? '_' + suffix : ''
  await redis.set(`CHATGPT:MESSAGE${scope}:${message.id}`, JSON.stringify(message))
}
export async function getMessageById (id) {
let messageStr = await redis.get(`CHATGPT:MESSAGE:${id}`)
// Fetch a chat message from redis using the same optionally-suffixed key
// scheme as upsertMessage; resolves to null when the key is absent.
export async function getMessageById (id, suffix = '') {
  const scope = suffix ? '_' + suffix : ''
  const raw = await redis.get(`CHATGPT:MESSAGE${scope}:${id}`)
  return JSON.parse(raw)
}

View file

@ -154,7 +154,15 @@ const defaultConfig = {
claudeAIUA: 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
// trss配置
trssBotUin: '',
version: 'v2.7.6'
// 通义千问
qwenApiKey: '',
qwenModel: 'qwen-turbo',
qwenTopP: 0.5,
qwenTopK: 50,
qwenSeed: 0,
qwenTemperature: 1,
qwenEnableSearch: true,
version: 'v2.7.7'
}
const _path = process.cwd()
let config = {}