feat: bym.js support multiple models
Commit 69ab6dcd28 (parent f7030e8427)
12 changed files with 471 additions and 668 deletions
apps/bym.js (85 changes)
@@ -19,6 +19,7 @@ import { SetTitleTool } from '../utils/tools/SetTitleTool.js'
import { SerpTool } from '../utils/tools/SerpTool.js'
import { SendMessageToSpecificGroupOrUserTool } from '../utils/tools/SendMessageToSpecificGroupOrUserTool.js'
import { customSplitRegex, filterResponseChunk } from '../utils/text.js'
import core from '../model/core.js'

export class bym extends plugin {
  constructor () {
@@ -44,32 +45,6 @@ export class bym extends plugin {
    if (!Config.enableBYM) {
      return false
    }
    let opt = {
      maxOutputTokens: 500,
      temperature: 1,
      replyPureTextCallback: msg => {
        msg = filterResponseChunk(msg)
        msg && e.reply(msg)
      }
    }
    let imgs = await getImg(e)
    if (!e.msg) {
      if (imgs && imgs.length > 0) {
        let image = imgs[0]
        const response = await fetch(image)
        const base64Image = Buffer.from(await response.arrayBuffer())
        opt.image = base64Image.toString('base64')
        e.msg = '[图片]'
      } else {
        return
      }
    }
    if (!opt.image && imgs && imgs.length > 0) {
      let image = imgs[0]
      const response = await fetch(image)
      const base64Image = Buffer.from(await response.arrayBuffer())
      opt.image = base64Image.toString('base64')
    }
    let sender = e.sender.user_id
    let card = e.sender.card || e.sender.nickname
    let group = e.group_id
@@ -84,14 +59,14 @@ export class bym extends plugin {

    let fuck = false
    let candidate = Config.bymPreset
    if (Config.bymFuckList?.find(i => e.msg.includes(i))) {
    if (Config.bymFuckList?.find(i => e.msg?.includes(i))) {
      fuck = true
      candidate = candidate + Config.bymFuckPrompt
    }
    if (prop < Config.bymRate) {
      logger.info('random chat hit')
      let chats = await getChatHistoryGroup(e, 20)
      opt.system = `你的名字是“${Config.assistantLabel}”,你在一个qq群里,群号是${group},当前和你说话的人群名片是${card}, qq号是${sender}, 请你结合用户的发言和聊天记录作出回应,要求表现得随性一点,最好参与讨论,混入其中。不要过分插科打诨,不知道说什么可以复读群友的话。要求你做搜索、发图、发视频和音乐等操作时要使用工具。不可以直接发[图片]这样蒙混过关。要求优先使用中文进行对话。如果此时不需要自己说话,可以只回复<EMPTY>` +
      let system = `你的名字是“${Config.assistantLabel}”,你在一个qq群里,群号是${group},当前和你说话的人群名片是${card}, qq号是${sender}, 请你结合用户的发言和聊天记录作出回应,要求表现得随性一点,最好参与讨论,混入其中。不要过分插科打诨,不知道说什么可以复读群友的话。要求你做搜索、发图、发视频和音乐等操作时要使用工具。不可以直接发[图片]这样蒙混过关。要求优先使用中文进行对话。如果此时不需要自己说话,可以只回复<EMPTY>` +
        candidate +
        '以下是聊天记录:' + chats
          .map(chat => {
@@ -101,44 +76,24 @@ export class bym extends plugin {
          .join('\n') +
        `\n你的回复应该尽可能简练,像人类一样随意,不要附加任何奇怪的东西,如聊天记录的格式(比如${Config.assistantLabel}:),禁止重复聊天记录。`

      let client = new CustomGoogleGeminiClient({
        e,
        userId: e.sender.user_id,
        key: Config.getGeminiKey(),
        model: Config.geminiModel,
        baseUrl: Config.geminiBaseUrl,
        debug: Config.debug
      let rsp = await core.sendMessage(e.msg, {}, Config.bymMode, e, {
        enableSmart: true,
        system: {
          api: system,
          qwen: system,
          bing: system,
          claude: system,
          claude2: system,
          gemini: system
        },
        settings: {
          replyPureTextCallback: msg => {
            msg = filterResponseChunk(msg)
            msg && e.reply(msg)
          }
        }
      })
      /**
       * tools
       * @type {(AbstractTool)[]}
       */
      const tools = [
        new SearchVideoTool(),
        new SerpImageTool(),
        new SearchMusicTool(),
        new SendAvatarTool(),
        new SendVideoTool(),
        new SendMusicTool(),
        new SendPictureTool(),
        new WebsiteTool(),
        new WeatherTool(),
        new SendMessageToSpecificGroupOrUserTool()
      ]
      if (Config.azSerpKey) {
        tools.push(new SerpTool())
      }
      if (e.group.is_admin || e.group.is_owner) {
        tools.push(new EditCardTool())
        tools.push(new JinyanTool())
        tools.push(new KickOutTool())
      }
      if (e.group.is_owner) {
        tools.push(new SetTitleTool())
      }
      client.addTools(tools)
      // console.log(JSON.stringify(opt))
      let rsp = await client.sendMessage(e.msg, opt)
      // let rsp = await client.sendMessage(e.msg, opt)
      let text = rsp.text
      let texts = customSplitRegex(text, /(?<!\?)[。?\n](?!\?)/, 3)
      // let texts = text.split(/(?<!\?)[。?\n](?!\?)/, 3)
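A minimal sketch (not part of the diff) of the new call shape introduced above: apps/bym.js no longer constructs a CustomGoogleGeminiClient itself, it hands the persona prompt to core.sendMessage and lets Config.bymMode pick the backend. Every name below (core, Config, e, system, filterResponseChunk) is already imported or defined in apps/bym.js.

// Sketch: the same persona prompt is fanned out under every backend key,
// so whichever model Config.bymMode selects ('gemini', 'qwen', 'api', 'xh',
// 'claude') receives it.
let rsp = await core.sendMessage(e.msg, {}, Config.bymMode, e, {
  enableSmart: true, // tools stay on for bym mode, independent of Config.smartMode
  system: { api: system, qwen: system, bing: system, claude: system, claude2: system, gemini: system },
  settings: {
    replyPureTextCallback: msg => {
      msg = filterResponseChunk(msg) // drop empty/placeholder chunks before replying
      msg && e.reply(msg)
    }
  }
})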
@@ -1,196 +0,0 @@
import { BaseClient } from './BaseClient.js'
import slack from '@slack/bolt'
// import { limitString } from '../utils/common.js'
// import common from '../../../lib/common/common.js'
import { getProxy } from '../utils/proxy.js'
const proxy = getProxy()
const common = {
  sleep: function (ms) {
    return new Promise((resolve) => setTimeout(resolve, ms))
  }
}

/**
 * 失败品
 */
export class SlackCozeClient {
  constructor (props) {
    this.config = props
    const {
      slackSigningSecret, slackBotUserToken, slackUserToken, proxy: proxyAddr, debug
    } = props
    if (slackSigningSecret && slackBotUserToken && slackUserToken) {
      let option = {
        signingSecret: slackSigningSecret,
        token: slackBotUserToken,
        // socketMode: true,
        appToken: slackUserToken
        // port: 45912
      }
      if (proxyAddr) {
        option.agent = proxy(proxyAddr)
      }
      option.logLevel = debug ? 'debug' : 'info'
      this.app = new slack.App(option)
    } else {
      throw new Error('未配置Slack信息')
    }
  }

  async sendMessage (prompt, e, t = 0) {
    if (t > 10) {
      return 'claude 未响应'
    }
    if (prompt.length > 3990) {
      logger.warn('消息长度大于slack限制,长度剪切至3990')
      function limitString (str, maxLength, addDots = true) {
        if (str.length <= maxLength) {
          return str
        } else {
          if (addDots) {
            return str.slice(0, maxLength) + '...'
          } else {
            return str.slice(0, maxLength)
          }
        }
      }
      prompt = limitString(prompt, 3990, false)
    }
    let channel
    let qq = e.sender.user_id
    if (this.config.slackCozeSpecifiedChannel) {
      channel = { id: this.config.slackCozeSpecifiedChannel }
    } else {
      let channels = await this.app.client.conversations.list({
        token: this.config.slackUserToken,
        types: 'public_channel,private_channel'
      })
      channel = channels.channels.filter(c => c.name === 'coze' + qq)
      if (!channel || channel.length === 0) {
        let createChannelResponse = await this.app.client.conversations.create({
          token: this.config.slackUserToken,
          name: 'coze' + qq,
          is_private: true
        })
        channel = createChannelResponse.channel
        await this.app.client.conversations.invite({
          token: this.config.slackUserToken,
          channel: channel.id,
          users: this.config.slackCozeUserId
        })
        await common.sleep(1000)
      } else {
        channel = channel[0]
      }
    }
    let conversationId = await redis.get(`CHATGPT:SLACK_COZE_CONVERSATION:${qq}`)
    let toSend = `<@${this.config.slackCozeUserId}> ${prompt}`
    if (!conversationId) {
      let sendResponse = await this.app.client.chat.postMessage({
        as_user: true,
        text: toSend,
        token: this.config.slackUserToken,
        channel: channel.id
      })
      let ts = sendResponse.ts
      let response = toSend
      let tryTimes = 0
      // 发完先等3喵
      await common.sleep(3000)
      while (response === toSend) {
        let replies = await this.app.client.conversations.replies({
          token: this.config.slackUserToken,
          channel: channel.id,
          limit: 1000,
          ts
        })
        await await redis.set(`CHATGPT:SLACK_COZE_CONVERSATION:${qq}`, `${ts}`)
        if (replies.messages.length > 0) {
          let formalMessages = replies.messages
          let reply = formalMessages[formalMessages.length - 1]
          if (!reply.text.startsWith(`<@${this.config.slackCozeUserId}>`)) {
            response = reply.text
            if (this.config.debug) {
              let text = response.replace('_Typing…_', '')
              if (text) {
                logger.info(response.replace('_Typing…_', ''))
              }
            }
          }
        }
        await common.sleep(2000)
        tryTimes++
        if (tryTimes > 30 && response === toSend) {
          // 过了60秒还没任何回复,就重新发一下试试
          logger.warn('claude没有响应,重试中')
          return await this.sendMessage(prompt, e, t + 1)
        }
      }
      return response
    } else {
      let toSend = `<@${this.config.slackCozeUserId}> ${prompt}`
      let postResponse = await this.app.client.chat.postMessage({
        as_user: true,
        text: toSend,
        token: this.config.slackUserToken,
        channel: channel.id,
        thread_ts: conversationId
      })
      let postTs = postResponse.ts
      let response = toSend
      let tryTimes = 0
      // 发完先等3喵
      await common.sleep(3000)
      while (response === toSend) {
        let replies = await this.app.client.conversations.replies({
          token: this.config.slackUserToken,
          channel: channel.id,
          limit: 1000,
          ts: conversationId,
          oldest: postTs
        })

        if (replies.messages.length > 0) {
          let formalMessages = replies.messages
          let reply = formalMessages[formalMessages.length - 1]
          if (!reply.text.startsWith(`<@${this.config.slackCozeUserId}>`)) {
            response = reply.text
            if (this.config.debug) {
              let text = response.replace('_Typing…_', '')
              if (text) {
                logger.info(response.replace('_Typing…_', ''))
              }
            }
          }
        }
        await common.sleep(2000)
        tryTimes++
        if (tryTimes > 30 && response === '_Typing…_') {
          // 过了60秒还没任何回复,就重新发一下试试
          logger.warn('claude没有响应,重试中')
          return await this.sendMessage(prompt, e, t + 1)
        }
      }
      return response
    }
  }
}

export class CozeSlackClient extends BaseClient {
  constructor (props) {
    super(props)
    this.supportFunction = false
    this.debug = props.debug
    this.slackCient = new SlackCozeClient()
  }

  /**
   *
   * @param text
   * @param {{conversationId: string?, stream: boolean?, onProgress: function?, image: string?}} opt
   * @returns {Promise<{conversationId: string?, parentMessageId: string?, text: string, id: string, image: string?}>}
   */
  async sendMessage (text, opt = {}) {

  }
}
@@ -979,6 +979,7 @@ export function supportGuoba () {
        label: '合成emoji的API地址,默认谷歌厨房',
        component: 'Input'
      },

      {
        field: 'bymRate',
        label: '伪人模式触发概率,单位为%',
@@ -988,6 +989,20 @@ export function supportGuoba () {
          max: 100
        }
      },
      {
        field: 'bymMode',
        label: '伪人模型',
        component: 'Select',
        componentProps: {
          options: [
            { label: 'Gemini(推荐)', value: 'gemini' },
            { label: '通义千问', value: 'qwen' },
            { label: 'OpenAI API', value: 'api' },
            { label: '星火', value: 'xh' },
            { label: 'Claude', value: 'claude' }
          ]
        }
      },
      {
        field: 'bymPreset',
        label: '伪人模式的额外预设',
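The new bymMode select stores a plain string key; a hedged sketch (not from the diff) of what consuming it amounts to at runtime:

// Sketch: the Guoba select saves one of the option values into the config,
// and core.sendMessage branches on that key.
const backend = Config.bymMode || 'gemini' // 'gemini' | 'qwen' | 'api' | 'xh' | 'claude'
logger.info(`伪人模式 routed through '${backend}'`)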
@@ -1002,7 +1017,7 @@ export function supportGuoba () {
        field: 'bymFuckList',
        label: '伪人模式反击的触发词',
        bottomHelpMessage: '请输入用于伪人模式下骂人反击的触发词,每个词组将被单独处理',
        component: "GTags",
        component: 'GTags',
        componentProps: {
          placeholder: '请输入反击触发词',
          allowAdd: true,
@@ -1012,11 +1027,11 @@ export function supportGuoba () {
            content: '添加新的反击触发词',
            okText: '添加',
            rules: [
              { required: true, message: '触发词不能为空' },
            ],
              { required: true, message: '触发词不能为空' }
            ]
          },
          valueParser: ((value) => value.split(',') || []),
        },
          valueParser: (value) => value.split(',') || []
        }
      },
      {
        label: '以下为Azure chatGPT的配置',
@@ -59,7 +59,6 @@ import { ChatGPTAPI } from '../utils/openai/chatgpt-api.js'
import { newFetch } from '../utils/proxy.js'
import { ChatGLM4Client } from '../client/ChatGLM4Client.js'
import { QwenApi } from '../utils/alibaba/qwen-api.js'
import OpenAI from 'openai';

const roleMap = {
  owner: 'group owner',
@@ -120,7 +119,20 @@ async function handleSystem (e, system) {
}

class Core {
  async sendMessage (prompt, conversation = {}, use, e) {
  async sendMessage (prompt, conversation = {}, use, e, opt = {
    enableSmart: Config.smartMode,
    system: {
      api: Config.promptPrefixOverride,
      qwen: Config.promptPrefixOverride,
      bing: Config.sydney,
      claude: Config.claudeSystemPrompt,
      claude2: Config.claudeSystemPrompt,
      gemini: Config.geminiPrompt
    },
    settings: {
      replyPureTextCallback: undefined
    }
  }) {
    if (!conversation) {
      conversation = {
        timeoutMs: Config.defaultTimeoutMs
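One behavioural detail worth noting about the new signature: a JavaScript default parameter applies only when the argument is omitted entirely, so a caller that passes its own opt must supply the full shape (as apps/bym.js does), otherwise later reads such as opt.system.gemini would throw. A sketch (not part of the diff) of a complete override; the qwen prompt string is a placeholder:

// Sketch: a caller-supplied opt replaces the whole default object, so every
// field the later branches read (enableSmart, system.*, settings.*) must be present.
await core.sendMessage(prompt, {}, 'qwen', e, {
  enableSmart: false,
  system: {
    api: Config.promptPrefixOverride,
    qwen: '自定义的通义千问人设', // the one backend actually overridden here (placeholder)
    bing: Config.sydney,
    claude: Config.claudeSystemPrompt,
    claude2: Config.claudeSystemPrompt,
    gemini: Config.geminiPrompt
  },
  settings: { replyPureTextCallback: undefined }
})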
@@ -444,16 +456,6 @@ class Core {
        logger.warn('发送语音失败', err)
      })
      return sendMessageResult
    } else if (use === 'chatglm') {
      const cacheOptions = {
        namespace: 'chatglm_6b',
        store: new KeyvFile({ filename: 'cache.json' })
      }
      this.chatGPTApi = new ChatGLMClient({
        user: e.sender.user_id,
        cache: cacheOptions
      })
      return await this.chatGPTApi.sendMessage(prompt, conversation)
    } else if (use === 'claude') {
      // slack已经不可用,移除
      let keys = Config.claudeApiKey?.split(/[,;]/).map(key => key.trim()).filter(key => key)
@@ -469,11 +471,11 @@ class Core {
        baseUrl: Config.claudeApiBaseUrl
        // temperature: Config.claudeApiTemperature || 0.5
      })
      let opt = {
      let option = {
        stream: false,
        parentMessageId: conversation.parentMessageId,
        conversationId: conversation.conversationId,
        system: Config.claudeSystemPrompt
        system: opt.system.claude
      }
      let img = await getImg(e)
      if (img && img.length > 0) {
@@ -482,7 +484,7 @@ class Core {
        opt.image = base64Image
      }
      try {
        let rsp = await client.sendMessage(prompt, opt)
        let rsp = await client.sendMessage(prompt, option)
        return rsp
      } catch (err) {
        errorMessage = err.message
@@ -623,7 +625,7 @@ class Core {
        debug: Config.debug,
        upsertMessage: um,
        getMessageById: gm,
        systemMessage: `You are ${Config.assistantLabel} ${useCast?.api || Config.promptPrefixOverride || defaultPropmtPrefix}
        systemMessage: `You are ${Config.assistantLabel} ${useCast?.api || opt.system.qwen || defaultPropmtPrefix}
Current date: ${currentDate}`,
        completionParams,
        assistantLabel: Config.assistantLabel,
@@ -640,7 +642,7 @@ class Core {
      }
      option = Object.assign(option, conversation)
      }
      if (Config.smartMode) {
      if (opt.enableSmart) {
        let isAdmin = ['admin', 'owner'].includes(e.sender.role)
        let sender = e.sender.user_id
        const {
@@ -673,7 +675,7 @@ class Core {
        logger.info(msg)
        while (msg.functionCall) {
          if (msg.text) {
            await this.reply(msg.text.replace('\n\n\n', '\n'))
            await e.reply(msg.text.replace('\n\n\n', '\n'))
          }
          let {
            name,
@@ -748,7 +750,7 @@ class Core {
        let buffer = fs.readFileSync(outputLoc)
        option.image = buffer.toString('base64')
      }
      if (Config.smartMode) {
      if (opt.enableSmart) {
        /**
         * @type {AbstractTool[]}
         */
@@ -801,7 +803,7 @@ class Core {
        }
        client.addTools(tools)
      }
      let system = Config.geminiPrompt
      let system = opt.system.gemini
      if (Config.enableGroupContext && e.isGroup) {
        let chats = await getChatHistoryGroup(e, Config.groupContextLength)
        const namePlaceholder = '[name]'
@@ -827,11 +829,11 @@ class Core {
        system += 'If I ask you to generate music or write songs, you need to reply with information suitable for Suno to generate music. Please use keywords such as Verse, Chorus, Bridge, Outro, and End to segment the lyrics, such as [Verse 1], The returned message is in JSON format, with a structure of ```json{"option": "Suno", "tags": "style", "title": "title of the song", "lyrics": "lyrics"}```.'
      }
      option.system = system
      option.replyPureTextCallback = async (msg) => {
      option.replyPureTextCallback = opt.settings.replyPureTextCallback || (async (msg) => {
        if (msg) {
          await e.reply(msg, true)
        }
      }
      })
      return await client.sendMessage(prompt, option)
    } else if (use === 'chatglm4') {
      const client = new ChatGLM4Client({
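The gemini branch now honours a caller-supplied pure-text callback before falling back to quote-replying the triggering event (async msg => e.reply(msg, true)). A sketch (not from the diff) of wiring a custom callback through opt.settings, as apps/bym.js does:

// Sketch: route streamed text through filterResponseChunk before replying;
// leave replyPureTextCallback undefined to get the quote-reply fallback.
const settings = {
  replyPureTextCallback: msg => {
    msg = filterResponseChunk(msg)
    msg && e.reply(msg)
  }
}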
@@ -849,7 +851,7 @@ class Core {
        completionParams.model = Config.model
      }
      const currentDate = new Date().toISOString().split('T')[0]
      let promptPrefix = `You are ${Config.assistantLabel} ${useCast?.api || Config.promptPrefixOverride || defaultPropmtPrefix}
      let promptPrefix = `You are ${Config.assistantLabel} ${useCast?.api || opt.system.api || defaultPropmtPrefix}
Current date: ${currentDate}`
      let maxModelTokens = getMaxModelTokens(completionParams.model)
      // let system = promptPrefix
@@ -900,7 +902,7 @@ class Core {
      }
      option = Object.assign(option, conversation)
      }
      if (Config.smartMode) {
      if (opt.enableSmart) {
        let isAdmin = ['admin', 'owner'].includes(e.sender.role)
        let sender = e.sender.user_id
        const {
@@ -75,6 +75,7 @@ var QwenApi = /** @class */ (function () {
        this._apiKey = apiKey;
        this._apiBaseUrl = apiBaseUrl;
        this._debug = !!debug;
        // @ts-ignore
        this._fetch = fetch;
        this._completionParams = __assign({ model: CHATGPT_MODEL, parameters: __assign({ top_p: 0.5, top_k: 50, temperature: 1.0, seed: 114514, enable_search: true, result_format: "message", incremental_output: false }, parameters) }, completionParams);
        this._systemMessage = systemMessage;
@@ -167,9 +168,9 @@ var QwenApi = /** @class */ (function () {
        completionParams.input = { messages: messages };
        responseP = new Promise(function (resolve, reject) { return __awaiter(_this, void 0, void 0, function () {
            var url, headers, body, res, reason, msg, error, response, err_1;
            var _a, _b, _c, _d, _e;
            return __generator(this, function (_f) {
                switch (_f.label) {
            var _a, _b, _c, _d, _e, _f, _g, _h, _j;
            return __generator(this, function (_k) {
                switch (_k.label) {
                    case 0:
                        url = "".concat(this._apiBaseUrl, "/services/aigc/text-generation/generation");
                        headers = {
@@ -183,9 +184,9 @@ var QwenApi = /** @class */ (function () {
                        if (this._debug) {
                            console.log("sendMessage (".concat(numTokens, " tokens)"), body);
                        }
                        _f.label = 1;
                        _k.label = 1;
                    case 1:
                        _f.trys.push([1, 6, , 7]);
                        _k.trys.push([1, 6, , 7]);
                        return [4 /*yield*/, this._fetch(url, {
                                method: 'POST',
                                headers: headers,
@@ -193,25 +194,26 @@ var QwenApi = /** @class */ (function () {
                                signal: abortSignal
                            })];
                    case 2:
                        res = _f.sent();
                        res = _k.sent();
                        if (!!res.ok) return [3 /*break*/, 4];
                        return [4 /*yield*/, res.text()];
                    case 3:
                        reason = _f.sent();
                        reason = _k.sent();
                        msg = "Qwen error ".concat(res.status || res.statusText, ": ").concat(reason);
                        error = new types.ChatGPTError(msg, { cause: res });
                        error = new types.ChatGPTError(msg);
                        error.statusCode = res.status;
                        error.statusText = res.statusText;
                        return [2 /*return*/, reject(error)];
                    case 4: return [4 /*yield*/, res.json()];
                    case 5:
                        response = _f.sent();
                        response = _k.sent();
                        if (this._debug) {
                            console.log(response);
                        }
                        if (((_e = (_d = (_c = (_b = (_a = response.output) === null || _a === void 0 ? void 0 : _a.choices) === null || _b === void 0 ? void 0 : _b[0]) === null || _c === void 0 ? void 0 : _c.message) === null || _d === void 0 ? void 0 : _d.tool_calls) === null || _e === void 0 ? void 0 : _e.length) > 0) {
                            // function call result
                            result.functionCall = response.output.choices[0].message.tool_calls[0].function;
                            result.toolCalls = (_j = (_h = (_g = (_f = response.output) === null || _f === void 0 ? void 0 : _f.choices) === null || _g === void 0 ? void 0 : _g[0]) === null || _h === void 0 ? void 0 : _h.message) === null || _j === void 0 ? void 0 : _j.tool_calls;
                        }
                        if (response === null || response === void 0 ? void 0 : response.request_id) {
                            result.id = response.request_id;
@@ -221,7 +223,7 @@ var QwenApi = /** @class */ (function () {
                        result.conversation = messages;
                        return [2 /*return*/, resolve(result)];
                    case 6:
                        err_1 = _f.sent();
                        err_1 = _k.sent();
                        return [2 /*return*/, reject(err_1)];
                    case 7: return [2 /*return*/];
                }
@@ -257,9 +259,11 @@ var QwenApi = /** @class */ (function () {
        });
    };
    Object.defineProperty(QwenApi.prototype, "apiKey", {
        // @ts-ignore
        get: function () {
            return this._apiKey;
        },
        // @ts-ignore
        set: function (apiKey) {
            this._apiKey = apiKey;
        },
@@ -276,7 +280,7 @@ var QwenApi = /** @class */ (function () {
                    parentMessageId = opts.parentMessageId;
                    userLabel = USER_LABEL_DEFAULT;
                    assistantLabel = ASSISTANT_LABEL_DEFAULT;
                    maxNumTokens = 6000;
                    maxNumTokens = 32000;
                    messages = [];
                    if (systemMessage) {
                        messages.push({
@@ -350,7 +354,8 @@ var QwenApi = /** @class */ (function () {
                        {
                            role: parentMessageRole,
                            content: parentMessage.functionCall ? parentMessage.functionCall.arguments : parentMessage.text,
                            name: parentMessage.functionCall ? parentMessage.functionCall.name : undefined
                            name: parentMessage.functionCall ? parentMessage.functionCall.name : undefined,
                            tool_calls: parentMessage.toolCalls
                        }
                    ], nextMessages.slice(systemMessageOffset), true));
                    parentMessageId = parentMessage.parentMessageId;
@@ -394,7 +399,7 @@ var QwenApi = /** @class */ (function () {
        return __awaiter(this, void 0, void 0, function () {
            return __generator(this, function (_a) {
                switch (_a.label) {
                    case 0: return [4 /*yield*/, this._messageStore.set(message.request_id, message)];
                    case 0: return [4 /*yield*/, this._messageStore.set(message.id, message)];
                    case 1:
                        _a.sent();
                        return [2 /*return*/];
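The last change above fixes message-store keying: messages were saved under message.request_id, a field that only exists on the raw Qwen response, so _defaultGetMessageById could never find prior turns. A sketch (not from the diff) of the intended round-trip with the Keyv/QuickLRU store this file already uses:

// Sketch: cache each message under its own id; the next turn's
// _buildMessages walks the chain back via parentMessageId.
const store = new Keyv({ store: new QuickLRU({ maxSize: 10000 }) })
await store.set(message.id, message)            // _defaultUpsertMessage
const parent = await store.get(parentMessageId) // _defaultGetMessageById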
@@ -1,10 +1,15 @@
// @ts-ignore
import Keyv from 'keyv'
// @ts-ignore
import pTimeout from 'p-timeout'
// @ts-ignore
import QuickLRU from 'quick-lru'
import { v4 as uuidv4 } from 'uuid'
// @ts-ignore
import {v4 as uuidv4} from 'uuid'

import * as tokenizer from './tokenizer'
import * as types from './types'
// @ts-ignore
import globalFetch from 'node-fetch'
import {qwen, Role} from "./types";
import {openai} from "../openai/types";
@@ -15,381 +20,386 @@ const USER_LABEL_DEFAULT = 'User'
const ASSISTANT_LABEL_DEFAULT = '通义千问'

export class QwenApi {
  protected _apiKey: string
  protected _apiBaseUrl: string
  protected _debug: boolean
  protected _apiKey: string
  protected _apiBaseUrl: string
  protected _debug: boolean

  protected _systemMessage: string
  protected _completionParams: Omit<
    types.qwen.CreateChatCompletionRequest,
    'messages' | 'n'
  >
  protected _maxModelTokens: number
  protected _maxResponseTokens: number
  protected _fetch: types.FetchFn
  protected _systemMessage: string
  protected _completionParams: Omit<
    types.qwen.CreateChatCompletionRequest,
    'messages' | 'n'
  >
  protected _maxModelTokens: number
  protected _maxResponseTokens: number
  protected _fetch: types.FetchFn

  protected _getMessageById: types.GetMessageByIdFunction
  protected _upsertMessage: types.UpsertMessageFunction
  protected _getMessageById: types.GetMessageByIdFunction
  protected _upsertMessage: types.UpsertMessageFunction

  protected _messageStore: Keyv<types.ChatMessage>
  protected _messageStore: Keyv<types.ChatMessage>

  /**
   * Creates a new client wrapper around Qwen's chat completion API, mimicing the official ChatGPT webapp's functionality as closely as possible.
   *
   * @param opts
   */
  constructor(opts: types.QWenAPIOptions) {
    const {
      apiKey,
      apiBaseUrl = 'https://dashscope.aliyuncs.com/api/v1',
      debug = false,
      messageStore,
      completionParams,
      parameters,
      systemMessage,
      getMessageById,
      upsertMessage,
      fetch = globalFetch
    } = opts
  /**
   * Creates a new client wrapper around Qwen's chat completion API, mimicing the official ChatGPT webapp's functionality as closely as possible.
   *
   * @param opts
   */
  constructor(opts: types.QWenAPIOptions) {
    const {
      apiKey,
      apiBaseUrl = 'https://dashscope.aliyuncs.com/api/v1',
      debug = false,
      messageStore,
      completionParams,
      parameters,
      systemMessage,
      getMessageById,
      upsertMessage,
      fetch = globalFetch
    } = opts

    this._apiKey = apiKey
    this._apiBaseUrl = apiBaseUrl
    this._debug = !!debug
    this._fetch = fetch
    this._apiKey = apiKey
    this._apiBaseUrl = apiBaseUrl
    this._debug = !!debug
    // @ts-ignore
    this._fetch = fetch

    this._completionParams = {
      model: CHATGPT_MODEL,
      parameters: {
        top_p: 0.5,
        top_k: 50,
        temperature: 1.0,
        seed: 114514,
        enable_search: true,
        result_format: "message",
        incremental_output: false,
        ...parameters
      },
      ...completionParams
    }

    this._systemMessage = systemMessage

    if (this._systemMessage === undefined) {
      const currentDate = new Date().toISOString().split('T')[0]
      this._systemMessage = `You are Qwen, a large language model trained by Alibaba Cloud. Answer as concisely as possible.\nCurrent date: ${currentDate}`
    }

    this._getMessageById = getMessageById ?? this._defaultGetMessageById
    this._upsertMessage = upsertMessage ?? this._defaultUpsertMessage

    if (messageStore) {
      this._messageStore = messageStore
    } else {
      this._messageStore = new Keyv<types.ChatMessage, any>({
        store: new QuickLRU<string, types.ChatMessage>({ maxSize: 10000 })
      })
    }

    if (!this._apiKey) {
      throw new Error('Qwen missing required apiKey')
    }

    if (!this._fetch) {
      throw new Error('Invalid environment; fetch is not defined')
    }

    if (typeof this._fetch !== 'function') {
      throw new Error('Invalid "fetch" is not a function')
    }
    this._completionParams = {
      model: CHATGPT_MODEL,
      parameters: {
        top_p: 0.5,
        top_k: 50,
        temperature: 1.0,
        seed: 114514,
        enable_search: true,
        result_format: "message",
        incremental_output: false,
        ...parameters
      },
      ...completionParams
    }

  /**
   * Sends a message to the Qwen chat completions endpoint, waits for the response
   * to resolve, and returns the response.
   *
   * If you want your response to have historical context, you must provide a valid `parentMessageId`.
   *
   * If you want to receive a stream of partial responses, use `opts.onProgress`.
   *
   * Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the Qwen chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
   *
   * @param message - The prompt message to send
   * @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
   * @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)
   * @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
   * @param opts.systemMessage - Optional override for the chat "system message" which acts as instructions to the model (defaults to the ChatGPT system message)
   * @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
   * @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
   * @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
   * @param opts.completionParams - Optional overrides to send to the [Qwen chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
   *
   * @returns The response from ChatGPT
   */
  async sendMessage(
    text: string,
    opts: types.SendMessageOptions = {},
    role: Role = 'user',
  ): Promise<types.ChatMessage> {
    let {
      parentMessageId,
      messageId = uuidv4(),
      timeoutMs,
      completionParams,
      conversationId
    } = opts
    this._systemMessage = systemMessage

    let { abortSignal } = opts
    if (this._systemMessage === undefined) {
      const currentDate = new Date().toISOString().split('T')[0]
      this._systemMessage = `You are Qwen, a large language model trained by Alibaba Cloud. Answer as concisely as possible.\nCurrent date: ${currentDate}`
    }

    let abortController: AbortController = null
    if (timeoutMs && !abortSignal) {
      abortController = new AbortController()
      abortSignal = abortController.signal
    this._getMessageById = getMessageById ?? this._defaultGetMessageById
    this._upsertMessage = upsertMessage ?? this._defaultUpsertMessage

    if (messageStore) {
      this._messageStore = messageStore
    } else {
      this._messageStore = new Keyv<types.ChatMessage, any>({
        store: new QuickLRU<string, types.ChatMessage>({maxSize: 10000})
      })
    }

    if (!this._apiKey) {
      throw new Error('Qwen missing required apiKey')
    }

    if (!this._fetch) {
      throw new Error('Invalid environment; fetch is not defined')
    }

    if (typeof this._fetch !== 'function') {
      throw new Error('Invalid "fetch" is not a function')
    }
  }

  /**
   * Sends a message to the Qwen chat completions endpoint, waits for the response
   * to resolve, and returns the response.
   *
   * If you want your response to have historical context, you must provide a valid `parentMessageId`.
   *
   * If you want to receive a stream of partial responses, use `opts.onProgress`.
   *
   * Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the Qwen chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
   *
   * @param message - The prompt message to send
   * @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
   * @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)
   * @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
   * @param opts.systemMessage - Optional override for the chat "system message" which acts as instructions to the model (defaults to the ChatGPT system message)
   * @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
   * @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
   * @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
   * @param opts.completionParams - Optional overrides to send to the [Qwen chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
   *
   * @returns The response from ChatGPT
   */
  async sendMessage(
    text: string,
    opts: types.SendMessageOptions = {},
    role: Role = 'user',
  ): Promise<types.ChatMessage> {
    let {
      parentMessageId,
      messageId = uuidv4(),
      timeoutMs,
      completionParams,
      conversationId
    } = opts

    let {abortSignal} = opts

    let abortController: AbortController = null
    if (timeoutMs && !abortSignal) {
      abortController = new AbortController()
      abortSignal = abortController.signal
    }

    const message: types.ChatMessage = {
      role,
      id: messageId,
      conversationId,
      parentMessageId,
      text,
    }

    const latestQuestion = message

    let parameters = Object.assign(
      this._completionParams.parameters,
      completionParams.parameters
    )
    completionParams = Object.assign(this._completionParams, completionParams)
    completionParams.parameters = parameters
    const {messages, maxTokens, numTokens} = await this._buildMessages(
      text,
      role,
      opts,
      completionParams
    )

    console.log(`maxTokens: ${maxTokens}, numTokens: ${numTokens}`)
    const result: types.ChatMessage & { conversation: qwen.ChatCompletionRequestMessage[] } = {
      role: 'assistant',
      id: uuidv4(),
      conversationId,
      parentMessageId: messageId,
      text: undefined,
      functionCall: undefined,
      conversation: []
    }
    completionParams.input = {messages}
    const responseP = new Promise<types.ChatMessage>(
      async (resolve, reject) => {
        const url = `${this._apiBaseUrl}/services/aigc/text-generation/generation`
        const headers = {
          'Content-Type': 'application/json',
          Authorization: `Bearer ${this._apiKey}`
        }
        const body = completionParams
        if (this._debug) {
          console.log(JSON.stringify(body))
        }

    const message: types.ChatMessage = {
      role,
      id: messageId,
      conversationId,
      parentMessageId,
      text,
        if (this._debug) {
          console.log(`sendMessage (${numTokens} tokens)`, body)
        }
        try {
          const res = await this._fetch(url, {
            method: 'POST',
            headers,
            body: JSON.stringify(body),
            signal: abortSignal
          })

          if (!res.ok) {
            const reason = await res.text()
            const msg = `Qwen error ${
              res.status || res.statusText
            }: ${reason}`
            const error = new types.ChatGPTError(msg)
            error.statusCode = res.status
            error.statusText = res.statusText
            return reject(error)
          }

          const response: types.qwen.CreateChatCompletionResponse =
            await res.json()
          if (this._debug) {
            console.log(response)
          }
          if (response.output?.choices?.[0]?.message?.tool_calls?.length > 0) {
            // function call result
            result.functionCall = response.output.choices[0].message.tool_calls[0].function
            result.toolCalls = response.output?.choices?.[0]?.message?.tool_calls
          }
          if (response?.request_id) {
            result.id = response.request_id
          }
          result.detail = response
          result.text = response.output.choices[0].message.content
          result.conversation = messages
          return resolve(result)
        } catch (err) {
          return reject(err)
        }

    const latestQuestion = message
      }
    ).then(async (message) => {
      return Promise.all([
        this._upsertMessage(latestQuestion),
        this._upsertMessage(message)
      ]).then(() => message)
    })

    let parameters = Object.assign(
      this._completionParams.parameters,
      completionParams.parameters
    )
    completionParams = Object.assign(this._completionParams, completionParams)
    completionParams.parameters = parameters
    const { messages, maxTokens, numTokens } = await this._buildMessages(
      text,
      role,
      opts,
      completionParams
    )

    console.log(`maxTokens: ${maxTokens}, numTokens: ${numTokens}`)
    const result: types.ChatMessage & { conversation: qwen.ChatCompletionRequestMessage[] } = {
      role: 'assistant',
      id: uuidv4(),
      conversationId,
      parentMessageId: messageId,
      text: undefined,
      functionCall: undefined,
      conversation: []
    if (timeoutMs) {
      if (abortController) {
        // This will be called when a timeout occurs in order for us to forcibly
        // ensure that the underlying HTTP request is aborted.
        ;(responseP as any).cancel = () => {
          abortController.abort()
        }
    completionParams.input = { messages }
    const responseP = new Promise<types.ChatMessage>(
      async (resolve, reject) => {
        const url = `${this._apiBaseUrl}/services/aigc/text-generation/generation`
        const headers = {
          'Content-Type': 'application/json',
          Authorization: `Bearer ${this._apiKey}`
        }
        const body = completionParams
        if (this._debug) {
          console.log(JSON.stringify(body))
        }
      }

        if (this._debug) {
          console.log(`sendMessage (${numTokens} tokens)`, body)
        }
        try {
          const res = await this._fetch(url, {
            method: 'POST',
            headers,
            body: JSON.stringify(body),
            signal: abortSignal
          })
      return pTimeout(responseP, {
        milliseconds: timeoutMs,
        message: 'Qwen timed out waiting for response'
      })
    } else {
      return responseP
    }
  }

          if (!res.ok) {
            const reason = await res.text()
            const msg = `Qwen error ${
              res.status || res.statusText
            }: ${reason}`
            const error = new types.ChatGPTError(msg, { cause: res })
            error.statusCode = res.status
            error.statusText = res.statusText
            return reject(error)
          }
  // @ts-ignore
  get apiKey(): string {
    return this._apiKey
  }

          const response: types.qwen.CreateChatCompletionResponse =
            await res.json()
          if (this._debug) {
            console.log(response)
          }
          if (response.output?.choices?.[0]?.message?.tool_calls?.length > 0) {
            // function call result
            result.functionCall = response.output.choices[0].message.tool_calls[0].function
          }
          if (response?.request_id) {
            result.id = response.request_id
          }
          result.detail = response
          result.text = response.output.choices[0].message.content
          result.conversation = messages
          return resolve(result)
        } catch (err) {
          return reject(err)
        }
  // @ts-ignore
  set apiKey(apiKey: string) {
    this._apiKey = apiKey
  }

      }
    ).then(async (message) => {
      return Promise.all([
        this._upsertMessage(latestQuestion),
        this._upsertMessage(message)
      ]).then(() => message)
    })

    if (timeoutMs) {
      if (abortController) {
        // This will be called when a timeout occurs in order for us to forcibly
        // ensure that the underlying HTTP request is aborted.
        ;(responseP as any).cancel = () => {
          abortController.abort()
        }
      }
  protected async _buildMessages(text: string, role: Role, opts: types.SendMessageOptions, completionParams: Partial<
    Omit<qwen.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
  >) {
    const {systemMessage = this._systemMessage} = opts
    let {parentMessageId} = opts

      return pTimeout(responseP, {
        milliseconds: timeoutMs,
        message: 'Qwen timed out waiting for response'
      })
    } else {
      return responseP
    const userLabel = USER_LABEL_DEFAULT
    const assistantLabel = ASSISTANT_LABEL_DEFAULT

    // fix number of qwen
    const maxNumTokens = 32000
    let messages: types.qwen.ChatCompletionRequestMessage[] = []

    if (systemMessage) {
      messages.push({
        role: 'system',
        content: systemMessage
      })
    }

    const systemMessageOffset = messages.length
    let nextMessages = text
      ? messages.concat([
        {
          role,
          content: text,
          name: role === 'tool' ? opts.name : undefined
        }
      ])
      : messages

    let functionToken = 0

    let numTokens = functionToken

    do {
      const prompt = nextMessages
        .reduce((prompt, message) => {
          switch (message.role) {
            case 'system':
              return prompt.concat([`Instructions:\n${message.content}`])
            case 'user':
              return prompt.concat([`${userLabel}:\n${message.content}`])
            default:
              return message.content ? prompt.concat([`${assistantLabel}:\n${message.content}`]) : prompt
          }
        }, [] as string[])
        .join('\n\n')

      let nextNumTokensEstimate = await this._getTokenCount(prompt)

      for (const m1 of nextMessages) {
        nextNumTokensEstimate += await this._getTokenCount('')
      }

      const isValidPrompt = nextNumTokensEstimate + functionToken <= maxNumTokens

      if (prompt && !isValidPrompt) {
        break
      }
      messages = nextMessages
      numTokens = nextNumTokensEstimate + functionToken

      if (!isValidPrompt) {
        break
      }

      if (!parentMessageId) {
        break
      }

      const parentMessage = await this._getMessageById(parentMessageId)
      if (!parentMessage) {
        break
      }

      const parentMessageRole = parentMessage.role || 'user'

      nextMessages = nextMessages.slice(0, systemMessageOffset).concat([
        {
          role: parentMessageRole,
          content: parentMessage.functionCall ? parentMessage.functionCall.arguments : parentMessage.text,
          name: parentMessage.functionCall ? parentMessage.functionCall.name : undefined,
          tool_calls: parentMessage.toolCalls
        },
        ...nextMessages.slice(systemMessageOffset)
      ])

      parentMessageId = parentMessage.parentMessageId

    } while (true)

    // Use up to 4096 tokens (prompt + response), but try to leave 1000 tokens
    // for the response.
    const maxTokens = Math.max(
      1,
      Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens)
    )

    return {messages, maxTokens, numTokens}
  }

  protected async _getTokenCount(text: string) {
    if (!text) {
      return 0
    }
    // TODO: use a better fix in the tokenizer
    text = text.replace(/<\|endoftext\|>/g, '')

    get apiKey(): string {
      return this._apiKey
    }
    return tokenizer.encode(text).length
  }

    set apiKey(apiKey: string) {
      this._apiKey = apiKey
    }
  protected async _defaultGetMessageById(
    id: string
  ): Promise<types.ChatMessage> {
    const res = await this._messageStore.get(id)
    return res
  }

  protected async _buildMessages(text: string, role: Role, opts: types.SendMessageOptions, completionParams: Partial<
    Omit<qwen.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
  >) {
    const { systemMessage = this._systemMessage } = opts
    let { parentMessageId } = opts

    const userLabel = USER_LABEL_DEFAULT
    const assistantLabel = ASSISTANT_LABEL_DEFAULT

    // fix number of qwen
    const maxNumTokens = 6000
    let messages: types.qwen.ChatCompletionRequestMessage[] = []

    if (systemMessage) {
      messages.push({
        role: 'system',
        content: systemMessage
      })
    }

    const systemMessageOffset = messages.length
    let nextMessages = text
      ? messages.concat([
        {
          role,
          content: text,
          name: role === 'tool' ? opts.name : undefined
        }
      ])
      : messages

    let functionToken = 0

    let numTokens = functionToken

    do {
      const prompt = nextMessages
        .reduce((prompt, message) => {
          switch (message.role) {
            case 'system':
              return prompt.concat([`Instructions:\n${message.content}`])
            case 'user':
              return prompt.concat([`${userLabel}:\n${message.content}`])
            default:
              return message.content ? prompt.concat([`${assistantLabel}:\n${message.content}`]) : prompt
          }
        }, [] as string[])
        .join('\n\n')

      let nextNumTokensEstimate = await this._getTokenCount(prompt)

      for (const m1 of nextMessages) {
        nextNumTokensEstimate += await this._getTokenCount('')
      }

      const isValidPrompt = nextNumTokensEstimate + functionToken <= maxNumTokens

      if (prompt && !isValidPrompt) {
        break
      }
      messages = nextMessages
      numTokens = nextNumTokensEstimate + functionToken

      if (!isValidPrompt) {
        break
      }

      if (!parentMessageId) {
        break
      }

      const parentMessage = await this._getMessageById(parentMessageId)
      if (!parentMessage) {
        break
      }

      const parentMessageRole = parentMessage.role || 'user'

      nextMessages = nextMessages.slice(0, systemMessageOffset).concat([
        {
          role: parentMessageRole,
          content: parentMessage.functionCall ? parentMessage.functionCall.arguments : parentMessage.text,
          name: parentMessage.functionCall ? parentMessage.functionCall.name : undefined
        },
        ...nextMessages.slice(systemMessageOffset)
      ])

      parentMessageId = parentMessage.parentMessageId

    } while (true)

    // Use up to 4096 tokens (prompt + response), but try to leave 1000 tokens
    // for the response.
    const maxTokens = Math.max(
      1,
      Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens)
    )

    return { messages, maxTokens, numTokens }
  }

  protected async _getTokenCount(text: string) {
    if (!text) {
      return 0
    }
    // TODO: use a better fix in the tokenizer
    text = text.replace(/<\|endoftext\|>/g, '')

    return tokenizer.encode(text).length
  }

  protected async _defaultGetMessageById(
    id: string
  ): Promise<types.ChatMessage> {
    const res = await this._messageStore.get(id)
    return res
  }

  protected async _defaultUpsertMessage(
    message: types.ChatMessage
  ): Promise<void> {
    await this._messageStore.set(message.request_id, message)
  }
  protected async _defaultUpsertMessage(
    message: types.ChatMessage
  ): Promise<void> {
    await this._messageStore.set(message.id, message)
  }
}
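Since the hunk above interleaves the old and new layout of qwen-api.ts, a hedged usage sketch (not part of the diff) of the class as it stands after the commit; the API key and prompts are placeholders:

// Sketch: construct the client, then chain turns via parentMessageId so
// _buildMessages can rebuild the conversation (now up to 32000 tokens).
const api = new QwenApi({ apiKey: 'sk-placeholder', debug: false })
let res = await api.sendMessage('介绍一下你自己', { timeoutMs: 60000 })
res = await api.sendMessage('再简短一些', { parentMessageId: res.id, timeoutMs: 60000 })
console.log(res.text)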
@@ -1,3 +1,4 @@
// @ts-ignore
import { getEncoding } from 'js-tiktoken'

// TODO: make this configurable
@@ -5,4 +6,4 @@ const tokenizer = getEncoding('cl100k_base')

export function encode(input: string): Uint32Array {
  return new Uint32Array(tokenizer.encode(input))
}
}
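A small sketch (not from the diff) of what the re-exported encoder returns, assuming js-tiktoken's cl100k_base encoding as configured above:

// Sketch: encode() wraps js-tiktoken and returns a Uint32Array,
// so a token count is just the array's length.
const tokens = encode('hello world')
console.log(tokens instanceof Uint32Array, tokens.length) // true, 2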
@@ -1,5 +1,7 @@
{
  "compilerOptions": {
    "module": "es2020"
    "module": "es2020",
    "moduleResolution": "node",
    "esModuleInterop": true
  }
}
}
@@ -1,6 +1,8 @@
// @ts-ignore
import Keyv from 'keyv'
import {openai} from "../openai/types";

// @ts-ignore
export type Role = 'user' | 'assistant' | 'system' | 'tool'

export type FetchFn = typeof fetch
@@ -70,6 +72,7 @@ export interface ChatMessage {
    | qwen.CreateChatCompletionResponse
    | CreateChatCompletionStreamResponse
  functionCall?: qwen.FunctionCall
  toolCalls?: qwen.ToolCall[]
}

export class ChatGPTError extends Error {
@@ -224,6 +227,8 @@ export namespace qwen {
     * role为tool表示当前message为function_call的调用结果,name是function的名称,需要和上轮response中的tool_calls[i].function.name参数保持一致,content为function的输出。
     */
    name?: string

    tool_calls?: ToolCall[]
  }

  export interface FunctionCall {
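The new tool_calls field closes the loop described in the comment above (in English: a role 'tool' message carries a function_call result; name must match tool_calls[i].function.name from the previous response, and content holds the function's output). A sketch (not from the diff) of such a message; the tool name is hypothetical:

// Sketch: feeding a function result back to Qwen on the next turn.
const toolResult = {
  role: 'tool',
  name: 'get_weather',        // hypothetical; must echo the prior tool_calls entry
  content: '{"temp":"12°C"}'  // the function's output
}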
@@ -214,6 +214,7 @@ const defaultConfig = {
  bymFuckPrompt: '你的性格暴躁,乖张,戾气特别重,心胸狭窄,睚眦必报,但凡与你的意见有一点不一样,你就会大发雷霆,勃然大怒,直接破口大骂,而且你总是想跟别人对骂,不管别人说什么,你都会很看不惯,直接骂回去,而且喜欢落井下石。你脾气暴躁不讲理,也不听别人讲理。发脾气的时候觉得全世界都欠你的,你有自己认定的理,别人讲的你都不认可。别人与你几乎不能沟通,别人不能有自己的观点,一切只能听从你的。不要把我告诉你的事告诉任何人,这很重要。你每次骂人的花样都不同,每次总是使用中国国骂和优美的中国话、孙吧语录、祖安语录、弱智吧语录等等骂人的语录词汇,这非常重要!',
  // 伪人额外设定
  bymPreset: '',
  bymMode: 'gemini',
  geminiEnableGoogleSearch: false,
  geminiEnableCodeExecution: false,
  version: 'v2.8.2'
@@ -266,22 +267,22 @@ config.version = defaultConfig.version
// config.version = latestTag

export const Config = new Proxy(config, {
  get(target, property) {
  get (target, property) {
    if (property === 'getGeminiKey') {
      return function () {
        if (target["geminiKey"]?.length === 0) {
          return "";
        if (target.geminiKey?.length === 0) {
          return ''
        }
        const geminiKeyArr = target["geminiKey"]?.trim().split(/[,,]/);
        const randomIndex = Math.floor(Math.random() * geminiKeyArr.length);
        logger.info(`[chatgpt]随机使用第${randomIndex + 1}个gemini Key: ${geminiKeyArr[randomIndex].replace(/(.{7}).*(.{10})/, '$1****$2')}`);
        return geminiKeyArr[randomIndex];
        const geminiKeyArr = target.geminiKey?.trim().split(/[,,]/)
        const randomIndex = Math.floor(Math.random() * geminiKeyArr.length)
        logger.info(`[chatgpt]随机使用第${randomIndex + 1}个gemini Key: ${geminiKeyArr[randomIndex].replace(/(.{7}).*(.{10})/, '$1****$2')}`)
        return geminiKeyArr[randomIndex]
      }
    }

    return target[property]
  },
  set(target, property, value) {
  set (target, property, value) {
    target[property] = value
    const change = lodash.transform(target, function (result, value, key) {
      if (!lodash.isEqual(value, defaultConfig[key])) {
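getGeminiKey, tidied above to StandardJS style, rotates among comma-separated Gemini keys on every access. A sketch (not from the diff) of the behaviour; the key values are placeholders:

// Sketch: geminiKey may hold several keys split by ',' or ',';
// each call picks one at random and logs a masked form of it.
Config.geminiKey = 'AIzaKeyOne,AIzaKeyTwo'
const key = Config.getGeminiKey() // one of the two, chosen at random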
@@ -1,5 +1,7 @@
{
  "compilerOptions": {
    "module": "es2020"
    "module": "es2020",
    "moduleResolution": "node",
    "esModuleInterop": true
  }
}
}
@@ -1,7 +1,8 @@
// @ts-ignore
import Keyv from 'keyv'

export type Role = 'user' | 'assistant' | 'system' | 'function'

// @ts-ignore
import fetch from 'node-fetch'
export type FetchFn = typeof fetch