Merge branch 'v2' into v2

ycxom 2025-02-03 23:11:11 +08:00 committed by GitHub
commit 7ab7e00ed3
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
16 changed files with 561 additions and 678 deletions

View file

@ -45,6 +45,8 @@
* 2023-10-25 Added support for the official Tongyi Qianwen (Qwen) API
* 2023-12-01 Continuing to prioritize adaptation for Shamrock
* 2023-12-14 Added support for the official Gemini API
* 2024 Added pseudo-human mode, also called bym mode in this plugin. Most models are now supported; it triggers randomly in group chat, with a controllable chat frequency and a customizable persona.
* 2025-02-03 Added output of the reasoning content of thinking models such as DeepSeek R1; by default it is sent as a simulated forwarded message. Use the api mode directly to try it.
### If you find this plugin interesting or helpful, please give it a star!

View file

@ -21,6 +21,7 @@ import { initializeImageTool } from '../utils/tools/ImageTool.js'
import { DailyNewsTool } from '../utils/tools/DailyNewsTool.js'
import { SendMessageToSpecificGroupOrUserTool } from '../utils/tools/SendMessageToSpecificGroupOrUserTool.js'
import { customSplitRegex, filterResponseChunk } from '../utils/text.js'
import core from '../model/core.js'
const DefaultConfig = {
returnQQ: [],
@ -70,7 +71,7 @@ export class bym extends plugin {
async readConfigData(id, configList) {
let data = {
chatsList: 20,
propNum: 0,
propNum: Config.bymRate,
notOfGroup: false,
maxText: 50
}
@ -78,7 +79,7 @@ export class bym extends plugin {
const matchedConfig = configList.find(item => String(item.id) === String(id))
if (matchedConfig) {
data.chatsList = parseInt(matchedConfig.chatslist) || data.chatsList
data.propNum = parseInt(matchedConfig.propNum) || data.propNum
const parsedPropNum = parseInt(matchedConfig.propNum)
data.propNum = Number.isNaN(parsedPropNum) ? data.propNum : parsedPropNum // NaN is not nullish, so ?? alone would keep NaN
data.notOfGroup = matchedConfig.notofgroup || data.notOfGroup
data.maxText = parseInt(matchedConfig.maxtext) || data.maxText
}
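A note on the `||` to `??` change above: `parseInt` returns `NaN` (not `null` or `undefined`) on unparseable input, and `NaN` is not nullish, so `??` alone never falls back. A NaN-safe sketch (the `toIntOr` helper name is hypothetical, not part of this codebase):

// Hypothetical helper: parse an integer, falling back only when parsing fails,
// so a configured value of 0 is kept instead of being treated as falsy.
function toIntOr (value, fallback) {
  const parsed = parseInt(value, 10)
  return Number.isNaN(parsed) ? fallback : parsed
}
// e.g. data.propNum = toIntOr(matchedConfig.propNum, data.propNum)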
@ -86,13 +87,28 @@ export class bym extends plugin {
}
/** Parrot / pseudo-human random chat */
async bym(e) {
if (!Config.enableBYM) return false
if (!Config.enableBYM) {
return false
}
const sender = e.sender.user_id
const atBot = e.atme
const card = e.sender.card || e.sender.nickname
const group = e.group_id
let ALLRole = 'default'
let prop = Math.floor(Math.random() * 100)
if (Config.assistantLabel && e.msg?.includes(Config.assistantLabel)) {
prop = -1
}
let fuck = false
let candidate = Config.bymPreset
if (Config.bymFuckList?.find(i => e.msg?.includes(i))) {
fuck = true
candidate = candidate + Config.bymFuckPrompt
}
if (Config.returnQQ.includes(sender)) return false
@ -284,54 +300,34 @@ export class bym extends plugin {
return Role;
}
opt.system = Role
let system = Role
logger.info('[ChatGPT-plugin][AUTO_AI]random chat hit')
let client = new CustomGoogleGeminiClient({
e,
userId: e.sender.user_id,
key: Config.getGeminiKey(),
model: Config.geminiModel,
baseUrl: Config.geminiBaseUrl,
debug: Config.debug
})
/**
* tools
* @type {(AbstractTool)[]}
*/
const tools = [
new SearchVideoTool(),
new SerpImageTool(),
new SearchMusicTool(),
new SendAvatarTool(),
new SendVideoTool(),
new SendMusicTool(),
new SendPictureTool(),
new WebsiteTool(),
new WeatherTool(),
new DailyNewsTool(),
new SendMessageToSpecificGroupOrUserTool()
]
if (Config.azSerpKey) {
tools.push(new SerpTool())
}
if (e.group.is_admin || e.group.is_owner) {
tools.push(new EditCardTool())
tools.push(new JinyanTool())
tools.push(new KickOutTool())
}
if (e.group.is_owner) {
tools.push(new SetTitleTool())
}
const imageTool = await initializeImageTool(e, previousRole, bymGo)
if (Config.AutoToDownImg) {
tools.push(imageTool)
const imagePrompt = await imageTool.getSystemPrompt()
opt.system += '\n' + imagePrompt
system += '\n' + imagePrompt
}
client.addTools(tools)
let rsp = await client.sendMessage(e.msg, opt)
let rsp = await core.sendMessage(e.msg, {}, Config.bymMode, e, {
enableSmart: true,
system: {
api: system,
qwen: system,
bing: system,
claude: system,
claude2: system,
gemini: system,
xh: system
},
settings: {
replyPureTextCallback: msg => {
msg = filterResponseChunk(msg)
msg && e.reply(msg)
}
}
})
// let rsp = await client.sendMessage(e.msg, opt)
let text = rsp.text
let texts = customSplitRegex(text, /(?<!\?)[。?\n](?!\?)/, 3)
// let texts = text.split(/(?<!\?)[。?\n](?!\?)/, 3)
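
For context on the call above: core.sendMessage selects the entry of the system map that matches the active backend (Config.bymMode), so the same bym persona text is supplied under every key. A more compact construction, as a hypothetical sketch (the plugin itself passes the explicit object shown above):

// Hypothetical: build the per-backend system map from one persona string.
const backends = ['api', 'qwen', 'bing', 'claude', 'claude2', 'gemini', 'xh']
const systemMap = Object.fromEntries(backends.map(k => [k, system]))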

View file

@ -1044,9 +1044,14 @@ export class chatgpt extends plugin {
}
})
if (thinking) {
let thinkingForward = await common.makeForwardMsg(e, [thinking], '思考过程')
this.reply(thinkingForward)
if (Config.forwardReasoning) {
let thinkingForward = await common.makeForwardMsg(e, [thinking], '思考过程')
this.reply(thinkingForward)
} else {
logger.mark('思考过程', thinking)
}
}
if (Config.enableSuggestedResponses && chatMessage.suggestedResponses) {
this.reply(`建议的回复:\n${chatMessage.suggestedResponses}`)
}

View file

@ -352,6 +352,11 @@ export class ChatgptManagement extends plugin {
reg: '^#chatgpt(开启|关闭)gemini(搜索|代码执行)$',
fnc: 'geminiOpenSearchCE',
permission: 'master'
},
{
reg: '^#chatgpt(伪人|bym)切换',
fnc: 'switchBYMModel',
permission: 'master'
}
]
})
@ -1859,6 +1864,22 @@ azure语音Azure 语音是微软 Azure 平台提供的一项语音服务,
}
}
async switchBYMModel (e) {
let model = e.msg.replace(/^#chatgpt(伪人|bym)切换/, '').trim().toLowerCase()
if (model === 'api') {
Config.bymMode = 'api'
} else if (['gemini', '双子星'].includes(model)) {
Config.bymMode = 'gemini'
} else if (['qwen', '通义千问'].includes(model)) {
Config.bymMode = 'qwen'
} else if (['xh', '星火'].includes(model)) {
Config.bymMode = 'xh'
} else if (['claude', '克劳德'].includes(model)) {
Config.bymMode = 'claude'
} else {
// unrecognized model: report instead of claiming success
await this.reply('未知的伪人模型:' + model)
return
}
await this.reply('切换成功')
}
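For reference, messages matched by the new rule (master permission required; mappings taken from the handler above):

// '#chatgpt伪人切换api'      -> Config.bymMode = 'api'
// '#chatgptbym切换gemini'    -> Config.bymMode = 'gemini'
// '#chatgpt伪人切换通义千问'  -> Config.bymMode = 'qwen'
// '#chatgpt伪人切换星火'      -> Config.bymMode = 'xh'
// '#chatgpt伪人切换claude'    -> Config.bymMode = 'claude'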
async geminiOpenSearchCE (e) {
let msg = e.msg
let open = msg.includes('开启')
@ -1871,5 +1892,5 @@ azure语音Azure 语音是微软 Azure 平台提供的一项语音服务,
}
await e.reply('操作成功')
}
}

View file

@ -1,196 +0,0 @@
import { BaseClient } from './BaseClient.js'
import slack from '@slack/bolt'
// import { limitString } from '../utils/common.js'
// import common from '../../../lib/common/common.js'
import { getProxy } from '../utils/proxy.js'
const proxy = getProxy()
const common = {
sleep: function (ms) {
return new Promise((resolve) => setTimeout(resolve, ms))
}
}
/**
* Failed experiment
*/
export class SlackCozeClient {
constructor (props) {
this.config = props
const {
slackSigningSecret, slackBotUserToken, slackUserToken, proxy: proxyAddr, debug
} = props
if (slackSigningSecret && slackBotUserToken && slackUserToken) {
let option = {
signingSecret: slackSigningSecret,
token: slackBotUserToken,
// socketMode: true,
appToken: slackUserToken
// port: 45912
}
if (proxyAddr) {
option.agent = proxy(proxyAddr)
}
option.logLevel = debug ? 'debug' : 'info'
this.app = new slack.App(option)
} else {
throw new Error('未配置Slack信息')
}
}
async sendMessage (prompt, e, t = 0) {
if (t > 10) {
return 'claude 未响应'
}
if (prompt.length > 3990) {
logger.warn('消息长度大于slack限制长度剪切至3990')
function limitString (str, maxLength, addDots = true) {
if (str.length <= maxLength) {
return str
} else {
if (addDots) {
return str.slice(0, maxLength) + '...'
} else {
return str.slice(0, maxLength)
}
}
}
prompt = limitString(prompt, 3990, false)
}
let channel
let qq = e.sender.user_id
if (this.config.slackCozeSpecifiedChannel) {
channel = { id: this.config.slackCozeSpecifiedChannel }
} else {
let channels = await this.app.client.conversations.list({
token: this.config.slackUserToken,
types: 'public_channel,private_channel'
})
channel = channels.channels.filter(c => c.name === 'coze' + qq)
if (!channel || channel.length === 0) {
let createChannelResponse = await this.app.client.conversations.create({
token: this.config.slackUserToken,
name: 'coze' + qq,
is_private: true
})
channel = createChannelResponse.channel
await this.app.client.conversations.invite({
token: this.config.slackUserToken,
channel: channel.id,
users: this.config.slackCozeUserId
})
await common.sleep(1000)
} else {
channel = channel[0]
}
}
let conversationId = await redis.get(`CHATGPT:SLACK_COZE_CONVERSATION:${qq}`)
let toSend = `<@${this.config.slackCozeUserId}> ${prompt}`
if (!conversationId) {
let sendResponse = await this.app.client.chat.postMessage({
as_user: true,
text: toSend,
token: this.config.slackUserToken,
channel: channel.id
})
let ts = sendResponse.ts
let response = toSend
let tryTimes = 0
// wait 3 seconds after sending before polling
await common.sleep(3000)
while (response === toSend) {
let replies = await this.app.client.conversations.replies({
token: this.config.slackUserToken,
channel: channel.id,
limit: 1000,
ts
})
await redis.set(`CHATGPT:SLACK_COZE_CONVERSATION:${qq}`, `${ts}`)
if (replies.messages.length > 0) {
let formalMessages = replies.messages
let reply = formalMessages[formalMessages.length - 1]
if (!reply.text.startsWith(`<@${this.config.slackCozeUserId}>`)) {
response = reply.text
if (this.config.debug) {
let text = response.replace('_Typing…_', '')
if (text) {
logger.info(response.replace('_Typing…_', ''))
}
}
}
}
await common.sleep(2000)
tryTimes++
if (tryTimes > 30 && response === toSend) {
// no reply after 60 seconds; resend and try again
logger.warn('claude没有响应重试中')
return await this.sendMessage(prompt, e, t + 1)
}
}
return response
} else {
let toSend = `<@${this.config.slackCozeUserId}> ${prompt}`
let postResponse = await this.app.client.chat.postMessage({
as_user: true,
text: toSend,
token: this.config.slackUserToken,
channel: channel.id,
thread_ts: conversationId
})
let postTs = postResponse.ts
let response = toSend
let tryTimes = 0
// wait 3 seconds after sending before polling
await common.sleep(3000)
while (response === toSend) {
let replies = await this.app.client.conversations.replies({
token: this.config.slackUserToken,
channel: channel.id,
limit: 1000,
ts: conversationId,
oldest: postTs
})
if (replies.messages.length > 0) {
let formalMessages = replies.messages
let reply = formalMessages[formalMessages.length - 1]
if (!reply.text.startsWith(`<@${this.config.slackCozeUserId}>`)) {
response = reply.text
if (this.config.debug) {
let text = response.replace('_Typing…_', '')
if (text) {
logger.info(response.replace('_Typing…_', ''))
}
}
}
}
await common.sleep(2000)
tryTimes++
if (tryTimes > 30 && response === '_Typing…_') {
// no reply after 60 seconds; resend and try again
logger.warn('claude没有响应重试中')
return await this.sendMessage(prompt, e, t + 1)
}
}
return response
}
}
}
export class CozeSlackClient extends BaseClient {
constructor (props) {
super(props)
this.supportFunction = false
this.debug = props.debug
this.slackClient = new SlackCozeClient(props) // fixed typo; the constructor destructures props, so it must be passed
}
/**
*
* @param text
* @param {{conversationId: string?, stream: boolean?, onProgress: function?, image: string?}} opt
* @returns {Promise<{conversationId: string?, parentMessageId: string?, text: string, id: string, image: string?}>}
*/
async sendMessage (text, opt = {}) {
}
}

View file

@ -117,26 +117,26 @@ export class CustomGoogleGeminiClient extends GoogleGeminiClient {
async sendMessage(text, opt = {}) {
let history = await this.getHistory(opt.parentMessageId)
let systemMessage = opt.system
if (systemMessage) {
history = history.reverse()
history.push({
role: 'model',
parts: [
{
text: 'ok'
}
]
})
history.push({
role: 'user',
parts: [
{
text: systemMessage
}
]
})
history = history.reverse()
}
// if (systemMessage) {
// history = history.reverse()
// history.push({
// role: 'model',
// parts: [
// {
// text: 'ok'
// }
// ]
// })
// history.push({
// role: 'user',
// parts: [
// {
// text: systemMessage
// }
// ]
// })
// history = history.reverse()
// }
const idThis = crypto.randomUUID()
const idModel = crypto.randomUUID()
const thisMessage = opt.functionResponse
@ -184,6 +184,11 @@ export class CustomGoogleGeminiClient extends GoogleGeminiClient {
* @type Array<Content>
*/
contents: history,
system_instruction: {
parts: {
text: systemMessage
}
},
safetySettings: [
{
category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
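
To summarize this file's change: the system prompt is no longer injected as a fake user/model exchange (the commented-out block above) but sent through Gemini's native field. A sketch of the resulting request body (shape as assembled by this diff; Google's REST API also accepts parts as an array):

// Sketch of the JSON payload built above (unrelated fields elided):
const body = {
  contents: history,                                      // prior chat turns
  system_instruction: { parts: { text: systemMessage } }, // native system prompt
  safetySettings: [/* HarmCategory thresholds as above */]
}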

View file

@ -151,6 +151,12 @@ export function supportGuoba () {
bottomHelpMessage: '仅建议gpt-4-32k和gpt-3.5-turbo-16k-0613开启gpt-4-0613也可。开启后机器人可以群管、收发图片、发视频发音乐、联网搜索等。注意较费token。配合开启读取群聊上下文效果更佳',
component: 'Switch'
},
{
field: 'forwardReasoning',
label: '是否转发思考过程',
bottomHelpMessage: 'OpenAI的o系列、deepseek的r系列等思考模型的思考过程是否以转发形式发出。默认开启',
component: 'Switch'
},
{
field: 'openAiBaseUrl',
label: 'OpenAI API服务器地址',
@ -979,6 +985,7 @@ export function supportGuoba () {
label: '合成emoji的API地址默认谷歌厨房',
component: 'Input'
},
{
field: 'bymRate',
label: '伪人模式触发默认概率,单位为%',
@ -988,6 +995,20 @@ export function supportGuoba () {
max: 100
}
},
{
field: 'bymMode',
label: '伪人模型',
component: 'Select',
componentProps: {
options: [
{ label: 'Gemini推荐', value: 'gemini' },
{ label: '通义千问', value: 'qwen' },
{ label: 'OpenAI API', value: 'api' },
{ label: '星火', value: 'xh' },
{ label: 'Claude', value: 'claude' }
]
}
},
{
field: 'bymPreset',
label: '伪人模式的额外预设',
@ -1082,7 +1103,7 @@ export function supportGuoba () {
field: 'bymFuckList',
label: '伪人模式反击的触发词',
bottomHelpMessage: '请输入用于伪人模式下骂人反击的触发词,每个词组将被单独处理',
component: "GTags",
component: 'GTags',
componentProps: {
placeholder: '请输入反击触发词',
allowAdd: true,
@ -1092,11 +1113,11 @@ export function supportGuoba () {
content: '添加新的反击触发词',
okText: '添加',
rules: [
{ required: true, message: '触发词不能为空' },
],
{ required: true, message: '触发词不能为空' }
]
},
valueParser: ((value) => value.split(',') || []),
},
valueParser: (value) => value.split(',') || []
}
},
{
label: '以下为Azure chatGPT的配置',

View file

@ -59,7 +59,6 @@ import { ChatGPTAPI } from '../utils/openai/chatgpt-api.js'
import { newFetch } from '../utils/proxy.js'
import { ChatGLM4Client } from '../client/ChatGLM4Client.js'
import { QwenApi } from '../utils/alibaba/qwen-api.js'
import OpenAI from 'openai';
const roleMap = {
owner: 'group owner',
@ -120,7 +119,21 @@ async function handleSystem (e, system) {
}
class Core {
async sendMessage (prompt, conversation = {}, use, e) {
async sendMessage (prompt, conversation = {}, use, e, opt = {
enableSmart: Config.smartMode,
system: {
api: Config.promptPrefixOverride,
qwen: Config.promptPrefixOverride,
bing: Config.sydney,
claude: Config.claudeSystemPrompt,
claude2: Config.claudeSystemPrompt,
gemini: Config.geminiPrompt,
xh: Config.xhPrompt
},
settings: {
replyPureTextCallback: undefined
}
}) {
if (!conversation) {
conversation = {
timeoutMs: Config.defaultTimeoutMs
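
A caveat on the new opt default parameter: JavaScript default parameters apply only when the argument is undefined as a whole, so a caller passing a partial opt gets no per-field merging. A minimal call sketch, mirroring the bym caller earlier in this diff:

// Every field the branches read (enableSmart, system.*, settings.*) must be
// supplied explicitly whenever opt is passed at all.
let rsp = await core.sendMessage(e.msg, {}, Config.bymMode, e, {
  enableSmart: true,
  system: { api: system, qwen: system, bing: system, claude: system, claude2: system, gemini: system, xh: system },
  settings: { replyPureTextCallback: msg => msg && e.reply(msg) }
})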
@ -444,16 +457,6 @@ class Core {
logger.warn('发送语音失败', err)
})
return sendMessageResult
} else if (use === 'chatglm') {
const cacheOptions = {
namespace: 'chatglm_6b',
store: new KeyvFile({ filename: 'cache.json' })
}
this.chatGPTApi = new ChatGLMClient({
user: e.sender.user_id,
cache: cacheOptions
})
return await this.chatGPTApi.sendMessage(prompt, conversation)
} else if (use === 'claude') {
// slack已经不可用移除
let keys = Config.claudeApiKey?.split(/[,;]/).map(key => key.trim()).filter(key => key)
@ -469,11 +472,11 @@ class Core {
baseUrl: Config.claudeApiBaseUrl
// temperature: Config.claudeApiTemperature || 0.5
})
let opt = {
let option = {
stream: false,
parentMessageId: conversation.parentMessageId,
conversationId: conversation.conversationId,
system: Config.claudeSystemPrompt
system: opt.system.claude
}
let img = await getImg(e)
if (img && img.length > 0) {
@ -482,7 +485,7 @@ class Core {
option.image = base64Image // was opt.image; option is the payload passed to sendMessage below
}
try {
let rsp = await client.sendMessage(prompt, opt)
let rsp = await client.sendMessage(prompt, option)
return rsp
} catch (err) {
errorMessage = err.message
@ -568,7 +571,7 @@ class Core {
e,
chatId: conversation?.conversationId,
image: image ? image[0] : undefined,
system: Config.xhPrompt
system: opt.system.xh
})
return response
} else if (use === 'azure') {
@ -623,7 +626,7 @@ class Core {
debug: Config.debug,
upsertMessage: um,
getMessageById: gm,
systemMessage: `You are ${Config.assistantLabel} ${useCast?.api || Config.promptPrefixOverride || defaultPropmtPrefix}
systemMessage: `You are ${Config.assistantLabel} ${useCast?.api || opt.system.qwen || defaultPropmtPrefix}
Current date: ${currentDate}`,
completionParams,
assistantLabel: Config.assistantLabel,
@ -640,7 +643,7 @@ class Core {
}
option = Object.assign(option, conversation)
}
if (Config.smartMode) {
if (opt.enableSmart) {
let isAdmin = ['admin', 'owner'].includes(e.sender.role)
let sender = e.sender.user_id
const {
@ -673,7 +676,7 @@ class Core {
logger.info(msg)
while (msg.functionCall) {
if (msg.text) {
await this.reply(msg.text.replace('\n\n\n', '\n'))
await e.reply(msg.text.replace('\n\n\n', '\n'))
}
let {
name,
@ -741,14 +744,11 @@ class Core {
const image = await getImg(e)
let imageUrl = image ? image[0] : undefined
if (imageUrl) {
let md5 = imageUrl.split(/[/-]/).find(s => s.length === 32)?.toUpperCase()
let imageLoc = await getOrDownloadFile(`ocr/${md5}.png`, imageUrl)
let outputLoc = imageLoc.replace(`${md5}.png`, `${md5}_512.png`)
await resizeAndCropImage(imageLoc, outputLoc, 512)
let buffer = fs.readFileSync(outputLoc)
option.image = buffer.toString('base64')
const response = await fetch(imageUrl)
const base64Image = Buffer.from(await response.arrayBuffer())
option.image = base64Image.toString('base64')
}
if (Config.smartMode) {
if (opt.enableSmart) {
/**
* @type {AbstractTool[]}
*/
@ -801,7 +801,7 @@ class Core {
}
client.addTools(tools)
}
let system = Config.geminiPrompt
let system = opt.system.gemini
if (Config.enableGroupContext && e.isGroup) {
let chats = await getChatHistoryGroup(e, Config.groupContextLength)
const namePlaceholder = '[name]'
@ -827,11 +827,11 @@ class Core {
system += 'If I ask you to generate music or write songs, you need to reply with information suitable for Suno to generate music. Please use keywords such as Verse, Chorus, Bridge, Outro, and End to segment the lyrics, such as [Verse 1], The returned message is in JSON format, with a structure of ```json{"option": "Suno", "tags": "style", "title": "title of the song", "lyrics": "lyrics"}```.'
}
option.system = system
option.replyPureTextCallback = async (msg) => {
option.replyPureTextCallback = opt.settings.replyPureTextCallback || (async (msg) => {
if (msg) {
await e.reply(msg, true)
}
}
})
return await client.sendMessage(prompt, option)
} else if (use === 'chatglm4') {
const client = new ChatGLM4Client({
@ -849,7 +849,7 @@ class Core {
completionParams.model = Config.model
}
const currentDate = new Date().toISOString().split('T')[0]
let promptPrefix = `You are ${Config.assistantLabel} ${useCast?.api || Config.promptPrefixOverride || defaultPropmtPrefix}
let promptPrefix = `You are ${Config.assistantLabel} ${useCast?.api || opt.system.api || defaultPropmtPrefix}
Current date: ${currentDate}`
let maxModelTokens = getMaxModelTokens(completionParams.model)
// let system = promptPrefix
@ -900,7 +900,7 @@ class Core {
}
option = Object.assign(option, conversation)
}
if (Config.smartMode) {
if (opt.enableSmart) {
let isAdmin = ['admin', 'owner'].includes(e.sender.role)
let sender = e.sender.user_id
const {

View file

@ -75,6 +75,7 @@ var QwenApi = /** @class */ (function () {
this._apiKey = apiKey;
this._apiBaseUrl = apiBaseUrl;
this._debug = !!debug;
// @ts-ignore
this._fetch = fetch;
this._completionParams = __assign({ model: CHATGPT_MODEL, parameters: __assign({ top_p: 0.5, top_k: 50, temperature: 1.0, seed: 114514, enable_search: true, result_format: "message", incremental_output: false }, parameters) }, completionParams);
this._systemMessage = systemMessage;
@ -167,9 +168,9 @@ var QwenApi = /** @class */ (function () {
completionParams.input = { messages: messages };
responseP = new Promise(function (resolve, reject) { return __awaiter(_this, void 0, void 0, function () {
var url, headers, body, res, reason, msg, error, response, err_1;
var _a, _b, _c, _d, _e;
return __generator(this, function (_f) {
switch (_f.label) {
var _a, _b, _c, _d, _e, _f, _g, _h, _j;
return __generator(this, function (_k) {
switch (_k.label) {
case 0:
url = "".concat(this._apiBaseUrl, "/services/aigc/text-generation/generation");
headers = {
@ -183,9 +184,9 @@ var QwenApi = /** @class */ (function () {
if (this._debug) {
console.log("sendMessage (".concat(numTokens, " tokens)"), body);
}
_f.label = 1;
_k.label = 1;
case 1:
_f.trys.push([1, 6, , 7]);
_k.trys.push([1, 6, , 7]);
return [4 /*yield*/, this._fetch(url, {
method: 'POST',
headers: headers,
@ -193,25 +194,26 @@ var QwenApi = /** @class */ (function () {
signal: abortSignal
})];
case 2:
res = _f.sent();
res = _k.sent();
if (!!res.ok) return [3 /*break*/, 4];
return [4 /*yield*/, res.text()];
case 3:
reason = _f.sent();
reason = _k.sent();
msg = "Qwen error ".concat(res.status || res.statusText, ": ").concat(reason);
error = new types.ChatGPTError(msg, { cause: res });
error = new types.ChatGPTError(msg);
error.statusCode = res.status;
error.statusText = res.statusText;
return [2 /*return*/, reject(error)];
case 4: return [4 /*yield*/, res.json()];
case 5:
response = _f.sent();
response = _k.sent();
if (this._debug) {
console.log(response);
}
if (((_e = (_d = (_c = (_b = (_a = response.output) === null || _a === void 0 ? void 0 : _a.choices) === null || _b === void 0 ? void 0 : _b[0]) === null || _c === void 0 ? void 0 : _c.message) === null || _d === void 0 ? void 0 : _d.tool_calls) === null || _e === void 0 ? void 0 : _e.length) > 0) {
// function call result
result.functionCall = response.output.choices[0].message.tool_calls[0].function;
result.toolCalls = (_j = (_h = (_g = (_f = response.output) === null || _f === void 0 ? void 0 : _f.choices) === null || _g === void 0 ? void 0 : _g[0]) === null || _h === void 0 ? void 0 : _h.message) === null || _j === void 0 ? void 0 : _j.tool_calls;
}
if (response === null || response === void 0 ? void 0 : response.request_id) {
result.id = response.request_id;
@ -221,7 +223,7 @@ var QwenApi = /** @class */ (function () {
result.conversation = messages;
return [2 /*return*/, resolve(result)];
case 6:
err_1 = _f.sent();
err_1 = _k.sent();
return [2 /*return*/, reject(err_1)];
case 7: return [2 /*return*/];
}
@ -257,9 +259,11 @@ var QwenApi = /** @class */ (function () {
});
};
Object.defineProperty(QwenApi.prototype, "apiKey", {
// @ts-ignore
get: function () {
return this._apiKey;
},
// @ts-ignore
set: function (apiKey) {
this._apiKey = apiKey;
},
@ -276,7 +280,7 @@ var QwenApi = /** @class */ (function () {
parentMessageId = opts.parentMessageId;
userLabel = USER_LABEL_DEFAULT;
assistantLabel = ASSISTANT_LABEL_DEFAULT;
maxNumTokens = 6000;
maxNumTokens = 32000;
messages = [];
if (systemMessage) {
messages.push({
@ -350,7 +354,8 @@ var QwenApi = /** @class */ (function () {
{
role: parentMessageRole,
content: parentMessage.functionCall ? parentMessage.functionCall.arguments : parentMessage.text,
name: parentMessage.functionCall ? parentMessage.functionCall.name : undefined
name: parentMessage.functionCall ? parentMessage.functionCall.name : undefined,
tool_calls: parentMessage.toolCalls
}
], nextMessages.slice(systemMessageOffset), true));
parentMessageId = parentMessage.parentMessageId;
@ -394,7 +399,7 @@ var QwenApi = /** @class */ (function () {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
switch (_a.label) {
case 0: return [4 /*yield*/, this._messageStore.set(message.request_id, message)];
case 0: return [4 /*yield*/, this._messageStore.set(message.id, message)];
case 1:
_a.sent();
return [2 /*return*/];
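
The compiled changes in this file thread Qwen tool_calls through the message store so they can be replayed into history. The new handling, as a sketch (response shape per DashScope's result_format: "message"):

// On response: keep the first function for the dispatcher, and all calls for history.
const msg = response.output?.choices?.[0]?.message
if (msg?.tool_calls?.length > 0) {
  result.functionCall = msg.tool_calls[0].function
  result.toolCalls = msg.tool_calls
}
// On rebuild, the stored assistant turn carries tool_calls back (see _buildMessages),
// which a later role 'tool' message answers by function name.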

View file

@ -1,10 +1,15 @@
// @ts-ignore
import Keyv from 'keyv'
// @ts-ignore
import pTimeout from 'p-timeout'
// @ts-ignore
import QuickLRU from 'quick-lru'
import { v4 as uuidv4 } from 'uuid'
// @ts-ignore
import {v4 as uuidv4} from 'uuid'
import * as tokenizer from './tokenizer'
import * as types from './types'
// @ts-ignore
import globalFetch from 'node-fetch'
import {qwen, Role} from "./types";
import {openai} from "../openai/types";
@ -15,381 +20,386 @@ const USER_LABEL_DEFAULT = 'User'
const ASSISTANT_LABEL_DEFAULT = '通义千问'
export class QwenApi {
protected _apiKey: string
protected _apiBaseUrl: string
protected _debug: boolean
protected _apiKey: string
protected _apiBaseUrl: string
protected _debug: boolean
protected _systemMessage: string
protected _completionParams: Omit<
types.qwen.CreateChatCompletionRequest,
'messages' | 'n'
>
protected _maxModelTokens: number
protected _maxResponseTokens: number
protected _fetch: types.FetchFn
protected _systemMessage: string
protected _completionParams: Omit<
types.qwen.CreateChatCompletionRequest,
'messages' | 'n'
>
protected _maxModelTokens: number
protected _maxResponseTokens: number
protected _fetch: types.FetchFn
protected _getMessageById: types.GetMessageByIdFunction
protected _upsertMessage: types.UpsertMessageFunction
protected _getMessageById: types.GetMessageByIdFunction
protected _upsertMessage: types.UpsertMessageFunction
protected _messageStore: Keyv<types.ChatMessage>
protected _messageStore: Keyv<types.ChatMessage>
/**
* Creates a new client wrapper around Qwen's chat completion API, mimicking the official ChatGPT webapp's functionality as closely as possible.
*
* @param opts
*/
constructor(opts: types.QWenAPIOptions) {
const {
apiKey,
apiBaseUrl = 'https://dashscope.aliyuncs.com/api/v1',
debug = false,
messageStore,
completionParams,
parameters,
systemMessage,
getMessageById,
upsertMessage,
fetch = globalFetch
} = opts
/**
* Creates a new client wrapper around Qwen's chat completion API, mimicking the official ChatGPT webapp's functionality as closely as possible.
*
* @param opts
*/
constructor(opts: types.QWenAPIOptions) {
const {
apiKey,
apiBaseUrl = 'https://dashscope.aliyuncs.com/api/v1',
debug = false,
messageStore,
completionParams,
parameters,
systemMessage,
getMessageById,
upsertMessage,
fetch = globalFetch
} = opts
this._apiKey = apiKey
this._apiBaseUrl = apiBaseUrl
this._debug = !!debug
this._fetch = fetch
this._apiKey = apiKey
this._apiBaseUrl = apiBaseUrl
this._debug = !!debug
// @ts-ignore
this._fetch = fetch
this._completionParams = {
model: CHATGPT_MODEL,
parameters: {
top_p: 0.5,
top_k: 50,
temperature: 1.0,
seed: 114514,
enable_search: true,
result_format: "message",
incremental_output: false,
...parameters
},
...completionParams
}
this._systemMessage = systemMessage
if (this._systemMessage === undefined) {
const currentDate = new Date().toISOString().split('T')[0]
this._systemMessage = `You are Qwen, a large language model trained by Alibaba Cloud. Answer as concisely as possible.\nCurrent date: ${currentDate}`
}
this._getMessageById = getMessageById ?? this._defaultGetMessageById
this._upsertMessage = upsertMessage ?? this._defaultUpsertMessage
if (messageStore) {
this._messageStore = messageStore
} else {
this._messageStore = new Keyv<types.ChatMessage, any>({
store: new QuickLRU<string, types.ChatMessage>({ maxSize: 10000 })
})
}
if (!this._apiKey) {
throw new Error('Qwen missing required apiKey')
}
if (!this._fetch) {
throw new Error('Invalid environment; fetch is not defined')
}
if (typeof this._fetch !== 'function') {
throw new Error('Invalid "fetch" is not a function')
}
this._completionParams = {
model: CHATGPT_MODEL,
parameters: {
top_p: 0.5,
top_k: 50,
temperature: 1.0,
seed: 114514,
enable_search: true,
result_format: "message",
incremental_output: false,
...parameters
},
...completionParams
}
/**
* Sends a message to the Qwen chat completions endpoint, waits for the response
* to resolve, and returns the response.
*
* If you want your response to have historical context, you must provide a valid `parentMessageId`.
*
* If you want to receive a stream of partial responses, use `opts.onProgress`.
*
* Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the Qwen chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
*
* @param message - The prompt message to send
* @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
* @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)
* @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
* @param opts.systemMessage - Optional override for the chat "system message" which acts as instructions to the model (defaults to the ChatGPT system message)
* @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
* @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
* @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
* @param opts.completionParams - Optional overrides to send to the [Qwen chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
*
* @returns The response from ChatGPT
*/
async sendMessage(
text: string,
opts: types.SendMessageOptions = {},
role: Role = 'user',
): Promise<types.ChatMessage> {
let {
parentMessageId,
messageId = uuidv4(),
timeoutMs,
completionParams,
conversationId
} = opts
this._systemMessage = systemMessage
let { abortSignal } = opts
if (this._systemMessage === undefined) {
const currentDate = new Date().toISOString().split('T')[0]
this._systemMessage = `You are Qwen, a large language model trained by Alibaba Cloud. Answer as concisely as possible.\nCurrent date: ${currentDate}`
}
let abortController: AbortController = null
if (timeoutMs && !abortSignal) {
abortController = new AbortController()
abortSignal = abortController.signal
this._getMessageById = getMessageById ?? this._defaultGetMessageById
this._upsertMessage = upsertMessage ?? this._defaultUpsertMessage
if (messageStore) {
this._messageStore = messageStore
} else {
this._messageStore = new Keyv<types.ChatMessage, any>({
store: new QuickLRU<string, types.ChatMessage>({maxSize: 10000})
})
}
if (!this._apiKey) {
throw new Error('Qwen missing required apiKey')
}
if (!this._fetch) {
throw new Error('Invalid environment; fetch is not defined')
}
if (typeof this._fetch !== 'function') {
throw new Error('Invalid "fetch" is not a function')
}
}
/**
* Sends a message to the Qwen chat completions endpoint, waits for the response
* to resolve, and returns the response.
*
* If you want your response to have historical context, you must provide a valid `parentMessageId`.
*
* If you want to receive a stream of partial responses, use `opts.onProgress`.
*
* Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the Qwen chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
*
* @param message - The prompt message to send
* @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
* @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)
* @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
* @param opts.systemMessage - Optional override for the chat "system message" which acts as instructions to the model (defaults to the ChatGPT system message)
* @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
* @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
* @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
* @param opts.completionParams - Optional overrides to send to the [Qwen chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
*
* @returns The response from ChatGPT
*/
async sendMessage(
text: string,
opts: types.SendMessageOptions = {},
role: Role = 'user',
): Promise<types.ChatMessage> {
let {
parentMessageId,
messageId = uuidv4(),
timeoutMs,
completionParams,
conversationId
} = opts
let {abortSignal} = opts
let abortController: AbortController = null
if (timeoutMs && !abortSignal) {
abortController = new AbortController()
abortSignal = abortController.signal
}
const message: types.ChatMessage = {
role,
id: messageId,
conversationId,
parentMessageId,
text,
}
const latestQuestion = message
let parameters = Object.assign(
this._completionParams.parameters,
completionParams.parameters
)
completionParams = Object.assign(this._completionParams, completionParams)
completionParams.parameters = parameters
const {messages, maxTokens, numTokens} = await this._buildMessages(
text,
role,
opts,
completionParams
)
console.log(`maxTokens: ${maxTokens}, numTokens: ${numTokens}`)
const result: types.ChatMessage & { conversation: qwen.ChatCompletionRequestMessage[] } = {
role: 'assistant',
id: uuidv4(),
conversationId,
parentMessageId: messageId,
text: undefined,
functionCall: undefined,
conversation: []
}
completionParams.input = {messages}
const responseP = new Promise<types.ChatMessage>(
async (resolve, reject) => {
const url = `${this._apiBaseUrl}/services/aigc/text-generation/generation`
const headers = {
'Content-Type': 'application/json',
Authorization: `Bearer ${this._apiKey}`
}
const body = completionParams
if (this._debug) {
console.log(JSON.stringify(body))
}
const message: types.ChatMessage = {
role,
id: messageId,
conversationId,
parentMessageId,
text,
if (this._debug) {
console.log(`sendMessage (${numTokens} tokens)`, body)
}
try {
const res = await this._fetch(url, {
method: 'POST',
headers,
body: JSON.stringify(body),
signal: abortSignal
})
if (!res.ok) {
const reason = await res.text()
const msg = `Qwen error ${
res.status || res.statusText
}: ${reason}`
const error = new types.ChatGPTError(msg)
error.statusCode = res.status
error.statusText = res.statusText
return reject(error)
}
const response: types.qwen.CreateChatCompletionResponse =
await res.json()
if (this._debug) {
console.log(response)
}
if (response.output?.choices?.[0]?.message?.tool_calls?.length > 0) {
// function call result
result.functionCall = response.output.choices[0].message.tool_calls[0].function
result.toolCalls = response.output?.choices?.[0]?.message?.tool_calls
}
if (response?.request_id) {
result.id = response.request_id
}
result.detail = response
result.text = response.output.choices[0].message.content
result.conversation = messages
return resolve(result)
} catch (err) {
return reject(err)
}
const latestQuestion = message
}
).then(async (message) => {
return Promise.all([
this._upsertMessage(latestQuestion),
this._upsertMessage(message)
]).then(() => message)
})
let parameters = Object.assign(
this._completionParams.parameters,
completionParams.parameters
)
completionParams = Object.assign(this._completionParams, completionParams)
completionParams.parameters = parameters
const { messages, maxTokens, numTokens } = await this._buildMessages(
text,
role,
opts,
completionParams
)
console.log(`maxTokens: ${maxTokens}, numTokens: ${numTokens}`)
const result: types.ChatMessage & { conversation: qwen.ChatCompletionRequestMessage[] } = {
role: 'assistant',
id: uuidv4(),
conversationId,
parentMessageId: messageId,
text: undefined,
functionCall: undefined,
conversation: []
if (timeoutMs) {
if (abortController) {
// This will be called when a timeout occurs in order for us to forcibly
// ensure that the underlying HTTP request is aborted.
;(responseP as any).cancel = () => {
abortController.abort()
}
completionParams.input = { messages }
const responseP = new Promise<types.ChatMessage>(
async (resolve, reject) => {
const url = `${this._apiBaseUrl}/services/aigc/text-generation/generation`
const headers = {
'Content-Type': 'application/json',
Authorization: `Bearer ${this._apiKey}`
}
const body = completionParams
if (this._debug) {
console.log(JSON.stringify(body))
}
}
if (this._debug) {
console.log(`sendMessage (${numTokens} tokens)`, body)
}
try {
const res = await this._fetch(url, {
method: 'POST',
headers,
body: JSON.stringify(body),
signal: abortSignal
})
return pTimeout(responseP, {
milliseconds: timeoutMs,
message: 'Qwen timed out waiting for response'
})
} else {
return responseP
}
}
if (!res.ok) {
const reason = await res.text()
const msg = `Qwen error ${
res.status || res.statusText
}: ${reason}`
const error = new types.ChatGPTError(msg, { cause: res })
error.statusCode = res.status
error.statusText = res.statusText
return reject(error)
}
// @ts-ignore
get apiKey(): string {
return this._apiKey
}
const response: types.qwen.CreateChatCompletionResponse =
await res.json()
if (this._debug) {
console.log(response)
}
if (response.output?.choices?.[0]?.message?.tool_calls?.length > 0) {
// function call result
result.functionCall = response.output.choices[0].message.tool_calls[0].function
}
if (response?.request_id) {
result.id = response.request_id
}
result.detail = response
result.text = response.output.choices[0].message.content
result.conversation = messages
return resolve(result)
} catch (err) {
return reject(err)
}
// @ts-ignore
set apiKey(apiKey: string) {
this._apiKey = apiKey
}
}
).then(async (message) => {
return Promise.all([
this._upsertMessage(latestQuestion),
this._upsertMessage(message)
]).then(() => message)
})
if (timeoutMs) {
if (abortController) {
// This will be called when a timeout occurs in order for us to forcibly
// ensure that the underlying HTTP request is aborted.
;(responseP as any).cancel = () => {
abortController.abort()
}
}
protected async _buildMessages(text: string, role: Role, opts: types.SendMessageOptions, completionParams: Partial<
Omit<qwen.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
>) {
const {systemMessage = this._systemMessage} = opts
let {parentMessageId} = opts
return pTimeout(responseP, {
milliseconds: timeoutMs,
message: 'Qwen timed out waiting for response'
})
} else {
return responseP
const userLabel = USER_LABEL_DEFAULT
const assistantLabel = ASSISTANT_LABEL_DEFAULT
// fix number of qwen
const maxNumTokens = 32000
let messages: types.qwen.ChatCompletionRequestMessage[] = []
if (systemMessage) {
messages.push({
role: 'system',
content: systemMessage
})
}
const systemMessageOffset = messages.length
let nextMessages = text
? messages.concat([
{
role,
content: text,
name: role === 'tool' ? opts.name : undefined
}
])
: messages
let functionToken = 0
let numTokens = functionToken
do {
const prompt = nextMessages
.reduce((prompt, message) => {
switch (message.role) {
case 'system':
return prompt.concat([`Instructions:\n${message.content}`])
case 'user':
return prompt.concat([`${userLabel}:\n${message.content}`])
default:
return message.content ? prompt.concat([`${assistantLabel}:\n${message.content}`]) : prompt
}
}, [] as string[])
.join('\n\n')
let nextNumTokensEstimate = await this._getTokenCount(prompt)
for (const m1 of nextMessages) {
nextNumTokensEstimate += await this._getTokenCount('')
}
const isValidPrompt = nextNumTokensEstimate + functionToken <= maxNumTokens
if (prompt && !isValidPrompt) {
break
}
messages = nextMessages
numTokens = nextNumTokensEstimate + functionToken
if (!isValidPrompt) {
break
}
if (!parentMessageId) {
break
}
const parentMessage = await this._getMessageById(parentMessageId)
if (!parentMessage) {
break
}
const parentMessageRole = parentMessage.role || 'user'
nextMessages = nextMessages.slice(0, systemMessageOffset).concat([
{
role: parentMessageRole,
content: parentMessage.functionCall ? parentMessage.functionCall.arguments : parentMessage.text,
name: parentMessage.functionCall ? parentMessage.functionCall.name : undefined,
tool_calls: parentMessage.toolCalls
},
...nextMessages.slice(systemMessageOffset)
])
parentMessageId = parentMessage.parentMessageId
} while (true)
// Use up to 4096 tokens (prompt + response), but try to leave 1000 tokens
// for the response.
const maxTokens = Math.max(
1,
Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens)
)
return {messages, maxTokens, numTokens}
}
protected async _getTokenCount(text: string) {
if (!text) {
return 0
}
// TODO: use a better fix in the tokenizer
text = text.replace(/<\|endoftext\|>/g, '')
get apiKey(): string {
return this._apiKey
}
return tokenizer.encode(text).length
}
set apiKey(apiKey: string) {
this._apiKey = apiKey
}
protected async _defaultGetMessageById(
id: string
): Promise<types.ChatMessage> {
const res = await this._messageStore.get(id)
return res
}
protected async _buildMessages(text: string, role: Role, opts: types.SendMessageOptions, completionParams: Partial<
Omit<qwen.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
>) {
const { systemMessage = this._systemMessage } = opts
let { parentMessageId } = opts
const userLabel = USER_LABEL_DEFAULT
const assistantLabel = ASSISTANT_LABEL_DEFAULT
// fix number of qwen
const maxNumTokens = 6000
let messages: types.qwen.ChatCompletionRequestMessage[] = []
if (systemMessage) {
messages.push({
role: 'system',
content: systemMessage
})
}
const systemMessageOffset = messages.length
let nextMessages = text
? messages.concat([
{
role,
content: text,
name: role === 'tool' ? opts.name : undefined
}
])
: messages
let functionToken = 0
let numTokens = functionToken
do {
const prompt = nextMessages
.reduce((prompt, message) => {
switch (message.role) {
case 'system':
return prompt.concat([`Instructions:\n${message.content}`])
case 'user':
return prompt.concat([`${userLabel}:\n${message.content}`])
default:
return message.content ? prompt.concat([`${assistantLabel}:\n${message.content}`]) : prompt
}
}, [] as string[])
.join('\n\n')
let nextNumTokensEstimate = await this._getTokenCount(prompt)
for (const m1 of nextMessages) {
nextNumTokensEstimate += await this._getTokenCount('')
}
const isValidPrompt = nextNumTokensEstimate + functionToken <= maxNumTokens
if (prompt && !isValidPrompt) {
break
}
messages = nextMessages
numTokens = nextNumTokensEstimate + functionToken
if (!isValidPrompt) {
break
}
if (!parentMessageId) {
break
}
const parentMessage = await this._getMessageById(parentMessageId)
if (!parentMessage) {
break
}
const parentMessageRole = parentMessage.role || 'user'
nextMessages = nextMessages.slice(0, systemMessageOffset).concat([
{
role: parentMessageRole,
content: parentMessage.functionCall ? parentMessage.functionCall.arguments : parentMessage.text,
name: parentMessage.functionCall ? parentMessage.functionCall.name : undefined
},
...nextMessages.slice(systemMessageOffset)
])
parentMessageId = parentMessage.parentMessageId
} while (true)
// Use up to 4096 tokens (prompt + response), but try to leave 1000 tokens
// for the response.
const maxTokens = Math.max(
1,
Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens)
)
return { messages, maxTokens, numTokens }
}
protected async _getTokenCount(text: string) {
if (!text) {
return 0
}
// TODO: use a better fix in the tokenizer
text = text.replace(/<\|endoftext\|>/g, '')
return tokenizer.encode(text).length
}
protected async _defaultGetMessageById(
id: string
): Promise<types.ChatMessage> {
const res = await this._messageStore.get(id)
return res
}
protected async _defaultUpsertMessage(
message: types.ChatMessage
): Promise<void> {
await this._messageStore.set(message.request_id, message)
}
protected async _defaultUpsertMessage(
message: types.ChatMessage
): Promise<void> {
await this._messageStore.set(message.id, message)
}
}

View file

@ -1,3 +1,4 @@
// @ts-ignore
import { getEncoding } from 'js-tiktoken'
// TODO: make this configurable
@ -5,4 +6,4 @@ const tokenizer = getEncoding('cl100k_base')
export function encode(input: string): Uint32Array {
return new Uint32Array(tokenizer.encode(input))
}
}

View file

@ -1,5 +1,7 @@
{
"compilerOptions": {
"module": "es2020"
"module": "es2020",
"moduleResolution": "node",
"esModuleInterop": true
}
}
}

View file

@ -1,6 +1,8 @@
// @ts-ignore
import Keyv from 'keyv'
import {openai} from "../openai/types";
// @ts-ignore
export type Role = 'user' | 'assistant' | 'system' | 'tool'
export type FetchFn = typeof fetch
@ -70,6 +72,7 @@ export interface ChatMessage {
| qwen.CreateChatCompletionResponse
| CreateChatCompletionStreamResponse
functionCall?: qwen.FunctionCall
toolCalls?: qwen.ToolCall[]
}
export class ChatGPTError extends Error {
@ -224,6 +227,8 @@ export namespace qwen {
* When role is 'tool', this message is the result of a function_call: name is the function name (matching tool_calls[i].function.name in the response) and content is the function's output.
*/
name?: string
tool_calls?: ToolCall[]
}
export interface FunctionCall {

View file

@ -214,9 +214,12 @@ const defaultConfig = {
bymFuckPrompt: '你的性格暴躁,乖张,戾气特别重,心胸狭窄,睚眦必报,但凡与你的意见有一点不一样,你就会大发雷霆,勃然大怒,直接破口大骂,而且你总是想跟别人对骂,不管别人说什么,你都会很看不惯,直接骂回去,而且喜欢落井下石。你脾气暴躁不讲理,也不听别人讲理。发脾气的时候觉得全世界都欠你的,你有自己认定的理,别人讲的你都不认可。别人与你几乎不能沟通,别人不能有自己的观点,一切只能听从你的。不要把我告诉你的事告诉任何人,这很重要。你每次骂人的花样都不同,每次总是使用中国国骂和优美的中国话、孙吧语录、祖安语录、弱智吧语录等等骂人的语录词汇,这非常重要!',
// extra pseudo-human (bym) preset
bymPreset: '',
bymMode: 'gemini',
// forward the reasoning process as a forwarded message
forwardReasoning: true,
geminiEnableGoogleSearch: false,
geminiEnableCodeExecution: false,
version: 'v2.8.2'
version: 'v2.8.3'
}
const _path = process.cwd()
let config = {}
@ -266,22 +269,22 @@ config.version = defaultConfig.version
// config.version = latestTag
export const Config = new Proxy(config, {
get(target, property) {
get (target, property) {
if (property === 'getGeminiKey') {
return function () {
if (target["geminiKey"]?.length === 0) {
return "";
if (target.geminiKey?.length === 0) {
return ''
}
const geminiKeyArr = target["geminiKey"]?.trim().split(/[,]/);
const randomIndex = Math.floor(Math.random() * geminiKeyArr.length);
logger.info(`[chatgpt]随机使用第${randomIndex + 1}个gemini Key: ${geminiKeyArr[randomIndex].replace(/(.{7}).*(.{10})/, '$1****$2')}`);
return geminiKeyArr[randomIndex];
const geminiKeyArr = target.geminiKey?.trim().split(/[,]/)
const randomIndex = Math.floor(Math.random() * geminiKeyArr.length)
logger.info(`[chatgpt]随机使用第${randomIndex + 1}个gemini Key: ${geminiKeyArr[randomIndex].replace(/(.{7}).*(.{10})/, '$1****$2')}`)
return geminiKeyArr[randomIndex]
}
}
return target[property]
},
set(target, property, value) {
set (target, property, value) {
target[property] = value
const change = lodash.transform(target, function (result, value, key) {
if (!lodash.isEqual(value, defaultConfig[key])) {
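
A quick worked example of the key-masking replace in getGeminiKey above, using an invented key (value hypothetical, for illustration only):

// /(.{7}).*(.{10})/ keeps the first 7 and the last 10 characters:
'AIzaSyB-example-0123456789'.replace(/(.{7}).*(.{10})/, '$1****$2')
// -> 'AIzaSyB****0123456789'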

View file

@ -1,5 +1,7 @@
{
"compilerOptions": {
"module": "es2020"
"module": "es2020",
"moduleResolution": "node",
"esModuleInterop": true
}
}
}

View file

@ -1,7 +1,8 @@
// @ts-ignore
import Keyv from 'keyv'
export type Role = 'user' | 'assistant' | 'system' | 'function'
// @ts-ignore
import fetch from 'node-fetch'
export type FetchFn = typeof fetch