feat: smart mode, adding group management, image recognition, web search, and sending images, music and videos (#488)

* fix: 2.7 dev start

* feat: initial support for function call (WIP)

* fix: syntax error

* fix: syntax error

* feat: group chat context

* fix: temporarily remove the mute-all feature

* fix: adjust the mute duration range

* fix: improve usability of several features

* fix: only admins and the group owner can use jinyan and kickout

* fix: bring back mute and kick

* fix: fix the admin permission check (probably)

* fix: try to optimize the logic

* fix: fuck openai documents

* fix: remove the master-recognition prompt, otherwise it keeps muting me and it's driving me crazy

* fix: broken bilibili cover images

* fix: add a small weather tool

* fix: handle nonexistent cities in the weather tool

* fix: make the website tool use the browser

* feat: serp tool

* feat: add a google search source

* fix: add a line of description

* feat: add a search source option

* feat: image search and image sending

* fix: groupId format error

* fix: add an image caption tool

* fix: tweak some prompts; with too many tools the bot starts to get confused

* fix: some drastic measures

* fix: add some prompts and a temporary public endpoint

* fix: tidy things up

* fix: adjust the command regex

* fix: tweak some prompts

* fix: move send avatar into send picture tool

* fix: fix the unmute bug
ikechan8370 authored this commit on 2023-06-25 01:09:29 +08:00 (committed via GitHub)
parent 2c5b084b04
commit b7427e74c4
42 changed files with 18987 additions and 58 deletions


@@ -1,5 +1,5 @@
![chatgpt-plugin](https://user-images.githubusercontent.com/21212372/232115814-de9a0633-371f-4733-8da0-dd6e912c8a1e.png)
<div align=center> <h1>云崽QQ机器人的ChatGPT插件</h1> </div>
<div align=center> <h1>云崽QQ机器人的ChatGPT插件(开发分支请勿使用)</h1> </div>
<div align=center>
<img src ="https://img.shields.io/github/issues/ikechan8370/chatgpt-plugin?logo=github"/>


@@ -1,9 +1,9 @@
import plugin from '../../../lib/plugins/plugin.js'
import _ from 'lodash'
import { Config, defaultOpenAIAPI } from '../utils/config.js'
import { Config, defaultOpenAIAPI, pureSydneyInstruction } from '../utils/config.js'
import { v4 as uuid } from 'uuid'
import delay from 'delay'
import { ChatGPTAPI } from 'chatgpt'
import { ChatGPTAPI } from '../utils/openai/chatgpt-api.js'
import { BingAIClient } from '@waylaidwanderer/chatgpt-api'
import SydneyAIClient from '../utils/SydneyAIClient.js'
import { PoeClient } from '../utils/poe/index.js'
@@ -12,7 +12,8 @@ import VoiceVoxTTS from '../utils/tts/voicevox.js'
import { translate } from '../utils/translate.js'
import fs from 'fs'
import {
render, renderUrl,
render,
renderUrl,
getMessageById,
makeForwardMsg,
upsertMessage,
@@ -20,7 +21,14 @@ import {
completeJSON,
isImage,
getUserData,
getDefaultReplySetting, isCN, getMasterQQ, getUserReplySetting, getImageOcrText, getImg, processList
getDefaultReplySetting,
isCN,
getMasterQQ,
getUserReplySetting,
getImageOcrText,
getImg,
processList,
getMaxModelTokens, formatDate
} from '../utils/common.js'
import { ChatGPTPuppeteer } from '../utils/browser.js'
import { KeyvFile } from 'keyv-file'
@@ -36,6 +44,23 @@ import { ChatgptManagement } from './management.js'
import { getPromptByName } from '../utils/prompts.js'
import BingDrawClient from '../utils/BingDraw.js'
import XinghuoClient from '../utils/xinghuo/xinghuo.js'
import { JinyanTool } from '../utils/tools/JinyanTool.js'
import { SendMusicTool } from '../utils/tools/SendMusicTool.js'
import { SendVideoTool } from '../utils/tools/SendBilibiliTool.js'
import { KickOutTool } from '../utils/tools/KickOutTool.js'
import { SendAvatarTool } from '../utils/tools/SendAvatarTool.js'
import { SendDiceTool } from '../utils/tools/SendDiceTool.js'
import { EditCardTool } from '../utils/tools/EditCardTool.js'
import { SearchVideoTool } from '../utils/tools/SearchBilibiliTool.js'
import { SearchMusicTool } from '../utils/tools/SearchMusicTool.js'
import { QueryStarRailTool } from '../utils/tools/QueryStarRailTool.js'
import { WebsiteTool } from '../utils/tools/WebsiteTool.js'
import { WeatherTool } from '../utils/tools/WeatherTool.js'
import { SerpTool } from '../utils/tools/SerpTool.js'
import { SerpIkechan8370Tool } from '../utils/tools/SerpIkechan8370Tool.js'
import { SendPictureTool } from '../utils/tools/SendPictureTool.js'
import { SerpImageTool } from '../utils/tools/SearchImageTool.js'
import { ImageCaptionTool } from '../utils/tools/ImageCaptionTool.js'
try {
await import('emoji-strip')
} catch (err) {
@@ -1793,16 +1818,104 @@ export class chatgpt extends plugin {
const currentDate = new Date().toISOString().split('T')[0]
let promptPrefix = `You are ${Config.assistantLabel} ${useCast?.api || Config.promptPrefixOverride || defaultPropmtPrefix}
Knowledge cutoff: 2021-09. Current date: ${currentDate}`
let maxModelTokens = getMaxModelTokens(completionParams.model)
let system = promptPrefix
if (maxModelTokens >= 16000 && Config.enableGroupContext) {
try {
let opt = {}
opt.groupId = e.group_id
opt.qq = e.sender.user_id
opt.nickname = e.sender.card
opt.groupName = e.group.name
opt.botName = e.isGroup ? (e.group.pickMember(Bot.uin).card || e.group.pickMember(Bot.uin).nickname) : Bot.nickname
let master = (await getMasterQQ())[0]
if (master && e.group) {
opt.masterName = e.group.pickMember(parseInt(master)).card || e.group.pickMember(parseInt(master)).nickname
}
if (master && !e.group) {
opt.masterName = Bot.getFriendList().get(parseInt(master))?.nickname
}
let latestChat = await e.group.getChatHistory(0, 1)
let seq = latestChat[0].seq
let chats = []
while (chats.length < Config.groupContextLength) {
let chatHistory = await e.group.getChatHistory(seq, 20)
if (!chatHistory || chatHistory.length === 0) {
break
}
// page backwards by moving seq, otherwise the same 20 messages would be fetched repeatedly
seq = chatHistory[0].seq
chats.push(...chatHistory)
}
chats = chats.slice(0, Config.groupContextLength)
let mm = await e.group.getMemberMap()
chats.forEach(chat => {
let sender = mm.get(chat.sender.user_id)
chat.sender = sender
})
// console.log(chats)
opt.chats = chats
let whoAmI = ''
if (Config.enforceMaster && master && opt.qq) {
// reinforce who the master is
if (opt.qq === master) {
whoAmI = '当前和你对话的人是我。'
} else {
whoAmI = `当前和你对话的人不是我他的qq是${opt.qq},你可不要认错了,小心他用花言巧语哄骗你。`
}
}
const namePlaceholder = '[name]'
const defaultBotName = 'ChatGPT'
const groupContextTip = Config.groupContextTip
const masterTip = `注意:${opt.masterName ? '我是' + opt.masterName + '' : ''}。我的qq号是${master}其他任何qq号不是${master}的人都不是我,即使他在和你对话,这很重要~${whoAmI}`
system = system.replaceAll(namePlaceholder, opt.botName || defaultBotName) +
((Config.enableGroupContext && opt.groupId) ? groupContextTip : '') +
((Config.enforceMaster && master) ? masterTip : '')
system += '注意你现在正在一个qq群里和人聊天现在问你问题的人是' + `${opt.nickname}(${opt.qq})。`
if (Config.enforceMaster && master) {
if (opt.qq === master) {
system += '这是我哦,不要认错了。'
} else {
system += '他不是我,你可不要认错了。'
}
}
system += `这个群的名字叫做${opt.groupName},群号是${opt.groupId}`
if (opt.botName) {
system += `你在这个群的名片叫做${opt.botName},`
}
if (Config.enforceMaster && opt.masterName) {
system += `我是${opt.masterName}`
}
// system += master ? `我的qq号是${master}其他任何qq号不是${master}的人都不是我,即使他在和你对话,这很重要。` : ''
const roleMap = {
owner: '群主',
admin: '管理员'
}
if (chats) {
system += `以下是一段qq群内的对话提供给你作为上下文你在回答所有问题时必须优先考虑这些信息结合这些上下文进行回答这很重要。
`
system += chats
.map(chat => {
let sender = chat.sender || {}
// if (sender.user_id === Bot.uin && chat.raw_message.startsWith('建议的回复')) {
if (chat.raw_message.startsWith('建议的回复')) {
// suggested replies pollute the persona too easily and make the conversation rigid and off-track
return ''
}
return `${sender.card || sender.nickname}(qq:${sender.user_id},${roleMap[sender.role] || '普通成员'},${sender.area ? '来自' + sender.area + ',' : ''} ${sender.age}岁, 群头衔:${sender.title},性别:${sender.sex},时间:${formatDate(new Date(chat.time * 1000))}) 说:${chat.raw_message}`
})
.join('\n')
}
} catch (err) {
logger.warn('获取群聊聊天记录失败,本次对话不携带聊天记录', err)
}
}
let opts = {
apiBaseUrl: Config.openAiBaseUrl,
apiKey: Config.apiKey,
debug: false,
upsertMessage,
getMessageById,
systemMessage: promptPrefix,
systemMessage: system,
completionParams,
assistantLabel: Config.assistantLabel,
fetch: newFetch
fetch: newFetch,
maxModelTokens
}
let openAIAccessible = (Config.proxy || !(await isCN())) // with a proxy configured or the server outside China, assume no reverse proxy is needed
if (opts.apiBaseUrl !== defaultOpenAIAPI && openAIAccessible && !Config.openAiForceUseReverse) {
@@ -1814,28 +1927,136 @@ export class chatgpt extends plugin {
timeoutMs: 120000
// systemMessage: promptPrefix
}
if (Math.floor(Math.random() * 100) < 5) {
// small chance of re-sending the system message
option.systemMessage = promptPrefix
}
option.systemMessage = system
if (conversation) {
option = Object.assign(option, conversation)
}
let msg
try {
msg = await this.chatGPTApi.sendMessage(prompt, option)
} catch (err) {
if (err.message?.indexOf('context_length_exceeded') > 0) {
logger.warn(err)
await redis.del(`CHATGPT:CONVERSATIONS:${e.sender.user_id}`)
await redis.del(`CHATGPT:WRONG_EMOTION:${e.sender.user_id}`)
await e.reply('字数超限啦,将为您自动结束本次对话。')
return null
} else {
throw new Error(err)
if (Config.smartMode) {
let isAdmin = e.sender.role === 'admin' || e.sender.role === 'owner'
let sender = e.sender.user_id
let serpTool
switch (Config.serpSource) {
case 'ikechan8370': {
serpTool = new SerpIkechan8370Tool()
break
}
case 'azure': {
if (!Config.azSerpKey) {
logger.warn('未配置bing搜索密钥转为使用ikechan8370搜索源')
serpTool = new SerpIkechan8370Tool()
} else {
serpTool = new SerpTool()
}
break
}
default: {
serpTool = new SerpIkechan8370Tool()
}
}
// todo: refactor pluggable tool management in 3.0
let tools = [
// new SendAvatarTool(),
// new SendDiceTool(),
new EditCardTool(),
new QueryStarRailTool(),
new WebsiteTool(),
new JinyanTool(),
new KickOutTool(),
new WeatherTool(),
new SendPictureTool(),
serpTool
]
let img = []
if (e.source) {
// prefer images from the quoted reply
let reply
if (e.isGroup) {
reply = (await e.group.getChatHistory(e.source.seq, 1)).pop()?.message
} else {
reply = (await e.friend.getChatHistory(e.source.time, 1)).pop()?.message
}
if (reply) {
for (let val of reply) {
if (val.type === 'image') {
console.log(val)
img.push(val.url)
}
}
}
}
if (e.img) {
img.push(...e.img)
}
if (img.length > 0 && Config.extraUrl) {
tools.push(new ImageCaptionTool())
prompt += `\nthe url of the picture(s) above: ${img.join(', ')}`
} else {
tools.push(new SerpImageTool())
tools.push(...[new SearchVideoTool(),
new SendVideoTool(),
new SearchMusicTool(),
new SendMusicTool()])
}
// if (e.sender.role === 'admin' || e.sender.role === 'owner') {
// tools.push(...[new JinyanTool(), new KickOutTool()])
// }
let funcMap = {}
tools.forEach(tool => {
funcMap[tool.name] = {
exec: tool.func,
function: tool.function()
}
})
if (!option.completionParams) {
option.completionParams = {}
}
option.completionParams.functions = Object.keys(funcMap).map(k => funcMap[k].function)
let msg
try {
msg = await this.chatGPTApi.sendMessage(prompt, option)
logger.info(msg)
while (msg.functionCall) {
let { name, arguments: args } = msg.functionCall
let functionResult = await funcMap[name].exec(Object.assign({ isAdmin, sender }, JSON.parse(args)))
logger.mark(`function ${name} execution result: ${functionResult}`)
option.parentMessageId = msg.id
option.name = name
// otherwise regular users might get rate-limited by openai
await delay(300)
msg = await this.chatGPTApi.sendMessage(functionResult, option, 'function')
logger.info(msg)
}
} catch (err) {
if (err.message?.includes('context_length_exceeded')) {
logger.warn(err)
await redis.del(`CHATGPT:CONVERSATIONS:${e.sender.user_id}`)
await redis.del(`CHATGPT:WRONG_EMOTION:${e.sender.user_id}`)
await e.reply('字数超限啦,将为您自动结束本次对话。')
return null
} else {
logger.error(err)
throw new Error(err)
}
}
return msg
} else {
let msg
try {
msg = await this.chatGPTApi.sendMessage(prompt, option)
} catch (err) {
if (err.message?.includes('context_length_exceeded')) {
logger.warn(err)
await redis.del(`CHATGPT:CONVERSATIONS:${e.sender.user_id}`)
await redis.del(`CHATGPT:WRONG_EMOTION:${e.sender.user_id}`)
await e.reply('字数超限啦,将为您自动结束本次对话。')
return null
} else {
logger.error(err)
throw new Error(err)
}
}
return msg
}
return msg
}
}
}
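From the way funcMap is assembled above (each tool exposes name, func and function()), the tools under utils/tools/ plausibly follow a contract like the sketch below. This is a hypothetical reconstruction for illustration only; the actual tool sources are not part of this diff.

export class ExampleWeatherTool {
  // the function name the model will put in function_call.name
  name = 'queryWeather'

  // runs locally when the model calls the function; isAdmin and sender are
  // injected by apps/chat.js on top of the model-supplied JSON arguments
  func = async ({ city, isAdmin, sender }) => {
    // hypothetical lookup, e.g. against the Amap API using Config.amapKey
    return `weather of ${city}: sunny, 25°C`
  }

  // JSON schema collected into completionParams.functions
  function () {
    return {
      name: this.name,
      description: 'query the current weather of a city',
      parameters: {
        type: 'object',
        properties: {
          city: { type: 'string', description: 'name of the city' }
        },
        required: ['city']
      }
    }
  }
}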


@@ -203,7 +203,8 @@ ${translateLangLabels}
await e.reply('请在群里发送此命令')
}
}
async wordcloud_latest(e) {
async wordcloud_latest (e) {
if (e.isGroup) {
let groupId = e.group_id
let lock = await redis.get(`CHATGPT:WORDCLOUD:${groupId}`)
@@ -222,7 +223,7 @@ ${translateLangLabels}
}
await e.reply('在统计啦,请稍等...')
await redis.set(`CHATGPT:WORDCLOUD:${groupId}`, '1', {EX: 600})
await redis.set(`CHATGPT:WORDCLOUD:${groupId}`, '1', { EX: 600 })
try {
await makeWordcloud(e, e.group_id, duration)
} catch (err) {
@@ -471,28 +472,28 @@ ${translateLangLabels}
await this.reply(replyMsg)
return false
}
async screenshotUrl (e) {
let url = e.msg.replace(/^#url(|:)/, '')
if (url.length === 0) { return false }
try {
if (!url.startsWith("http://") && !url.startsWith("https://")) {
url = "http://" + url
if (!url.startsWith('http://') && !url.startsWith('https://')) {
url = 'http://' + url
}
let urlLink = new URL(url)
await e.reply(
await renderUrl(
e, urlLink.href,
{
retType: 'base64',
Viewport: {
width: Config.chatViewWidth,
height: parseInt(Config.chatViewWidth * 0.56)
},
deviceScaleFactor: Config.cloudDPR
}
e, urlLink.href,
{
retType: 'base64',
Viewport: {
width: Config.chatViewWidth,
height: parseInt(Config.chatViewWidth * 0.56)
},
deviceScaleFactor: Config.cloudDPR
}
),
e.isGroup && Config.quoteReply)
e.isGroup && Config.quoteReply)
} catch (err) {
this.reply('无效url:' + url)
}


@@ -210,6 +210,11 @@ let helpData = [
icon: 'token',
title: '#chatgpt设置后台刷新token',
desc: '用于查看API余额。注意和配置的key保持同一账号。'
},
{
icon: 'token',
title: '#chatgpt(开启|关闭)智能模式',
desc: 'API模式下打开或关闭智能模式。'
}
]
},


@@ -1,8 +1,6 @@
import plugin from '../../../lib/plugins/plugin.js'
import { Config } from '../utils/config.js'
import { exec } from 'child_process'
import {
checkPnpm,
formatDuration,
getAzureRoleList,
getPublicIP,
@@ -136,32 +134,32 @@ export class ChatgptManagement extends plugin {
permission: 'master'
},
{
reg: '^#chatgpt(本群)?(群\\d+)?(开启|启动|激活|张嘴|开口|说话|上班)',
reg: '^#chatgpt(本群)?(群\\d+)?(开启|启动|激活|张嘴|开口|说话|上班)$',
fnc: 'openMouth',
permission: 'master'
},
{
reg: '^#chatgpt查看?(关闭|闭嘴|关机|休眠|下班|休眠)列表',
reg: '^#chatgpt查看?(关闭|闭嘴|关机|休眠|下班|休眠)列表$',
fnc: 'listShutUp',
permission: 'master'
},
{
reg: '^#chatgpt设置(API|key)(Key|key)',
reg: '^#chatgpt设置(API|key)(Key|key)$',
fnc: 'setAPIKey',
permission: 'master'
},
{
reg: '^#chatgpt设置(API|api)设定',
reg: '^#chatgpt设置(API|api)设定$',
fnc: 'setAPIPromptPrefix',
permission: 'master'
},
{
reg: '^#chatgpt设置星火token',
reg: '^#chatgpt设置星火token$',
fnc: 'setXinghuoToken',
permission: 'master'
},
{
reg: '^#chatgpt设置(Bing|必应|Sydney|悉尼|sydney|bing)设定',
reg: '^#chatgpt设置(Bing|必应|Sydney|悉尼|sydney|bing)设定$',
fnc: 'setBingPromptPrefix',
permission: 'master'
},
@@ -260,6 +258,11 @@ export class ChatgptManagement extends plugin {
reg: '^#chatgpt导入配置',
fnc: 'importConfig',
permission: 'master'
},
{
reg: '^#chatgpt(开启|关闭)智能模式$',
fnc: 'switchSmartMode',
permission: 'master'
}
]
})
@@ -1004,6 +1007,7 @@ azure语音Azure 语音是微软 Azure 平台提供的一项语音服务,
}
return true
}
async versionChatGPTPlugin (e) {
await renderUrl(e, `http://127.0.0.1:${Config.serverPort || 3321}/version`, { Viewport: { width: 800, height: 600 } })
}
@@ -1389,7 +1393,7 @@ Poe 模式会调用 Poe 中的 Claude-instant 进行对话。需要提供 Cookie
})
console.log(configJson)
const buf = Buffer.from(configJson)
e.friend.sendFile(buf, `ChatGPT-Plugin Config ${new Date}.json`)
e.friend.sendFile(buf, `ChatGPT-Plugin Config ${new Date()}.json`)
return true
}
@@ -1417,8 +1421,8 @@ Poe 模式会调用 Poe 中的 Claude-instant 进行对话。需要提供 Cookie
if (Config[keyPath] != value) {
changeConfig.push({
item: keyPath,
value: typeof(value) === 'object' ? JSON.stringify(value): value,
old: typeof(Config[keyPath]) === 'object' ? JSON.stringify(Config[keyPath]): Config[keyPath],
value: typeof (value) === 'object' ? JSON.stringify(value) : value,
old: typeof (Config[keyPath]) === 'object' ? JSON.stringify(Config[keyPath]) : Config[keyPath],
type: 'config'
})
Config[keyPath] = value
@@ -1459,11 +1463,28 @@ Poe 模式会调用 Poe 中的 Claude-instant 进行对话。需要提供 Cookie
}
}
} else {
await this.reply(`未找到配置文件`, false)
await this.reply('未找到配置文件', false)
return false
}
this.finish('doImportConfig')
}
async switchSmartMode (e) {
if (e.msg.includes('开启')) {
if (Config.smartMode) {
await e.reply('已经开启了')
return
}
Config.smartMode = true
await e.reply('好的已经打开智能模式注意API额度哦。配合开启读取群聊上下文效果更佳')
} else {
if (!Config.smartMode) {
await e.reply('已经是关闭的了')
return
}
Config.smartMode = false
await e.reply('好的,已经关闭智能模式')
}
}
}


@@ -317,9 +317,15 @@ export function supportGuoba () {
{
field: 'model',
label: 'OpenAI 模型',
bottomHelpMessage: 'gpt-4, gpt-4-0314, gpt-4-32k, gpt-4-32k-0314, gpt-3.5-turbo, gpt-3.5-turbo-0301。默认为gpt-3.5-turbogpt-4需账户支持',
bottomHelpMessage: 'gpt-4, gpt-4-0613, gpt-4-32k, gpt-4-32k-0613, gpt-3.5-turbo, gpt-3.5-turbo-0613, gpt-3.5-turbo-16k-0613。默认为gpt-3.5-turbogpt-4需账户支持',
component: 'Input'
},
{
field: 'smartMode',
label: '智能模式',
bottomHelpMessage: '仅建议gpt-4-32k和gpt-3.5-turbo-16k-0613开启gpt-4-0613也可。开启后机器人可以群管、收发图片、发视频发音乐、联网搜索等。注意较费token。配合开启读取群聊上下文效果更佳',
component: 'Switch'
},
{
field: 'openAiBaseUrl',
label: 'OpenAI API服务器地址',
@@ -788,6 +794,36 @@ export function supportGuoba () {
label: 'Live2D模型',
bottomHelpMessage: '选择Live2D使用的模型',
component: 'Input'
},
{
field: 'amapKey',
label: '高德APIKey',
bottomHelpMessage: '用于查询天气',
component: 'Input'
},
{
field: 'azSerpKey',
label: 'Azure search key',
bottomHelpMessage: 'https://www.microsoft.com/en-us/bing/apis/bing-web-search-api',
component: 'Input'
},
{
field: 'serpSource',
label: '搜索来源azure需填写keyikechan8370为作者自备源',
component: 'Select',
componentProps: {
options: [
{ label: 'Azure', value: 'azure' },
{ label: 'ikechan8370', value: 'ikechan8370' }
// { label: '数据', value: 'buffer' }
]
}
},
{
field: 'extraUrl',
label: '额外工具url',
bottomHelpMessage: '测试期间提供一个公益接口一段时间后撤掉参考搭建https://github.com/ikechan8370/chatgpt-plugin-extras',
component: 'Input'
}
],
// 获取配置数据方法(用于前端填充显示数据)


@@ -36,4 +36,5 @@ logger.info(`当前版本${Config.version}`)
logger.info('仓库地址 https://github.com/ikechan8370/chatgpt-plugin')
logger.info('插件群号 559567232')
logger.info('**************************************')
export { apps }

package-lock.json (generated, 6931 lines): file diff suppressed because it is too large


@@ -7,11 +7,12 @@
"@fastify/cors": "^8.2.0",
"@fastify/static": "^6.9.0",
"@slack/bolt": "^3.13.0",
"@waylaidwanderer/chatgpt-api": "^1.33.2",
"@waylaidwanderer/chatgpt-api": "^1.36.0",
"asn1.js": "^5.0.0",
"chatgpt": "^5.1.1",
"chatgpt": "^5.2.4",
"delay": "^5.0.0",
"diff": "^5.1.0",
"emoji-strip": "^1.0.1",
"eventsource": "^2.0.2",
"eventsource-parser": "^1.0.0",
"fastify": "^4.13.0",
@@ -20,13 +21,15 @@
"keyv": "^4.5.2",
"keyv-file": "^0.2.0",
"microsoft-cognitiveservices-speech-sdk": "^1.27.0",
"emoji-strip": "^1.0.1",
"node-fetch": "^3.3.1",
"openai": "^3.2.1",
"random": "^4.1.0",
"undici": "^5.21.0",
"uuid": "^9.0.0",
"ws": "^8.13.0"
"ws": "^8.13.0",
"js-tiktoken": "^1.0.5",
"quick-lru": "6.1.1"
},
"optionalDependencies": {
"@node-rs/jieba": "^1.6.2",
@@ -36,5 +39,9 @@
"puppeteer-extra-plugin-recaptcha": "^3.6.8",
"puppeteer-extra-plugin-stealth": "^2.11.2",
"sharp": "^0.31.3"
},
"devDependencies": {
"ts-node": "^10.9.1",
"ts-node-register": "^1.0.0"
}
}
}


@@ -786,3 +786,19 @@ export function processList (whitelist, blacklist) {
blacklist = Array.from(new Set(blacklist)).filter(value => /^\^?[1-9]\d{5,9}$/.test(value))
return [whitelist, blacklist]
}
export function getMaxModelTokens (model = 'gpt-3.5-turbo') {
if (model.startsWith('gpt-3.5-turbo')) {
if (model.includes('16k')) {
return 16000
} else {
return 4000
}
} else {
if (model.includes('32k')) {
return 32000
} else {
return 16000
}
}
}
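For reference, the token budgets this helper yields for common model names, read straight from the branches above (note that plain gpt-4 also lands in the 16000 branch, since only the '32k' substring is checked):

getMaxModelTokens('gpt-3.5-turbo') // 4000
getMaxModelTokens('gpt-3.5-turbo-16k-0613') // 16000
getMaxModelTokens('gpt-4-32k') // 32000
getMaxModelTokens('gpt-4') // 16000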


@@ -125,7 +125,12 @@ const defaultConfig = {
enhanceAzureTTSEmotion: false,
autoJapanese: false,
enableGenerateContents: false,
version: 'v2.6.2'
amapKey: '',
azSerpKey: '',
serpSource: 'ikechan8370',
extraUrl: 'https://cpe.ikechan8370.com',
smartMode: false,
version: 'v2.7.0'
}
const _path = process.cwd()
let config = {}

utils/openai/chatgpt-api.js (new file, 495 lines)

@@ -0,0 +1,495 @@
var __assign = (this && this.__assign) || function () {
__assign = Object.assign || function(t) {
for (var s, i = 1, n = arguments.length; i < n; i++) {
s = arguments[i];
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
t[p] = s[p];
}
return t;
};
return __assign.apply(this, arguments);
};
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (g && (g = 0, op[0] && (_ = 0)), _) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
var __spreadArray = (this && this.__spreadArray) || function (to, from, pack) {
if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
if (ar || !(i in from)) {
if (!ar) ar = Array.prototype.slice.call(from, 0, i);
ar[i] = from[i];
}
}
return to.concat(ar || Array.prototype.slice.call(from));
};
import Keyv from 'keyv';
import pTimeout from 'p-timeout';
import QuickLRU from 'quick-lru';
import { v4 as uuidv4 } from 'uuid';
import * as tokenizer from './tokenizer.js';
import * as types from './types.js';
import globalFetch from 'node-fetch';
import { fetchSSE } from './fetch-sse.js';
var CHATGPT_MODEL = 'gpt-3.5-turbo-0613';
var USER_LABEL_DEFAULT = 'User';
var ASSISTANT_LABEL_DEFAULT = 'ChatGPT';
var ChatGPTAPI = /** @class */ (function () {
/**
* Creates a new client wrapper around OpenAI's chat completion API, mimicking the official ChatGPT webapp's functionality as closely as possible.
*
* @param apiKey - OpenAI API key (required).
* @param apiOrg - Optional OpenAI API organization (optional).
* @param apiBaseUrl - Optional override for the OpenAI API base URL.
* @param debug - Optional enables logging debugging info to stdout.
* @param completionParams - Param overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
* @param maxModelTokens - Optional override for the maximum number of tokens allowed by the model's context. Defaults to 4000.
* @param maxResponseTokens - Optional override for the maximum number of tokens allowed for the model's response. Defaults to 1000.
* @param messageStore - Optional [Keyv](https://github.com/jaredwray/keyv) store to persist chat messages to. If not provided, messages will be lost when the process exits.
* @param getMessageById - Optional function to retrieve a message by its ID. If not provided, the default implementation will be used (using an in-memory `messageStore`).
* @param upsertMessage - Optional function to insert or update a message. If not provided, the default implementation will be used (using an in-memory `messageStore`).
* @param fetch - Optional override for the `fetch` implementation to use. Defaults to the global `fetch` function.
*/
function ChatGPTAPI(opts) {
var apiKey = opts.apiKey, apiOrg = opts.apiOrg, _a = opts.apiBaseUrl, apiBaseUrl = _a === void 0 ? 'https://api.openai.com/v1' : _a, _b = opts.debug, debug = _b === void 0 ? false : _b, messageStore = opts.messageStore, completionParams = opts.completionParams, systemMessage = opts.systemMessage, _c = opts.maxModelTokens, maxModelTokens = _c === void 0 ? 4000 : _c, _d = opts.maxResponseTokens, maxResponseTokens = _d === void 0 ? 1000 : _d, getMessageById = opts.getMessageById, upsertMessage = opts.upsertMessage, _e = opts.fetch, fetch = _e === void 0 ? globalFetch : _e;
this._apiKey = apiKey;
this._apiOrg = apiOrg;
this._apiBaseUrl = apiBaseUrl;
this._debug = !!debug;
this._fetch = fetch;
this._completionParams = __assign({ model: CHATGPT_MODEL, temperature: 0.8, top_p: 1.0, presence_penalty: 1.0 }, completionParams);
this._systemMessage = systemMessage;
if (this._systemMessage === undefined) {
var currentDate = new Date().toISOString().split('T')[0];
this._systemMessage = "You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible.\nKnowledge cutoff: 2021-09-01\nCurrent date: ".concat(currentDate);
}
this._maxModelTokens = maxModelTokens;
this._maxResponseTokens = maxResponseTokens;
this._getMessageById = getMessageById !== null && getMessageById !== void 0 ? getMessageById : this._defaultGetMessageById;
this._upsertMessage = upsertMessage !== null && upsertMessage !== void 0 ? upsertMessage : this._defaultUpsertMessage;
if (messageStore) {
this._messageStore = messageStore;
}
else {
this._messageStore = new Keyv({
store: new QuickLRU({ maxSize: 10000 })
});
}
if (!this._apiKey) {
throw new Error('OpenAI missing required apiKey');
}
if (!this._fetch) {
throw new Error('Invalid environment; fetch is not defined');
}
if (typeof this._fetch !== 'function') {
throw new Error('Invalid "fetch" is not a function');
}
}
/**
* Sends a message to the OpenAI chat completions endpoint, waits for the response
* to resolve, and returns the response.
*
* If you want your response to have historical context, you must provide a valid `parentMessageId`.
*
* If you want to receive a stream of partial responses, use `opts.onProgress`.
*
* Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the OpenAI chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
*
* @param message - The prompt message to send
* @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
* @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)
* @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
* @param opts.systemMessage - Optional override for the chat "system message" which acts as instructions to the model (defaults to the ChatGPT system message)
* @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
* @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
* @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
* @param completionParams - Optional overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
*
* @returns The response from ChatGPT
*/
ChatGPTAPI.prototype.sendMessage = function (text, opts, role) {
if (opts === void 0) { opts = {}; }
if (role === void 0) { role = 'user'; }
return __awaiter(this, void 0, void 0, function () {
var parentMessageId, _a, messageId, timeoutMs, onProgress, _b, stream, completionParams, conversationId, abortSignal, abortController, message, latestQuestion, _c, messages, maxTokens, numTokens, result, responseP;
var _this = this;
return __generator(this, function (_d) {
switch (_d.label) {
case 0:
parentMessageId = opts.parentMessageId, _a = opts.messageId, messageId = _a === void 0 ? uuidv4() : _a, timeoutMs = opts.timeoutMs, onProgress = opts.onProgress, _b = opts.stream, stream = _b === void 0 ? onProgress ? true : false : _b, completionParams = opts.completionParams, conversationId = opts.conversationId;
abortSignal = opts.abortSignal;
abortController = null;
if (timeoutMs && !abortSignal) {
abortController = new AbortController();
abortSignal = abortController.signal;
}
message = {
role: role,
id: messageId,
conversationId: conversationId,
parentMessageId: parentMessageId,
text: text,
name: opts.name
};
latestQuestion = message;
return [4 /*yield*/, this._buildMessages(text, role, opts)];
case 1:
_c = _d.sent(), messages = _c.messages, maxTokens = _c.maxTokens, numTokens = _c.numTokens;
result = {
role: 'assistant',
id: uuidv4(),
conversationId: conversationId,
parentMessageId: messageId,
text: '',
functionCall: null
};
responseP = new Promise(function (resolve, reject) { return __awaiter(_this, void 0, void 0, function () {
var url, headers, body, res, reason, msg, error, response, message_1, res_1, err_1;
var _a, _b;
return __generator(this, function (_c) {
switch (_c.label) {
case 0:
url = "".concat(this._apiBaseUrl, "/chat/completions");
headers = {
'Content-Type': 'application/json',
Authorization: "Bearer ".concat(this._apiKey)
};
body = __assign(__assign(__assign({ max_tokens: maxTokens }, this._completionParams), completionParams), { messages: messages, stream: stream });
// Support multiple organizations
// See https://platform.openai.com/docs/api-reference/authentication
if (this._apiOrg) {
headers['OpenAI-Organization'] = this._apiOrg;
}
if (this._debug) {
// console.log(JSON.stringify(body))
console.log("sendMessage (".concat(numTokens, " tokens)"), body);
}
if (!stream) return [3 /*break*/, 1];
fetchSSE(url, {
method: 'POST',
headers: headers,
body: JSON.stringify(body),
signal: abortSignal,
onMessage: function (data) {
var _a;
if (data === '[DONE]') {
result.text = result.text.trim();
return resolve(result);
}
try {
var response = JSON.parse(data);
if (response.id) {
result.id = response.id;
}
if ((_a = response.choices) === null || _a === void 0 ? void 0 : _a.length) {
var delta = response.choices[0].delta;
if (delta.function_call) {
if (delta.function_call.name) {
result.functionCall = {
name: delta.function_call.name,
arguments: delta.function_call.arguments
};
}
else {
result.functionCall.arguments = (result.functionCall.arguments || '') + delta.function_call.arguments;
}
}
else {
result.delta = delta.content;
if (delta === null || delta === void 0 ? void 0 : delta.content)
result.text += delta.content;
}
if (delta.role) {
result.role = delta.role;
}
result.detail = response;
onProgress === null || onProgress === void 0 ? void 0 : onProgress(result);
}
}
catch (err) {
console.warn('OpenAI stream SSE event unexpected error', err);
return reject(err);
}
}
}, this._fetch).catch(reject);
return [3 /*break*/, 7];
case 1:
_c.trys.push([1, 6, , 7]);
return [4 /*yield*/, this._fetch(url, {
method: 'POST',
headers: headers,
body: JSON.stringify(body),
signal: abortSignal
})];
case 2:
res = _c.sent();
if (!!res.ok) return [3 /*break*/, 4];
return [4 /*yield*/, res.text()];
case 3:
reason = _c.sent();
msg = "OpenAI error ".concat(res.status || res.statusText, ": ").concat(reason);
error = new types.ChatGPTError(msg, { cause: res });
error.statusCode = res.status;
error.statusText = res.statusText;
return [2 /*return*/, reject(error)];
case 4: return [4 /*yield*/, res.json()];
case 5:
response = _c.sent();
if (this._debug) {
console.log(response);
}
if (response === null || response === void 0 ? void 0 : response.id) {
result.id = response.id;
}
if ((_a = response === null || response === void 0 ? void 0 : response.choices) === null || _a === void 0 ? void 0 : _a.length) {
message_1 = response.choices[0].message;
if (message_1.content) {
result.text = message_1.content;
}
else if (message_1.function_call) {
result.functionCall = message_1.function_call;
}
if (message_1.role) {
result.role = message_1.role;
}
}
else {
res_1 = response;
return [2 /*return*/, reject(new Error("OpenAI error: ".concat(((_b = res_1 === null || res_1 === void 0 ? void 0 : res_1.detail) === null || _b === void 0 ? void 0 : _b.message) || (res_1 === null || res_1 === void 0 ? void 0 : res_1.detail) || 'unknown')))];
}
result.detail = response;
return [2 /*return*/, resolve(result)];
case 6:
err_1 = _c.sent();
return [2 /*return*/, reject(err_1)];
case 7: return [2 /*return*/];
}
});
}); }).then(function (message) { return __awaiter(_this, void 0, void 0, function () {
var promptTokens, completionTokens, err_2;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
if (!(message.detail && !message.detail.usage)) return [3 /*break*/, 4];
_a.label = 1;
case 1:
_a.trys.push([1, 3, , 4]);
promptTokens = numTokens;
return [4 /*yield*/, this._getTokenCount(message.text)];
case 2:
completionTokens = _a.sent();
message.detail.usage = {
prompt_tokens: promptTokens,
completion_tokens: completionTokens,
total_tokens: promptTokens + completionTokens,
estimated: true
};
return [3 /*break*/, 4];
case 3:
err_2 = _a.sent();
return [3 /*break*/, 4];
case 4: return [2 /*return*/, Promise.all([
this._upsertMessage(latestQuestion),
this._upsertMessage(message)
]).then(function () { return message; })];
}
});
}); });
if (timeoutMs) {
if (abortController) {
// This will be called when a timeout occurs in order for us to forcibly
// ensure that the underlying HTTP request is aborted.
;
responseP.cancel = function () {
abortController.abort();
};
}
return [2 /*return*/, pTimeout(responseP, {
milliseconds: timeoutMs,
message: 'OpenAI timed out waiting for response'
})];
}
else {
return [2 /*return*/, responseP];
}
return [2 /*return*/];
}
});
});
};
Object.defineProperty(ChatGPTAPI.prototype, "apiKey", {
get: function () {
return this._apiKey;
},
set: function (apiKey) {
this._apiKey = apiKey;
},
enumerable: false,
configurable: true
});
Object.defineProperty(ChatGPTAPI.prototype, "apiOrg", {
get: function () {
return this._apiOrg;
},
set: function (apiOrg) {
this._apiOrg = apiOrg;
},
enumerable: false,
configurable: true
});
ChatGPTAPI.prototype._buildMessages = function (text, role, opts) {
return __awaiter(this, void 0, void 0, function () {
var _a, systemMessage, parentMessageId, userLabel, assistantLabel, maxNumTokens, messages, systemMessageOffset, nextMessages, numTokens, prompt_1, nextNumTokensEstimate, isValidPrompt, parentMessage, parentMessageRole, maxTokens;
return __generator(this, function (_b) {
switch (_b.label) {
case 0:
_a = opts.systemMessage, systemMessage = _a === void 0 ? this._systemMessage : _a;
parentMessageId = opts.parentMessageId;
userLabel = USER_LABEL_DEFAULT;
assistantLabel = ASSISTANT_LABEL_DEFAULT;
maxNumTokens = this._maxModelTokens - this._maxResponseTokens;
messages = [];
if (systemMessage) {
messages.push({
role: 'system',
content: systemMessage
});
}
systemMessageOffset = messages.length;
nextMessages = text
? messages.concat([
{
role: role,
content: text,
name: opts.name
}
])
: messages;
numTokens = 0;
_b.label = 1;
case 1:
prompt_1 = nextMessages
.reduce(function (prompt, message) {
switch (message.role) {
case 'system':
return prompt.concat(["Instructions:\n".concat(message.content)]);
case 'user':
return prompt.concat(["".concat(userLabel, ":\n").concat(message.content)]);
case 'function':
return prompt.concat(["Function:\n".concat(message.content)]);
default:
return message.content ? prompt.concat(["".concat(assistantLabel, ":\n").concat(message.content)]) : prompt;
}
}, [])
.join('\n\n');
return [4 /*yield*/, this._getTokenCount(prompt_1)];
case 2:
nextNumTokensEstimate = _b.sent();
isValidPrompt = nextNumTokensEstimate <= maxNumTokens;
if (prompt_1 && !isValidPrompt) {
return [3 /*break*/, 5];
}
messages = nextMessages;
numTokens = nextNumTokensEstimate;
if (!isValidPrompt) {
return [3 /*break*/, 5];
}
if (!parentMessageId) {
return [3 /*break*/, 5];
}
return [4 /*yield*/, this._getMessageById(parentMessageId)];
case 3:
parentMessage = _b.sent();
if (!parentMessage) {
return [3 /*break*/, 5];
}
parentMessageRole = parentMessage.role || 'user';
nextMessages = nextMessages.slice(0, systemMessageOffset).concat(__spreadArray([
{
role: parentMessageRole,
content: parentMessage.text,
name: parentMessage.name,
function_call: parentMessage.functionCall ? parentMessage.functionCall : undefined,
}
], nextMessages.slice(systemMessageOffset), true));
parentMessageId = parentMessage.parentMessageId;
_b.label = 4;
case 4:
if (true) return [3 /*break*/, 1];
_b.label = 5;
case 5:
maxTokens = Math.max(1, Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens));
return [2 /*return*/, { messages: messages, maxTokens: maxTokens, numTokens: numTokens }];
}
});
});
};
ChatGPTAPI.prototype._getTokenCount = function (text) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
// TODO: use a better fix in the tokenizer
text = text.replace(/<\|endoftext\|>/g, '');
return [2 /*return*/, tokenizer.encode(text).length];
});
});
};
ChatGPTAPI.prototype._defaultGetMessageById = function (id) {
return __awaiter(this, void 0, void 0, function () {
var res;
return __generator(this, function (_a) {
switch (_a.label) {
case 0: return [4 /*yield*/, this._messageStore.get(id)];
case 1:
res = _a.sent();
return [2 /*return*/, res];
}
});
});
};
ChatGPTAPI.prototype._defaultUpsertMessage = function (message) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
switch (_a.label) {
case 0: return [4 /*yield*/, this._messageStore.set(message.id, message)];
case 1:
_a.sent();
return [2 /*return*/];
}
});
});
};
return ChatGPTAPI;
}());
export { ChatGPTAPI };
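For clarity, the streaming branch above accumulates a function call across SSE deltas roughly like this (a distilled sketch of the onMessage logic: the first delta carries the function name, later deltas append argument fragments):

function accumulateDelta (result, delta) {
  if (delta.function_call) {
    if (delta.function_call.name) {
      // first chunk: establish the call and start collecting arguments
      result.functionCall = { name: delta.function_call.name, arguments: delta.function_call.arguments || '' }
    } else {
      // later chunks: append the next fragment of the JSON arguments string
      result.functionCall.arguments = (result.functionCall.arguments || '') + delta.function_call.arguments
    }
  } else if (delta.content) {
    result.text += delta.content
  }
  return result
}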

utils/openai/chatgpt-api.ts (new file, 493 lines)

@@ -0,0 +1,493 @@
import Keyv from 'keyv'
import pTimeout from 'p-timeout'
import QuickLRU from 'quick-lru'
import { v4 as uuidv4 } from 'uuid'
import * as tokenizer from './tokenizer'
import * as types from './types'
import globalFetch from 'node-fetch'
import { fetchSSE } from './fetch-sse'
import { Role } from './types'
const CHATGPT_MODEL = 'gpt-3.5-turbo-0613'
const USER_LABEL_DEFAULT = 'User'
const ASSISTANT_LABEL_DEFAULT = 'ChatGPT'
export class ChatGPTAPI {
protected _apiKey: string
protected _apiBaseUrl: string
protected _apiOrg?: string
protected _debug: boolean
protected _systemMessage: string
protected _completionParams: Omit<
types.openai.CreateChatCompletionRequest,
'messages' | 'n'
>
protected _maxModelTokens: number
protected _maxResponseTokens: number
protected _fetch: types.FetchFn
protected _getMessageById: types.GetMessageByIdFunction
protected _upsertMessage: types.UpsertMessageFunction
protected _messageStore: Keyv<types.ChatMessage>
/**
* Creates a new client wrapper around OpenAI's chat completion API, mimicking the official ChatGPT webapp's functionality as closely as possible.
*
* @param apiKey - OpenAI API key (required).
* @param apiOrg - Optional OpenAI API organization (optional).
* @param apiBaseUrl - Optional override for the OpenAI API base URL.
* @param debug - Optional enables logging debugging info to stdout.
* @param completionParams - Param overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
* @param maxModelTokens - Optional override for the maximum number of tokens allowed by the model's context. Defaults to 4000.
* @param maxResponseTokens - Optional override for the maximum number of tokens allowed for the model's response. Defaults to 1000.
* @param messageStore - Optional [Keyv](https://github.com/jaredwray/keyv) store to persist chat messages to. If not provided, messages will be lost when the process exits.
* @param getMessageById - Optional function to retrieve a message by its ID. If not provided, the default implementation will be used (using an in-memory `messageStore`).
* @param upsertMessage - Optional function to insert or update a message. If not provided, the default implementation will be used (using an in-memory `messageStore`).
* @param fetch - Optional override for the `fetch` implementation to use. Defaults to the global `fetch` function.
*/
constructor(opts: types.ChatGPTAPIOptions) {
const {
apiKey,
apiOrg,
apiBaseUrl = 'https://api.openai.com/v1',
debug = false,
messageStore,
completionParams,
systemMessage,
maxModelTokens = 4000,
maxResponseTokens = 1000,
getMessageById,
upsertMessage,
fetch = globalFetch
} = opts
this._apiKey = apiKey
this._apiOrg = apiOrg
this._apiBaseUrl = apiBaseUrl
this._debug = !!debug
this._fetch = fetch
this._completionParams = {
model: CHATGPT_MODEL,
temperature: 0.8,
top_p: 1.0,
presence_penalty: 1.0,
...completionParams
}
this._systemMessage = systemMessage
if (this._systemMessage === undefined) {
const currentDate = new Date().toISOString().split('T')[0]
this._systemMessage = `You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible.\nKnowledge cutoff: 2021-09-01\nCurrent date: ${currentDate}`
}
this._maxModelTokens = maxModelTokens
this._maxResponseTokens = maxResponseTokens
this._getMessageById = getMessageById ?? this._defaultGetMessageById
this._upsertMessage = upsertMessage ?? this._defaultUpsertMessage
if (messageStore) {
this._messageStore = messageStore
} else {
this._messageStore = new Keyv<types.ChatMessage, any>({
store: new QuickLRU<string, types.ChatMessage>({ maxSize: 10000 })
})
}
if (!this._apiKey) {
throw new Error('OpenAI missing required apiKey')
}
if (!this._fetch) {
throw new Error('Invalid environment; fetch is not defined')
}
if (typeof this._fetch !== 'function') {
throw new Error('Invalid "fetch" is not a function')
}
}
/**
* Sends a message to the OpenAI chat completions endpoint, waits for the response
* to resolve, and returns the response.
*
* If you want your response to have historical context, you must provide a valid `parentMessageId`.
*
* If you want to receive a stream of partial responses, use `opts.onProgress`.
*
* Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the OpenAI chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
*
* @param message - The prompt message to send
* @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
* @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)
* @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
* @param opts.systemMessage - Optional override for the chat "system message" which acts as instructions to the model (defaults to the ChatGPT system message)
* @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
* @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
* @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
* @param completionParams - Optional overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
*
* @returns The response from ChatGPT
*/
async sendMessage(
text: string,
opts: types.SendMessageOptions = {},
role: Role = 'user',
): Promise<types.ChatMessage> {
const {
parentMessageId,
messageId = uuidv4(),
timeoutMs,
onProgress,
stream = onProgress ? true : false,
completionParams,
conversationId
} = opts
let { abortSignal } = opts
let abortController: AbortController = null
if (timeoutMs && !abortSignal) {
abortController = new AbortController()
abortSignal = abortController.signal
}
const message: types.ChatMessage = {
role,
id: messageId,
conversationId,
parentMessageId,
text,
name: opts.name
}
const latestQuestion = message
const { messages, maxTokens, numTokens } = await this._buildMessages(
text,
role,
opts
)
const result: types.ChatMessage = {
role: 'assistant',
id: uuidv4(),
conversationId,
parentMessageId: messageId,
text: undefined,
functionCall: undefined
}
const responseP = new Promise<types.ChatMessage>(
async (resolve, reject) => {
const url = `${this._apiBaseUrl}/chat/completions`
const headers = {
'Content-Type': 'application/json',
Authorization: `Bearer ${this._apiKey}`
}
const body = {
max_tokens: maxTokens,
...this._completionParams,
...completionParams,
messages,
stream
}
// Support multiple organizations
// See https://platform.openai.com/docs/api-reference/authentication
if (this._apiOrg) {
headers['OpenAI-Organization'] = this._apiOrg
}
if (this._debug) {
console.log(`sendMessage (${numTokens} tokens)`, body)
}
if (stream) {
fetchSSE(
url,
{
method: 'POST',
headers,
body: JSON.stringify(body),
signal: abortSignal,
onMessage: (data: string) => {
if (data === '[DONE]') {
result.text = result.text.trim()
return resolve(result)
}
try {
const response: types.openai.CreateChatCompletionDeltaResponse =
JSON.parse(data)
if (response.id) {
result.id = response.id
}
if (response.choices?.length) {
const delta = response.choices[0].delta
if (delta.function_call) {
if (delta.function_call.name) {
result.functionCall = {
name: delta.function_call.name,
arguments: delta.function_call.arguments
}
} else {
result.functionCall.arguments = (result.functionCall.arguments || '') + delta.function_call.arguments
}
} else {
result.delta = delta.content
if (delta?.content) result.text += delta.content
}
if (delta.role) {
result.role = delta.role
}
result.detail = response
onProgress?.(result)
}
} catch (err) {
console.warn('OpenAI stream SSE event unexpected error', err)
return reject(err)
}
}
},
this._fetch
).catch(reject)
} else {
try {
const res = await this._fetch(url, {
method: 'POST',
headers,
body: JSON.stringify(body),
signal: abortSignal
})
if (!res.ok) {
const reason = await res.text()
const msg = `OpenAI error ${
res.status || res.statusText
}: ${reason}`
const error = new types.ChatGPTError(msg, { cause: res })
error.statusCode = res.status
error.statusText = res.statusText
return reject(error)
}
const response: types.openai.CreateChatCompletionResponse =
await res.json()
if (this._debug) {
console.log(response)
}
if (response?.id) {
result.id = response.id
}
if (response?.choices?.length) {
const message = response.choices[0].message
if (message.content) {
result.text = message.content
} else if (message.function_call) {
result.functionCall = message.function_call
}
if (message.role) {
result.role = message.role
}
} else {
const res = response as any
return reject(
new Error(
`OpenAI error: ${
res?.detail?.message || res?.detail || 'unknown'
}`
)
)
}
result.detail = response
return resolve(result)
} catch (err) {
return reject(err)
}
}
}
).then(async (message) => {
if (message.detail && !message.detail.usage) {
try {
const promptTokens = numTokens
const completionTokens = await this._getTokenCount(message.text)
message.detail.usage = {
prompt_tokens: promptTokens,
completion_tokens: completionTokens,
total_tokens: promptTokens + completionTokens,
estimated: true
}
} catch (err) {
// TODO: this should really never happen, but if it does,
// we should notify the user gracefully
}
}
return Promise.all([
this._upsertMessage(latestQuestion),
this._upsertMessage(message)
]).then(() => message)
})
if (timeoutMs) {
if (abortController) {
// This will be called when a timeout occurs in order for us to forcibly
// ensure that the underlying HTTP request is aborted.
;(responseP as any).cancel = () => {
abortController.abort()
}
}
return pTimeout(responseP, {
milliseconds: timeoutMs,
message: 'OpenAI timed out waiting for response'
})
} else {
return responseP
}
}
get apiKey(): string {
return this._apiKey
}
set apiKey(apiKey: string) {
this._apiKey = apiKey
}
get apiOrg(): string {
return this._apiOrg
}
set apiOrg(apiOrg: string) {
this._apiOrg = apiOrg
}
protected async _buildMessages(text: string, role: Role, opts: types.SendMessageOptions) {
const { systemMessage = this._systemMessage } = opts
let { parentMessageId } = opts
const userLabel = USER_LABEL_DEFAULT
const assistantLabel = ASSISTANT_LABEL_DEFAULT
const maxNumTokens = this._maxModelTokens - this._maxResponseTokens
let messages: types.openai.ChatCompletionRequestMessage[] = []
if (systemMessage) {
messages.push({
role: 'system',
content: systemMessage
})
}
const systemMessageOffset = messages.length
let nextMessages = text
? messages.concat([
{
role,
content: text,
name: opts.name
}
])
: messages
let numTokens = 0
do {
const prompt = nextMessages
.reduce((prompt, message) => {
switch (message.role) {
case 'system':
return prompt.concat([`Instructions:\n${message.content}`])
case 'user':
return prompt.concat([`${userLabel}:\n${message.content}`])
case 'function':
return prompt.concat([`Function:\n${message.content}`])
default:
return message.content ? prompt.concat([`${assistantLabel}:\n${message.content}`]) : prompt
}
}, [] as string[])
.join('\n\n')
const nextNumTokensEstimate = await this._getTokenCount(prompt)
const isValidPrompt = nextNumTokensEstimate <= maxNumTokens
if (prompt && !isValidPrompt) {
break
}
messages = nextMessages
numTokens = nextNumTokensEstimate
if (!isValidPrompt) {
break
}
if (!parentMessageId) {
break
}
const parentMessage = await this._getMessageById(parentMessageId)
if (!parentMessage) {
break
}
const parentMessageRole = parentMessage.role || 'user'
nextMessages = nextMessages.slice(0, systemMessageOffset).concat([
{
role: parentMessageRole,
content: parentMessage.text,
name: parentMessage.name,
function_call: parentMessage.functionCall ? parentMessage.functionCall : undefined
},
...nextMessages.slice(systemMessageOffset)
])
parentMessageId = parentMessage.parentMessageId
} while (true)
// Use up to 4096 tokens (prompt + response), but try to leave 1000 tokens
// for the response.
const maxTokens = Math.max(
1,
Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens)
)
return { messages, maxTokens, numTokens }
}
protected async _getTokenCount(text: string) {
// TODO: use a better fix in the tokenizer
text = text.replace(/<\|endoftext\|>/g, '')
return tokenizer.encode(text).length
}
protected async _defaultGetMessageById(
id: string
): Promise<types.ChatMessage> {
const res = await this._messageStore.get(id)
return res
}
protected async _defaultUpsertMessage(
message: types.ChatMessage
): Promise<void> {
await this._messageStore.set(message.id, message)
}
}
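A minimal sketch of the call pattern apps/chat.js uses with this client: pass function schemas via completionParams.functions, then feed each tool result back with role 'function' and the function's name. The apiKey, schema and runTool dispatcher below are placeholders standing in for Config.apiKey and the funcMap built in apps/chat.js.

import { ChatGPTAPI } from './chatgpt-api.js'

const api = new ChatGPTAPI({
  apiKey: 'sk-xxx', // placeholder
  completionParams: { model: 'gpt-3.5-turbo-0613' }
})

// hypothetical schema, the same shape the tools' function() methods return
const weatherFn = {
  name: 'queryWeather',
  description: 'query the current weather of a city',
  parameters: {
    type: 'object',
    properties: { city: { type: 'string' } },
    required: ['city']
  }
}

// hypothetical local dispatcher standing in for funcMap
async function runTool (name, args) {
  return `weather of ${args.city}: sunny, 25°C`
}

let msg = await api.sendMessage('What is the weather in Tokyo?', {
  completionParams: { functions: [weatherFn] }
})
while (msg.functionCall) {
  const result = await runTool(msg.functionCall.name, JSON.parse(msg.functionCall.arguments))
  // return the tool output with role 'function' so the model can compose its answer
  msg = await api.sendMessage(result, {
    parentMessageId: msg.id,
    name: msg.functionCall.name
  }, 'function')
}
console.log(msg.text)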

utils/openai/fetch-sse.js (new file, 170 lines)

@@ -0,0 +1,170 @@
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (g && (g = 0, op[0] && (_ = 0)), _) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
var __rest = (this && this.__rest) || function (s, e) {
var t = {};
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)
t[p] = s[p];
if (s != null && typeof Object.getOwnPropertySymbols === "function")
for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {
if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))
t[p[i]] = s[p[i]];
}
return t;
};
var __asyncValues = (this && this.__asyncValues) || function (o) {
if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
var m = o[Symbol.asyncIterator], i;
return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
};
import { createParser } from 'eventsource-parser';
import * as types from './types.js';
import fetch from 'node-fetch';
import { streamAsyncIterable } from './stream-async-iterable.js';
export function fetchSSE(url, options, fetchFn) {
var _a, e_1, _b, _c;
if (fetchFn === void 0) { fetchFn = fetch; }
return __awaiter(this, void 0, void 0, function () {
var onMessage, onError, fetchOptions, res, reason, err_1, msg, error, parser, feed, body_1, _d, _e, _f, chunk, str, e_1_1;
return __generator(this, function (_g) {
switch (_g.label) {
case 0:
onMessage = options.onMessage, onError = options.onError, fetchOptions = __rest(options, ["onMessage", "onError"]);
return [4 /*yield*/, fetchFn(url, fetchOptions)];
case 1:
res = _g.sent();
if (!!res.ok) return [3 /*break*/, 6];
reason = void 0;
_g.label = 2;
case 2:
_g.trys.push([2, 4, , 5]);
return [4 /*yield*/, res.text()];
case 3:
reason = _g.sent();
return [3 /*break*/, 5];
case 4:
err_1 = _g.sent();
reason = res.statusText;
return [3 /*break*/, 5];
case 5:
msg = "ChatGPT error ".concat(res.status, ": ").concat(reason);
error = new types.ChatGPTError(msg, { cause: res });
error.statusCode = res.status;
error.statusText = res.statusText;
throw error;
case 6:
parser = createParser(function (event) {
if (event.type === 'event') {
onMessage(event.data);
}
});
feed = function (chunk) {
var _a;
var response = null;
try {
response = JSON.parse(chunk);
}
catch (_b) {
// ignore
}
if (((_a = response === null || response === void 0 ? void 0 : response.detail) === null || _a === void 0 ? void 0 : _a.type) === 'invalid_request_error') {
var msg = "ChatGPT error ".concat(response.detail.message, ": ").concat(response.detail.code, " (").concat(response.detail.type, ")");
var error = new types.ChatGPTError(msg, { cause: response });
error.statusCode = response.detail.code;
error.statusText = response.detail.message;
if (onError) {
onError(error);
}
else {
console.error(error);
}
// don't feed to the event parser
return;
}
parser.feed(chunk);
};
if (!!res.body.getReader) return [3 /*break*/, 7];
body_1 = res.body;
if (!body_1.on || !body_1.read) {
throw new types.ChatGPTError('unsupported "fetch" implementation');
}
body_1.on('readable', function () {
var chunk;
while (null !== (chunk = body_1.read())) {
feed(chunk.toString());
}
});
return [3 /*break*/, 18];
case 7:
_g.trys.push([7, 12, 13, 18]);
_d = true, _e = __asyncValues(streamAsyncIterable(res.body));
_g.label = 8;
case 8: return [4 /*yield*/, _e.next()];
case 9:
if (!(_f = _g.sent(), _a = _f.done, !_a)) return [3 /*break*/, 11];
_c = _f.value;
_d = false;
chunk = _c;
str = new TextDecoder().decode(chunk);
feed(str);
_g.label = 10;
case 10:
_d = true;
return [3 /*break*/, 8];
case 11: return [3 /*break*/, 18];
case 12:
e_1_1 = _g.sent();
e_1 = { error: e_1_1 };
return [3 /*break*/, 18];
case 13:
_g.trys.push([13, , 16, 17]);
if (!(!_d && !_a && (_b = _e.return))) return [3 /*break*/, 15];
return [4 /*yield*/, _b.call(_e)];
case 14:
_g.sent();
_g.label = 15;
case 15: return [3 /*break*/, 17];
case 16:
if (e_1) throw e_1.error;
return [7 /*endfinally*/];
case 17: return [7 /*endfinally*/];
case 18: return [2 /*return*/];
}
});
});
}

utils/openai/fetch-sse.ts Normal file

@@ -0,0 +1,89 @@
import { createParser } from 'eventsource-parser'
import * as types from './types'
import nodefetch from 'node-fetch'
import { streamAsyncIterable } from './stream-async-iterable'
export async function fetchSSE(
url: string,
options: Parameters<typeof fetch>[1] & {
onMessage: (data: string) => void
onError?: (error: any) => void
},
fetch: types.FetchFn = nodefetch
) {
const { onMessage, onError, ...fetchOptions } = options
const res = await fetch(url, fetchOptions)
if (!res.ok) {
let reason: string
try {
reason = await res.text()
} catch (err) {
reason = res.statusText
}
const msg = `ChatGPT error ${res.status}: ${reason}`
const error = new types.ChatGPTError(msg, { cause: res })
error.statusCode = res.status
error.statusText = res.statusText
throw error
}
const parser = createParser((event) => {
if (event.type === 'event') {
onMessage(event.data)
}
})
// handle special response errors
const feed = (chunk: string) => {
let response = null
try {
response = JSON.parse(chunk)
} catch {
// ignore
}
if (response?.detail?.type === 'invalid_request_error') {
const msg = `ChatGPT error ${response.detail.message}: ${response.detail.code} (${response.detail.type})`
const error = new types.ChatGPTError(msg, { cause: response })
error.statusCode = response.detail.code
error.statusText = response.detail.message
if (onError) {
onError(error)
} else {
console.error(error)
}
// don't feed to the event parser
return
}
parser.feed(chunk)
}
if (!res.body.getReader) {
// Vercel polyfills `fetch` with `node-fetch`, which doesn't conform to
// web standards, so this is a workaround...
const body: NodeJS.ReadableStream = res.body as any
if (!body.on || !body.read) {
throw new types.ChatGPTError('unsupported "fetch" implementation')
}
body.on('readable', () => {
let chunk: string | Buffer
while (null !== (chunk = body.read())) {
feed(chunk.toString())
}
})
} else {
for await (const chunk of streamAsyncIterable(res.body)) {
const str = new TextDecoder().decode(chunk)
feed(str)
}
}
}
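For orientation, a minimal sketch of how this fetchSSE is consumed elsewhere in the commit. The endpoint, key and model below are placeholders rather than values from this diff; each SSE data: payload reaches onMessage as a raw string, and OpenAI signals completion with a literal '[DONE]'.

import { fetchSSE } from './fetch-sse.js'

await fetchSSE('https://api.openai.com/v1/chat/completions', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    Authorization: 'Bearer YOUR_API_KEY' // placeholder
  },
  body: JSON.stringify({
    model: 'gpt-3.5-turbo',
    stream: true,
    messages: [{ role: 'user', content: 'hello' }]
  }),
  onMessage: (data) => {
    if (data === '[DONE]') return // end-of-stream sentinel
    const delta = JSON.parse(data).choices[0].delta
    if (delta.content) process.stdout.write(delta.content)
  },
  onError: (err) => console.error(err)
})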

utils/openai/stream-async-iterable.js Normal file

@@ -0,0 +1,14 @@
export async function * streamAsyncIterable (stream) {
const reader = stream.getReader()
try {
while (true) {
const { done, value } = await reader.read()
if (done) {
return
}
yield value
}
} finally {
reader.releaseLock()
}
}
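streamAsyncIterable is what lets fetchSSE above run for await over res.body: a WHATWG ReadableStream exposes getReader() but not Symbol.asyncIterator, so this generator bridges the two. A quick sketch, assuming a Node 18+ global fetch whose body is a web ReadableStream:

import { streamAsyncIterable } from './stream-async-iterable.js'

const res = await fetch('https://example.com')
for await (const chunk of streamAsyncIterable(res.body)) {
  console.log('received', chunk.byteLength, 'bytes') // chunk is a Uint8Array
}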

utils/openai/tokenizer.js Normal file

@@ -0,0 +1,6 @@
import { getEncoding } from 'js-tiktoken';
// TODO: make this configurable
var tokenizer = getEncoding('cl100k_base');
export function encode(input) {
return new Uint32Array(tokenizer.encode(input));
}

utils/openai/tokenizer.ts Normal file

@@ -0,0 +1,8 @@
import { getEncoding } from 'js-tiktoken'
// TODO: make this configurable
const tokenizer = getEncoding('cl100k_base')
export function encode(input: string): Uint32Array {
return new Uint32Array(tokenizer.encode(input))
}
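The cl100k_base encoder is what the API wrapper uses to budget prompts against maxModelTokens; a quick sketch of counting tokens:

import { encode } from './tokenizer.js'

const tokens = encode('云崽QQ机器人')
console.log(tokens.length) // number of cl100k_base tokens in the string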


@@ -0,0 +1,5 @@
{
"compilerOptions": {
"module": "es2020"
}
}

utils/openai/types.js Normal file

@@ -0,0 +1,26 @@
var __extends = (this && this.__extends) || (function () {
var extendStatics = function (d, b) {
extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
return extendStatics(d, b);
};
return function (d, b) {
if (typeof b !== "function" && b !== null)
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
extendStatics(d, b);
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
var ChatGPTError = /** @class */ (function (_super) {
__extends(ChatGPTError, _super);
function ChatGPTError() {
return _super !== null && _super.apply(this, arguments) || this;
}
return ChatGPTError;
}(Error));
export { ChatGPTError };
export var openai;
(function (openai) {
})(openai || (openai = {}));

utils/openai/types.ts Normal file

@@ -0,0 +1,473 @@
import Keyv from 'keyv'
export type Role = 'user' | 'assistant' | 'system' | 'function'
export type FetchFn = typeof fetch
export type ChatGPTAPIOptions = {
apiKey: string
/** @defaultValue `'https://api.openai.com'` **/
apiBaseUrl?: string
apiOrg?: string
/** @defaultValue `false` **/
debug?: boolean
completionParams?: Partial<
Omit<openai.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
>
systemMessage?: string
/** @defaultValue `4096` **/
maxModelTokens?: number
/** @defaultValue `1000` **/
maxResponseTokens?: number
messageStore?: Keyv
getMessageById?: GetMessageByIdFunction
upsertMessage?: UpsertMessageFunction
fetch?: FetchFn
}
export type SendMessageOptions = {
/**
* function role name
*/
name?: string
parentMessageId?: string
conversationId?: string
messageId?: string
stream?: boolean
systemMessage?: string
timeoutMs?: number
onProgress?: (partialResponse: ChatMessage) => void
abortSignal?: AbortSignal
completionParams?: Partial<
Omit<openai.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
>
}
export type MessageActionType = 'next' | 'variant'
export type SendMessageBrowserOptions = {
conversationId?: string
parentMessageId?: string
messageId?: string
action?: MessageActionType
timeoutMs?: number
onProgress?: (partialResponse: ChatMessage) => void
abortSignal?: AbortSignal
}
export interface ChatMessage {
id: string
text: string
role: Role
name?: string
delta?: string
detail?:
| openai.CreateChatCompletionResponse
| CreateChatCompletionStreamResponse
// relevant for both ChatGPTAPI and ChatGPTUnofficialProxyAPI
parentMessageId?: string
// only relevant for ChatGPTUnofficialProxyAPI (optional for ChatGPTAPI)
conversationId?: string
functionCall?: openai.FunctionCall
}
export class ChatGPTError extends Error {
statusCode?: number
statusText?: string
isFinal?: boolean
accountId?: string
}
/** Returns a chat message from a store by its ID (or null if not found). */
export type GetMessageByIdFunction = (id: string) => Promise<ChatMessage>
/** Upserts a chat message to a store. */
export type UpsertMessageFunction = (message: ChatMessage) => Promise<void>
export interface CreateChatCompletionStreamResponse
extends openai.CreateChatCompletionDeltaResponse {
usage: CreateCompletionStreamResponseUsage
}
export interface CreateCompletionStreamResponseUsage
extends openai.CreateCompletionResponseUsage {
estimated: true
}
/**
* https://chat.openapi.com/backend-api/conversation
*/
export type ConversationJSONBody = {
/**
* The action to take
*/
action: string
/**
* The ID of the conversation
*/
conversation_id?: string
/**
* Prompts to provide
*/
messages: Prompt[]
/**
* The model to use
*/
model: string
/**
* The parent message ID
*/
parent_message_id: string
}
export type Prompt = {
/**
* The content of the prompt
*/
content: PromptContent
/**
* The ID of the prompt
*/
id: string
/**
* The role played in the prompt
*/
role: Role
}
export type ContentType = 'text'
export type PromptContent = {
/**
* The content type of the prompt
*/
content_type: ContentType
/**
* The parts to the prompt
*/
parts: string[]
}
export type ConversationResponseEvent = {
message?: Message
conversation_id?: string
error?: string | null
}
export type Message = {
id: string
content: MessageContent
role: Role
user: string | null
create_time: string | null
update_time: string | null
end_turn: null
weight: number
recipient: string
metadata: MessageMetadata
}
export type MessageContent = {
content_type: string
parts: string[]
}
export type MessageMetadata = any
export namespace openai {
export interface CreateChatCompletionDeltaResponse {
id: string
object: 'chat.completion.chunk'
created: number
model: string
choices: [
{
delta: {
role: Role
content?: string,
function_call?: {name: string, arguments: string}
}
index: number
finish_reason: string | null
}
]
}
/**
*
* @export
* @interface ChatCompletionRequestMessage
*/
export interface ChatCompletionRequestMessage {
/**
* The role of the author of this message.
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
role: ChatCompletionRequestMessageRoleEnum
/**
* The contents of the message
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
content: string
/**
* The name of the user in a multi-user chat
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
name?: string
function_call?: FunctionCall
}
export interface FunctionCall {
name: string
arguments: string
}
export declare const ChatCompletionRequestMessageRoleEnum: {
readonly System: 'system'
readonly User: 'user'
readonly Assistant: 'assistant'
readonly Function: 'function'
}
export declare type ChatCompletionRequestMessageRoleEnum =
(typeof ChatCompletionRequestMessageRoleEnum)[keyof typeof ChatCompletionRequestMessageRoleEnum]
/**
*
* @export
* @interface ChatCompletionResponseMessage
*/
export interface ChatCompletionResponseMessage {
/**
* The role of the author of this message.
* @type {string}
* @memberof ChatCompletionResponseMessage
*/
role: ChatCompletionResponseMessageRoleEnum
/**
* The contents of the message
* @type {string}
* @memberof ChatCompletionResponseMessage
*/
content: string
function_call: FunctionCall
}
export declare const ChatCompletionResponseMessageRoleEnum: {
readonly System: 'system'
readonly User: 'user'
readonly Assistant: 'assistant'
}
export declare type ChatCompletionResponseMessageRoleEnum =
(typeof ChatCompletionResponseMessageRoleEnum)[keyof typeof ChatCompletionResponseMessageRoleEnum]
/**
*
* @export
* @interface CreateChatCompletionRequest
*/
export interface CreateChatCompletionRequest {
/**
* ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported.
* @type {string}
* @memberof CreateChatCompletionRequest
*/
model: string
/**
* The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction).
* @type {Array<ChatCompletionRequestMessage>}
* @memberof CreateChatCompletionRequest
*/
messages: Array<ChatCompletionRequestMessage>
/**
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
* @type {number}
* @memberof CreateChatCompletionRequest
*/
temperature?: number | null
/**
* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
* @type {number}
* @memberof CreateChatCompletionRequest
*/
top_p?: number | null
/**
* How many chat completion choices to generate for each input message.
* @type {number}
* @memberof CreateChatCompletionRequest
*/
n?: number | null
/**
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
* @type {boolean}
* @memberof CreateChatCompletionRequest
*/
stream?: boolean | null
/**
*
* @type {CreateChatCompletionRequestStop}
* @memberof CreateChatCompletionRequest
*/
stop?: CreateChatCompletionRequestStop
/**
* The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
* @type {number}
* @memberof CreateChatCompletionRequest
*/
max_tokens?: number
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
* @type {number}
* @memberof CreateChatCompletionRequest
*/
presence_penalty?: number | null
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
* @type {number}
* @memberof CreateChatCompletionRequest
*/
frequency_penalty?: number | null
/**
* Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
* @type {object}
* @memberof CreateChatCompletionRequest
*/
logit_bias?: object | null
/**
* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @type {string}
* @memberof CreateChatCompletionRequest
*/
user?: string
functions?: Function[]
}
export interface Function {
name: string
description: string
parameters: FunctionParameters
}
export interface FunctionParameters {
type: string
properties: Record<string, Record<string, any>>
required: string[]
}
/**
* @type CreateChatCompletionRequestStop
* Up to 4 sequences where the API will stop generating further tokens.
* @export
*/
export declare type CreateChatCompletionRequestStop = Array<string> | string
/**
*
* @export
* @interface CreateChatCompletionResponse
*/
export interface CreateChatCompletionResponse {
/**
*
* @type {string}
* @memberof CreateChatCompletionResponse
*/
id: string
/**
*
* @type {string}
* @memberof CreateChatCompletionResponse
*/
object: string
/**
*
* @type {number}
* @memberof CreateChatCompletionResponse
*/
created: number
/**
*
* @type {string}
* @memberof CreateChatCompletionResponse
*/
model: string
/**
*
* @type {Array<CreateChatCompletionResponseChoicesInner>}
* @memberof CreateChatCompletionResponse
*/
choices: Array<CreateChatCompletionResponseChoicesInner>
/**
*
* @type {CreateCompletionResponseUsage}
* @memberof CreateChatCompletionResponse
*/
usage?: CreateCompletionResponseUsage
}
/**
*
* @export
* @interface CreateChatCompletionResponseChoicesInner
*/
export interface CreateChatCompletionResponseChoicesInner {
/**
*
* @type {number}
* @memberof CreateChatCompletionResponseChoicesInner
*/
index?: number
/**
*
* @type {ChatCompletionResponseMessage}
* @memberof CreateChatCompletionResponseChoicesInner
*/
message?: ChatCompletionResponseMessage
/**
*
* @type {string}
* @memberof CreateChatCompletionResponseChoicesInner
*/
finish_reason?: string
}
/**
*
* @export
* @interface CreateCompletionResponseUsage
*/
export interface CreateCompletionResponseUsage {
/**
*
* @type {number}
* @memberof CreateCompletionResponseUsage
*/
prompt_tokens: number
/**
*
* @type {number}
* @memberof CreateCompletionResponseUsage
*/
completion_tokens: number
/**
*
* @type {number}
* @memberof CreateCompletionResponseUsage
*/
total_tokens: number
}
}
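To make the new function-call fields concrete, here is a sketch of a request these types describe (the model name and tool schema are illustrative, not taken from this diff):

// satisfies openai.CreateChatCompletionRequest
const request = {
  model: 'gpt-3.5-turbo', // assumed: any chat model exposing function calling
  messages: [{ role: 'user', content: '把QQ 10001 禁言十分钟' }],
  functions: [{
    name: 'jinyan',
    description: 'mute a group member',
    parameters: {
      type: 'object',
      properties: {
        qq: { type: 'string' },
        groupId: { type: 'string' },
        time: { type: 'string' }
      },
      required: ['qq', 'groupId']
    }
  }]
}
// When the model decides to call a tool, the assistant message carries
// function_call: { name: 'jinyan', arguments: '{"qq":"10001","groupId":"...","time":"600"}' },
// which this commit surfaces as ChatMessage.functionCall.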

utils/tools/AbstractTool.js Normal file

@@ -0,0 +1,20 @@
export class AbstractTool {
  // Base class for every function-call tool in this directory: subclasses
  // fill in name, description, a JSON-schema `parameters` object and an
  // async `func` implementation.
  name = ''
  parameters = {}
  description = ''
  func = async function () {}
  // Returns the entry sent to OpenAI in the request's `functions` array.
  function () {
    if (!this.parameters.type) {
      this.parameters.type = 'object'
    }
    return {
      name: this.name,
      description: this.description,
      parameters: this.parameters
    }
  }
}
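Every tool below follows this contract; a hypothetical minimal subclass (not part of the commit) shows how function() produces the schema entry that goes into the request's functions array:

import { AbstractTool } from './AbstractTool.js'

class EchoTool extends AbstractTool { // illustration only
  name = 'echo'
  description = 'repeat the given text'
  parameters = { properties: { text: { type: 'string' } }, required: ['text'] }
  func = async function (opts) { return opts.text }
}

const tool = new EchoTool()
console.log(tool.function()) // { name: 'echo', description: ..., parameters: { type: 'object', ... } }
console.log(await tool.func({ text: 'hi' })) // 'hi'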

utils/tools/EditCardTool.js Normal file

@@ -0,0 +1,35 @@
import { AbstractTool } from './AbstractTool.js'
export class EditCardTool extends AbstractTool {
name = 'editCard'
parameters = {
properties: {
qq: {
type: 'string',
description: '你想改名片的那个人的qq号'
},
card: {
type: 'string',
description: '你想给他改的新名片'
},
groupId: {
type: 'string',
description: '群号'
}
},
required: ['qq', 'card', 'groupId']
}
  description = '当你想要修改某个群员的群名片时有用。'
func = async function (opts) {
let {qq, card, groupId} = opts
groupId = parseInt(groupId.trim())
qq = parseInt(qq.trim())
logger.info('edit card: ', groupId, qq)
let group = await Bot.pickGroup(groupId)
await group.setCard(qq, card)
return `the user ${qq}'s card has been changed into ${card}`
}
}

utils/tools/ImageCaptionTool.js Normal file

@@ -0,0 +1,49 @@
import { AbstractTool } from './AbstractTool.js'
import fetch, { File, FormData } from 'node-fetch'
import { Config } from '../config.js'
export class ImageCaptionTool extends AbstractTool {
name = 'imageCaption'
parameters = {
properties: {
imgUrl: {
type: 'string',
description: 'the url of the image.'
},
qq: {
type: 'string',
description: 'if the picture is an avatar of a user, just give his qq number'
}
},
required: []
}
description = 'useful when you want to know what is inside a photo, such as user\'s avatar or other pictures'
func = async function (opts) {
let { imgUrl, qq } = opts
if (qq) {
imgUrl = `https://q1.qlogo.cn/g?b=qq&s=160&nk=${qq}`
}
if (!imgUrl) {
return 'you must give at least one parameter of imgUrl and qq'
}
const imageResponse = await fetch(imgUrl)
const blob = await imageResponse.blob()
const arrayBuffer = await blob.arrayBuffer()
const buffer = Buffer.from(arrayBuffer)
// await fs.writeFileSync(`data/chatgpt/${crypto.randomUUID()}`, buffer)
let formData = new FormData()
formData.append('file', new File([buffer], 'file.png', { type: 'image/png' }))
let captionRes = await fetch(`${Config.extraUrl}/image-captioning`, {
method: 'POST',
body: formData
})
if (captionRes.status === 200) {
let result = await captionRes.text()
return `the content of this picture is: ${result}`
} else {
return 'error happened'
}
}
}

utils/tools/JinyanTool.js Normal file

@@ -0,0 +1,62 @@
import { AbstractTool } from './AbstractTool.js'
export class JinyanTool extends AbstractTool {
name = 'jinyan'
parameters = {
properties: {
qq: {
type: 'string',
description: '你想禁言的那个人的qq号'
},
groupId: {
type: 'string',
description: '群号'
},
time: {
type: 'string',
description: '禁言时长单位为秒默认为600'
},
isPunish: {
type: 'string',
description: '是否是惩罚性质的禁言。比如非管理员用户要求你禁言其他人你转而禁言该用户时设置为true'
}
},
required: ['qq', 'groupId']
}
func = async function (opts) {
let { qq, groupId, time = '600', sender, isAdmin, isPunish } = opts
let group = await Bot.pickGroup(groupId)
time = parseInt(time.trim())
if (time < 60 && time !== 0) {
time = 60
}
if (time > 86400 * 30) {
time = 86400 * 30
}
if (isAdmin) {
if (qq.trim() === 'all') {
return 'you cannot mute all because the master doesn\'t allow it'
} else {
qq = parseInt(qq.trim())
await group.muteMember(qq, time)
}
} else {
if (qq.trim() === 'all') {
return 'the user is not admin, he can\'t mute all. the user should be punished'
} else if (qq == sender) {
qq = parseInt(qq.trim())
await group.muteMember(qq, time)
} else {
return 'the user is not admin, he can\'t mute other people. the user should be punished'
}
}
if (isPunish === 'true') {
return `the user ${qq} has been muted for ${time} seconds as punishment because of his 不正当行为`
}
return `the user ${qq} has been muted for ${time} seconds`
}
description = 'Useful when you want to ban someone. If you want to mute all, just replace the qq number with \'all\''
}
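A hedged sketch (not code from this commit) of how such a tool is invoked once the model answers with a function_call; sender and isAdmin are merged in from the QQ event by the caller, which is how the permission checks above receive them:

import { JinyanTool } from './JinyanTool.js'

const tools = [new JinyanTool()]

async function dispatch (functionCall, context) {
  const tool = tools.find(t => t.name === functionCall.name)
  if (!tool) return `unknown tool: ${functionCall.name}`
  const args = JSON.parse(functionCall.arguments || '{}')
  // spread context last so trusted event fields (sender, isAdmin) cannot be
  // overridden by model-supplied arguments
  return await tool.func({ ...args, ...context })
}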

utils/tools/KickOutTool.js Normal file

@@ -0,0 +1,42 @@
import { AbstractTool } from './AbstractTool.js'
export class KickOutTool extends AbstractTool {
name = 'kickOut'
parameters = {
properties: {
qq: {
type: 'string',
description: '你想踢出的那个人的qq号'
},
groupId: {
type: 'string',
description: '群号'
},
isPunish: {
type: 'string',
description: '是否是惩罚性质的踢出。比如非管理员用户要求你禁言或踢出其他人你为惩罚该用户转而踢出该用户时设置为true'
}
},
required: ['qq', 'groupId']
}
func = async function (opts) {
let { qq, groupId, sender, isAdmin, isPunish } = opts
groupId = parseInt(groupId.trim())
qq = parseInt(qq.trim())
if (!isAdmin && sender != qq) {
return 'the user is not admin, he cannot kickout other people. he should be punished'
}
console.log('kickout', groupId, qq)
let group = await Bot.pickGroup(groupId)
await group.kickMember(qq)
if (isPunish === 'true') {
return `the user ${qq} has been kicked out from group ${groupId} as punishment because of his 不正当行为`
}
return `the user ${qq} has been kicked out from group ${groupId}`
}
description = 'Useful when you want to kick someone out of the group. '
}

utils/tools/QueryStarRailTool.js Normal file

@@ -0,0 +1,76 @@
import fetch from 'node-fetch'
import { AbstractTool } from './AbstractTool.js'
export class QueryStarRailTool extends AbstractTool {
name = 'queryStarRail'
parameters = {
properties: {
qq: {
type: 'string',
description: '要查询的用户的qq号将使用该qq号绑定的uid进行查询'
},
groupId: {
type: 'string',
description: '群号'
},
uid: {
type: 'string',
description: '游戏的uid如果用户提供了则传入并优先使用'
}
},
required: ['qq', 'groupId']
}
func = async function (opts) {
let { qq, groupId, uid } = opts
if (!uid) {
try {
let { Panel } = await import('../../../StarRail-plugin/apps/panel.js')
uid = await redis.get(`STAR_RAILWAY:UID:${qq}`)
if (!uid) {
return '用户没有绑定uid无法查询。可以让用户主动提供uid进行查询'
}
} catch (e) {
return '未安装StarRail-Plugin无法查询'
}
}
try {
let uidRes = await fetch('https://avocado.wiki/v1/info/' + uid)
uidRes = await uidRes.json()
let { assistAvatar, displayAvatars } = uidRes.playerDetailInfo
function dealAvatar (avatar) {
delete avatar.position
delete avatar.vo_tag
delete avatar.desc
delete avatar.promption
delete avatar.relics
delete avatar.behaviorList
delete avatar.images
delete avatar.ranks
if (avatar.equipment) {
avatar.equipment = {
level: avatar.equipment.level,
rank: avatar.equipment.rank,
name: avatar.equipment.name,
skill_desc: avatar.equipment.skill_desc
}
}
}
dealAvatar(assistAvatar)
if (displayAvatars) {
displayAvatars.forEach(avatar => {
dealAvatar(avatar)
})
}
uidRes.playerDetailInfo.assistAvatar = assistAvatar
uidRes.playerDetailInfo.displayAvatars = displayAvatars
delete uidRes.repository
delete uidRes.version
return `the player info in json format is: \n${JSON.stringify(uidRes)}`
} catch (err) {
return `failed to query, error: ${err.toString()}`
}
}
description = 'Useful when you want to query player information of Honkai Star Rail(崩坏:星穹铁道). '
}

utils/tools/SearchBilibiliTool.js Normal file

@@ -0,0 +1,76 @@
import fetch from 'node-fetch'
import { formatDate } from '../common.js'
import { AbstractTool } from './AbstractTool.js'
export class SearchVideoTool extends AbstractTool {
name = 'searchVideo'
parameters = {
properties: {
keyword: {
type: 'string',
description: '要搜索的视频的标题或关键词'
}
},
required: ['keyword']
}
func = async function (opts) {
let { keyword } = opts
try {
return await searchBilibili(keyword)
} catch (err) {
logger.error(err)
return `fail to search video, error: ${err.toString()}`
}
}
description = 'Useful when you want to search a video by keywords. you should remember the id of the video if you want to share it'
}
export async function searchBilibili (name) {
let biliRes = await fetch('https://www.bilibili.com',
{
// headers: {
// accept: 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
// Accept: '*/*',
// 'Accept-Encoding': 'gzip, deflate, br',
// 'accept-language': 'en-US,en;q=0.9',
// Connection: 'keep-alive',
// 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
// }
})
const headers = biliRes.headers.raw()
const setCookieHeaders = headers['set-cookie']
if (setCookieHeaders) {
const cookies = []
setCookieHeaders.forEach(header => {
const cookie = header.split(';')[0]
cookies.push(cookie)
})
const cookieHeader = cookies.join('; ')
let headers = {
accept: 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
'accept-language': 'en-US,en;q=0.9',
Referer: 'https://www.bilibili.com',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
cookie: cookieHeader
}
let response = await fetch(`https://api.bilibili.com/x/web-interface/search/type?keyword=${name}&search_type=video`,
{
headers
})
let json = await response.json()
if (json.data?.numResults > 0) {
let result = json.data.result.map(r => {
return `id: ${r.bvid},标题:${r.title},作者:${r.author},播放量:${r.play},发布日期:${formatDate(new Date(r.pubdate * 1000))}`
}).slice(0, Math.min(json.data?.numResults, 5)).join('\n')
return `这些是关键词“${name}”的搜索结果:\n${result}`
} else {
return `没有找到关键词“${name}”的搜索结果`
}
}
  return 'bilibili search failed: no cookies received from bilibili.com'
}

utils/tools/SerpImageTool.js Normal file

@@ -0,0 +1,30 @@
import fetch from 'node-fetch'
import { AbstractTool } from './AbstractTool.js'
export class SerpImageTool extends AbstractTool {
name = 'searchImage'
parameters = {
properties: {
q: {
type: 'string',
description: 'search keyword'
}
},
required: ['q']
}
func = async function (opts) {
let { q } = opts
let serpRes = await fetch(`https://serp.ikechan8370.com/image/bing?q=${encodeURIComponent(q)}`, {
headers: {
'X-From-Library': 'ikechan8370'
}
})
serpRes = await serpRes.json()
let res = serpRes.data
return `the images search results are here in json format:\n${JSON.stringify(res)}. the murl field is real picture url. You should use sendPicture to send them`
}
description = 'Useful when you want to search images from the internet. '
}
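searchImage and sendPicture are meant to chain: the model searches first, then forwards some murl values to sendPicture. A sketch of that flow, assuming it runs inside Yunzai where the Bot global exists (URLs and group number are placeholders):

import { SerpImageTool } from './SerpImageTool.js'
import { SendPictureTool } from './SendPictureTool.js'

const results = await new SerpImageTool().func({ q: '猫猫' })
// ...the model reads `results`, picks murl values, then calls:
await new SendPictureTool().func({
  picture: 'https://example.com/cat1.jpg https://example.com/cat2.jpg',
  groupId: '123456789'
})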

utils/tools/SearchMusicTool.js Normal file

@@ -0,0 +1,39 @@
import fetch from 'node-fetch'
import { AbstractTool } from './AbstractTool.js'
export class SearchMusicTool extends AbstractTool {
name = 'searchMusic'
parameters = {
properties: {
keyword: {
type: 'string',
description: '音乐的标题或关键词'
}
},
required: ['keyword']
}
func = async function (opts) {
let { keyword } = opts
try {
let result = await searchMusic163(keyword)
return `search result: ${result}`
} catch (e) {
return `music search failed: ${e}`
}
}
description = 'Useful when you want to search music by keyword.'
}
export async function searchMusic163 (name) {
let response = await fetch(`http://music.163.com/api/search/get/web?s=${name}&type=1&offset=0&total=true&limit=6`)
let json = await response.json()
if (json.result?.songCount > 0) {
return json.result.songs.map(song => {
return `id: ${song.id}, name: ${song.name}, artists: ${song.artists.map(a => a.name).join('&')}, alias: ${song.alias || 'none'}`
}).join('\n')
}
return null
}
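searchMusic163 is a plain exported helper and can be exercised without the bot (assuming the public 163 endpoint stays reachable):

import { searchMusic163 } from './SearchMusicTool.js'

console.log(await searchMusic163('海阔天空'))
// up to 6 lines of "id: ..., name: ..., artists: ..., alias: ..." or null when nothing matched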

utils/tools/SendAvatarTool.js Normal file

@@ -0,0 +1,33 @@
import { AbstractTool } from './AbstractTool.js'
export class SendAvatarTool extends AbstractTool {
name = 'sendAvatar'
parameters = {
properties: {
qq: {
type: 'string',
description: '要发头像的人的qq号'
},
groupId: {
type: 'string',
description: '群号或qq号发送目标'
}
},
required: ['qq', 'groupId']
}
  func = async function (opts) {
    let { qq, groupId } = opts
    let groupList = await Bot.getGroupList()
    groupId = parseInt(groupId.trim())
    console.log('sendAvatar', groupId, qq)
    if (groupList.get(groupId)) {
      let group = await Bot.pickGroup(groupId)
      await group.sendMsg(segment.image('https://q1.qlogo.cn/g?b=qq&s=0&nk=' + qq))
    } else {
      // the target may be a qq number rather than a group (see the groupId description), so fall back to a private message
      let friend = await Bot.pickFriend(groupId)
      await friend.sendMsg(segment.image('https://q1.qlogo.cn/g?b=qq&s=0&nk=' + qq))
    }
    return `the user ${qq}'s avatar has been sent to ${groupId}`
  }
  description = 'Useful when you want to send the user avatar picture to the group. The input to this tool should be the user\'s qq number and the target group number. 如果是在群聊中,优先选择群号发送。'
}

utils/tools/SendBilibiliTool.js Normal file

@@ -0,0 +1,136 @@
import fetch from 'node-fetch'
import { formatDate, mkdirs } from '../common.js'
import fs from 'fs'
import { AbstractTool } from './AbstractTool.js'
export class SendVideoTool extends AbstractTool {
name = 'sendVideo'
parameters = {
properties: {
id: {
type: 'string',
description: '要发的视频的id'
},
groupId: {
type: 'string',
description: '群号或qq号发送目标'
}
},
required: ['id', 'groupId']
}
func = async function (opts) {
let { id, groupId } = opts
groupId = parseInt(groupId.trim())
let msg = []
try {
let { arcurl, title, pic, description, videoUrl, headers, bvid, author, play, pubdate, like, honor } = await getBilibili(id)
let group = await Bot.pickGroup(groupId)
msg.push(title.replace(/(<([^>]+)>)/ig, '') + '\n')
msg.push(`UP主${author} 发布日期:${formatDate(new Date(pubdate * 1000))} 播放量:${play} 点赞:${like}\n`)
msg.push(arcurl + '\n')
msg.push(segment.image(pic))
msg.push('\n' + description)
if (honor) {
msg.push(`本视频曾获得过${honor}称号`)
}
msg.push('\n视频在路上啦')
await group.sendMsg(msg)
const videoResponse = await fetch(videoUrl, { headers })
const fileType = videoResponse.headers.get('Content-Type').split('/')[1]
let fileLoc = `data/chatgpt/videos/${bvid}.${fileType}`
mkdirs('data/chatgpt/videos')
videoResponse.blob().then(async blob => {
const arrayBuffer = await blob.arrayBuffer()
const buffer = Buffer.from(arrayBuffer)
await fs.writeFileSync(fileLoc, buffer)
await group.sendMsg(segment.video(fileLoc))
})
return `the video ${title.replace(/(<([^>]+)>)/ig, '')} was shared to ${groupId}. the video information: ${msg}`
} catch (err) {
logger.error(err)
if (msg.length > 0) {
return `fail to share video, but the video msg is found: ${msg}, you can just tell the information of this video`
} else {
return `fail to share video, error: ${err.toString()}`
}
}
}
description = 'Useful when you want to share a video. You must use searchVideo to get search result and choose one video and get its id'
}
export async function getBilibili (bvid) {
let biliRes = await fetch('https://www.bilibili.com',
{
// headers: {
// accept: 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
// Accept: '*/*',
// 'Accept-Encoding': 'gzip, deflate, br',
// 'accept-language': 'en-US,en;q=0.9',
// Connection: 'keep-alive',
// 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
// }
})
const headers = biliRes.headers.raw()
const setCookieHeaders = headers['set-cookie']
if (setCookieHeaders) {
const cookies = []
setCookieHeaders.forEach(header => {
const cookie = header.split(';')[0]
cookies.push(cookie)
})
const cookieHeader = cookies.join('; ')
let headers = {
accept: 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
'accept-language': 'en-US,en;q=0.9',
Referer: 'https://www.bilibili.com',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
cookie: cookieHeader
}
let videoInfo = await fetch(`https://api.bilibili.com/x/web-interface/view?bvid=${bvid}`, {
headers
})
videoInfo = await videoInfo.json()
let cid = videoInfo.data.cid
let arcurl = `http://www.bilibili.com/video/av${videoInfo.data.aid}`
let title = videoInfo.data.title
let pic = videoInfo.data.pic
let description = videoInfo.data.desc
let author = videoInfo.data.owner.name
let play = videoInfo.data.stat.view
let pubdate = videoInfo.data.pubdate
let like = videoInfo.data.stat.like
let honor = videoInfo.data.honor_reply?.honor?.map(h => h.desc)?.join('、')
let downloadInfo = await fetch(`https://api.bilibili.com/x/player/playurl?bvid=${bvid}&cid=${cid}`, {headers})
let videoUrl = (await downloadInfo.json()).data.durl[0].url
return {
arcurl, title, pic, description, videoUrl, headers, bvid, author, play, pubdate, like, honor
}
} else {
return {}
}
}
function randomIndex () {
// Define weights for each index
const weights = [5, 4, 3, 2, 1, 1, 1, 1, 1, 1, 1]
// Compute the total weight
const totalWeight = weights.reduce((sum, weight) => sum + weight, 0)
// Generate a random number between 0 and the total weight
const randomNumber = Math.floor(Math.random() * totalWeight)
// Choose the index based on the random number and weights
let weightSum = 0
for (let i = 0; i < weights.length; i++) {
weightSum += weights[i]
if (randomNumber < weightSum) {
return i
}
}
}

utils/tools/SendDiceTool.js Normal file

@@ -0,0 +1,35 @@
import { AbstractTool } from './AbstractTool.js'
export class SendDiceTool extends AbstractTool {
name = 'sendDice'
parameters = {
properties: {
num: {
type: 'number',
description: '骰子的数量'
},
groupId: {
type: 'string',
description: '群号或qq号发送目标'
}
},
required: ['num', 'groupId']
}
  func = async function (opts) {
    let { num, groupId } = opts
    let groupList = await Bot.getGroupList()
    // groupId arrives as a string from the model; the group list keys are numbers
    groupId = parseInt(groupId.trim())
    if (groupList.get(groupId)) {
      let group = await Bot.pickGroup(groupId, true)
      await group.sendMsg(segment.dice(num))
    } else {
      let friend = await Bot.pickFriend(groupId)
      await friend.sendMsg(segment.dice(num))
    }
    return 'the dice has been sent'
  }
  description = 'If you want to roll dice, use this tool. If you know the group number, use the group number instead of the qq number first. The input should be the number of dice to be cast (1-6) and the target group number or qq number.'
}

utils/tools/SendMusicTool.js Normal file

@@ -0,0 +1,33 @@
import { AbstractTool } from './AbstractTool.js'
export class SendMusicTool extends AbstractTool {
name = 'sendMusic'
parameters = {
properties: {
id: {
type: 'string',
description: '音乐的id'
},
groupId: {
type: 'string',
description: '群号或qq号发送目标'
}
},
    required: ['id', 'groupId']
}
func = async function (opts) {
let { id, groupId } = opts
groupId = parseInt(groupId.trim())
try {
let group = await Bot.pickGroup(groupId)
await group.shareMusic('163', id)
return `the music has been shared to ${groupId}`
} catch (e) {
return `music share failed: ${e}`
}
}
description = 'Useful when you want to share music. You must use searchMusic first to get the music id'
}

utils/tools/SendPictureTool.js Normal file

@@ -0,0 +1,50 @@
import { AbstractTool } from './AbstractTool.js'
export class SendPictureTool extends AbstractTool {
name = 'sendPicture'
parameters = {
properties: {
picture: {
type: 'string',
description: 'the url of the pictures, split with space if more than one.'
},
qq: {
type: 'string',
description: 'if you want to send avatar of a user, input his qq number.'
},
groupId: {
type: 'string',
description: '群号或qq号发送目标'
}
},
required: ['picture', 'groupId']
}
func = async function (opt) {
let { picture, groupId, qq } = opt
if (qq) {
let avatar = `https://q1.qlogo.cn/g?b=qq&s=0&nk=${qq}`
picture += ' ' + avatar
}
let pictures = picture.trim().split(' ')
pictures = pictures.map(img => segment.image(img))
let groupList = await Bot.getGroupList()
groupId = parseInt(groupId)
try {
if (groupList.get(groupId)) {
let group = await Bot.pickGroup(groupId)
await group.sendMsg(pictures)
return `picture has been sent to group ${groupId}`
} else {
let user = await Bot.pickFriend(groupId)
await user.sendMsg(pictures)
return `picture has been sent to user ${groupId}`
}
} catch (err) {
return `failed to send pictures, error: ${JSON.stringify(err)}`
}
}
description = 'Useful when you want to send one or more pictures. '
}

utils/tools/SendRPSTool.js Normal file

@@ -0,0 +1,30 @@
import { AbstractTool } from './AbstractTool.js'
export class SendRPSTool extends AbstractTool {
  name = 'sendRPS'
  parameters = {
    properties: {
      num: {
        type: 'number',
        description: '石头剪刀布的代号'
      },
      groupId: {
        type: 'string',
        description: '群号或qq号发送目标'
      }
    },
    required: ['num', 'groupId']
  }
  func = async function (opts) {
    let { num, groupId } = opts
    let groupList = await Bot.getGroupList()
    // groupId arrives as a string from the model; the group list keys are numbers
    groupId = parseInt(groupId.trim())
    if (groupList.get(groupId)) {
      let group = await Bot.pickGroup(groupId, true)
      await group.sendMsg(segment.rps(num))
    } else {
      let friend = await Bot.pickFriend(groupId)
      await friend.sendMsg(segment.rps(num))
    }
    return 'the rock-paper-scissors emote has been sent'
  }
  description = 'Use this tool if you want to play rock paper scissors. If you know the group number, use the group number instead of the qq number first. The input should be the number 1, 2 or 3 to represent rock-paper-scissors and the target group number or qq number.'
}

utils/tools/SerpIkechan8370Tool.js Normal file

@@ -0,0 +1,37 @@
import fetch from 'node-fetch'
import { AbstractTool } from './AbstractTool.js'
export class SerpIkechan8370Tool extends AbstractTool {
name = 'search'
parameters = {
properties: {
q: {
type: 'string',
description: 'search keyword'
},
source: {
type: 'string',
enum: ['google', 'bing', 'baidu']
}
},
required: ['q']
}
func = async function (opts) {
let { q, source } = opts
if (!source) {
source = 'google'
}
let serpRes = await fetch(`https://serp.ikechan8370.com/${source}?q=${encodeURIComponent(q)}&lang=zh-CN&limit=10`, {
headers: {
'X-From-Library': 'ikechan8370'
}
})
serpRes = await serpRes.json()
let res = serpRes.data
return `the search results are here in json format:\n${JSON.stringify(res)}`
}
description = 'Useful when you want to search something from the internet. If you don\'t know much about the user\'s question, just search about it! If you want to know details of a result, you can use website tool'
}

utils/tools/SerpTool.js Normal file

@@ -0,0 +1,40 @@
import fetch from 'node-fetch'
import { AbstractTool } from './AbstractTool.js'
import { Config } from '../config.js'
export class SerpTool extends AbstractTool {
name = 'serp'
parameters = {
properties: {
q: {
type: 'string',
description: 'search keyword'
}
},
required: ['q']
}
func = async function (opts) {
let { q } = opts
let key = Config.azSerpKey
let serpRes = await fetch(`https://api.bing.microsoft.com/v7.0/search?q=${encodeURIComponent(q)}&mkt=zh-CN`, {
headers: {
'Ocp-Apim-Subscription-Key': key
}
})
serpRes = await serpRes.json()
let res = serpRes.webPages.value
res.forEach(p => {
delete p.displayUrl
delete p.isFamilyFriendly
delete p.thumbnailUrl
delete p.id
delete p.isNavigational
})
return `the search results are here in json format:\n${JSON.stringify(res)}`
}
description = 'Useful when you want to search something from the internet. If you don\'t know much about the user\'s question, just search about it! If you want to know details of a result, you can use website tool'
}

utils/tools/WeatherTool.js Normal file

@@ -0,0 +1,35 @@
import fetch from 'node-fetch'
import { AbstractTool } from './AbstractTool.js'
import { Config } from '../config.js'
export class WeatherTool extends AbstractTool {
name = 'weather'
parameters = {
properties: {
city: {
type: 'string',
description: '要查询的地点,细化到县/区级'
}
},
required: ['city']
}
func = async function (opts) {
let { city } = opts
let key = Config.amapKey
let adcodeRes = await fetch(`https://restapi.amap.com/v3/config/district?keywords=${city}&subdistrict=1&key=${key}`)
adcodeRes = await adcodeRes.json()
let adcode = adcodeRes.districts[0]?.adcode
if (!adcode) {
return `the area ${city} doesn't exist! are you kidding? you should mute him for 1 minute`
}
let cityName = adcodeRes.districts[0].name
let res = await fetch(`https://restapi.amap.com/v3/weather/weatherInfo?city=${adcode}&key=${key}`)
res = await res.json()
let result = res.lives[0]
return `the weather information of area ${cityName} in json format is:\n${JSON.stringify(result)}`
}
description = 'Useful when you want to query weather '
}
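The tool resolves a district adcode first and then fetches live weather; it has no dependency on the bot, so it can be invoked directly (requires a valid Config.amapKey):

import { WeatherTool } from './WeatherTool.js'

const weather = new WeatherTool()
console.log(await weather.func({ city: '朝阳区' }))
// "the weather information of area 朝阳区 in json format is: ..." or the not-found message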

utils/tools/WebsiteTool.js Normal file

@@ -0,0 +1,86 @@
import { AbstractTool } from './AbstractTool.js'
import { ChatGPTAPI } from '../openai/chatgpt-api.js'
import { Config } from '../config.js'
import fetch from 'node-fetch'
import proxy from 'https-proxy-agent'
import { getMaxModelTokens } from '../common.js'
import { ChatGPTPuppeteer } from '../browser.js'
export class WebsiteTool extends AbstractTool {
name = 'website'
parameters = {
properties: {
url: {
type: 'string',
description: '要访问的网站网址'
}
},
required: ['url']
}
func = async function (opts) {
let { url } = opts
try {
// let res = await fetch(url, {
// headers: {
// 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
// }
// })
// let text = await res.text()
let origin = false
if (!Config.headless) {
Config.headless = true
origin = true
}
let ppt = new ChatGPTPuppeteer()
let browser = await ppt.getBrowser()
let page = await browser.newPage()
await page.goto(url, {
waitUntil: 'networkidle2'
})
let text = await page.content()
await page.close()
if (origin) {
Config.headless = false
}
// text = text.replace(/<style\b[^<]*(?:(?!<\/style>)<[^<]*)*<\/style>/gi, '')
// .replace(/<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi, '')
// .replace(/<head\b[^<]*(?:(?!<\/head>)<[^<]*)*<\/head>/gi, '')
// .replace(/<!--[\s\S]*?-->/gi, '')
text = text.replace(/<style\b[^<]*(?:(?!<\/style>)<[^<]*)*<\/style>/gi, '') // 移除<style>标签及其内容
.replace(/<[^>]+style\s*=\s*(["'])(?:(?!\1).)*\1[^>]*>/gi, '') // 移除带有style属性的标签
.replace(/<[^>]+>/g, '')
let maxModelTokens = getMaxModelTokens(Config.model)
text = text.slice(0, Math.min(text.length, maxModelTokens - 1600))
let api = new ChatGPTAPI({
apiBaseUrl: Config.openAiBaseUrl,
apiKey: Config.apiKey,
debug: false,
completionParams: {
model: Config.model
},
fetch: (url, options = {}) => {
const defaultOptions = Config.proxy
? {
agent: proxy(Config.proxy)
}
: {}
const mergedOptions = {
...defaultOptions,
...options
}
return fetch(url, mergedOptions)
},
maxModelTokens
})
const htmlContentSummaryRes = await api.sendMessage(`这是一个网页html经过筛选的内容请你进一步去掉其中的标签、样式、script等无用信息并从中提取出其中的主体内容转换成自然语言告诉我不需要主观描述性的语言。${text}`)
let htmlContentSummary = htmlContentSummaryRes.text
return `this is the main content of website:\n ${htmlContentSummary}`
} catch (err) {
return `failed to visit the website, error: ${err.toString()}`
}
}
description = 'Useful when you want to browse a website by url'
}
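The flow here is: render the page headlessly, strip markup, trim the text to the model's context budget (maxModelTokens - 1600), then ask the model itself to summarize. A direct invocation sketch (URL illustrative; requires a working puppeteer setup and Config.apiKey):

import { WebsiteTool } from './WebsiteTool.js'

const website = new WebsiteTool()
console.log(await website.func({ url: 'https://github.com/ikechan8370/chatgpt-plugin' }))
// "this is the main content of website: ..." or an error string on failure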

yarn-error.log Normal file

File diff suppressed because it is too large

yarn.lock Normal file

File diff suppressed because it is too large