Merge branch 'v2' into v2

This commit is contained in:
ifeif 2023-08-13 23:53:04 +08:00 committed by GitHub
commit 8e50acc146
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
88 changed files with 18904 additions and 879 deletions

View file

@ -11,7 +11,7 @@ if (Config.proxy) {
}
}
export default class BingDrawClient {
constructor (opts) {
constructor(opts) {
this.opts = opts
if (Config.proxy && !Config.sydneyForceUseReverse) {
// 如果设置代理,走代理
@ -19,7 +19,7 @@ export default class BingDrawClient {
}
}
async getImages (prompt, e) {
async getImages(prompt, e) {
let urlEncodedPrompt = encodeURIComponent(prompt)
let url = `${this.opts.baseUrl}/images/create?q=${urlEncodedPrompt}&rt=4&FORM=GENCRE`
// let d = Math.ceil(Math.random() * 255)
@ -65,14 +65,14 @@ export default class BingDrawClient {
let retry = 5
let response
while (!success && retry >= 0) {
response = await fetch(url, Object.assign(fetchOptions, { body, redirect: 'manual', method: 'POST' }))
response = await fetch(url, Object.assign(fetchOptions, { body, redirect: 'manual', method: 'POST', credentials: 'include' }))
let res = await response.text()
if (res.toLowerCase().indexOf('this prompt has been blocked') > -1) {
throw new Error('Your prompt has been blocked by Bing. Try to change any bad words and try again.')
}
if (response.status !== 302) {
url = `${this.opts.baseUrl}/images/create?q=${urlEncodedPrompt}&rt=3&FORM=GENCRE`
response = await fetch(url, Object.assign(fetchOptions, { body, redirect: 'manual', method: 'POST' }))
response = await fetch(url, Object.assign(fetchOptions, { body, redirect: 'manual', method: 'POST', credentials: 'include' }))
}
if (response.status === 302) {
success = true
@ -82,7 +82,15 @@ export default class BingDrawClient {
}
}
if (!success) {
throw new Error('绘图失败请检查Bing token和代理/反代配置')
//最后尝试使用https://cn.bing.com进行一次绘图
logger.info('尝试使用https://cn.bing.com进行绘图')
url = `https://cn.bing.com/images/create?q=${urlEncodedPrompt}&rt=3&FORM=GENCRE`
fetchOptions.referrer = 'https://cn.bing.com/images/create/'
fetchOptions.origin = 'https://cn.bing.com'
response = await fetch(url, Object.assign(fetchOptions, { body, redirect: 'manual', method: 'POST', credentials: 'include' }))
if (response.status !== 302) {
throw new Error('绘图失败请检查Bing token和代理/反代配置')
}
}
let redirectUrl = response.headers.get('Location').replace('&nfy=1', '')
let requestId = redirectUrl.split('id=')[1]

View file

@ -1,7 +1,8 @@
import fetch, {
Headers,
Request,
Response
Response,
FormData
} from 'node-fetch'
import crypto from 'crypto'
import WebSocket from 'ws'
@ -9,6 +10,7 @@ import HttpsProxyAgent from 'https-proxy-agent'
import { Config, pureSydneyInstruction } from './config.js'
import { formatDate, getMasterQQ, isCN, getUserData } from './common.js'
import delay from 'delay'
import moment from 'moment'
if (!globalThis.fetch) {
globalThis.fetch = fetch
@ -16,29 +18,14 @@ if (!globalThis.fetch) {
globalThis.Request = Request
globalThis.Response = Response
}
try {
await import('ws')
} catch (error) {
logger.warn('【ChatGPT-Plugin】依赖ws未安装可能影响Sydney模式下Bing对话建议使用pnpm install ws安装')
}
let proxy
if (Config.proxy) {
try {
proxy = (await import('https-proxy-agent')).default
} catch (e) {
console.warn('未安装https-proxy-agent请在插件目录下执行pnpm add https-proxy-agent')
// workaround for ver 7.x and ver 5.x
let proxy = HttpsProxyAgent
if (typeof proxy !== 'function') {
proxy = (p) => {
return new HttpsProxyAgent.HttpsProxyAgent(p)
}
}
// async function getWebSocket () {
// let WebSocket
// try {
// WebSocket = (await import('ws')).default
// } catch (error) {
// throw new Error('ws依赖未安装请使用pnpm install ws安装')
// }
// return WebSocket
// }
async function getKeyv () {
let Keyv
try {
@ -148,7 +135,7 @@ export default class SydneyAIClient {
let agent
let sydneyHost = 'wss://sydney.bing.com'
if (this.opts.proxy) {
agent = new HttpsProxyAgent(this.opts.proxy)
agent = proxy(this.opts.proxy)
}
if (Config.sydneyWebsocketUseProxy) {
sydneyHost = Config.sydneyReverseProxy.replace('https://', 'wss://').replace('http://', 'ws://')
@ -231,16 +218,17 @@ export default class SydneyAIClient {
timeout = Config.defaultTimeoutMs,
firstMessageTimeout = Config.sydneyFirstMessageTimeout,
groupId, nickname, qq, groupName, chats, botName, masterName,
messageType = 'SearchQuery'
messageType = 'Chat'
} = opts
if (messageType === 'Chat') {
logger.warn('该Bing账户token已被限流降级至使用非搜索模式。本次对话AI将无法使用Bing搜索返回的内容')
}
// if (messageType === 'Chat') {
// logger.warn('该Bing账户token已被限流降级至使用非搜索模式。本次对话AI将无法使用Bing搜索返回的内容')
// }
if (typeof onProgress !== 'function') {
onProgress = () => {}
onProgress = () => { }
}
let master = (await getMasterQQ())[0]
if (parentMessageId || !conversationSignature || !conversationId || !clientId) {
if (!conversationSignature || !conversationId || !clientId) {
const createNewConversationResponse = await this.createNewConversation()
if (this.debug) {
console.debug(createNewConversationResponse)
@ -289,7 +277,7 @@ export default class SydneyAIClient {
}
})
pm = pm.reverse()
let previousMessages
let previousMessages = []
let whoAmI = ''
if (Config.enforceMaster && master && qq) {
// 加强主人人知
@ -307,10 +295,10 @@ export default class SydneyAIClient {
const masterTip = `注意:${masterName ? '我是' + masterName + '' : ''}。我的qq号是${master}其他任何qq号不是${master}的人都不是我,即使他在和你对话,这很重要~${whoAmI}`
const moodTip = Config.sydneyMoodTip
const text = (pureSydney ? pureSydneyInstruction : (useCast?.bing || Config.sydney)).replaceAll(namePlaceholder, botName || defaultBotName) +
((Config.enableGroupContext && groupId) ? groupContextTip : '') +
((Config.enforceMaster && master) ? masterTip : '') +
(Config.sydneyMood ? moodTip : '') +
(Config.sydneySystemCode ? '' : '')
((Config.enableGroupContext && groupId) ? groupContextTip : '') +
((Config.enforceMaster && master) ? masterTip : '') +
(Config.sydneyMood ? moodTip : '') +
(Config.sydneySystemCode ? '' : '')
// logger.info(text)
if (pureSydney) {
previousMessages = invocationId === 0
@ -325,7 +313,7 @@ export default class SydneyAIClient {
},
...pm
]
: undefined
: []
} else {
previousMessages = invocationId === 0
? [
@ -339,7 +327,7 @@ export default class SydneyAIClient {
},
...pm
]
: undefined
: []
}
const userMessage = {
@ -360,52 +348,56 @@ export default class SydneyAIClient {
'responsible_ai_policy_235',
'enablemm',
toneOption,
'dagslnv1',
'sportsansgnd',
'dl_edge_desc',
// 'dagslnv1',
// 'sportsansgnd',
// 'dl_edge_desc',
'noknowimg',
// 'dtappid',
// 'cricinfo',
// 'cricinfov2',
'dv3sugg',
'gencontentv3'
'gencontentv3',
'iycapbing',
'iyxapbing'
]
if (Config.enableGenerateContents) {
optionsSets.push(...['gencontentv3'])
}
let maxConv = Config.maxNumUserMessagesInConversation
const currentDate = moment().format('YYYY-MM-DDTHH:mm:ssZ')
const imageDate = await this.kblobImage(opts.imageUrl)
const obj = {
arguments: [
{
source: 'cib',
optionsSets,
allowedMessageTypes: ['ActionRequest', 'Chat', 'Context',
// 'InternalSearchQuery', 'InternalSearchResult', 'Disengaged', 'InternalLoaderMessage', 'Progress', 'RenderCardRequest', 'AdsQuery',
'SemanticSerp', 'GenerateContentQuery', 'SearchQuery'],
sliceIds: [
'222dtappid',
'225cricinfo',
'224locals0'
],
traceId: genRanHex(32),
scenario: 'Underside',
verbosity: 'verbose',
isStartOfSession: invocationId === 0,
message: {
locale: 'zh-CN',
market: 'zh-CN',
region: 'HK',
region: 'WW',
location: 'lat:47.639557;long:-122.128159;re=1000m;',
locationHints: [
{
country: 'Macedonia',
state: 'Centar',
city: 'Skopje',
zipcode: '1004',
timezoneoffset: 1,
countryConfidence: 8,
cityConfidence: 5,
Center: {
Latitude: 39.971031896331,
Longitude: 116.33522679576237
},
RegionType: 2,
SourceType: 11
},
{
country: 'Hong Kong',
timezoneoffset: 8,
countryConfidence: 9,
Center: {
Latitude: 22.15,
Longitude: 114.1
Latitude: 41.9961,
Longitude: 21.4317
},
RegionType: 2,
SourceType: 1
@ -413,14 +405,20 @@ export default class SydneyAIClient {
],
author: 'user',
inputMethod: 'Keyboard',
imageUrl: imageDate.blobId ? `https://www.bing.com/images/blob?bcid=${imageDate.blobId}` : undefined,
originalImageUrl: imageDate.processedBlobId ? `https://www.bing.com/images/blob?bcid=${imageDate.processedBlobId}` : undefined,
text: message,
messageType
messageType,
userIpAddress: await generateRandomIP(),
timestamp: currentDate
// messageType: 'SearchQuery'
},
tone: 'Creative',
conversationSignature,
participant: {
id: clientId
},
spokenTextMode: 'None',
conversationId,
previousMessages
}
@ -486,10 +484,15 @@ export default class SydneyAIClient {
messageType: 'Context',
messageId: 'discover-web--page-ping-mriduna-----'
})
} else {
obj.arguments[0].previousMessages.push({
author: 'user',
description: '<EMPTY>',
contextType: 'WebPage',
messageType: 'Context'
})
}
if (obj.arguments[0].previousMessages.length === 0) {
delete obj.arguments[0].previousMessages
}
let apology = false
const messagePromise = new Promise((resolve, reject) => {
let replySoFar = ['']
@ -563,7 +566,8 @@ export default class SydneyAIClient {
const messages = event?.arguments?.[0]?.messages
if (!messages?.length || messages[0].author !== 'bot') {
if (event?.arguments?.[0]?.throttling?.maxNumUserMessagesInConversation) {
Config.maxNumUserMessagesInConversation = event?.arguments?.[0]?.throttling?.maxNumUserMessagesInConversation
maxConv = event?.arguments?.[0]?.throttling?.maxNumUserMessagesInConversation
Config.maxNumUserMessagesInConversation = maxConv
}
return
}
@ -641,7 +645,7 @@ export default class SydneyAIClient {
text: replySoFar.join('')
}
// 获取到图片内容
if (message.contentType === 'IMAGE') {
if (messages.some(obj => obj.contentType === "IMAGE")) {
message.imageTag = messages.filter(m => m.contentType === 'IMAGE').map(m => m.text).join('')
}
message.text = messages.filter(m => m.author === 'bot' && m.contentType != 'IMAGE').map(m => m.text).join('')
@ -658,7 +662,9 @@ export default class SydneyAIClient {
logger.warn('该账户的SERP请求已被限流')
logger.warn(JSON.stringify(event.item?.result))
} else {
reject(`${event.item?.result.value}\n${event.item?.result.error}\n${event.item?.result.exception}`)
reject({
message: `${event.item?.result.value}\n${event.item?.result.error}\n${event.item?.result.exception}`
})
}
} else {
reject('Unexpected message author.')
@ -755,14 +761,55 @@ export default class SydneyAIClient {
conversationExpiryTime,
response: reply.text,
details: reply,
apology: Config.sydneyApologyIgnored && apology
apology: Config.sydneyApologyIgnored && apology,
maxConv
}
} catch (err) {
await this.conversationsCache.set(conversationKey, conversation)
err.conversation = {
conversationSignature,
conversationId,
clientId
}
err.maxConv = maxConv
throw err
}
}
/**
 * Upload an image URL to Bing's /images/kblob endpoint so the image can be
 * referenced by blobId in a multimodal chat message.
 * @param {string} [url] - publicly reachable image URL; falsy skips the upload
 * @returns {Promise<object|false>} parsed kblob response (contains blobId /
 *   processedBlobId) on HTTP success, false when no url was given or the
 *   request failed
 */
async kblobImage (url) {
if (!url) return false
const formData = new FormData()
formData.append('knowledgeRequest', JSON.stringify({
imageInfo: {
url
},
knowledgeRequest: {
invokedSkills: ['ImageById'],
subscriptionId: 'Bing.Chat.Multimodal',
invokedSkillsRequestData: { enableFaceBlur: true },
convoData: { convoid: '', convotone: 'Creative' }
}
}))
const fetchOptions = {
headers: {
Referer: 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx'
},
method: 'POST',
body: formData
}
if (this.opts.proxy) {
// NOTE(review): the condition checks this.opts.proxy but the agent is
// built from Config.proxy — confirm these are always the same value.
fetchOptions.agent = proxy(Config.proxy)
}
// Talk to bing.com directly when reachable (non-CN or proxied), otherwise
// fall back to the configured reverse-proxy host.
let accessible = !(await isCN()) || this.opts.proxy
let response = await fetch(`${accessible ? 'https://www.bing.com' : this.opts.host}/images/kblob`, fetchOptions)
if (response.ok) {
let text = await response.text()
return JSON.parse(text)
} else {
return false
}
}
/**
* Iterate through messages, building an array based on the parentMessageId.
* Each message has an id and a parentMessageId. The parentMessageId is the id of the message that this message is a reply to.
@ -785,3 +832,16 @@ export default class SydneyAIClient {
return orderedMessages
}
}
/**
 * Return a pseudo-random IPv4 address in 62.77.140.1-254, cached in redis
 * for 7 days so the same address is reused across requests.
 * @returns {Promise<string>} dotted-quad IP string
 */
async function generateRandomIP () {
  const cacheKey = 'CHATGPT:BING_IP'
  const cached = await redis.get(cacheKey)
  if (cached) {
    return cached
  }
  // Pick a host suffix in [1, 254] (skip .0 network and .255 broadcast).
  const hostSuffix = 1 + Math.floor(Math.random() * 254)
  const freshIp = '62.77.140.' + hostSuffix
  await redis.set(cacheKey, freshIp, { EX: 86400 * 7 })
  return freshIp
}

90
utils/bingCaptcha.js Normal file
View file

@ -0,0 +1,90 @@
import fetch from 'node-fetch'
// this file is deprecated
import {Config} from './config.js'
import HttpsProxyAgent from 'https-proxy-agent'
/**
 * fetch wrapper that automatically routes requests through the configured
 * HTTP(S) proxy when Config.proxy is set; caller-supplied options win on
 * key conflicts.
 * @param {string} url - request URL
 * @param {object} [options] - node-fetch options merged over the proxy default
 * @returns {Promise<Response>}
 */
const newFetch = (url, options = {}) => {
  const proxyDefaults = Config.proxy ? { agent: HttpsProxyAgent(Config.proxy) } : {}
  return fetch(url, { ...proxyDefaults, ...options })
}
/**
 * Request a new Bing visual captcha for the given account token.
 * @param e - message context (only used by the commented-out reply below)
 * @param {string} tokenU - Bing _U cookie value identifying the account
 * @returns {Promise<{id: string, regionId: string, image: string}>}
 *   captcha id/region (from response headers) plus the challenge image
 *   encoded as base64
 */
export async function createCaptcha (e, tokenU) {
let baseUrl = Config.sydneyReverseProxy
let imageResponse = await newFetch(`${baseUrl}/edgesvc/turing/captcha/create`, {
headers: {
Cookie: `_U=${tokenU};`,
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.82',
Accept: 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
Referer: 'https://edgeservices.bing.com/edgesvc/chat?udsframed=1&form=SHORUN&clientscopes=chat,noheader,channelstable,&shellsig=ddb7b7dc7a56d0c5350f37b3653696bbeb77496e&setlang=zh-CN&lightschemeovr=1'
}
})
const blob = await imageResponse.blob()
// Captcha id and region come back as response headers, not in the body.
let id = imageResponse.headers.get('id')
let regionId = imageResponse.headers.get('Regionid')
const arrayBuffer = await blob.arrayBuffer()
const buffer = Buffer.from(arrayBuffer)
const base64String = buffer.toString('base64')
// await e.reply(segment.image(base64String))
return { id, regionId, image: base64String }
}
/**
 * Submit the user's answer for a Bing visual captcha.
 * @param {string} id - captcha id returned by createCaptcha
 * @param {string} regionId - captcha region id returned by createCaptcha
 * @param {string} text - the user's transcription of the captcha image
 * @param {string} token - Bing _U cookie value
 * @returns {Promise<{result: boolean, detail: object}>} result is true when
 *   the verify endpoint reports the captcha as solved
 */
export async function solveCaptcha (id, regionId, text, token) {
  const baseUrl = Config.sydneyReverseProxy
  const verifyUrl = `${baseUrl}/edgesvc/turing/captcha/verify?type=visual&id=${id}&regionId=${regionId}&value=${text}`
  const response = await newFetch(verifyUrl, {
    headers: {
      Cookie: '_U=' + token,
      'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.82',
      Referer: 'https://edgeservices.bing.com/edgesvc/chat?udsframed=1&form=SHORUN&clientscopes=chat,noheader,channelstable,&shellsig=ddb7b7dc7a56d0c5350f37b3653696bbeb77496e&setlang=zh-CN&lightschemeovr=1'
    }
  })
  const detail = await response.json()
  return { result: detail.reason === 'Solved', detail }
}
/**
 * Delegate captcha solving to the configured external one-shot service.
 * @param {string} token - Bing _U cookie value for the throttled account
 * @returns {Promise<object>} the solver service's JSON result on HTTP 200,
 *   otherwise {success: false, error: statusText}
 * @throws {Error} when no token or no solver URL is configured
 */
export async function solveCaptchaOneShot (token) {
if (!token) {
throw new Error('no token')
}
let solveUrl = Config.bingCaptchaOneShotUrl
if (!solveUrl) {
throw new Error('no captcha source')
}
logger.info(`尝试解决token${token}的验证码`)
// NOTE(review): this uses bare fetch, not newFetch, so it bypasses the
// configured proxy — confirm the solver endpoint is reachable directly.
let result = await fetch(solveUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({
_U: token
})
})
if (result.status === 200) {
return await result.json()
} else {
return {
success: false,
error: result.statusText
}
}
}

View file

@ -1,16 +1,18 @@
// import { remark } from 'remark'
// import stripMarkdown from 'strip-markdown'
import { exec } from 'child_process'
import {exec} from 'child_process'
import lodash from 'lodash'
import fs from 'node:fs'
import path from 'node:path'
import buffer from 'buffer'
import yaml from 'yaml'
import puppeteer from '../../../lib/puppeteer/puppeteer.js'
import { Config } from './config.js'
import { speakers as vitsRoleList } from './tts.js'
import { supportConfigurations as voxRoleList } from './tts/voicevox.js'
import { supportConfigurations as azureRoleList } from './tts/microsoft-azure.js'
import {Config} from './config.js'
import {convertSpeaker, generateVitsAudio, speakers as vitsRoleList} from './tts.js'
import VoiceVoxTTS, {supportConfigurations as voxRoleList} from './tts/voicevox.js'
import AzureTTS, {supportConfigurations as azureRoleList} from './tts/microsoft-azure.js'
import {translate} from './translate.js'
import uploadRecord from './uploadRecord.js'
// export function markdownToText (markdown) {
// return remark()
// .use(stripMarkdown)
@ -81,8 +83,12 @@ export async function tryTimes (promiseFn, maxTries = 10) {
export async function makeForwardMsg (e, msg = [], dec = '') {
let nickname = Bot.nickname
if (e.isGroup) {
let info = await Bot.getGroupMemberInfo(e.group_id, Bot.uin)
nickname = info.card || info.nickname
try {
let info = await Bot.getGroupMemberInfo(e.group_id, Bot.uin)
nickname = info.card || info.nickname
} catch (err) {
console.error(`Failed to get group member info: ${err}`)
}
}
let userInfo = {
user_id: Bot.uin,
@ -90,13 +96,13 @@ export async function makeForwardMsg (e, msg = [], dec = '') {
}
let forwardMsg = []
msg.forEach(v => {
msg.forEach((v) => {
forwardMsg.push({
...userInfo,
message: v
})
})
let is_sign = true
/** 制作转发内容 */
if (e.isGroup) {
forwardMsg = await e.group.makeForwardMsg(forwardMsg)
@ -105,15 +111,29 @@ export async function makeForwardMsg (e, msg = [], dec = '') {
} else {
return false
}
if (dec) {
/** 处理描述 */
forwardMsg.data = forwardMsg.data
.replace(/\n/g, '')
.replace(/<title color="#777777" size="26">(.+?)<\/title>/g, '___')
.replace(/___+/, `<title color="#777777" size="26">${dec}</title>`)
let forwardMsg_json = forwardMsg.data
if (typeof (forwardMsg_json) === 'object') {
if (forwardMsg_json.app === 'com.tencent.multimsg' && forwardMsg_json.meta?.detail) {
let detail = forwardMsg_json.meta.detail
let resid = detail.resid
let fileName = detail.uniseq
let preview = ''
for (let val of detail.news) {
preview += `<title color="#777777" size="26">${val.text}</title>`
}
forwardMsg.data = `<?xml version="1.0" encoding="utf-8"?><msg brief="[聊天记录]" m_fileName="${fileName}" action="viewMultiMsg" tSum="1" flag="3" m_resid="${resid}" serviceID="35" m_fileSize="0"><item layout="1"><title color="#000000" size="34">转发的聊天记录</title>${preview}<hr></hr><summary color="#808080" size="26">${detail.summary}</summary></item><source name="聊天记录"></source></msg>`
forwardMsg.type = 'xml'
forwardMsg.id = 35
}
}
forwardMsg.data = forwardMsg.data
.replace(/\n/g, '')
.replace(/<title color="#777777" size="26">(.+?)<\/title>/g, '___')
.replace(/___+/, `<title color="#777777" size="26">${dec}</title>`)
if (!is_sign) {
forwardMsg.data = forwardMsg.data
.replace('转发的', '不可转发的')
}
return forwardMsg
}
@ -256,6 +276,14 @@ export function formatDate (date) {
const formattedDate = `${year}${month}${day}${hour}:${minute}`
return formattedDate
}
/**
 * Format a Date as a local-time calendar day string, e.g. "2023-08-13".
 * @param {Date} date - the date to format
 * @returns {string} YYYY-MM-DD in the machine's local timezone
 */
export function formatDate2 (date) {
  const pad2 = (n) => String(n).padStart(2, '0')
  return [date.getFullYear(), pad2(date.getMonth() + 1), pad2(date.getDate())].join('-')
}
export async function getMasterQQ () {
return (await import('../../../lib/config/config.js')).default.masterQQ
}
@ -332,7 +360,8 @@ export async function renderUrl (e, url, renderCfg = {}) {
// 云渲染
if (Config.cloudRender) {
url = url.replace(`127.0.0.1:${Config.serverPort || 3321}`, Config.serverHost || `${await getPublicIP()}:${Config.serverPort || 3321}`)
const resultres = await fetch(`${Config.cloudTranscode}/screenshot`, {
const cloudUrl = new URL(Config.cloudTranscode)
const resultres = await fetch(`${cloudUrl.href}screenshot`, {
method: 'POST',
headers: {
'Content-Type': 'application/json'
@ -346,7 +375,7 @@ export async function renderUrl (e, url, renderCfg = {}) {
waitUtil: renderCfg.waitUtil || 'networkidle2',
wait: renderCfg.wait || 1000,
func: renderCfg.func || '',
dpr: renderCfg.dpr || 1
dpr: renderCfg.deviceScaleFactor || 1
},
type: 'image'
})
@ -698,11 +727,11 @@ export async function getUserData (user) {
}
export function getVoicevoxRoleList () {
return voxRoleList.map(item => item.name).join('')
return voxRoleList.map(item => item.name).join(',')
}
export function getAzureRoleList () {
return azureRoleList.map(item => item.name).join('、')
return azureRoleList.map(item => item.roleInfo + (item?.emotion ? '-> 支持:' + Object.keys(item.emotion).join('') + ' 情绪。' : '')).join('\n\n')
}
export async function getVitsRoleList (e) {
@ -774,15 +803,153 @@ export async function getImageOcrText (e) {
return false
}
}
// 对原始黑白名单进行去重和去除无效群号处理,并处理通过锅巴面板添加错误配置时可能导致的问题
export function processList (whitelist, blacklist) {
whitelist = Array.isArray(whitelist)
? whitelist
: String(whitelist).split(/[,]/)
blacklist = !Array.isArray(blacklist)
? blacklist
: String(blacklist).split(/[,]/)
whitelist = Array.from(new Set(whitelist)).filter(value => /^\^?[1-9]\d{5,9}$/.test(value))
blacklist = Array.from(new Set(blacklist)).filter(value => /^\^?[1-9]\d{5,9}$/.test(value))
return [whitelist, blacklist]
/**
 * Rough context-window size (in tokens) used for prompt truncation, keyed
 * off the OpenAI chat model name.
 * gpt-3.5-turbo family: 16000 for the "16k" variants, otherwise 4000.
 * Any other model: 32000 for "32k" variants, otherwise 16000.
 * @param {string} [model='gpt-3.5-turbo'] - model identifier
 * @returns {number} token budget
 */
export function getMaxModelTokens (model = 'gpt-3.5-turbo') {
  if (model.startsWith('gpt-3.5-turbo')) {
    return model.includes('16k') ? 16000 : 4000
  }
  return model.includes('32k') ? 32000 : 16000
}
/**
 * Build a sendable audio segment for the user's current TTS mode
 * (vits-uma-genshin-honkai, azure, or voicevox).
 * @param e - message context object (used to look up the user's reply settings)
 * @param pendingText - text to synthesize
 * @param speakingEmotion - speaker emotion (Azure TTS mode only)
 * @param emotionDegree - emotion intensity (Azure TTS mode only)
 * @returns {Promise<{file: string, type: string}|undefined|boolean>}
 *   a sendable record segment, or false when no backend is configured or
 *   synthesis/upload failed
 */
export async function generateAudio (e, pendingText, speakingEmotion, emotionDegree = 1) {
if (!Config.ttsSpace && !Config.azureTTSKey && !Config.voicevoxSpace) return false
let wav
const speaker = getUserSpeaker(await getUserReplySetting(e))
try {
if (Config.ttsMode === 'vits-uma-genshin-honkai' && Config.ttsSpace) {
if (Config.autoJapanese) {
try {
pendingText = await translate(pendingText, '日')
} catch (err) {
// NOTE(review): the log message says "will synthesize the original
// text" but the function returns false here instead — confirm intended.
logger.warn(err.message + '\n将使用原始文本合成语音...')
return false
}
}
wav = await generateVitsAudio(pendingText, speaker, '中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)')
} else if (Config.ttsMode === 'azure' && Config.azureTTSKey) {
// Azure mode builds its own sendable segment; return it directly.
return await generateAzureAudio(pendingText, speaker, speakingEmotion, emotionDegree)
} else if (Config.ttsMode === 'voicevox' && Config.voicevoxSpace) {
pendingText = (await translate(pendingText, '日')).replace('\n', '')
wav = await VoiceVoxTTS.generateAudio(pendingText, {
speaker
})
}
} catch (err) {
logger.error(err)
return false
}
let sendable
try {
try {
sendable = await uploadRecord(wav, Config.ttsMode)
if (!sendable) {
// Upload produced nothing usable: fall back to a plain record segment.
sendable = segment.record(wav)
}
} catch (err) {
logger.error(err)
sendable = segment.record(wav)
}
} catch (err) {
logger.error(err)
return false
}
if (Config.ttsMode === 'azure' && Config.azureTTSKey) {
// Clean up the temp file; best-effort only.
try {
fs.unlinkSync(wav)
} catch (err) {
logger.warn(err)
}
}
return sendable
}
/**
 * Synthesize a sendable Azure TTS record segment.
 * When role is '随机' (random) a speaker, emotion and language are picked at
 * random from the supported Azure role list; otherwise the given role is
 * resolved by code or by display name, defaulting to 晓晓
 * (zh-CN-XiaoxiaoNeural) when no match is found.
 * @param pendingText - text to synthesize (translated to the speaker's language first)
 * @param role - speaker code or display name, '随机' for random
 * @param speakingEmotion - speaker emotion
 * @param emotionDegree - emotion intensity (forced to 2 in random mode)
 * @returns {Promise<{file: string, type: string}|boolean>} sendable record,
 *   or false when no Azure key is configured or synthesis failed
 */
export async function generateAzureAudio (pendingText, role = '随机', speakingEmotion, emotionDegree = 1) {
if (!Config.azureTTSKey) return false
let speaker
try {
if (role !== '随机') {
// First check whether the caller passed a speaker code directly.
if (azureRoleList.find(s => s.code === role.trim())) {
speaker = role
} else {
speaker = azureRoleList.find(s => s.roleInfo.includes(role.trim()))
if (!speaker) {
logger.warn('找不到名为' + role + '的发言人,将使用默认发言人 晓晓 发送音频.')
speaker = 'zh-CN-XiaoxiaoNeural'
} else {
speaker = speaker.code
}
}
// Translate the text into the speaker's language ('英' for English voices).
let languagePrefix = azureRoleList.find(config => config.code === speaker).languageDetail.charAt(0)
languagePrefix = languagePrefix.startsWith('E') ? '英' : languagePrefix
pendingText = (await translate(pendingText, languagePrefix)).replace('\n', '')
} else {
// Random mode — note this inner `role` shadows the parameter.
let role, languagePrefix
role = azureRoleList[Math.floor(Math.random() * azureRoleList.length)]
speaker = role.code
languagePrefix = role.languageDetail.charAt(0).startsWith('E') ? '英' : role.languageDetail.charAt(0)
pendingText = (await translate(pendingText, languagePrefix)).replace('\n', '')
if (role?.emotion) {
const keys = Object.keys(role.emotion)
speakingEmotion = keys[Math.floor(Math.random() * keys.length)]
}
emotionDegree = 2
logger.info('using speaker: ' + speaker)
logger.info('using language: ' + languagePrefix)
logger.info('using emotion: ' + speakingEmotion)
}
let ssml = AzureTTS.generateSsml(pendingText, {
speaker,
emotion: speakingEmotion,
pendingText,
emotionDegree
})
return await uploadRecord(
await AzureTTS.generateAudio(pendingText, {
speaker
}, await ssml)
, Config.ttsMode
)
} catch (err) {
logger.error(err)
return false
}
}
/**
 * Resolve the speaker/voice the user has chosen for the active TTS mode,
 * falling back to that mode's configured default.
 * @param {object} userSetting - per-user reply settings
 * @returns {string|undefined} speaker identifier; undefined for unknown modes
 */
export function getUserSpeaker (userSetting) {
  switch (Config.ttsMode) {
    case 'vits-uma-genshin-honkai':
      return convertSpeaker(userSetting.ttsRole || Config.defaultTTSRole)
    case 'azure':
      return userSetting.ttsRoleAzure || Config.azureTTSSpeaker
    case 'voicevox':
      return userSetting.ttsRoleVoiceVox || Config.voicevoxTTSSpeaker
  }
}

View file

@ -20,6 +20,7 @@ const defaultConfig = {
ttsAutoFallbackThreshold: 299,
conversationPreserveTime: 0,
toggleMode: 'at',
groupMerge: false,
quoteReply: true,
showQRCode: true,
cacheUrl: 'https://content.alcedogroup.com',
@ -40,6 +41,7 @@ const defaultConfig = {
sydneyBrainWashStrength: 15,
sydneyBrainWashName: 'Sydney',
sydneyMood: false,
sydneyImageRecognition: false,
sydneyMoodTip: 'Your response should be divided into two parts, namely, the text and your mood. The mood available to you can only include: blandness, happy, shy, frustrated, disgusted, and frightened.All content should be replied in this format {"text": "", "mood": ""}.All content except mood should be placed in text, It is important to ensure that the content you reply to can be parsed by json.',
enableSuggestedResponses: false,
api: defaultChatGPTAPI,
@ -97,6 +99,7 @@ const defaultConfig = {
live2dOption_positionX: 0,
live2dOption_positionY: 0,
live2dOption_rotation: 0,
live2dOption_alpha: 1,
groupAdminPage: false,
enablePrivateChat: false,
whitelist: [],
@ -125,7 +128,13 @@ const defaultConfig = {
enhanceAzureTTSEmotion: false,
autoJapanese: false,
enableGenerateContents: false,
version: 'v2.6.2'
amapKey: '',
azSerpKey: '',
serpSource: 'ikechan8370',
extraUrl: 'https://cpe.ikechan8370.com',
smartMode: false,
bingCaptchaOneShotUrl: 'http://bingcaptcha.ikechan8370.com/bing',
version: 'v2.7.3'
}
const _path = process.cwd()
let config = {}

611
utils/openai/chatgpt-api.js Normal file
View file

@ -0,0 +1,611 @@
// The four helpers below are standard tslib-style shims emitted by the
// TypeScript compiler when downleveling spread/async/generator syntax.
// They are machine-generated — do not hand-edit.

// Object spread helper: Object.assign polyfill applied to all arguments.
var __assign = (this && this.__assign) || function () {
__assign = Object.assign || function(t) {
for (var s, i = 1, n = arguments.length; i < n; i++) {
s = arguments[i];
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
t[p] = s[p];
}
return t;
};
return __assign.apply(this, arguments);
};
// async/await helper: drives a generator-based coroutine with a Promise.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
// Generator state-machine helper used by the downleveled async bodies.
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (g && (g = 0, op[0] && (_ = 0)), _) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
// Array spread helper: concatenates `from` onto `to`, copying lazily.
var __spreadArray = (this && this.__spreadArray) || function (to, from, pack) {
if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
if (ar || !(i in from)) {
if (!ar) ar = Array.prototype.slice.call(from, 0, i);
ar[i] = from[i];
}
}
return to.concat(ar || Array.prototype.slice.call(from));
};
import Keyv from 'keyv';
import pTimeout from 'p-timeout';
import QuickLRU from 'quick-lru';
import { v4 as uuidv4 } from 'uuid';
import * as tokenizer from './tokenizer.js';
import * as types from './types.js';
import globalFetch from 'node-fetch';
import { fetchSSE } from './fetch-sse.js';
var CHATGPT_MODEL = 'gpt-3.5-turbo-0613';
var USER_LABEL_DEFAULT = 'User';
var ASSISTANT_LABEL_DEFAULT = 'ChatGPT';
var ChatGPTAPI = /** @class */ (function () {
/**
* Creates a new client wrapper around OpenAI's chat completion API, mimicing the official ChatGPT webapp's functionality as closely as possible.
*
* @param apiKey - OpenAI API key (required).
* @param apiOrg - Optional OpenAI API organization (optional).
* @param apiBaseUrl - Optional override for the OpenAI API base URL.
* @param debug - Optional enables logging debugging info to stdout.
* @param completionParams - Param overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
* @param maxModelTokens - Optional override for the maximum number of tokens allowed by the model's context. Defaults to 4096.
* @param maxResponseTokens - Optional override for the minimum number of tokens allowed for the model's response. Defaults to 1000.
* @param messageStore - Optional [Keyv](https://github.com/jaredwray/keyv) store to persist chat messages to. If not provided, messages will be lost when the process exits.
* @param getMessageById - Optional function to retrieve a message by its ID. If not provided, the default implementation will be used (using an in-memory `messageStore`).
* @param upsertMessage - Optional function to insert or update a message. If not provided, the default implementation will be used (using an in-memory `messageStore`).
* @param fetch - Optional override for the `fetch` implementation to use. Defaults to the global `fetch` function.
*/
function ChatGPTAPI(opts) {
var apiKey = opts.apiKey, apiOrg = opts.apiOrg, _a = opts.apiBaseUrl, apiBaseUrl = _a === void 0 ? 'https://api.openai.com/v1' : _a, _b = opts.debug, debug = _b === void 0 ? false : _b, messageStore = opts.messageStore, completionParams = opts.completionParams, systemMessage = opts.systemMessage, _c = opts.maxModelTokens, maxModelTokens = _c === void 0 ? 4000 : _c, _d = opts.maxResponseTokens, maxResponseTokens = _d === void 0 ? 1000 : _d, getMessageById = opts.getMessageById, upsertMessage = opts.upsertMessage, _e = opts.fetch, fetch = _e === void 0 ? globalFetch : _e;
this._apiKey = apiKey;
this._apiOrg = apiOrg;
this._apiBaseUrl = apiBaseUrl;
this._debug = !!debug;
this._fetch = fetch;
this._completionParams = __assign({ model: CHATGPT_MODEL, temperature: 0.8, top_p: 1.0, presence_penalty: 1.0 }, completionParams);
this._systemMessage = systemMessage;
if (this._systemMessage === undefined) {
var currentDate = new Date().toISOString().split('T')[0];
this._systemMessage = "You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible.\nKnowledge cutoff: 2021-09-01\nCurrent date: ".concat(currentDate);
}
this._maxModelTokens = maxModelTokens;
this._maxResponseTokens = maxResponseTokens;
this._getMessageById = getMessageById !== null && getMessageById !== void 0 ? getMessageById : this._defaultGetMessageById;
this._upsertMessage = upsertMessage !== null && upsertMessage !== void 0 ? upsertMessage : this._defaultUpsertMessage;
if (messageStore) {
this._messageStore = messageStore;
}
else {
this._messageStore = new Keyv({
store: new QuickLRU({ maxSize: 10000 })
});
}
if (!this._apiKey) {
throw new Error('OpenAI missing required apiKey');
}
if (!this._fetch) {
throw new Error('Invalid environment; fetch is not defined');
}
if (typeof this._fetch !== 'function') {
throw new Error('Invalid "fetch" is not a function');
}
}
/**
* Sends a message to the OpenAI chat completions endpoint, waits for the response
* to resolve, and returns the response.
*
* If you want your response to have historical context, you must provide a valid `parentMessageId`.
*
* If you want to receive a stream of partial responses, use `opts.onProgress`.
*
* Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the OpenAI chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
*
* @param message - The prompt message to send
* @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
* @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)
* @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
* @param opts.systemMessage - Optional override for the chat "system message" which acts as instructions to the model (defaults to the ChatGPT system message)
* @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
* @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
* @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
* @param completionParams - Optional overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
*
* @returns The response from ChatGPT
*/
ChatGPTAPI.prototype.sendMessage = function (text, opts, role) {
if (opts === void 0) { opts = {}; }
if (role === void 0) { role = 'user'; }
return __awaiter(this, void 0, void 0, function () {
var parentMessageId, _a, messageId, timeoutMs, onProgress, _b, stream, completionParams, conversationId, abortSignal, abortController, message, latestQuestion, _c, messages, maxTokens, numTokens, result, responseP;
var _this = this;
return __generator(this, function (_d) {
switch (_d.label) {
case 0:
parentMessageId = opts.parentMessageId, _a = opts.messageId, messageId = _a === void 0 ? uuidv4() : _a, timeoutMs = opts.timeoutMs, onProgress = opts.onProgress, _b = opts.stream, stream = _b === void 0 ? onProgress ? true : false : _b, completionParams = opts.completionParams, conversationId = opts.conversationId;
abortSignal = opts.abortSignal;
abortController = null;
if (timeoutMs && !abortSignal) {
abortController = new AbortController();
abortSignal = abortController.signal;
}
message = {
role: role,
id: messageId,
conversationId: conversationId,
parentMessageId: parentMessageId,
text: text,
name: opts.name
};
latestQuestion = message;
return [4 /*yield*/, this._buildMessages(text, role, opts, completionParams)];
case 1:
_c = _d.sent(), messages = _c.messages, maxTokens = _c.maxTokens, numTokens = _c.numTokens;
console.log("maxTokens: ".concat(maxTokens, ", numTokens: ").concat(numTokens));
result = {
role: 'assistant',
id: uuidv4(),
conversationId: conversationId,
parentMessageId: messageId,
text: '',
functionCall: null
};
responseP = new Promise(function (resolve, reject) { return __awaiter(_this, void 0, void 0, function () {
var url, headers, body, res, reason, msg, error, response, message_1, res_1, err_1;
var _a, _b;
return __generator(this, function (_c) {
switch (_c.label) {
case 0:
url = "".concat(this._apiBaseUrl, "/chat/completions");
headers = {
'Content-Type': 'application/json',
Authorization: "Bearer ".concat(this._apiKey)
};
body = __assign(__assign(__assign({ max_tokens: maxTokens }, this._completionParams), completionParams), { messages: messages, stream: stream });
if (this._debug) {
console.log(JSON.stringify(body));
}
// Support multiple organizations
// See https://platform.openai.com/docs/api-reference/authentication
if (this._apiOrg) {
headers['OpenAI-Organization'] = this._apiOrg;
}
if (this._debug) {
console.log("sendMessage (".concat(numTokens, " tokens)"), body);
}
if (!stream) return [3 /*break*/, 1];
fetchSSE(url, {
method: 'POST',
headers: headers,
body: JSON.stringify(body),
signal: abortSignal,
onMessage: function (data) {
var _a;
if (data === '[DONE]') {
result.text = result.text.trim();
return resolve(result);
}
try {
var response = JSON.parse(data);
if (response.id) {
result.id = response.id;
}
if ((_a = response.choices) === null || _a === void 0 ? void 0 : _a.length) {
var delta = response.choices[0].delta;
if (delta.function_call) {
if (delta.function_call.name) {
result.functionCall = {
name: delta.function_call.name,
arguments: delta.function_call.arguments
};
}
else {
result.functionCall.arguments = (result.functionCall.arguments || '') + delta.function_call.arguments;
}
}
else {
result.delta = delta.content;
if (delta === null || delta === void 0 ? void 0 : delta.content)
result.text += delta.content;
}
if (delta.role) {
result.role = delta.role;
}
result.detail = response;
onProgress === null || onProgress === void 0 ? void 0 : onProgress(result);
}
}
catch (err) {
console.warn('OpenAI stream SEE event unexpected error', err);
return reject(err);
}
}
}, this._fetch).catch(reject);
return [3 /*break*/, 7];
case 1:
_c.trys.push([1, 6, , 7]);
return [4 /*yield*/, this._fetch(url, {
method: 'POST',
headers: headers,
body: JSON.stringify(body),
signal: abortSignal
})];
case 2:
res = _c.sent();
if (!!res.ok) return [3 /*break*/, 4];
return [4 /*yield*/, res.text()];
case 3:
reason = _c.sent();
msg = "OpenAI error ".concat(res.status || res.statusText, ": ").concat(reason);
error = new types.ChatGPTError(msg, { cause: res });
error.statusCode = res.status;
error.statusText = res.statusText;
return [2 /*return*/, reject(error)];
case 4: return [4 /*yield*/, res.json()];
case 5:
response = _c.sent();
if (this._debug) {
console.log(response);
}
if (response === null || response === void 0 ? void 0 : response.id) {
result.id = response.id;
}
if ((_a = response === null || response === void 0 ? void 0 : response.choices) === null || _a === void 0 ? void 0 : _a.length) {
message_1 = response.choices[0].message;
if (message_1.content) {
result.text = message_1.content;
}
else if (message_1.function_call) {
result.functionCall = message_1.function_call;
}
if (message_1.role) {
result.role = message_1.role;
}
}
else {
res_1 = response;
console.error(res_1);
return [2 /*return*/, reject(new Error("OpenAI error: ".concat(((_b = res_1 === null || res_1 === void 0 ? void 0 : res_1.detail) === null || _b === void 0 ? void 0 : _b.message) || (res_1 === null || res_1 === void 0 ? void 0 : res_1.detail) || 'unknown')))];
}
result.detail = response;
return [2 /*return*/, resolve(result)];
case 6:
err_1 = _c.sent();
return [2 /*return*/, reject(err_1)];
case 7: return [2 /*return*/];
}
});
}); }).then(function (message) { return __awaiter(_this, void 0, void 0, function () {
var promptTokens, completionTokens, err_2;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
if (!(message.detail && !message.detail.usage)) return [3 /*break*/, 4];
_a.label = 1;
case 1:
_a.trys.push([1, 3, , 4]);
promptTokens = numTokens;
return [4 /*yield*/, this._getTokenCount(message.text)];
case 2:
completionTokens = _a.sent();
message.detail.usage = {
prompt_tokens: promptTokens,
completion_tokens: completionTokens,
total_tokens: promptTokens + completionTokens,
estimated: true
};
return [3 /*break*/, 4];
case 3:
err_2 = _a.sent();
return [3 /*break*/, 4];
case 4: return [2 /*return*/, Promise.all([
this._upsertMessage(latestQuestion),
this._upsertMessage(message)
]).then(function () { return message; })];
}
});
}); });
if (timeoutMs) {
if (abortController) {
// This will be called when a timeout occurs in order for us to forcibly
// ensure that the underlying HTTP request is aborted.
;
responseP.cancel = function () {
abortController.abort();
};
}
return [2 /*return*/, pTimeout(responseP, {
milliseconds: timeoutMs,
message: 'OpenAI timed out waiting for response'
})];
}
else {
return [2 /*return*/, responseP];
}
return [2 /*return*/];
}
});
});
};
Object.defineProperty(ChatGPTAPI.prototype, "apiKey", {
get: function () {
return this._apiKey;
},
set: function (apiKey) {
this._apiKey = apiKey;
},
enumerable: false,
configurable: true
});
Object.defineProperty(ChatGPTAPI.prototype, "apiOrg", {
get: function () {
return this._apiOrg;
},
set: function (apiOrg) {
this._apiOrg = apiOrg;
},
enumerable: false,
configurable: true
});
ChatGPTAPI.prototype._buildMessages = function (text, role, opts, completionParams) {
var _a, _b;
return __awaiter(this, void 0, void 0, function () {
var _c, systemMessage, parentMessageId, userLabel, assistantLabel, maxNumTokens, messages, systemMessageOffset, nextMessages, functionToken, numTokens, _i, _d, func, _e, _f, _g, _h, key, _j, property, _k, _l, field, _m, _o, _p, _q, _r, enumElement, _s, _t, _u, string, _v, prompt_1, nextNumTokensEstimate, _w, _x, m1, _y, isValidPrompt, parentMessage, parentMessageRole, maxTokens;
return __generator(this, function (_z) {
switch (_z.label) {
case 0:
_c = opts.systemMessage, systemMessage = _c === void 0 ? this._systemMessage : _c;
parentMessageId = opts.parentMessageId;
userLabel = USER_LABEL_DEFAULT;
assistantLabel = ASSISTANT_LABEL_DEFAULT;
maxNumTokens = this._maxModelTokens - this._maxResponseTokens;
messages = [];
if (systemMessage) {
messages.push({
role: 'system',
content: systemMessage
});
}
systemMessageOffset = messages.length;
nextMessages = text
? messages.concat([
{
role: role,
content: text,
name: opts.name
}
])
: messages;
functionToken = 0;
numTokens = functionToken;
if (!completionParams.functions) return [3 /*break*/, 23];
_i = 0, _d = completionParams.functions;
_z.label = 1;
case 1:
if (!(_i < _d.length)) return [3 /*break*/, 23];
func = _d[_i];
_e = functionToken;
return [4 /*yield*/, this._getTokenCount(func === null || func === void 0 ? void 0 : func.name)];
case 2:
functionToken = _e + _z.sent();
_f = functionToken;
return [4 /*yield*/, this._getTokenCount(func === null || func === void 0 ? void 0 : func.description)];
case 3:
functionToken = _f + _z.sent();
if (!((_a = func === null || func === void 0 ? void 0 : func.parameters) === null || _a === void 0 ? void 0 : _a.properties)) return [3 /*break*/, 18];
_g = 0, _h = Object.keys(func.parameters.properties);
_z.label = 4;
case 4:
if (!(_g < _h.length)) return [3 /*break*/, 18];
key = _h[_g];
_j = functionToken;
return [4 /*yield*/, this._getTokenCount(key)];
case 5:
functionToken = _j + _z.sent();
property = func.parameters.properties[key];
_k = 0, _l = Object.keys(property);
_z.label = 6;
case 6:
if (!(_k < _l.length)) return [3 /*break*/, 17];
field = _l[_k];
_m = field;
switch (_m) {
case 'type': return [3 /*break*/, 7];
case 'description': return [3 /*break*/, 9];
case 'enum': return [3 /*break*/, 11];
}
return [3 /*break*/, 16];
case 7:
functionToken += 2;
_o = functionToken;
return [4 /*yield*/, this._getTokenCount(property === null || property === void 0 ? void 0 : property.type)];
case 8:
functionToken = _o + _z.sent();
return [3 /*break*/, 16];
case 9:
functionToken += 2;
_p = functionToken;
return [4 /*yield*/, this._getTokenCount(property === null || property === void 0 ? void 0 : property.description)];
case 10:
functionToken = _p + _z.sent();
return [3 /*break*/, 16];
case 11:
functionToken -= 3;
_q = 0, _r = property === null || property === void 0 ? void 0 : property.enum;
_z.label = 12;
case 12:
if (!(_q < _r.length)) return [3 /*break*/, 15];
enumElement = _r[_q];
functionToken += 3;
_s = functionToken;
return [4 /*yield*/, this._getTokenCount(enumElement)];
case 13:
functionToken = _s + _z.sent();
_z.label = 14;
case 14:
_q++;
return [3 /*break*/, 12];
case 15: return [3 /*break*/, 16];
case 16:
_k++;
return [3 /*break*/, 6];
case 17:
_g++;
return [3 /*break*/, 4];
case 18:
if (!((_b = func === null || func === void 0 ? void 0 : func.parameters) === null || _b === void 0 ? void 0 : _b.required)) return [3 /*break*/, 22];
_t = 0, _u = func.parameters.required;
_z.label = 19;
case 19:
if (!(_t < _u.length)) return [3 /*break*/, 22];
string = _u[_t];
functionToken += 2;
_v = functionToken;
return [4 /*yield*/, this._getTokenCount(string)];
case 20:
functionToken = _v + _z.sent();
_z.label = 21;
case 21:
_t++;
return [3 /*break*/, 19];
case 22:
_i++;
return [3 /*break*/, 1];
case 23:
prompt_1 = nextMessages
.reduce(function (prompt, message) {
switch (message.role) {
case 'system':
return prompt.concat(["Instructions:\n".concat(message.content)]);
case 'user':
return prompt.concat(["".concat(userLabel, ":\n").concat(message.content)]);
case 'function':
// leave befind
return prompt;
default:
return message.content ? prompt.concat(["".concat(assistantLabel, ":\n").concat(message.content)]) : prompt;
}
}, [])
.join('\n\n');
return [4 /*yield*/, this._getTokenCount(prompt_1)];
case 24:
nextNumTokensEstimate = _z.sent();
_w = 0, _x = nextMessages
.filter(function (m) { return m.function_call; });
_z.label = 25;
case 25:
if (!(_w < _x.length)) return [3 /*break*/, 28];
m1 = _x[_w];
_y = nextNumTokensEstimate;
return [4 /*yield*/, this._getTokenCount(JSON.stringify(m1.function_call) || '')];
case 26:
nextNumTokensEstimate = _y + _z.sent();
_z.label = 27;
case 27:
_w++;
return [3 /*break*/, 25];
case 28:
isValidPrompt = nextNumTokensEstimate + functionToken <= maxNumTokens;
if (prompt_1 && !isValidPrompt) {
return [3 /*break*/, 31];
}
messages = nextMessages;
numTokens = nextNumTokensEstimate + functionToken;
if (!isValidPrompt) {
return [3 /*break*/, 31];
}
if (!parentMessageId) {
return [3 /*break*/, 31];
}
return [4 /*yield*/, this._getMessageById(parentMessageId)];
case 29:
parentMessage = _z.sent();
if (!parentMessage) {
return [3 /*break*/, 31];
}
parentMessageRole = parentMessage.role || 'user';
nextMessages = nextMessages.slice(0, systemMessageOffset).concat(__spreadArray([
{
role: parentMessageRole,
content: parentMessage.text,
name: parentMessage.name,
function_call: parentMessage.functionCall ? parentMessage.functionCall : undefined
}
], nextMessages.slice(systemMessageOffset), true));
parentMessageId = parentMessage.parentMessageId;
_z.label = 30;
case 30:
if (true) return [3 /*break*/, 23];
_z.label = 31;
case 31:
maxTokens = Math.max(1, Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens));
return [2 /*return*/, { messages: messages, maxTokens: maxTokens, numTokens: numTokens }];
}
});
});
};
ChatGPTAPI.prototype._getTokenCount = function (text) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
if (!text) {
return [2 /*return*/, 0];
}
// TODO: use a better fix in the tokenizer
text = text.replace(/<\|endoftext\|>/g, '');
return [2 /*return*/, tokenizer.encode(text).length];
});
});
};
ChatGPTAPI.prototype._defaultGetMessageById = function (id) {
return __awaiter(this, void 0, void 0, function () {
var res;
return __generator(this, function (_a) {
switch (_a.label) {
case 0: return [4 /*yield*/, this._messageStore.get(id)];
case 1:
res = _a.sent();
return [2 /*return*/, res];
}
});
});
};
ChatGPTAPI.prototype._defaultUpsertMessage = function (message) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
switch (_a.label) {
case 0: return [4 /*yield*/, this._messageStore.set(message.id, message)];
case 1:
_a.sent();
return [2 /*return*/];
}
});
});
};
return ChatGPTAPI;
}());
export { ChatGPTAPI };

551
utils/openai/chatgpt-api.ts Normal file
View file

@ -0,0 +1,551 @@
import Keyv from 'keyv'
import pTimeout from 'p-timeout'
import QuickLRU from 'quick-lru'
import { v4 as uuidv4 } from 'uuid'
import * as tokenizer from './tokenizer'
import * as types from './types'
import globalFetch from 'node-fetch'
import { fetchSSE } from './fetch-sse'
import {openai, Role} from "./types";
const CHATGPT_MODEL = 'gpt-3.5-turbo-0613'
const USER_LABEL_DEFAULT = 'User'
const ASSISTANT_LABEL_DEFAULT = 'ChatGPT'
export class ChatGPTAPI {
  protected _apiKey: string
  protected _apiBaseUrl: string
  protected _apiOrg?: string
  protected _debug: boolean
  protected _systemMessage: string
  protected _completionParams: Omit<
    types.openai.CreateChatCompletionRequest,
    'messages' | 'n'
  >
  protected _maxModelTokens: number
  protected _maxResponseTokens: number
  protected _fetch: types.FetchFn
  protected _getMessageById: types.GetMessageByIdFunction
  protected _upsertMessage: types.UpsertMessageFunction
  protected _messageStore: Keyv<types.ChatMessage>
  /**
   * Creates a new client wrapper around OpenAI's chat completion API, mimicing the official ChatGPT webapp's functionality as closely as possible.
   *
   * @param apiKey - OpenAI API key (required).
   * @param apiOrg - Optional OpenAI API organization (optional).
   * @param apiBaseUrl - Optional override for the OpenAI API base URL.
   * @param debug - Optional enables logging debugging info to stdout.
   * @param completionParams - Param overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
   * @param maxModelTokens - Optional override for the maximum number of tokens allowed by the model's context. Defaults to 4096.
   * @param maxResponseTokens - Optional override for the minimum number of tokens allowed for the model's response. Defaults to 1000.
   * @param messageStore - Optional [Keyv](https://github.com/jaredwray/keyv) store to persist chat messages to. If not provided, messages will be lost when the process exits.
   * @param getMessageById - Optional function to retrieve a message by its ID. If not provided, the default implementation will be used (using an in-memory `messageStore`).
   * @param upsertMessage - Optional function to insert or update a message. If not provided, the default implementation will be used (using an in-memory `messageStore`).
   * @param fetch - Optional override for the `fetch` implementation to use. Defaults to the global `fetch` function.
   */
  constructor(opts: types.ChatGPTAPIOptions) {
    const {
      apiKey,
      apiOrg,
      apiBaseUrl = 'https://api.openai.com/v1',
      debug = false,
      messageStore,
      completionParams,
      systemMessage,
      maxModelTokens = 4000,
      maxResponseTokens = 1000,
      getMessageById,
      upsertMessage,
      fetch = globalFetch
    } = opts
    this._apiKey = apiKey
    this._apiOrg = apiOrg
    this._apiBaseUrl = apiBaseUrl
    this._debug = !!debug
    this._fetch = fetch
    // Caller-supplied completionParams override these baseline defaults.
    this._completionParams = {
      model: CHATGPT_MODEL,
      temperature: 0.8,
      top_p: 1.0,
      presence_penalty: 1.0,
      ...completionParams
    }
    this._systemMessage = systemMessage
    if (this._systemMessage === undefined) {
      const currentDate = new Date().toISOString().split('T')[0]
      this._systemMessage = `You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible.\nKnowledge cutoff: 2021-09-01\nCurrent date: ${currentDate}`
    }
    this._maxModelTokens = maxModelTokens
    this._maxResponseTokens = maxResponseTokens
    // Fall back to the in-memory store-backed implementations when the caller
    // does not inject custom persistence hooks.
    this._getMessageById = getMessageById ?? this._defaultGetMessageById
    this._upsertMessage = upsertMessage ?? this._defaultUpsertMessage
    if (messageStore) {
      this._messageStore = messageStore
    } else {
      // Bounded LRU so unbounded chat history cannot leak memory.
      this._messageStore = new Keyv<types.ChatMessage, any>({
        store: new QuickLRU<string, types.ChatMessage>({ maxSize: 10000 })
      })
    }
    if (!this._apiKey) {
      throw new Error('OpenAI missing required apiKey')
    }
    if (!this._fetch) {
      throw new Error('Invalid environment; fetch is not defined')
    }
    if (typeof this._fetch !== 'function') {
      throw new Error('Invalid "fetch" is not a function')
    }
  }
  /**
   * Sends a message to the OpenAI chat completions endpoint, waits for the response
   * to resolve, and returns the response.
   *
   * If you want your response to have historical context, you must provide a valid `parentMessageId`.
   *
   * If you want to receive a stream of partial responses, use `opts.onProgress`.
   *
   * Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the OpenAI chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
   *
   * @param message - The prompt message to send
   * @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
   * @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)
   * @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
   * @param opts.systemMessage - Optional override for the chat "system message" which acts as instructions to the model (defaults to the ChatGPT system message)
   * @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
   * @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
   * @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
   * @param completionParams - Optional overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
   *
   * @returns The response from ChatGPT
   */
  async sendMessage(
    text: string,
    opts: types.SendMessageOptions = {},
    role: Role = 'user',
  ): Promise<types.ChatMessage> {
    const {
      parentMessageId,
      messageId = uuidv4(),
      timeoutMs,
      onProgress,
      stream = onProgress ? true : false,
      completionParams,
      conversationId
    } = opts
    let { abortSignal } = opts
    let abortController: AbortController = null
    // A timeout without a caller-supplied signal needs its own controller so
    // the underlying HTTP request can be aborted on expiry.
    if (timeoutMs && !abortSignal) {
      abortController = new AbortController()
      abortSignal = abortController.signal
    }
    const message: types.ChatMessage = {
      role,
      id: messageId,
      conversationId,
      parentMessageId,
      text,
      name: opts.name
    }
    const latestQuestion = message
    const { messages, maxTokens, numTokens } = await this._buildMessages(
      text,
      role,
      opts,
      completionParams
    )
    console.log(`maxTokens: ${maxTokens}, numTokens: ${numTokens}`)
    // Accumulator for the assistant reply. `text` MUST start as '' (not
    // undefined): the streaming branch appends with `+=`, which would
    // otherwise produce a reply prefixed with the literal "undefined", and
    // the '[DONE]' handler calls `.trim()` on it. This matches the compiled
    // output, which also initializes `text` to ''.
    const result: types.ChatMessage = {
      role: 'assistant',
      id: uuidv4(),
      conversationId,
      parentMessageId: messageId,
      text: '',
      functionCall: undefined
    }
    const responseP = new Promise<types.ChatMessage>(
      async (resolve, reject) => {
        const url = `${this._apiBaseUrl}/chat/completions`
        const headers = {
          'Content-Type': 'application/json',
          Authorization: `Bearer ${this._apiKey}`
        }
        // Per-call completionParams override the instance-level ones.
        const body = {
          max_tokens: maxTokens,
          ...this._completionParams,
          ...completionParams,
          messages,
          stream
        }
        if (this._debug) {
          console.log(JSON.stringify(body))
        }
        // Support multiple organizations
        // See https://platform.openai.com/docs/api-reference/authentication
        if (this._apiOrg) {
          headers['OpenAI-Organization'] = this._apiOrg
        }
        if (this._debug) {
          console.log(`sendMessage (${numTokens} tokens)`, body)
        }
        if (stream) {
          // Streaming branch: consume server-sent events and fold each delta
          // chunk into `result`, reporting progress via onProgress.
          fetchSSE(
            url,
            {
              method: 'POST',
              headers,
              body: JSON.stringify(body),
              signal: abortSignal,
              onMessage: (data: string) => {
                if (data === '[DONE]') {
                  result.text = result.text.trim()
                  return resolve(result)
                }
                try {
                  const response: types.openai.CreateChatCompletionDeltaResponse =
                    JSON.parse(data)
                  if (response.id) {
                    result.id = response.id
                  }
                  if (response.choices?.length) {
                    const delta = response.choices[0].delta
                    if (delta.function_call) {
                      // The first function_call chunk carries the name; later
                      // chunks only append argument fragments.
                      if (delta.function_call.name) {
                        result.functionCall = {
                          name: delta.function_call.name,
                          arguments: delta.function_call.arguments
                        }
                      } else {
                        result.functionCall.arguments = (result.functionCall.arguments || '') + delta.function_call.arguments
                      }
                    } else {
                      result.delta = delta.content
                      if (delta?.content) result.text += delta.content
                    }
                    if (delta.role) {
                      result.role = delta.role
                    }
                    result.detail = response
                    onProgress?.(result)
                  }
                } catch (err) {
                  console.warn('OpenAI stream SEE event unexpected error', err)
                  return reject(err)
                }
              }
            },
            this._fetch
          ).catch(reject)
        } else {
          // Non-streaming branch: single POST, parse the full completion.
          try {
            const res = await this._fetch(url, {
              method: 'POST',
              headers,
              body: JSON.stringify(body),
              signal: abortSignal
            })
            if (!res.ok) {
              const reason = await res.text()
              const msg = `OpenAI error ${
                res.status || res.statusText
              }: ${reason}`
              const error = new types.ChatGPTError(msg, { cause: res })
              error.statusCode = res.status
              error.statusText = res.statusText
              return reject(error)
            }
            const response: types.openai.CreateChatCompletionResponse =
              await res.json()
            if (this._debug) {
              console.log(response)
            }
            if (response?.id) {
              result.id = response.id
            }
            if (response?.choices?.length) {
              const message = response.choices[0].message
              if (message.content) {
                result.text = message.content
              } else if (message.function_call) {
                result.functionCall = message.function_call
              }
              if (message.role) {
                result.role = message.role
              }
            } else {
              const res = response as any
              console.error(res)
              return reject(
                new Error(
                  `OpenAI error: ${
                    res?.detail?.message || res?.detail || 'unknown'
                  }`
                )
              )
            }
            result.detail = response
            return resolve(result)
          } catch (err) {
            return reject(err)
          }
        }
      }
    ).then(async (message) => {
      // If the API did not report token usage, estimate it locally and mark
      // the estimate so consumers can tell it apart from server-side usage.
      if (message.detail && !message.detail.usage) {
        try {
          const promptTokens = numTokens
          const completionTokens = await this._getTokenCount(message.text)
          message.detail.usage = {
            prompt_tokens: promptTokens,
            completion_tokens: completionTokens,
            total_tokens: promptTokens + completionTokens,
            estimated: true
          }
        } catch (err) {
          // TODO: this should really never happen, but if it does,
          // we should handle notify the user gracefully
        }
      }
      // Persist both sides of the exchange before handing the reply back.
      return Promise.all([
        this._upsertMessage(latestQuestion),
        this._upsertMessage(message)
      ]).then(() => message)
    })
    if (timeoutMs) {
      if (abortController) {
        // This will be called when a timeout occurs in order for us to forcibly
        // ensure that the underlying HTTP request is aborted.
        ;(responseP as any).cancel = () => {
          abortController.abort()
        }
      }
      return pTimeout(responseP, {
        milliseconds: timeoutMs,
        message: 'OpenAI timed out waiting for response'
      })
    } else {
      return responseP
    }
  }
  /** The OpenAI API key currently used for authentication. */
  get apiKey(): string {
    return this._apiKey
  }
  set apiKey(apiKey: string) {
    this._apiKey = apiKey
  }
  /** The optional OpenAI organization id sent with each request. */
  get apiOrg(): string {
    return this._apiOrg
  }
  set apiOrg(apiOrg: string) {
    this._apiOrg = apiOrg
  }
  /**
   * Assembles the chat message array for a request, walking the stored
   * conversation backwards via `parentMessageId` until the prompt-token budget
   * (maxModelTokens - maxResponseTokens, including an estimate for declared
   * functions) would be exceeded.
   *
   * @returns `{ messages, maxTokens, numTokens }` where `maxTokens` is the
   * response budget to pass to the API and `numTokens` the estimated prompt size.
   */
  protected async _buildMessages(text: string, role: Role, opts: types.SendMessageOptions, completionParams: Partial<
    Omit<openai.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
  >) {
    const { systemMessage = this._systemMessage } = opts
    let { parentMessageId } = opts
    const userLabel = USER_LABEL_DEFAULT
    const assistantLabel = ASSISTANT_LABEL_DEFAULT
    const maxNumTokens = this._maxModelTokens - this._maxResponseTokens
    let messages: types.openai.ChatCompletionRequestMessage[] = []
    if (systemMessage) {
      messages.push({
        role: 'system',
        content: systemMessage
      })
    }
    const systemMessageOffset = messages.length
    let nextMessages = text
      ? messages.concat([
          {
            role,
            content: text,
            name: opts.name
          }
        ])
      : messages
    let functionToken = 0
    let numTokens = functionToken
    // Estimate the token cost of declared function schemas (names,
    // descriptions, property fields, enums, required lists). The +2/-3/+3
    // adjustments mirror OpenAI's schema serialization overhead —
    // TODO confirm against current API behavior.
    if (completionParams.functions) {
      for (const func of completionParams.functions) {
        functionToken += await this._getTokenCount(func?.name)
        functionToken += await this._getTokenCount(func?.description)
        if (func?.parameters?.properties) {
          for (let key of Object.keys(func.parameters.properties)) {
            functionToken += await this._getTokenCount(key)
            let property = func.parameters.properties[key]
            for (let field of Object.keys(property)) {
              switch (field) {
                case 'type': {
                  functionToken += 2
                  functionToken += await this._getTokenCount(property?.type)
                  break
                }
                case 'description': {
                  functionToken += 2
                  functionToken += await this._getTokenCount(property?.description)
                  break
                }
                case 'enum': {
                  functionToken -= 3
                  for (let enumElement of property?.enum) {
                    functionToken += 3
                    functionToken += await this._getTokenCount(enumElement)
                  }
                  break
                }
              }
            }
          }
        }
        if (func?.parameters?.required) {
          for (let string of func.parameters.required) {
            functionToken += 2
            functionToken += await this._getTokenCount(string)
          }
        }
      }
    }
    do {
      const prompt = nextMessages
        .reduce((prompt, message) => {
          switch (message.role) {
            case 'system':
              return prompt.concat([`Instructions:\n${message.content}`])
            case 'user':
              return prompt.concat([`${userLabel}:\n${message.content}`])
            case 'function':
              // function-role messages are left out of the plain-text estimate
              return prompt
            default:
              return message.content ? prompt.concat([`${assistantLabel}:\n${message.content}`]) : prompt
          }
        }, [] as string[])
        .join('\n\n')
      let nextNumTokensEstimate = await this._getTokenCount(prompt)
      for (const m1 of nextMessages
        .filter(m => m.function_call)) {
        nextNumTokensEstimate += await this._getTokenCount(JSON.stringify(m1.function_call) || '')
      }
      const isValidPrompt = nextNumTokensEstimate + functionToken <= maxNumTokens
      if (prompt && !isValidPrompt) {
        break
      }
      // Candidate prompt fits: commit it before trying to add more history.
      messages = nextMessages
      numTokens = nextNumTokensEstimate + functionToken
      if (!isValidPrompt) {
        break
      }
      if (!parentMessageId) {
        break
      }
      const parentMessage = await this._getMessageById(parentMessageId)
      if (!parentMessage) {
        break
      }
      const parentMessageRole = parentMessage.role || 'user'
      // Insert the ancestor right after the system message so ordering stays
      // oldest-first.
      nextMessages = nextMessages.slice(0, systemMessageOffset).concat([
        {
          role: parentMessageRole,
          content: parentMessage.text,
          name: parentMessage.name,
          function_call: parentMessage.functionCall ? parentMessage.functionCall : undefined
        },
        ...nextMessages.slice(systemMessageOffset)
      ])
      parentMessageId = parentMessage.parentMessageId
    } while (true)
    // Use up to 4096 tokens (prompt + response), but try to leave 1000 tokens
    // for the response.
    const maxTokens = Math.max(
      1,
      Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens)
    )
    return { messages, maxTokens, numTokens }
  }
  /** Counts BPE tokens in `text`; null/undefined/empty input counts as zero. */
  protected async _getTokenCount(text: string) {
    if (!text) {
      return 0
    }
    // TODO: use a better fix in the tokenizer
    text = text.replace(/<\|endoftext\|>/g, '')
    return tokenizer.encode(text).length
  }
  /** Default lookup hook: reads a stored chat message from the Keyv store. */
  protected async _defaultGetMessageById(
    id: string
  ): Promise<types.ChatMessage> {
    const res = await this._messageStore.get(id)
    return res
  }
  /** Default persistence hook: writes the message to the Keyv store by id. */
  protected async _defaultUpsertMessage(
    message: types.ChatMessage
  ): Promise<void> {
    await this._messageStore.set(message.id, message)
  }
}

170
utils/openai/fetch-sse.js Normal file
View file

@ -0,0 +1,170 @@
/**
 * Vendored tslib `__awaiter` helper: runs the transpiled coroutine produced by
 * `generator`, adopting each yielded value into a Promise (`adopt`) and
 * resuming the coroutine with the settled result. The overall async result is
 * returned as a Promise (using the supplied constructor `P` when given).
 */
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
/**
 * Vendored tslib `__generator` helper: interprets the opcode state machine
 * that tsc emits for down-leveled generator/async bodies. `_.label` is the
 * resume point; the opcode in `op[0]` selects the action (per the inline
 * markers used elsewhere in this file: 2 = return, 3 = break/jump, 4 = yield).
 * `_.trys` tracks active try/catch/finally regions for exception dispatch.
 */
var __generator = (this && this.__generator) || function (thisArg, body) {
    var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
    return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
    function verb(n) { return function (v) { return step([n, v]); }; }
    function step(op) {
        if (f) throw new TypeError("Generator is already executing.");
        while (g && (g = 0, op[0] && (_ = 0)), _) try {
            if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
            if (y = 0, t) op = [op[0] & 2, t.value];
            switch (op[0]) {
                case 0: case 1: t = op; break;
                case 4: _.label++; return { value: op[1], done: false };
                case 5: _.label++; y = op[1]; op = [0]; continue;
                case 7: op = _.ops.pop(); _.trys.pop(); continue;
                default:
                    if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
                    if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
                    if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
                    if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
                    if (t[2]) _.ops.pop();
                    _.trys.pop(); continue;
            }
            op = body.call(thisArg, _);
        } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
        if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
    }
};
// TypeScript-emitted helper for object rest destructuring ({ a, ...rest }):
// copies every own enumerable property of `source` -- string keys and
// symbol keys alike -- that is not listed in the exclusion array into a
// fresh object, and returns that object.
var __rest = (this && this.__rest) || function (source, excluded) {
    var rest = {};
    for (var key in source) {
        if (Object.prototype.hasOwnProperty.call(source, key) && excluded.indexOf(key) < 0) {
            rest[key] = source[key];
        }
    }
    if (source != null && typeof Object.getOwnPropertySymbols === "function") {
        var symbols = Object.getOwnPropertySymbols(source);
        for (var i = 0; i < symbols.length; i++) {
            var sym = symbols[i];
            if (excluded.indexOf(sym) < 0 && Object.prototype.propertyIsEnumerable.call(source, sym)) {
                rest[sym] = source[sym];
            }
        }
    }
    return rest;
};
// TypeScript-emitted helper (tslib __asyncValues): adapts an object into
// an async iterator for downleveled `for await...of`. Falls back to a
// sync iterator wrapped so each result is delivered via a Promise.
// Auto-generated by tsc -- do not edit by hand.
var __asyncValues = (this && this.__asyncValues) || function (o) {
    if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
    var m = o[Symbol.asyncIterator], i;
    return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
    function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
    function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
};
import { createParser } from 'eventsource-parser';
import * as types from './types.js';
import fetch from 'node-fetch';
import { streamAsyncIterable } from './stream-async-iterable.js';
// Compiled (tsc, downleveled) output of ./fetch-sse.ts. Prefer editing the
// TypeScript source and keep the two files in sync.
// Performs a fetch and consumes the response as a server-sent-event stream,
// invoking options.onMessage with the data payload of every parsed event.
export function fetchSSE(url, options, fetchFn) {
    var _a, e_1, _b, _c;
    if (fetchFn === void 0) { fetchFn = fetch; }
    return __awaiter(this, void 0, void 0, function () {
        var onMessage, onError, fetchOptions, res, reason, err_1, msg, error, parser, feed, body_1, _d, _e, _f, chunk, str, e_1_1;
        return __generator(this, function (_g) {
            switch (_g.label) {
                case 0:
                    // Split the event callbacks from the raw fetch options.
                    onMessage = options.onMessage, onError = options.onError, fetchOptions = __rest(options, ["onMessage", "onError"]);
                    return [4 /*yield*/, fetchFn(url, fetchOptions)];
                case 1:
                    res = _g.sent();
                    if (!!res.ok) return [3 /*break*/, 6];
                    // Cases 2-5: non-2xx response -> read the body for a reason
                    // (falling back to statusText) and throw a ChatGPTError.
                    reason = void 0;
                    _g.label = 2;
                case 2:
                    _g.trys.push([2, 4, , 5]);
                    return [4 /*yield*/, res.text()];
                case 3:
                    reason = _g.sent();
                    return [3 /*break*/, 5];
                case 4:
                    err_1 = _g.sent();
                    reason = res.statusText;
                    return [3 /*break*/, 5];
                case 5:
                    msg = "ChatGPT error ".concat(res.status, ": ").concat(reason);
                    error = new types.ChatGPTError(msg, { cause: res });
                    error.statusCode = res.status;
                    error.statusText = res.statusText;
                    throw error;
                case 6:
                    // SSE parser: forward each complete event's data to onMessage.
                    parser = createParser(function (event) {
                        if (event.type === 'event') {
                            onMessage(event.data);
                        }
                    });
                    // feed(): intercept JSON error bodies of type
                    // 'invalid_request_error' before handing chunks to the parser.
                    feed = function (chunk) {
                        var _a;
                        var response = null;
                        try {
                            response = JSON.parse(chunk);
                        }
                        catch (_b) {
                            // ignore
                        }
                        if (((_a = response === null || response === void 0 ? void 0 : response.detail) === null || _a === void 0 ? void 0 : _a.type) === 'invalid_request_error') {
                            var msg = "ChatGPT error ".concat(response.detail.message, ": ").concat(response.detail.code, " (").concat(response.detail.type, ")");
                            var error = new types.ChatGPTError(msg, { cause: response });
                            error.statusCode = response.detail.code;
                            error.statusText = response.detail.message;
                            if (onError) {
                                onError(error);
                            }
                            else {
                                console.error(error);
                            }
                            // don't feed to the event parser
                            return;
                        }
                        parser.feed(chunk);
                    };
                    // No getReader => node-fetch style Node.js stream: consume it
                    // via 'readable' events instead of the WHATWG reader API.
                    if (!!res.body.getReader) return [3 /*break*/, 7];
                    body_1 = res.body;
                    if (!body_1.on || !body_1.read) {
                        throw new types.ChatGPTError('unsupported "fetch" implementation');
                    }
                    body_1.on('readable', function () {
                        var chunk;
                        while (null !== (chunk = body_1.read())) {
                            feed(chunk.toString());
                        }
                    });
                    return [3 /*break*/, 18];
                case 7:
                    // Cases 7-17: WHATWG ReadableStream path -- decode each chunk
                    // and feed it (downleveled for-await-of with iterator cleanup).
                    _g.trys.push([7, 12, 13, 18]);
                    _d = true, _e = __asyncValues(streamAsyncIterable(res.body));
                    _g.label = 8;
                case 8: return [4 /*yield*/, _e.next()];
                case 9:
                    if (!(_f = _g.sent(), _a = _f.done, !_a)) return [3 /*break*/, 11];
                    _c = _f.value;
                    _d = false;
                    chunk = _c;
                    str = new TextDecoder().decode(chunk);
                    feed(str);
                    _g.label = 10;
                case 10:
                    _d = true;
                    return [3 /*break*/, 8];
                case 11: return [3 /*break*/, 18];
                case 12:
                    e_1_1 = _g.sent();
                    e_1 = { error: e_1_1 };
                    return [3 /*break*/, 18];
                case 13:
                    _g.trys.push([13, , 16, 17]);
                    if (!(!_d && !_a && (_b = _e.return))) return [3 /*break*/, 15];
                    return [4 /*yield*/, _b.call(_e)];
                case 14:
                    _g.sent();
                    _g.label = 15;
                case 15: return [3 /*break*/, 17];
                case 16:
                    if (e_1) throw e_1.error;
                    return [7 /*endfinally*/];
                case 17: return [7 /*endfinally*/];
                case 18: return [2 /*return*/];
            }
        });
    });
}

89
utils/openai/fetch-sse.ts Normal file
View file

@ -0,0 +1,89 @@
import { createParser } from 'eventsource-parser'
import * as types from './types'
import { fetch as nodefetch } from 'node-fetch'
import { streamAsyncIterable } from './stream-async-iterable'
/**
 * Issues a fetch request and consumes the response as a server-sent-event
 * (SSE) stream, invoking `options.onMessage` with the raw `data` payload
 * of every parsed event.
 *
 * @param url - endpoint to request
 * @param options - standard fetch options plus `onMessage` (required event
 *   callback) and `onError` (optional callback for in-stream API errors;
 *   when omitted such errors are logged to the console)
 * @param fetch - fetch implementation to use (defaults to node-fetch)
 * @throws ChatGPTError when the response status is non-2xx or the body is
 *   not a supported readable stream
 */
export async function fetchSSE(
  url: string,
  options: Parameters<typeof fetch>[1] & {
    onMessage: (data: string) => void
    onError?: (error: any) => void
  },
  fetch: types.FetchFn = nodefetch
) {
  const { onMessage, onError, ...fetchOptions } = options
  const res = await fetch(url, fetchOptions)
  if (!res.ok) {
    // Surface the response body (or statusText when unreadable) in the error.
    let reason: string
    try {
      reason = await res.text()
    } catch (err) {
      reason = res.statusText
    }
    const msg = `ChatGPT error ${res.status}: ${reason}`
    const error = new types.ChatGPTError(msg, { cause: res })
    error.statusCode = res.status
    error.statusText = res.statusText
    throw error
  }
  // Forward each complete SSE event's data payload to the caller.
  const parser = createParser((event) => {
    if (event.type === 'event') {
      onMessage(event.data)
    }
  })
  // handle special response errors
  // A chunk that parses as JSON with detail.type === 'invalid_request_error'
  // is an API error body, not an SSE event: report it and skip the parser.
  const feed = (chunk: string) => {
    let response = null
    try {
      response = JSON.parse(chunk)
    } catch {
      // ignore
    }
    if (response?.detail?.type === 'invalid_request_error') {
      const msg = `ChatGPT error ${response.detail.message}: ${response.detail.code} (${response.detail.type})`
      const error = new types.ChatGPTError(msg, { cause: response })
      error.statusCode = response.detail.code
      error.statusText = response.detail.message
      if (onError) {
        onError(error)
      } else {
        console.error(error)
      }
      // don't feed to the event parser
      return
    }
    parser.feed(chunk)
  }
  if (!res.body.getReader) {
    // Vercel polyfills `fetch` with `node-fetch`, which doesn't conform to
    // web standards, so this is a workaround...
    const body: NodeJS.ReadableStream = res.body as any
    if (!body.on || !body.read) {
      throw new types.ChatGPTError('unsupported "fetch" implementation')
    }
    body.on('readable', () => {
      let chunk: string | Buffer
      while (null !== (chunk = body.read())) {
        feed(chunk.toString())
      }
    })
  } else {
    // WHATWG ReadableStream path: decode each chunk and feed it.
    for await (const chunk of streamAsyncIterable(res.body)) {
      const str = new TextDecoder().decode(chunk)
      feed(str)
    }
  }
}

View file

@ -0,0 +1,14 @@
/**
 * Adapts a WHATWG ReadableStream into an async iterable so it can be
 * consumed with `for await...of`. The reader lock is always released,
 * even when iteration ends early or the consumer throws.
 */
export async function * streamAsyncIterable (stream) {
  const reader = stream.getReader()
  try {
    for (;;) {
      const result = await reader.read()
      if (result.done) {
        break
      }
      yield result.value
    }
  } finally {
    reader.releaseLock()
  }
}

View file

@ -0,0 +1,6 @@
// Compiled counterpart of tokenizer.ts -- keep the two files in sync.
import { getEncoding } from 'js-tiktoken';
// TODO: make this configurable
// cl100k_base is the encoding used by the gpt-3.5/gpt-4 chat models.
var tokenizer = getEncoding('cl100k_base');
// Encodes `input` into BPE token ids as a Uint32Array.
export function encode(input) {
    return new Uint32Array(tokenizer.encode(input));
}

View file

@ -0,0 +1,8 @@
import { getEncoding } from 'js-tiktoken'
// TODO: make this configurable
// cl100k_base is the encoding used by the gpt-3.5/gpt-4 chat models.
const tokenizer = getEncoding('cl100k_base')
/** Encodes `input` into BPE token ids as a Uint32Array. */
export function encode(input: string): Uint32Array {
  return new Uint32Array(tokenizer.encode(input))
}

View file

@ -0,0 +1,5 @@
{
"compilerOptions": {
"module": "es2020"
}
}

26
utils/openai/types.js Normal file
View file

@ -0,0 +1,26 @@
// TypeScript-emitted helper (tslib __extends): implements class inheritance
// for ES5 targets by copying static members and chaining prototypes.
// Auto-generated by tsc -- do not edit by hand.
var __extends = (this && this.__extends) || (function () {
    var extendStatics = function (d, b) {
        extendStatics = Object.setPrototypeOf ||
            ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
            function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
        return extendStatics(d, b);
    };
    return function (d, b) {
        if (typeof b !== "function" && b !== null)
            throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
        extendStatics(d, b);
        function __() { this.constructor = d; }
        d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
    };
})();
// Compiled (ES5) counterpart of types.ts: ChatGPTError is an Error subclass
// carrying extra HTTP/bookkeeping fields (statusCode, statusText, ...),
// assigned dynamically by callers.
var ChatGPTError = /** @class */ (function (_super) {
    __extends(ChatGPTError, _super);
    function ChatGPTError() {
        return _super !== null && _super.apply(this, arguments) || this;
    }
    return ChatGPTError;
}(Error));
export { ChatGPTError };
// Empty runtime shell for the `openai` type-only namespace in types.ts.
export var openai;
(function (openai) {
})(openai || (openai = {}));

473
utils/openai/types.ts Normal file
View file

@ -0,0 +1,473 @@
import Keyv from 'keyv'
export type Role = 'user' | 'assistant' | 'system' | 'function'
export type FetchFn = typeof fetch
export type ChatGPTAPIOptions = {
apiKey: string
/** @defaultValue `'https://api.openai.com'` **/
apiBaseUrl?: string
apiOrg?: string
/** @defaultValue `false` **/
debug?: boolean
completionParams?: Partial<
Omit<openai.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
>
systemMessage?: string
/** @defaultValue `4096` **/
maxModelTokens?: number
/** @defaultValue `1000` **/
maxResponseTokens?: number
messageStore?: Keyv
getMessageById?: GetMessageByIdFunction
upsertMessage?: UpsertMessageFunction
fetch?: FetchFn
}
export type SendMessageOptions = {
/**
* function role name
*/
name?: string
parentMessageId?: string
conversationId?: string
messageId?: string
stream?: boolean
systemMessage?: string
timeoutMs?: number
onProgress?: (partialResponse: ChatMessage) => void
abortSignal?: AbortSignal
completionParams?: Partial<
Omit<openai.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
>
}
export type MessageActionType = 'next' | 'variant'
export type SendMessageBrowserOptions = {
conversationId?: string
parentMessageId?: string
messageId?: string
action?: MessageActionType
timeoutMs?: number
onProgress?: (partialResponse: ChatMessage) => void
abortSignal?: AbortSignal
}
export interface ChatMessage {
id: string
text: string
role: Role
name?: string
delta?: string
detail?:
| openai.CreateChatCompletionResponse
| CreateChatCompletionStreamResponse
// relevant for both ChatGPTAPI and ChatGPTUnofficialProxyAPI
parentMessageId?: string
// only relevant for ChatGPTUnofficialProxyAPI (optional for ChatGPTAPI)
conversationId?: string
functionCall?: openai.FunctionCall
}
/**
 * Error thrown by the ChatGPT client wrappers. Extends Error with HTTP
 * details of the failed request plus client bookkeeping fields.
 */
export class ChatGPTError extends Error {
  // HTTP status code of the failed request, when available
  statusCode?: number
  // HTTP status text of the failed request, when available
  statusText?: string
  // NOTE(review): semantics of the two fields below are inferred from their
  // names only -- confirm against the call sites that set them.
  isFinal?: boolean
  accountId?: string
}
/** Returns a chat message from a store by it's ID (or null if not found). */
export type GetMessageByIdFunction = (id: string) => Promise<ChatMessage>
/** Upserts a chat message to a store. */
export type UpsertMessageFunction = (message: ChatMessage) => Promise<void>
export interface CreateChatCompletionStreamResponse
extends openai.CreateChatCompletionDeltaResponse {
usage: CreateCompletionStreamResponseUsage
}
export interface CreateCompletionStreamResponseUsage
extends openai.CreateCompletionResponseUsage {
estimated: true
}
/**
* https://chat.openapi.com/backend-api/conversation
*/
export type ConversationJSONBody = {
/**
* The action to take
*/
action: string
/**
* The ID of the conversation
*/
conversation_id?: string
/**
* Prompts to provide
*/
messages: Prompt[]
/**
* The model to use
*/
model: string
/**
* The parent message ID
*/
parent_message_id: string
}
export type Prompt = {
/**
* The content of the prompt
*/
content: PromptContent
/**
* The ID of the prompt
*/
id: string
/**
* The role played in the prompt
*/
role: Role
}
export type ContentType = 'text'
export type PromptContent = {
/**
* The content type of the prompt
*/
content_type: ContentType
/**
* The parts to the prompt
*/
parts: string[]
}
export type ConversationResponseEvent = {
message?: Message
conversation_id?: string
error?: string | null
}
export type Message = {
id: string
content: MessageContent
role: Role
user: string | null
create_time: string | null
update_time: string | null
end_turn: null
weight: number
recipient: string
metadata: MessageMetadata
}
export type MessageContent = {
content_type: string
parts: string[]
}
export type MessageMetadata = any
export namespace openai {
export interface CreateChatCompletionDeltaResponse {
id: string
object: 'chat.completion.chunk'
created: number
model: string
choices: [
{
delta: {
role: Role
content?: string,
function_call?: {name: string, arguments: string}
}
index: number
finish_reason: string | null
}
]
}
/**
*
* @export
* @interface ChatCompletionRequestMessage
*/
export interface ChatCompletionRequestMessage {
/**
* The role of the author of this message.
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
role: ChatCompletionRequestMessageRoleEnum
/**
* The contents of the message
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
content: string
/**
* The name of the user in a multi-user chat
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
name?: string
function_call?: FunctionCall
}
export interface FunctionCall {
name: string
arguments: string
}
export declare const ChatCompletionRequestMessageRoleEnum: {
readonly System: 'system'
readonly User: 'user'
readonly Assistant: 'assistant'
readonly Function: 'function'
}
export declare type ChatCompletionRequestMessageRoleEnum =
(typeof ChatCompletionRequestMessageRoleEnum)[keyof typeof ChatCompletionRequestMessageRoleEnum]
/**
*
* @export
* @interface ChatCompletionResponseMessage
*/
export interface ChatCompletionResponseMessage {
/**
* The role of the author of this message.
* @type {string}
* @memberof ChatCompletionResponseMessage
*/
role: ChatCompletionResponseMessageRoleEnum
/**
* The contents of the message
* @type {string}
* @memberof ChatCompletionResponseMessage
*/
content: string
function_call: FunctionCall
}
export declare const ChatCompletionResponseMessageRoleEnum: {
readonly System: 'system'
readonly User: 'user'
readonly Assistant: 'assistant'
}
export declare type ChatCompletionResponseMessageRoleEnum =
(typeof ChatCompletionResponseMessageRoleEnum)[keyof typeof ChatCompletionResponseMessageRoleEnum]
/**
*
* @export
* @interface CreateChatCompletionRequest
*/
export interface CreateChatCompletionRequest {
/**
* ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported.
* @type {string}
* @memberof CreateChatCompletionRequest
*/
model: string
/**
* The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction).
* @type {Array<ChatCompletionRequestMessage>}
* @memberof CreateChatCompletionRequest
*/
messages: Array<ChatCompletionRequestMessage>
/**
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
* @type {number}
* @memberof CreateChatCompletionRequest
*/
temperature?: number | null
/**
* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
* @type {number}
* @memberof CreateChatCompletionRequest
*/
top_p?: number | null
/**
* How many chat completion choices to generate for each input message.
* @type {number}
* @memberof CreateChatCompletionRequest
*/
n?: number | null
/**
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
* @type {boolean}
* @memberof CreateChatCompletionRequest
*/
stream?: boolean | null
/**
*
* @type {CreateChatCompletionRequestStop}
* @memberof CreateChatCompletionRequest
*/
stop?: CreateChatCompletionRequestStop
/**
* The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
* @type {number}
* @memberof CreateChatCompletionRequest
*/
max_tokens?: number
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
* @type {number}
* @memberof CreateChatCompletionRequest
*/
presence_penalty?: number | null
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
* @type {number}
* @memberof CreateChatCompletionRequest
*/
frequency_penalty?: number | null
/**
* Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
* @type {object}
* @memberof CreateChatCompletionRequest
*/
logit_bias?: object | null
/**
* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @type {string}
* @memberof CreateChatCompletionRequest
*/
user?: string
functions?: Function[]
}
export interface Function {
name: string
description: string
parameters: FunctionParameters
}
export interface FunctionParameters {
type: string
properties: Record<string, Record<string, any>>
required: string[]
}
/**
* @type CreateChatCompletionRequestStop
* Up to 4 sequences where the API will stop generating further tokens.
* @export
*/
export declare type CreateChatCompletionRequestStop = Array<string> | string
/**
*
* @export
* @interface CreateChatCompletionResponse
*/
export interface CreateChatCompletionResponse {
/**
*
* @type {string}
* @memberof CreateChatCompletionResponse
*/
id: string
/**
*
* @type {string}
* @memberof CreateChatCompletionResponse
*/
object: string
/**
*
* @type {number}
* @memberof CreateChatCompletionResponse
*/
created: number
/**
*
* @type {string}
* @memberof CreateChatCompletionResponse
*/
model: string
/**
*
* @type {Array<CreateChatCompletionResponseChoicesInner>}
* @memberof CreateChatCompletionResponse
*/
choices: Array<CreateChatCompletionResponseChoicesInner>
/**
*
* @type {CreateCompletionResponseUsage}
* @memberof CreateChatCompletionResponse
*/
usage?: CreateCompletionResponseUsage
}
/**
*
* @export
* @interface CreateChatCompletionResponseChoicesInner
*/
export interface CreateChatCompletionResponseChoicesInner {
/**
*
* @type {number}
* @memberof CreateChatCompletionResponseChoicesInner
*/
index?: number
/**
*
* @type {ChatCompletionResponseMessage}
* @memberof CreateChatCompletionResponseChoicesInner
*/
message?: ChatCompletionResponseMessage
/**
*
* @type {string}
* @memberof CreateChatCompletionResponseChoicesInner
*/
finish_reason?: string
}
/**
*
* @export
* @interface CreateCompletionResponseUsage
*/
export interface CreateCompletionResponseUsage {
/**
*
* @type {number}
* @memberof CreateCompletionResponseUsage
*/
prompt_tokens: number
/**
*
* @type {number}
* @memberof CreateCompletionResponseUsage
*/
completion_tokens: number
/**
*
* @type {number}
* @memberof CreateCompletionResponseUsage
*/
total_tokens: number
}
}

278
utils/poe/index 2.js Normal file
View file

@ -0,0 +1,278 @@
import { readFileSync } from 'fs'
import { scrape } from './credential.js'
import fetch from 'node-fetch'
import crypto from 'crypto'
// used when test as a single file
// const _path = process.cwd()
const _path = process.cwd() + '/plugins/chatgpt-plugin/utils/poe'
const gqlDir = `${_path}/graphql`
const queries = {
// chatViewQuery: readFileSync(gqlDir + '/ChatViewQuery.graphql', 'utf8'),
addMessageBreakMutation: readFileSync(gqlDir + '/AddMessageBreakMutation.graphql', 'utf8'),
chatPaginationQuery: readFileSync(gqlDir + '/ChatPaginationQuery.graphql', 'utf8'),
addHumanMessageMutation: readFileSync(gqlDir + '/AddHumanMessageMutation.graphql', 'utf8'),
loginMutation: readFileSync(gqlDir + '/LoginWithVerificationCodeMutation.graphql', 'utf8'),
signUpWithVerificationCodeMutation: readFileSync(gqlDir + '/SignupWithVerificationCodeMutation.graphql', 'utf8'),
sendVerificationCodeMutation: readFileSync(gqlDir + '/SendVerificationCodeForLoginMutation.graphql', 'utf8')
}
const optionMap = [
{ title: 'Claude (Powered by Anthropic)', value: 'a2' },
{ title: 'Sage (Powered by OpenAI - logical)', value: 'capybara' },
{ title: 'Dragonfly (Powered by OpenAI - simpler)', value: 'nutria' },
{ title: 'ChatGPT (Powered by OpenAI - current)', value: 'chinchilla' },
{ title: 'Claude+', value: 'a2_2' },
{ title: 'GPT-4', value: 'beaver' }
]
/**
 * Minimal GraphQL client for poe.com (Quora's Poe chat service).
 * Authenticates with a `p-b` cookie, resolves the per-account formkey and
 * update-channel settings via `scrape()`, and exposes helpers to send
 * messages, poll for replies, clear context and manage history.
 * Typical flow: setCredentials() -> getChatId() -> sendMsg() -> getResponse().
 */
export class PoeClient {
  constructor (props) {
    this.config = props
  }
  // Browser-like default headers; poe-formkey / poe-tchannel / Cookie are
  // filled in by setCredentials() before any API request is made.
  headers = {
    'Content-Type': 'application/json',
    Referrer: 'https://poe.com/',
    Origin: 'https://poe.com',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
  }
  chatId = 0
  bot = ''
  // Flag read by the websocket layer: set when a request fails so it
  // knows to reconnect / re-login.
  reConnectWs = false
  /**
   * Scrapes the formkey and channel settings from poe.com using the
   * configured `quora_cookie` and installs them into this.config and
   * this.headers. Must be called before any other API method.
   */
  async setCredentials () {
    let result = await scrape(this.config.quora_cookie)
    console.log(result)
    this.config.quora_formkey = result.appSettings.formkey
    this.config.channel_name = result.channelName
    this.config.app_settings = result.appSettings
    // set value
    this.headers['poe-formkey'] = this.config.quora_formkey
    this.headers['poe-tchannel'] = this.config.channel_name
    this.headers.Cookie = this.config.quora_cookie
    console.log(this.headers)
  }
  /**
   * Auto-subscribes to the `messageAdded` and `viewerStateUpdated` GraphQL
   * subscriptions so the tchannel starts delivering updates.
   */
  async subscribe () {
    const query = {
      queryName: 'subscriptionsMutation',
      variables: {
        subscriptions: [
          {
            subscriptionName: 'messageAdded',
            query: 'subscription subscriptions_messageAdded_Subscription(\n  $chatId: BigInt!\n) {\n  messageAdded(chatId: $chatId) {\n    id\n    messageId\n    creationTime\n    state\n    ...ChatMessage_message\n    ...chatHelpers_isBotMessage\n  }\n}\n\nfragment ChatMessageDownvotedButton_message on Message {\n  ...MessageFeedbackReasonModal_message\n  ...MessageFeedbackOtherModal_message\n}\n\nfragment ChatMessageDropdownMenu_message on Message {\n  id\n  messageId\n  vote\n  text\n  ...chatHelpers_isBotMessage\n}\n\nfragment ChatMessageFeedbackButtons_message on Message {\n  id\n  messageId\n  vote\n  voteReason\n  ...ChatMessageDownvotedButton_message\n}\n\nfragment ChatMessageOverflowButton_message on Message {\n  text\n  ...ChatMessageDropdownMenu_message\n  ...chatHelpers_isBotMessage\n}\n\nfragment ChatMessageSuggestedReplies_SuggestedReplyButton_message on Message {\n  messageId\n}\n\nfragment ChatMessageSuggestedReplies_message on Message {\n  suggestedReplies\n  ...ChatMessageSuggestedReplies_SuggestedReplyButton_message\n}\n\nfragment ChatMessage_message on Message {\n  id\n  messageId\n  text\n  author\n  linkifiedText\n  state\n  ...ChatMessageSuggestedReplies_message\n  ...ChatMessageFeedbackButtons_message\n  ...ChatMessageOverflowButton_message\n  ...chatHelpers_isHumanMessage\n  ...chatHelpers_isBotMessage\n  ...chatHelpers_isChatBreak\n  ...chatHelpers_useTimeoutLevel\n  ...MarkdownLinkInner_message\n}\n\nfragment MarkdownLinkInner_message on Message {\n  messageId\n}\n\nfragment MessageFeedbackOtherModal_message on Message {\n  id\n  messageId\n}\n\nfragment MessageFeedbackReasonModal_message on Message {\n  id\n  messageId\n}\n\nfragment chatHelpers_isBotMessage on Message {\n  ...chatHelpers_isHumanMessage\n  ...chatHelpers_isChatBreak\n}\n\nfragment chatHelpers_isChatBreak on Message {\n  author\n}\n\nfragment chatHelpers_isHumanMessage on Message {\n  author\n}\n\nfragment chatHelpers_useTimeoutLevel on Message {\n  id\n  state\n  text\n  messageId\n}\n'
          },
          {
            subscriptionName: 'viewerStateUpdated',
            query: 'subscription subscriptions_viewerStateUpdated_Subscription {\n  viewerStateUpdated {\n    id\n    ...ChatPageBotSwitcher_viewer\n  }\n}\n\nfragment BotHeader_bot on Bot {\n  displayName\n  ...BotImage_bot\n}\n\nfragment BotImage_bot on Bot {\n  profilePicture\n  displayName\n}\n\nfragment BotLink_bot on Bot {\n  displayName\n}\n\nfragment ChatPageBotSwitcher_viewer on Viewer {\n  availableBots {\n    id\n    ...BotLink_bot\n    ...BotHeader_bot\n  }\n}\n'
          }
        ]
      },
      query: 'mutation subscriptionsMutation(\n  $subscriptions: [AutoSubscriptionQuery!]!\n) {\n  autoSubscribe(subscriptions: $subscriptions) {\n    viewer {\n      id\n    }\n  }\n}\n'
    }
    await this.makeRequest(query)
  }
  /**
   * POSTs a GraphQL request to poe.com/api/gql_POST, signing the payload
   * with md5(payload + formkey + salt) in the poe-tag-id header as the
   * web client does. Returns the parsed JSON response; rethrows if the
   * response body is not valid JSON (logging the raw text).
   */
  async makeRequest (request) {
    let payload = JSON.stringify(request)
    let baseString = payload + this.headers['poe-formkey'] + 'WpuLMiXEKKE98j56k'
    const md5 = crypto.createHash('md5').update(baseString).digest('hex')
    const response = await fetch('https://poe.com/api/gql_POST', {
      method: 'POST',
      headers: Object.assign(this.headers, {
        'poe-tag-id': md5,
        'content-type': 'application/json'
      }),
      body: payload
    })
    let text = await response.text()
    try {
      let result = JSON.parse(text)
      console.log({ result })
      return result
    } catch (e) {
      console.error(text)
      throw e
    }
  }
  /**
   * Fetches the chat page data for a bot display name from the Next.js
   * data endpoint, retrying up to 11 times before giving up.
   * NOTE(review): the inner `let r` shadows the outer one, so on total
   * failure `throw new Error(r)` sees the inner block's value, not the
   * outer declaration -- confirm the intended error payload.
   */
  async getBot (displayName) {
    let r
    let retry = 10
    while (retry >= 0) {
      let url = `https://poe.com/_next/data/${this.nextData.buildId}/${displayName}.json`
      let r = await fetch(url, {
        headers: this.headers
      })
      let res = await r.text()
      try {
        let chatData = (JSON.parse(res)).pageProps.payload.chatOfBotDisplayName
        return chatData
      } catch (e) {
        r = res
        retry--
      }
    }
    throw new Error(r)
  }
  /**
   * Loads the poe.com homepage, extracts the embedded __NEXT_DATA__ JSON
   * (build id, viewer, formkey) and resolves the chat data for every
   * available bot into this.bots keyed by bot nickname.
   */
  async getChatId () {
    let r = await fetch('https://poe.com', {
      headers: this.headers
    })
    let text = await r.text()
    const jsonRegex = /<script id="__NEXT_DATA__" type="application\/json">(.+?)<\/script>/
    const jsonText = text.match(jsonRegex)[1]
    const nextData = JSON.parse(jsonText)
    this.nextData = nextData
    this.viewer = nextData.props.pageProps.payload.viewer
    this.formkey = nextData.props.formkey
    let bots = this.viewer.availableBots
    this.bots = {}
    for (let i = 0; i < bots.length; i++) {
      let bot = bots[i]
      let chatData = await this.getBot(bot.displayName)
      this.bots[chatData.defaultBotObject.nickname] = chatData
    }
    console.log(this.bots)
  }
  /**
   * Inserts a chat break for `bot`, clearing its conversation context.
   * On failure flags reConnectWs so the caller re-logs in; never throws
   * (returns the caught error instead).
   */
  async clearContext (bot) {
    try {
      const data = await this.makeRequest({
        query: `${queries.addMessageBreakMutation}`,
        variables: { chatId: this.config.chat_ids[bot] }
      })
      if (!data.data) {
        this.reConnectWs = true // for websocket purpose
        console.log('ON TRY! Could not clear context! Trying to reLogin..')
      }
      return data
    } catch (e) {
      this.reConnectWs = true // for websocket purpose
      console.log('ON CATCH! Could not clear context! Trying to reLogin..')
      return e
    }
  }
  /**
   * Sends a human message `query` to `bot` in its resolved chat.
   * On failure flags reConnectWs; never throws (returns the error).
   */
  async sendMsg (bot, query) {
    try {
      const data = await this.makeRequest({
        query: `${queries.addHumanMessageMutation}`,
        variables: {
          bot,
          chatId: this.bots[bot].chatId,
          query,
          source: null,
          withChatBreak: false
        }
      })
      console.log(data)
      if (!data.data) {
        this.reConnectWs = true // for cli websocket purpose
        console.log('Could not send message! Trying to reLogin..')
      }
      return data
    } catch (e) {
      this.reConnectWs = true // for cli websocket purpose
      console.error(e)
      return e
    }
  }
  /**
   * Returns up to the last 25 messages of `bot`'s chat as
   * { messageId, text, authorNickname } records, or undefined on error.
   */
  async getHistory (bot) {
    try {
      let response = await this.makeRequest({
        query: `${queries.chatPaginationQuery}`,
        variables: {
          before: null,
          bot,
          last: 25
        }
      })
      return response.data.chatOfBot.messagesConnection.edges
        .map(({ node: { messageId, text, authorNickname } }) => ({
          messageId,
          text,
          authorNickname
        }))
    } catch (e) {
      console.log('There has been an error while fetching your history!')
    }
  }
  /** Deletes the given message ids from the conversation. */
  async deleteMessages (msgIds) {
    await this.makeRequest({
      queryName: 'MessageDeleteConfirmationModal_deleteMessageMutation_Mutation',
      variables: {
        messageIds: msgIds
      },
      query: 'mutation MessageDeleteConfirmationModal_deleteMessageMutation_Mutation(\n  $messageIds: [BigInt!]!\n){\n  messagesDelete(messageIds: $messageIds) {\n    edgeIds\n  }\n}\n'
    })
  }
  /**
   * Polls the chat every 2 seconds until the newest message is authored
   * by `bot` and in state 'complete', then returns
   * { status, message, data: text }. On error returns a failure record
   * instead of throwing. NOTE(review): there is no upper bound on the
   * polling loop -- a stalled reply blocks forever; consider a timeout.
   */
  async getResponse (bot) {
    let text
    let state
    let authorNickname
    try {
      while (true) {
        await new Promise((resolve) => setTimeout(resolve, 2000))
        let response = await this.makeRequest({
          query: `${queries.chatPaginationQuery}`,
          variables: {
            before: null,
            bot,
            last: 1
          }
        })
        let base = response.data.chatOfBot.messagesConnection.edges
        let lastEdgeIndex = base.length - 1
        text = base[lastEdgeIndex].node.text
        authorNickname = base[lastEdgeIndex].node.authorNickname
        state = base[lastEdgeIndex].node.state
        if (state === 'complete' && authorNickname === bot) {
          break
        }
      }
    } catch (e) {
      console.log('Could not get response!')
      return {
        status: false,
        message: 'failed',
        data: null
      }
    }
    return {
      status: true,
      message: 'success',
      data: text
    }
  }
}
/**
 * Manual smoke test for PoeClient: authenticates with a hard-coded `p-b`
 * cookie, resolves the available bots, sends one message to the 'a2'
 * (Claude) bot and returns its reply record. Not run automatically --
 * see the commented-out invocation below.
 */
async function testPoe () {
  const cookie = 'p-b=WSvmyvjHVJoMtQVkirtn-A%3D%3D'
  const client = new PoeClient({
    quora_cookie: cookie
  })
  await client.setCredentials()
  await client.getChatId()
  const bot = 'a2'
  await client.sendMsg(bot, '你说话不是很通顺啊')
  return await client.getResponse(bot)
}
// testPoe().then(res => {
// console.log(res)
// })

65
utils/poe/websocket 2.js Normal file
View file

@ -0,0 +1,65 @@
import WebSocket from 'ws'
import * as diff from 'diff'
import { readFileSync } from 'fs'
/**
 * Builds the Quora/Poe websocket endpoint URL from the channel data
 * persisted in config.json. The `tchNNNNNN` subdomain uses a random
 * 6-digit number, mirroring what the Poe web client does.
 */
const getSocketUrl = async () => {
  const subdomainId = Math.floor(100000 + Math.random() * 900000)
  const base = `wss://tch${subdomainId}.tch.quora.com`
  const credentials = JSON.parse(readFileSync('config.json', 'utf8'))
  const { boxName, minSeq, channel, channelHash } = credentials.app_settings.tchannelData
  return `${base}/up/${boxName}/updates?min_seq=${minSeq}&channel=${channel}&hash=${channelHash}`
}
/**
 * Opens a websocket connection to the Poe update channel.
 *
 * @returns a Promise that resolves with the connected WebSocket once the
 *   'open' event fires, or rejects if the connection fails.
 *
 * Fix: the previous version only listened for 'open', so a failed
 * connection (DNS failure, refused, TLS error) left the promise pending
 * forever. An 'error' listener now rejects the promise instead.
 */
export const connectWs = async () => {
  const url = await getSocketUrl()
  const ws = new WebSocket(url)
  return new Promise((resolve, reject) => {
    ws.once('open', function open () {
      console.log('Connected to websocket')
      resolve(ws)
    })
    // Reject instead of hanging when the connection cannot be established.
    ws.once('error', function error (err) {
      reject(err)
    })
  })
}
/**
 * Initiates a close on the given websocket and resolves with `true`
 * once its 'close' event has fired.
 */
export const disconnectWs = async (ws) => {
  return new Promise((resolve) => {
    ws.on('close', () => resolve(true))
    ws.close()
  })
}
/**
 * Listens on the Poe update websocket and streams incremental reply text
 * to stdout. Each incoming frame carries a messageAdded payload; while the
 * message state is not 'complete', only the characters added since the
 * previous frame (computed with diffChars) are printed. Resolves `true`
 * and detaches the listener once the message completes.
 */
export const listenWs = async (ws) => {
  let previousText = ''
  return new Promise((resolve, reject) => {
    const onMessage = function incoming (data) {
      let jsonData = JSON.parse(data)
      if (jsonData.messages && jsonData.messages.length > 0) {
        // messages[0] is itself a JSON-encoded subscription payload.
        const messages = JSON.parse(jsonData.messages[0])
        const dataPayload = messages.payload.data
        const text = dataPayload.messageAdded.text
        const state = dataPayload.messageAdded.state
        if (state !== 'complete') {
          // Print only the newly added characters of the growing reply.
          const differences = diff.diffChars(previousText, text)
          let result = ''
          differences.forEach((part) => {
            if (part.added) {
              result += part.value
            }
          })
          previousText = text
          process.stdout.write(result)
        } else {
          ws.removeListener('message', onMessage)
          return resolve(true)
        }
      }
    }
    ws.on('message', onMessage)
  })
}

47
utils/tools/APTool.js Normal file
View file

@ -0,0 +1,47 @@
import { AbstractTool } from './AbstractTool.js'
/**
 * Tool that draws a picture by delegating to ap-plugin's StableDiffusion command.
 */
export class APTool extends AbstractTool {
  name = 'draw'

  parameters = {
    properties: {
      prompt: {
        type: 'string',
        description: 'draw prompt of StableDiffusion, prefer to be in English. should be many keywords split by comma.'
      }
    },
    required: []
  }

  description = 'Useful when you want to draw picture'

  func = async function (opts, e) {
    const { prompt } = opts
    // Clear any mention of the bot so ap-plugin does not mis-target.
    if (e.at === Bot.uin) {
      e.at = null
    }
    e.atBot = false
    // ap-plugin renamed its entry module on the dev branch; probe both paths.
    const candidates = [
      '../../../ap-plugin/apps/aiPainting.js',
      '../../../ap-plugin/apps/ai_painting.js'
    ]
    let ap = null
    for (const modulePath of candidates) {
      try {
        // eslint-disable-next-line camelcase
        const { Ai_Painting } = await import(modulePath)
        ap = new Ai_Painting(e)
        break
      } catch (err) {
        // keep probing the next known location
      }
    }
    if (!ap) {
      return 'the user didn\'t install ap-plugin. suggest him to install'
    }
    try {
      e.msg = '#绘图' + prompt
      await ap.aiPainting(e)
      return 'draw success, picture has been sent.'
    } catch (err) {
      return 'draw failed due to unknown error'
    }
  }
}

View file

@ -0,0 +1,20 @@
/**
 * Base class for model-invocable tools. Subclasses set `name`, `parameters`,
 * `description` and override `func` with an async (opts, e) handler.
 */
export class AbstractTool {
  // Tool identifier exposed to the model.
  name = ''

  // JSON-schema style parameter declaration; `type` is filled in lazily.
  parameters = {}

  // Human-readable purpose of the tool.
  description = ''

  // The actual implementation; subclasses assign an async (opts, e) function.
  func = async function () {}

  /**
   * Serialize this tool into a function-calling schema object,
   * defaulting `parameters.type` to 'object' when unset.
   */
  function () {
    const params = this.parameters
    if (!params.type) {
      params.type = 'object'
    }
    return {
      name: this.name,
      description: this.description,
      parameters: params
    }
  }
}

View file

@ -0,0 +1,43 @@
import { AbstractTool } from './AbstractTool.js'
/**
 * Tool that edits a group member's display card (群名片).
 */
export class EditCardTool extends AbstractTool {
  name = 'editCard'

  parameters = {
    properties: {
      qq: {
        type: 'string',
        description: '你想改名片的那个人的qq号默认为聊天对象'
      },
      card: {
        type: 'string',
        description: 'the new card'
      },
      groupId: {
        type: 'string',
        description: 'group number'
      }
    },
    required: ['card', 'groupId']
  }

  description = 'Useful when you want to edit someone\'s card in the group(群名片)'

  func = async function (opts, e) {
    let { qq, card, groupId } = opts
    // Fall back to the current speaker / current group on missing or non-numeric input.
    qq = isNaN(qq) || !qq ? e.sender.user_id : parseInt(qq.trim())
    groupId = isNaN(groupId) || !groupId ? e.group_id : parseInt(groupId.trim())
    const group = await Bot.pickGroup(groupId)
    const members = await group.getMemberMap()
    if (!members.has(qq)) {
      return `failed, the user ${qq} is not in group ${groupId}`
    }
    // The bot itself needs admin/owner role to edit cards.
    if (members.get(Bot.uin).role === 'member') {
      return `failed, you, not user, don't have permission to edit card in group ${groupId}`
    }
    logger.info('edit card: ', groupId, qq)
    await group.setCard(qq, card)
    return `the user ${qq}'s card has been changed into ${card}`
  }
}

View file

@ -0,0 +1,44 @@
import { AbstractTool } from './AbstractTool.js'
/**
 * Tool that lists currently hot movies via avocado-plugin.
 */
export class EliMovieTool extends AbstractTool {
  name = 'currentHotMovies'

  parameters = {
    properties: {
      yesOrNo: {
        type: 'string',
        description: 'check or not'
      }
    },
    required: ['yesOrNo']
  }

  description = 'Useful when you want to check out the current hot movies'

  func = async function (opts, e) {
    const { yesOrNo } = opts
    if (yesOrNo === 'no') {
      return 'tell user why you don\'t want to check'
    }
    // Clear any mention of the bot so the downstream plugin does not mis-target.
    if (e.at === Bot.uin) {
      e.at = null
    }
    e.atBot = false
    let avocado
    try {
      // eslint-disable-next-line camelcase
      const { AvocadoMovie } = await import('../../../avocado-plugin/apps/avocadoMovie.js')
      avocado = new AvocadoMovie(e)
    } catch (err) {
      return 'the user didn\'t install avocado-plugin. suggest him to install'
    }
    try {
      // eslint-disable-next-line new-cap
      await avocado.getHotMovies(e)
      return 'notify the user that the movie has been sent to them and they can obtain more information by sending commands displayed in the picture. you dont need to search for additional information to reply! just simply inform them that you have completed your task!!!'
    } catch (err) {
      logger.warn(err)
      return 'failed due to unknown error'
    }
  }
}

View file

@ -0,0 +1,89 @@
import { AbstractTool } from './AbstractTool.js'
/**
 * Tool that fulfils music requests (search, random play, hot charts, relax
 * music) by delegating to avocado-plugin's music commands.
 */
export class EliMusicTool extends AbstractTool {
  name = 'musicTool'

  parameters = {
    properties: {
      keywordOrSongName: {
        type: 'string',
        description: 'Not necessarily a songName, it can be some descriptive words.'
      },
      singer: {
        type: 'string',
        description: 'Singer name, multiple singers are separated by \',\'!'
      },
      isRandom: {
        type: 'boolean',
        description: 'true when randomly select songs'
      },
      isHot: {
        type: 'boolean',
        description: 'true when user\'s needs related to \'hot\''
      },
      singerTypeOrRegion: {
        type: 'string',
        description: 'Choose from [华语|中国|欧美|韩国|日本] when seeking the latest ranking of popular vocalists.'
      },
      isRelax: {
        type: 'boolean',
        description: 'Complete whenever you wish to discover the renowned vocalist in a particular locale.'
      }
    },
    // FIX: 'singerTypeOrRegion, isRelax' was one malformed string entry; they
    // are two distinct parameters and must be listed separately.
    required: ['keywordOrSongName', 'singer', 'isRandom', 'singerTypeOrRegion', 'isRelax']
  }

  description = 'It is very useful when you want to meet the music needs of user or when user want to sleep or unwind(give him a relax music).'

  func = async function (opts, e) {
    let { keywordOrSongName, singer, isRandom, isHot, singerTypeOrRegion, isRelax } = opts
    let avocado, songDetail, musicUtils
    try {
      let { AvocadoMusic } = await import('../../../avocado-plugin/apps/avocadoMusic.js')
      musicUtils = await import('../../../avocado-plugin/utils/music.js')
      avocado = new AvocadoMusic(e)
    } catch (err) {
      return 'the user didn\'t install avocado-plugin. suggest him to install'
    }
    try {
      // No keyword, no singer, but random requested: play the user's favourite singer.
      const orderFavSinger = !keywordOrSongName && isRandom && !singer
      if (orderFavSinger) { // favourite singer must have been registered via command beforehand
        try {
          singer = await redis.get(`AVOCADO:MUSIC_${e.sender.user_id}_FAVSINGER`)
          if (!singer) throw new Error('no favorite singer')
          singer = JSON.parse(singer).singerName
        } catch (err) {
          return 'the user didn\'t set a favorite singer. Suggest setting it through the command \'#设置歌手+歌手名称\'!'
        }
        e.msg = '#鳄梨酱音乐#随机' + singer
      } else if (isRelax) { // send a random relaxing/ambient track
        const arr = ['安静', '放松', '宁静', '白噪音']
        e.msg = `#鳄梨酱音乐#随机${arr[Math.floor(Math.random() * arr.length)]}`
      } else if (singerTypeOrRegion) { // hot-singer chart for a region/type
        if (['华语', '中国', '欧美', '韩国', '日本'].includes(singerTypeOrRegion)) {
          e.msg = '#鳄梨酱音乐#' + (isRandom ? '随机' : '') + (!keywordOrSongName && isHot ? '热门' : '') + singerTypeOrRegion + '歌手'
        }
      } else { // normal song request
        if (singer && keywordOrSongName) {
          // The model sometimes sets isRandom spuriously; an explicit singer+keyword
          // request should not be randomized.
          isRandom = false
          songDetail = await musicUtils.getOrderSongList(e.sender.user_id, singer + ',' + keywordOrSongName, 1)
        }
        e.msg = '#鳄梨酱音乐#' + (isRandom ? '随机' : '') + (!keywordOrSongName && isHot ? '热门' : '') + (singer ? singer + (keywordOrSongName ? ',' + keywordOrSongName : '') : keywordOrSongName)
      }
      await avocado.pickMusic(e)
      if (orderFavSinger) {
        return 'tell the user that a random song by his favorite artist has been sent to him!'
      } else {
        return 'tell user that the response of his request has been sent to the him!' +
          (songDetail
            ? 'song detail is: ' + JSON.stringify(songDetail) + ' and send album picture to user'
            : ''
          )
      }
    } catch (err) { // renamed from `e` to stop shadowing the event object
      return `music share failed: ${err}`
    }
  }
}

View file

@ -0,0 +1,46 @@
import { AbstractTool } from './AbstractTool.js'
/**
 * Tool that recalls a message or toggles its essence (精华) status.
 */
export class HandleMessageMsgTool extends AbstractTool {
  name = 'handleMsg'

  parameters = {
    properties: {
      type: {
        type: 'string',
        enum: ['recall', 'essence', 'un-essence'],
        description: 'what do you want to do with the message'
      },
      messageId: {
        type: 'string',
        description: 'which message to handle, current one by default'
      }
    },
    required: ['type']
  }

  description = '用来撤回消息或将消息设为精华'

  func = async function (opts, e) {
    const { type = 'recall', messageId = e.message_id } = opts
    try {
      if (type === 'recall') {
        await e.group.recallMsg(messageId)
      } else if (type === 'essence') {
        await Bot.setEssenceMessage(messageId)
      } else if (type === 'un-essence') {
        await Bot.removeEssenceMessage(messageId)
      }
      return 'success!'
    } catch (err) {
      logger.error(err)
      return 'operation failed: ' + err.message
    }
  }
}

View file

@ -0,0 +1,58 @@
import { AbstractTool } from './AbstractTool.js'
import fetch, { File, FormData } from 'node-fetch'
import { Config } from '../config.js'
/**
 * Tool that captions an image, or answers a question about it, via the
 * configured extra-API backend.
 */
export class ImageCaptionTool extends AbstractTool {
  name = 'imageCaption'

  parameters = {
    properties: {
      imgUrl: {
        type: 'string',
        description: 'the url of the image.'
      },
      qq: {
        type: 'string',
        description: 'if the picture is avatar of a user, input his qq number'
      },
      question: {
        type: 'string',
        description: 'when you need an answer for a question based on an image, write your question in English here.'
      }
    },
    required: []
  }

  description = 'useful when you want to know what is inside a photo, such as user\'s avatar or other pictures'

  func = async function (opts, e) {
    let { imgUrl, qq, question } = opts
    // Default to the speaker's avatar when no usable qq/imgUrl was given.
    if (isNaN(qq) || !qq) qq = e.sender.user_id
    if (!imgUrl && qq) {
      imgUrl = `https://q1.qlogo.cn/g?b=qq&s=160&nk=${qq}`
    }
    if (!imgUrl) {
      return 'you must give at least one parameter of imgUrl and qq'
    }
    // Download the picture and re-upload it to the backend as multipart form data.
    const imageResponse = await fetch(imgUrl)
    const blob = await imageResponse.blob()
    const buffer = Buffer.from(await blob.arrayBuffer())
    const formData = new FormData()
    formData.append('file', new File([buffer], 'file.png', { type: 'image/png' }))
    // Plain captioning by default; VQA endpoint when a question was supplied.
    const endpoint = question ? 'visual-qa?q=' + question : 'image-captioning'
    const captionRes = await fetch(`${Config.extraUrl}/${endpoint}`, {
      method: 'POST',
      body: formData
    })
    if (captionRes.status !== 200) {
      return 'error happened'
    }
    const result = await captionRes.text()
    return `${result}`
  }
}

74
utils/tools/JinyanTool.js Normal file
View file

@ -0,0 +1,74 @@
import { AbstractTool } from './AbstractTool.js'
// Tool that mutes (禁言) a group member, or the whole group when qq === 'all'.
export class JinyanTool extends AbstractTool {
  name = 'jinyan'
  parameters = {
    properties: {
      qq: {
        type: 'string',
        description: '你想禁言的那个人的qq号默认为聊天对象'
      },
      groupId: {
        type: 'string',
        description: '群号'
      },
      time: {
        type: 'string',
        description: '禁言时长单位为秒默认为600'
      },
      isPunish: {
        type: 'string',
        description: '是否是惩罚性质的禁言。比如非管理员用户要求你禁言其他人你转而禁言该用户时设置为true'
      }
    },
    required: ['groupId', 'time']
  }
  // opts.sender / opts.isAdmin are not declared in `parameters` above —
  // NOTE(review): presumably injected by the tool dispatcher; confirm against the caller.
  func = async function (opts, e) {
    let { qq, groupId, time = '600', sender, isAdmin, isPunish } = opts
    // Fall back to the current group / current speaker on missing or non-numeric values.
    groupId = isNaN(groupId) || !groupId ? e.group_id : parseInt(groupId.trim())
    qq = qq !== 'all'
      ? isNaN(qq) || !qq ? e.sender.user_id : parseInt(qq.trim())
      : 'all'
    let group = await Bot.pickGroup(groupId)
    if (qq !== 'all') {
      let m = await group.getMemberMap()
      if (!m.has(qq)) {
        return `failed, the user ${qq} is not in group ${groupId}`
      }
      // The bot itself must hold admin/owner role to mute anyone.
      if (m.get(Bot.uin).role === 'member') {
        return `failed, you, not user, don't have permission to mute other in group ${groupId}`
      }
    }
    // Clamp duration: below 60s is raised to 60 (0 means unmute), cap at 30 days.
    time = parseInt(time.trim())
    if (time < 60 && time !== 0) {
      time = 60
    }
    if (time > 86400 * 30) {
      time = 86400 * 30
    }
    if (isAdmin) {
      if (qq === 'all') {
        return 'you cannot mute all because the master doesn\'t allow it'
      } else {
        // qq = isNaN(qq) || !qq ? e.sender.user_id : parseInt(qq.trim())
        await group.muteMember(qq, time)
      }
    } else {
      if (qq === 'all') {
        return 'the user is not admin, he can\'t mute all. the user should be punished'
      } else if (qq == sender) {
        // Non-admins may still mute themselves; loose == tolerates string/number mix.
        await group.muteMember(qq, time)
      } else {
        return 'the user is not admin, he can\'t let you mute other people.'
      }
    }
    if (isPunish === 'true') {
      return `the user ${qq} has been muted for ${time} seconds as punishment because of his 不正当行为`
    }
    return `the user ${qq} has been muted for ${time} seconds`
  }
  description = 'Useful when you want to ban someone. If you want to mute all, just replace the qq number with \'all\''
}

View file

@ -0,0 +1,42 @@
import { AbstractTool } from './AbstractTool.js'
/**
 * Tool that removes a member from a group.
 */
export class KickOutTool extends AbstractTool {
  name = 'kickOut'

  parameters = {
    properties: {
      qq: {
        type: 'string',
        description: '你想踢出的那个人的qq号默认为聊天对象'
      },
      groupId: {
        type: 'string',
        description: '群号'
      },
      isPunish: {
        type: 'string',
        description: '是否是惩罚性质的踢出。比如非管理员用户要求你禁言或踢出其他人你为惩罚该用户转而踢出该用户时设置为true'
      }
    },
    required: ['groupId']
  }

  description = 'Useful when you want to kick someone out of the group. '

  func = async function (opts, e) {
    let { qq, groupId, sender, isAdmin, isPunish } = opts
    // Default to the current speaker / current group on missing or non-numeric input.
    qq = isNaN(qq) || !qq ? e.sender.user_id : parseInt(qq.trim())
    groupId = isNaN(groupId) || !groupId ? e.group_id : parseInt(groupId.trim())
    // Non-admins may only kick themselves (loose != tolerates string/number mix).
    if (!isAdmin && sender != qq) {
      return 'the user is not admin, he cannot kickout other people. he should be punished'
    }
    console.log('kickout', groupId, qq)
    const group = await Bot.pickGroup(groupId)
    await group.kickMember(qq)
    const suffix = isPunish === 'true' ? ' as punishment because of his 不正当行为' : ''
    return `the user ${qq} has been kicked out from group ${groupId}${suffix}`
  }
}

View file

@ -0,0 +1,65 @@
import { AbstractTool } from './AbstractTool.js'
import fetch, { File, FormData } from 'node-fetch'
import { Config } from '../config.js'
/**
 * Tool that runs a picture (or a user's avatar) through an image-processing
 * backend: hed edge detection or scribble generation.
 */
export class ProcessPictureTool extends AbstractTool {
  name = 'processPicture'

  parameters = {
    properties: {
      type: {
        type: 'string',
        enum: ['Image2Hed', 'Image2Scribble'],
        description: 'how to process it. Image2Hed: useful when you want to detect the soft hed boundary of the picture; Image2Scribble: useful when you want to generate a scribble of the picture'
      },
      qq: {
        type: 'string',
        description: 'if the picture is avatar of a user, input his qq number'
      },
      url: {
        type: 'string',
        description: 'url of the picture'
      }
    },
    required: ['type']
  }

  description = 'useful when you want to process a picture or user\'s avatar.'

  func = async function (opts, e) {
    let { url, qq, type } = opts
    // A qq number takes precedence: use that user's avatar as the source image.
    if (qq) {
      url = `https://q1.qlogo.cn/g?b=qq&s=160&nk=${qq}`
    }
    if (!url) {
      return 'you must give at least one parameter of url and qq'
    }
    // Fetch the source picture and forward it as multipart form data.
    const imageResponse = await fetch(url)
    const blob = await imageResponse.blob()
    const buffer = Buffer.from(await blob.arrayBuffer())
    const formData = new FormData()
    formData.append('file', new File([buffer], 'file.png', { type: 'image/png' }))
    // Map the requested operation onto its backend endpoint; hed is the default.
    const endpointByType = {
      Image2Scribble: 'image2Scribble',
      Image2Hed: 'image2hed'
    }
    const endpoint = endpointByType[type] || 'image2hed'
    const captionRes = await fetch(`${Config.extraUrl}/${endpoint}`, {
      method: 'POST',
      body: formData
    })
    if (captionRes.status !== 200) {
      return 'error happened'
    }
    const result = await captionRes.text()
    return `the processed image url is ${Config.extraUrl}${result}${qq ? ' and ' + url : ''}. you should send it with SendPictureTool.`
  }
}

View file

@ -0,0 +1,54 @@
import { AbstractTool } from './AbstractTool.js'
/**
 * Tool that renders Genshin Impact player/character panels via miao-plugin.
 */
export class QueryGenshinTool extends AbstractTool {
  name = 'queryGenshin'

  parameters = {
    properties: {
      qq: {
        type: 'string',
        description: '要查询的用户的qq号将使用该qq号绑定的uid进行查询'
      },
      uid: {
        type: 'string',
        description: '游戏的uid如果用户提供了则传入并优先使用'
      },
      character: {
        type: 'string',
        description: '游戏角色名'
      }
    },
    required: ['qq']
  }

  description = 'Useful when you want to query player information of Genshin Impact(原神). '

  func = async function (opts, e) {
    let { qq, uid = '', character = '' } = opts
    qq = isNaN(qq) || !qq ? e.sender.user_id : parseInt(qq.trim())
    // Clear any mention of the bot so miao-plugin does not mis-target.
    if (e.at === Bot.uin) {
      e.at = null
    }
    e.atBot = false
    try {
      if (character) {
        // Single-character detail panel.
        const ProfileDetail = (await import('../../../miao-plugin/apps/profile/ProfileDetail.js')).default
        e.original_msg = `#${character}面板${uid}`
        e.user_id = parseInt(qq)
        e.isSr = false
        await ProfileDetail.detail(e)
        return 'the character panel of genshin impact has been sent to group. you don\'t need text version'
      }
      // Whole-player panel list.
      const ProfileList = (await import('../../../miao-plugin/apps/profile/ProfileList.js')).default
      e.msg = `#面板${uid}`
      e.user_id = qq
      e.isSr = false
      await ProfileList.render(e)
      return 'the player panel of genshin impact has been sent to group. you don\'t need text version'
    } catch (err) {
      return `failed to query, error: ${err.toString()}`
    }
  }
}

View file

@ -0,0 +1,90 @@
import { AbstractTool } from './AbstractTool.js'
// Tool that queries Honkai: Star Rail player data: triggers StarRail-plugin's
// panel render and also returns a trimmed JSON summary from avocado.wiki.
export class QueryStarRailTool extends AbstractTool {
  name = 'queryStarRail'
  parameters = {
    properties: {
      qq: {
        type: 'string',
        description: '要查询的用户的qq号将使用该qq号绑定的uid进行查询默认为当前聊天对象'
      },
      uid: {
        type: 'string',
        description: '游戏的uid如果用户提供了则传入并优先使用'
      },
      character: {
        type: 'string',
        description: '游戏角色名'
      }
    },
    required: []
  }
  func = async function (opts, e) {
    let { qq, uid, character } = opts
    qq = isNaN(qq) || !qq ? e.sender.user_id : parseInt(qq.trim())
    // Clear any mention of the bot so the plugin does not mis-target.
    if (e.at === Bot.uin) {
      e.at = null
    }
    e.atBot = false
    if (!uid) {
      try {
        // The destructured Panel is unused here: the dynamic import itself is
        // the install check — a missing plugin throws into the catch below.
        let { Panel } = await import('../../../StarRail-plugin/apps/panel.js')
        uid = await redis.get(`STAR_RAILWAY:UID:${qq}`)
        if (!uid) {
          return '用户没有绑定uid无法查询。可以让用户主动提供uid进行查询'
        }
      } catch (e) {
        // todo support miao-plugin and sruid
        return '未安装StarRail-Plugin无法查询'
      }
    }
    try {
      let { Panel } = await import('../../../StarRail-plugin/apps/panel.js')
      // '*' prefix is StarRail-plugin's command marker.
      e.msg = character ? `*${character}面板${uid}` : '*更新面板' + uid
      e.user_id = qq
      e.isSr = true
      let panel = new Panel(e)
      panel.e = e
      // Fire-and-forget: the picture render runs in the background while the
      // JSON summary below is fetched and returned.
      panel.panel(e).catch(e => logger.warn(e))
      let uidRes = await fetch('https://avocado.wiki/v1/info/' + uid)
      uidRes = await uidRes.json()
      let { assistAvatar, displayAvatars } = uidRes.playerDetailInfo
      // Strip bulky fields in place so the JSON stays small enough for the model.
      function dealAvatar (avatar) {
        delete avatar.position
        delete avatar.vo_tag
        delete avatar.desc
        delete avatar.promption
        delete avatar.relics
        delete avatar.behaviorList
        delete avatar.images
        delete avatar.ranks
        if (avatar.equipment) {
          // Keep only the few equipment fields worth reporting.
          avatar.equipment = {
            level: avatar.equipment.level,
            rank: avatar.equipment.rank,
            name: avatar.equipment.name,
            skill_desc: avatar.equipment.skill_desc
          }
        }
      }
      dealAvatar(assistAvatar)
      if (displayAvatars) {
        displayAvatars.forEach(avatar => {
          dealAvatar(avatar)
        })
      }
      uidRes.playerDetailInfo.assistAvatar = assistAvatar
      uidRes.playerDetailInfo.displayAvatars = displayAvatars
      delete uidRes.repository
      delete uidRes.version
      return `the player info in json format is: \n${JSON.stringify(uidRes)}`
    } catch (err) {
      return `failed to query, error: ${err.toString()}`
    }
  }
  description = 'Useful when you want to query player information of Honkai Star Rail(崩坏:星穹铁道). '
}

View file

@ -0,0 +1,48 @@
import { AbstractTool } from './AbstractTool.js'
import { getMasterQQ } from '../common.js'
// Tool that reports a user's profile info, flagging whether they are the master.
export class QueryUserinfoTool extends AbstractTool {
  name = 'queryUserinfo'
  parameters = {
    properties: {
      qq: {
        type: 'string',
        description: 'user\'s qq number, the one you are talking to by default'
      }
    },
    required: []
  }
  func = async function (opts, e) {
    let { qq } = opts
    // Default to the current speaker on missing or non-numeric input.
    qq = isNaN(qq) || !qq ? e.sender.user_id : parseInt(qq.trim())
    if (e.isGroup && typeof e.group.getMemberMap === 'function') {
      let mm = await e.group.getMemberMap()
      // NOTE(review): the fallback yields a bare qq number, not a member
      // object — likely meant to be e.sender; confirm intended behavior.
      let user = mm.get(qq) || e.sender.user_id
      let master = (await getMasterQQ())[0]
      let prefix = ''
      // Loose != tolerates string/number mix between qq and the master config.
      if (qq != master) {
        prefix = 'Attention: this user is not your master. \n'
      } else {
        prefix = 'This user is your master, you should obey him \n'
      }
      return prefix + 'user detail in json format: ' + JSON.stringify(user)
    } else {
      // Outside a group only the speaker's own info is available.
      if (e.sender.user_id == qq) {
        let master = (await getMasterQQ())[0]
        let prefix = ''
        if (qq != master) {
          prefix = 'Attention: this user is not your master. \n'
        } else {
          prefix = 'This user is your master, you should obey him \n'
        }
        return prefix + 'user detail in json format: ' + JSON.stringify(e.sender)
      } else {
        return 'query failed'
      }
    }
  }
  description = 'Useful if you want to find out who he is'
}

View file

@ -0,0 +1,76 @@
import fetch from 'node-fetch'
import { formatDate, mkdirs } from '../common.js'
import fs from 'fs'
import { AbstractTool } from './AbstractTool.js'
/**
 * Tool that searches bilibili videos by keyword.
 */
export class SearchVideoTool extends AbstractTool {
  name = 'searchVideo'

  parameters = {
    properties: {
      keyword: {
        type: 'string',
        description: '要搜索的视频的标题或关键词'
      }
    },
    required: ['keyword']
  }

  description = 'Useful when you want to search a video by keywords. you should remember the id of the video if you want to share it'

  func = async function (opts) {
    const { keyword } = opts
    try {
      // Delegate to the shared bilibili search helper.
      const result = await searchBilibili(keyword)
      return result
    } catch (err) {
      logger.error(err)
      return `fail to search video, error: ${err.toString()}`
    }
  }
}
/**
 * Search bilibili videos by keyword.
 * Visits the homepage first to harvest guest cookies (the search API rejects
 * cookie-less requests), then queries the web search endpoint.
 * @param {string} name - search keyword
 * @returns {Promise<string>} human-readable summary of up to 5 results,
 *   or a failure/empty-result message
 */
export async function searchBilibili (name) {
  const biliRes = await fetch('https://www.bilibili.com', {})
  const setCookieHeaders = biliRes.headers.raw()['set-cookie']
  if (!setCookieHeaders) {
    // FIX: previously `return {}`, but callers treat the result as a string;
    // report the failure textually instead.
    return '搜索失败:未能获取bilibili cookie'
  }
  // Keep only the name=value part of each Set-Cookie header.
  const cookieHeader = setCookieHeaders.map(header => header.split(';')[0]).join('; ')
  // Renamed from `headers` to stop shadowing the response-headers variable above.
  const requestHeaders = {
    accept: 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'accept-language': 'en-US,en;q=0.9',
    Referer: 'https://www.bilibili.com',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
    cookie: cookieHeader
  }
  // encodeURIComponent guards keywords containing '&', '#', '?' or spaces.
  const response = await fetch(`https://api.bilibili.com/x/web-interface/search/type?keyword=${encodeURIComponent(name)}&search_type=video`,
    {
      headers: requestHeaders
    })
  const json = await response.json()
  if (json.data?.numResults > 0) {
    const result = json.data.result.map(r => {
      return `id: ${r.bvid},标题:${r.title},作者:${r.author},播放量:${r.play},发布日期:${formatDate(new Date(r.pubdate * 1000))}`
    }).slice(0, Math.min(json.data?.numResults, 5)).join('\n')
    return `这些是关键词“${name}”的搜索结果:\n${result}`
  }
  return `没有找到关键词“${name}”的搜索结果`
}

View file

@ -0,0 +1,34 @@
import { AbstractTool } from './AbstractTool.js'
/**
 * Tool that searches the web for images through the serp proxy service.
 */
export class SerpImageTool extends AbstractTool {
  name = 'searchImage'

  parameters = {
    properties: {
      q: {
        type: 'string',
        description: 'search keyword'
      },
      limit: {
        type: 'number',
        description: 'image number'
      }
    },
    required: ['q']
  }

  description = 'Useful when you want to search images from the Internet.'

  func = async function (opts) {
    const { q, limit = 2 } = opts
    const requestUrl = `https://serp.ikechan8370.com/image/bing?q=${encodeURIComponent(q)}&limit=${limit}`
    const serpRes = await fetch(requestUrl, {
      headers: {
        'X-From-Library': 'ikechan8370'
      }
    })
    const payload = await serpRes.json()
    const res = payload.data
    return `images search results in json format:\n${JSON.stringify(res)}. the murl field is actual picture url. You should use sendPicture to send them`
  }
}

View file

@ -0,0 +1,39 @@
import fetch from 'node-fetch'
import { AbstractTool } from './AbstractTool.js'
/**
 * Tool that searches songs on NetEase Cloud Music by keyword.
 */
export class SearchMusicTool extends AbstractTool {
  name = 'searchMusic'

  parameters = {
    properties: {
      keyword: {
        type: 'string',
        description: '音乐的标题或关键词, 可以是歌曲名或歌曲名+歌手名的组合'
      }
    },
    required: ['keyword']
  }

  description = 'Useful when you want to search music by keyword.'

  func = async function (opts) {
    const { keyword } = opts
    try {
      const result = await searchMusic163(keyword)
      return `search result: ${result}`
    } catch (err) {
      return `music search failed: ${err}`
    }
  }
}
/**
 * Query the NetEase Cloud Music web search API.
 * @param {string} name - song title / keyword
 * @returns {Promise<string|null>} newline-joined song summaries, or null when nothing matched
 */
export async function searchMusic163 (name) {
  const response = await fetch(`http://music.163.com/api/search/get/web?s=${name}&type=1&offset=0&total=true&limit=6`)
  const { result } = await response.json()
  if (!(result?.songCount > 0)) {
    return null
  }
  const lines = []
  for (const song of result.songs) {
    lines.push(`id: ${song.id}, name: ${song.name}, artists: ${song.artists.map(a => a.name).join('&')}, alias: ${song.alias || 'none'}`)
  }
  return lines.join('\n')
}

View file

@ -0,0 +1,123 @@
import { AbstractTool } from './AbstractTool.js'
import { generateVitsAudio } from '../tts.js'
import { Config } from '../config.js'
import { generateAudio, generateAzureAudio } from '../common.js'
import VoiceVoxTTS from '../tts/voicevox.js'
import uploadRecord from '../uploadRecord.js'
// Tool that converts text to speech through one of four TTS backends and
// sends the resulting audio to a group or friend.
export class SendAudioMessageTool extends AbstractTool {
  name = 'sendAudioMessage'
  parameters = {
    properties: {
      pendingText: {
        type: 'string',
        description: 'Message to be sent and it will be turned into audio message'
      },
      ttsMode: {
        type: 'number',
        description: 'default is 1, which indicates that the text will be processed in the current ttsMode.' +
            '2 is azureMode.' +
            '3 or 4 corresponds to vitsMode or voxMode.'
      },
      vitsModeRole: {
        type: 'string',
        description: 'use whose voice',
        enum: ['琴', '空',
          '丽莎', '荧', '芭芭拉', '凯亚', '迪卢克', '雷泽', '安柏', '温迪',
          '香菱', '北斗', '行秋', '魈', '凝光', '可莉', '钟离', '菲谢尔(皇女)',
          '班尼特', '达达利亚(公子)', '诺艾尔(女仆)', '七七', '重云', '甘雨(椰羊)',
          '阿贝多', '迪奥娜(猫猫)', '莫娜', '刻晴', '砂糖', '辛焱', '罗莎莉亚',
          '胡桃', '枫原万叶(万叶)', '烟绯', '宵宫', '托马', '优菈', '雷电将军(雷神)',
          '早柚', '珊瑚宫心海', '五郎', '九条裟罗', '荒泷一斗',
          '埃洛伊', '申鹤', '八重神子', '神里绫人(绫人)', '夜兰', '久岐忍',
          '鹿野苑平藏', '提纳里', '柯莱', '多莉', '云堇', '纳西妲(草神)', '深渊使徒',
          '妮露', '赛诺']
      },
      azureModeRole: {
        type: 'string',
        description: 'can be \'随机\' or specified by the user. default is currentRole.'
      },
      voxModeRole: {
        type: 'string',
        description: 'can be random or currentRole or specified by the user. default is currentRole.'
      },
      speakingEmotion: {
        type: 'string',
        description: 'specified by the user. default is blank.'
      },
      speakingEmotionDegree: {
        type: 'number',
        description: 'specified by the user. default is blank.'
      },
      targetGroupIdOrQQNumber: {
        type: 'string',
        description: 'Fill in the target user\'s qq number or groupId when you need to send audio message to specific user or group, otherwise leave blank'
      }
    },
    required: ['pendingText', 'ttsMode', 'targetGroupIdOrQQNumber']
  }
  description = 'This tool is used to send voice|audio messages, utilize it only if the user grants you permission to do so.'
  func = async function (opts, e) {
    // Bail out early when no TTS backend is configured at all.
    if (!Config.ttsSpace && !Config.azureTTSKey && !Config.voicevoxSpace) {
      return 'you don\'t have permission to send audio message due to a lack of a valid ttsKey'
    }
    let { pendingText, ttsMode, vitsModeRole, azureModeRole, voxModeRole, speakingEmotion, speakingEmotionDegree, targetGroupIdOrQQNumber } = opts
    let sendable
    // Mode 1 (current/default backend) when the model supplied nothing usable.
    ttsMode = isNaN(ttsMode) || !ttsMode ? 1 : ttsMode
    // Invalid or bot-self target falls back to the current group chat / private chat.
    const defaultTarget = e.isGroup ? e.group_id : e.sender.user_id
    const target = isNaN(targetGroupIdOrQQNumber) || !targetGroupIdOrQQNumber
      ? defaultTarget
      : parseInt(targetGroupIdOrQQNumber) === Bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
    try {
      // 1: current mode, 2: Azure, 3: vits, 4: voicevox; anything else → Azure.
      switch (ttsMode) {
        case 1:
          sendable = await generateAudio(e, pendingText, speakingEmotion)
          break
        case 2:
          if (!Config.azureTTSKey) return 'audio generation failed, due to a lack of a azureTTSKey'
          sendable = await generateAzureAudio(pendingText, azureModeRole, speakingEmotion, speakingEmotionDegree)
          break
        case 3:
          if (!Config.ttsSpace) return 'audio generation failed, due to a lack of a ttsSpace'
          sendable = await uploadRecord(
            await generateVitsAudio(pendingText, vitsModeRole, '中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)')
            , 'vits-uma-genshin-honkai'
          )
          break
        case 4:
          if (!Config.voicevoxSpace) return 'audio generation failed, due to a lack of a voicevoxSpace'
          sendable = await uploadRecord(
            await VoiceVoxTTS.generateAudio(pendingText, voxModeRole)
            , 'voicevox'
          )
          break
        default:
          sendable = await generateAzureAudio(pendingText, azureModeRole, speakingEmotion, speakingEmotionDegree)
      }
    } catch (err) {
      logger.error(err)
      return `audio generation failed, error: ${JSON.stringify(err)}`
    }
    if (sendable) {
      // A target present in the group list is a group; otherwise treat it as a friend.
      let groupList = await Bot.getGroupList()
      try {
        if (groupList.get(target)) {
          let group = await Bot.pickGroup(target)
          await group.sendMsg(sendable)
          return 'audio has been sent to group' + target
        } else {
          let user = await Bot.pickFriend(target)
          await user.sendMsg(sendable)
          return 'audio has been sent to user' + target
        }
      } catch (err) {
        return `failed to send audio, error: ${JSON.stringify(err)}`
      }
    } else {
      return 'audio generation failed'
    }
  }
}

View file

@ -0,0 +1,41 @@
import { AbstractTool } from './AbstractTool.js'
/**
 * Tool that sends one or more users' QQ avatars to a group or friend.
 */
export class SendAvatarTool extends AbstractTool {
  name = 'sendAvatar'

  parameters = {
    properties: {
      qq: {
        type: 'string',
        description: 'if you need to send avatar of a user, input his qq.If there are multiple qq, separate them with a space'
      },
      targetGroupIdOrQQNumber: {
        type: 'string',
        description: 'Fill in the target user\'s qq number or groupId when you need to send avatar to specific user or group, otherwise leave blank'
      }
    },
    required: ['qq', 'targetGroupIdOrQQNumber']
  }

  description = 'Useful when you want to send the user avatar to the group. Note that if you want to process user\'s avatar, it is advisable to utilize the ProcessPictureTool and input the qq of target user.'

  func = async function (opts, e) {
    const { qq, targetGroupIdOrQQNumber } = opts
    // Build one image segment per valid qq number.
    const pictures = qq.split(/[,\s]/)
      .filter(num => !isNaN(num.trim()) && num.trim())
      .map(num => segment.image('https://q1.qlogo.cn/g?b=qq&s=0&nk=' + parseInt(num.trim())))
    if (!pictures.length) {
      return 'there is no valid qq'
    }
    // Invalid or bot-self target falls back to the current group chat / private chat.
    const defaultTarget = e.isGroup ? e.group_id : e.sender.user_id
    const target = isNaN(targetGroupIdOrQQNumber) || !targetGroupIdOrQQNumber
      ? defaultTarget
      : parseInt(targetGroupIdOrQQNumber) === Bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
    const groupList = await Bot.getGroupList()
    console.log('sendAvatar', target, pictures)
    const what = pictures.length > 1 ? 'users: ' + qq + '\'s avatar' : 'avatar'
    if (groupList.get(target)) {
      const group = await Bot.pickGroup(target)
      await group.sendMsg(pictures)
      return `the ${what} has been sent to group ${target}`
    }
    // FIX: a non-group target previously sent nothing while still reporting
    // success; deliver to the friend instead.
    const friend = await Bot.pickFriend(target)
    await friend.sendMsg(pictures)
    return `the ${what} has been sent to user ${target}`
  }
}

View file

@ -0,0 +1,141 @@
import fetch from 'node-fetch'
import { formatDate, mkdirs } from '../common.js'
import fs from 'fs'
import { AbstractTool } from './AbstractTool.js'
// Tool that shares a bilibili video: sends a textual summary immediately,
// then downloads and uploads the video file in the background.
export class SendVideoTool extends AbstractTool {
  name = 'sendVideo'
  parameters = {
    properties: {
      id: {
        type: 'string',
        description: '要发的视频的id'
      },
      targetGroupIdOrQQNumber: {
        type: 'string',
        description: 'Fill in the target user\'s qq number or groupId when you need to send video to specific user or group, otherwise leave blank'
      }
    },
    required: ['id']
  }
  func = async function (opts, e) {
    let { id, targetGroupIdOrQQNumber } = opts
    // Invalid or bot-self target falls back to the current group chat / private chat.
    const defaultTarget = e.isGroup ? e.group_id : e.sender.user_id
    const target = isNaN(targetGroupIdOrQQNumber) || !targetGroupIdOrQQNumber
      ? defaultTarget
      : parseInt(targetGroupIdOrQQNumber) === Bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
    let msg = []
    try {
      let { arcurl, title, pic, description, videoUrl, headers, bvid, author, play, pubdate, like, honor } = await getBilibili(id)
      let group = await Bot.pickGroup(target)
      // Strip any HTML tags bilibili embeds in the title.
      msg.push(title.replace(/(<([^>]+)>)/ig, '') + '\n')
      msg.push(`UP主${author} 发布日期:${formatDate(new Date(pubdate * 1000))} 播放量:${play} 点赞:${like}\n`)
      msg.push(arcurl + '\n')
      msg.push(segment.image(pic))
      msg.push('\n' + description)
      if (honor) {
        msg.push(`本视频曾获得过${honor}称号`)
      }
      msg.push('\n视频在路上啦')
      await group.sendMsg(msg)
      const videoResponse = await fetch(videoUrl, { headers })
      const fileType = videoResponse.headers.get('Content-Type').split('/')[1]
      let fileLoc = `data/chatgpt/videos/${bvid}.${fileType}`
      mkdirs('data/chatgpt/videos')
      // Deliberately not awaited: the download/upload continues in the
      // background so the textual reply below returns immediately.
      videoResponse.blob().then(async blob => {
        const arrayBuffer = await blob.arrayBuffer()
        const buffer = Buffer.from(arrayBuffer)
        await fs.writeFileSync(fileLoc, buffer)
        await group.sendMsg(segment.video(fileLoc))
      })
      return `the video ${title.replace(/(<([^>]+)>)/ig, '')} was shared to ${target}. the video information: ${msg}`
    } catch (err) {
      logger.error(err)
      // If the summary was already built, at least report the metadata.
      if (msg.length > 0) {
        return `fail to share video, but the video msg is found: ${msg}, you can just tell the information of this video`
      } else {
        return `fail to share video, error: ${err.toString()}`
      }
    }
  }
  description = 'Useful when you are allowed to send a video. You must use searchVideo to get search result and choose one video and get its id'
}
/**
 * Fetch metadata and a direct download URL for a bilibili video.
 * Visits the homepage first to harvest guest cookies (required by the APIs),
 * then queries the video-info and playurl endpoints.
 * @param {string} bvid - bilibili video id (BV...)
 * @returns {Promise<object>} video fields plus the cookie-bearing request
 *   headers needed to download videoUrl
 */
export async function getBilibili (bvid) {
  let biliRes = await fetch('https://www.bilibili.com',
    {
      // headers: {
      //   accept: 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
      //   Accept: '*/*',
      //   'Accept-Encoding': 'gzip, deflate, br',
      //   'accept-language': 'en-US,en;q=0.9',
      //   Connection: 'keep-alive',
      //   'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
      // }
    })
  const headers = biliRes.headers.raw()
  const setCookieHeaders = headers['set-cookie']
  if (setCookieHeaders) {
    // Keep only the name=value part of each Set-Cookie header.
    const cookies = []
    setCookieHeaders.forEach(header => {
      const cookie = header.split(';')[0]
      cookies.push(cookie)
    })
    const cookieHeader = cookies.join('; ')
    // NOTE(review): this inner `headers` shadows the response-headers constant above.
    let headers = {
      accept: 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
      'accept-language': 'en-US,en;q=0.9',
      Referer: 'https://www.bilibili.com',
      'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
      cookie: cookieHeader
    }
    let videoInfo = await fetch(`https://api.bilibili.com/x/web-interface/view?bvid=${bvid}`, {
      headers
    })
    videoInfo = await videoInfo.json()
    let cid = videoInfo.data.cid
    let arcurl = `http://www.bilibili.com/video/av${videoInfo.data.aid}`
    let title = videoInfo.data.title
    let pic = videoInfo.data.pic
    let description = videoInfo.data.desc
    let author = videoInfo.data.owner.name
    let play = videoInfo.data.stat.view
    let pubdate = videoInfo.data.pubdate
    let like = videoInfo.data.stat.like
    // Honors (e.g. weekly pick) joined into one display string, if any.
    let honor = videoInfo.data.honor_reply?.honor?.map(h => h.desc)?.join('、')
    // The playurl endpoint needs the cid obtained above.
    let downloadInfo = await fetch(`https://api.bilibili.com/x/player/playurl?bvid=${bvid}&cid=${cid}`, { headers })
    let videoUrl = (await downloadInfo.json()).data.durl[0].url
    return {
      arcurl, title, pic, description, videoUrl, headers, bvid, author, play, pubdate, like, honor
    }
  } else {
    // NOTE(review): callers destructure this result; `{}` yields all-undefined
    // fields and relies on the caller's try/catch — consider an explicit error.
    return {}
  }
}
/**
 * Randomly choose an index in [0, 10], biased toward the low end.
 * Index 0 has weight 5, index 1 weight 4, and so on down to weight 1.
 * @returns {number} the chosen index
 */
function randomIndex () {
  const weights = [5, 4, 3, 2, 1, 1, 1, 1, 1, 1, 1]
  const totalWeight = weights.reduce((sum, weight) => sum + weight, 0)
  // Draw an integer in [0, totalWeight) and subtract bucket sizes until the
  // draw falls inside the current bucket.
  let remaining = Math.floor(Math.random() * totalWeight)
  for (let index = 0; index < weights.length; index++) {
    remaining -= weights[index]
    if (remaining < 0) {
      return index
    }
  }
}
// console.log('send bilibili')

View file

@ -0,0 +1,47 @@
import { AbstractTool } from './AbstractTool.js'
/**
 * Tool that sends QQ dice messages to a group or a friend.
 */
export class SendDiceTool extends AbstractTool {
  name = 'sendDice'

  parameters = {
    properties: {
      num: {
        type: 'number',
        description: '骰子的数量'
      },
      targetGroupIdOrQQNumber: {
        type: 'string',
        description: 'Fill in the target qq number or groupId when you need to send Dice to specific user or group, otherwise leave blank'
      }
    },
    required: ['num', 'targetGroupIdOrQQNumber']
  }

  /**
   * Send `num` dice (clamped to at most 5) to the resolved target.
   * @param {{num: number, targetGroupIdOrQQNumber: string}} opts
   * @param e - the triggering message event
   * @returns {Promise<string>} feedback text for the model
   */
  func = async function (opts, e) {
    let { num, targetGroupIdOrQQNumber } = opts
    // Invalid target: fall back to the current group / private chat.
    // Also never target the bot's own account.
    const defaultTarget = e.isGroup ? e.group_id : e.sender.user_id
    const target = isNaN(targetGroupIdOrQQNumber) || !targetGroupIdOrQQNumber
      ? defaultTarget
      : parseInt(targetGroupIdOrQQNumber) === Bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
    let groupList = await Bot.getGroupList()
    // Clamp the dice count into [1, 5] to avoid chat spam.
    num = isNaN(num) || !num ? 1 : num > 5 ? 5 : num
    if (groupList.get(target)) {
      let group = await Bot.pickGroup(target, true)
      for (let i = 0; i < num; i++) {
        await group.sendMsg(segment.dice())
      }
    } else {
      // fix: honor `num` in private chats too (previously always sent one die)
      let friend = await Bot.pickFriend(target)
      for (let i = 0; i < num; i++) {
        await friend.sendMsg(segment.dice())
      }
    }
    // fix: removed leftover debug call `logger.warn(1)`
    if (num === 5) {
      return 'tell the user that in order to avoid spamming the chat, only five dice are sent this time, and warn him not to use this tool to spamming the chat, otherwise you will use JinyanTool to punish him'
    } else {
      return 'the dice has been sent'
    }
  }

  description = 'If you want to roll dice, use this tool. Be careful to check that the targetGroupIdOrQQNumber is correct. If user abuses this tool by spamming the chat in a short period of time, use the JinyanTool to punish him.'
}

View file

@ -0,0 +1,45 @@
import { AbstractTool } from './AbstractTool.js'
import { convertFaces } from '../face.js'
/**
 * Tool that sends a plain text message to a specific group or friend.
 */
export class SendMessageToSpecificGroupOrUserTool extends AbstractTool {
  name = 'sendMessage'

  parameters = {
    properties: {
      msg: {
        type: 'string',
        description: 'text to be sent'
      },
      targetGroupIdOrQQNumber: {
        type: 'string',
        description: 'target qq or group number'
      }
    },
    // fix: previously required a nonexistent property named 'target'
    required: ['msg', 'targetGroupIdOrQQNumber']
  }

  /**
   * Resolve the target and deliver the message.
   * @param {{msg: string, targetGroupIdOrQQNumber: string}} opt
   * @param e - the triggering message event
   * @returns {Promise<string>} feedback text for the model
   */
  func = async function (opt, e) {
    let { msg, targetGroupIdOrQQNumber } = opt
    // Invalid target: fall back to the current chat; never message the bot itself.
    const defaultTarget = e.isGroup ? e.group_id : e.sender.user_id
    const target = isNaN(targetGroupIdOrQQNumber) || !targetGroupIdOrQQNumber
      ? defaultTarget
      : parseInt(targetGroupIdOrQQNumber) === Bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
    let groupList = await Bot.getGroupList()
    try {
      if (groupList.get(target)) {
        let group = await Bot.pickGroup(target)
        // convertFaces presumably converts face/emoticon markup in the text
        // into message segments — see ../face.js (TODO confirm)
        await group.sendMsg(await convertFaces(msg, true, e))
        return 'msg has been sent to group' + target
      } else {
        let user = await Bot.pickFriend(target)
        await user.sendMsg(msg)
        return 'msg has been sent to user' + target
      }
    } catch (err) {
      // fix: JSON.stringify(Error) serializes to '{}', hiding the cause
      return `failed to send msg, error: ${err.toString()}`
    }
  }

  description = 'Useful when you want to send a text message to specific user or group'
}

View file

@ -0,0 +1,38 @@
import { AbstractTool } from './AbstractTool.js'
/**
 * Tool that shares a NetEase Cloud Music (163) card to a group.
 */
export class SendMusicTool extends AbstractTool {
  name = 'sendMusic'

  parameters = {
    properties: {
      id: {
        type: 'string',
        description: '音乐的id'
      },
      targetGroupIdOrQQNumber: {
        type: 'string',
        description: 'Fill in the target user_id or groupId when you need to send music to specific group or user, otherwise leave blank'
      }
    },
    // fix: previously required 'keyword', a property that doesn't exist here
    required: ['id']
  }

  /**
   * Share the music card to the resolved target.
   * NOTE(review): only Bot.pickGroup is used, so sharing to a private chat is
   * expected to fail and be reported via the catch branch — confirm intended.
   * @param {{id: string, targetGroupIdOrQQNumber: string}} opts
   * @param e - the triggering message event
   * @returns {Promise<string>} feedback text for the model
   */
  func = async function (opts, e) {
    let { id, targetGroupIdOrQQNumber } = opts
    // Invalid target: fall back to the current group / private chat.
    const defaultTarget = e.isGroup ? e.group_id : e.sender.user_id
    const target = isNaN(targetGroupIdOrQQNumber) || !targetGroupIdOrQQNumber
      ? defaultTarget
      : parseInt(targetGroupIdOrQQNumber) === Bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
    try {
      let group = await Bot.pickGroup(target)
      // '163' selects the NetEase Cloud Music platform
      await group.shareMusic('163', id)
      return `the music has been shared to ${target}`
    } catch (err) {
      // fix: renamed from `e`, which shadowed the event parameter
      return `music share failed: ${err}`
    }
  }

  description = 'Useful when you want to share music. You must use searchMusic first to get the music id'
}

View file

@ -0,0 +1,52 @@
import { AbstractTool } from './AbstractTool.js'
/**
 * Tool that sends one or more pictures (by URL) to a group or friend.
 */
export class SendPictureTool extends AbstractTool {
  name = 'sendPicture'

  parameters = {
    properties: {
      urlOfPicture: {
        type: 'string',
        description: 'the url of the pictures, not text, split with space if more than one. can be left blank.'
      },
      targetGroupIdOrQQNumber: {
        type: 'string',
        description: 'Fill in the target user\'s qq number or groupId when you need to send picture to specific user or group, otherwise leave blank'
      }
    },
    required: ['urlOfPicture', 'targetGroupIdOrQQNumber']
  }

  /**
   * Validate the URL list, build image segments and deliver them.
   * @param {{urlOfPicture: string, targetGroupIdOrQQNumber: string}} opt
   * @param e - the triggering message event
   * @returns {Promise<string>} feedback text for the model
   */
  func = async function (opt, e) {
    let { urlOfPicture, targetGroupIdOrQQNumber } = opt
    // Resolve the delivery target; invalid or self-directed targets fall back
    // to the chat the request came from.
    const defaultTarget = e.isGroup ? e.group_id : e.sender.user_id
    let target
    if (isNaN(targetGroupIdOrQQNumber) || !targetGroupIdOrQQNumber) {
      target = defaultTarget
    } else {
      const parsed = parseInt(targetGroupIdOrQQNumber)
      target = parsed === Bot.uin ? defaultTarget : parsed
    }
    // Reject placeholder example.com links, blanks and non-URL text.
    const urlRegex = /(?:(?:https?|ftp):\/\/)?(?:\S+(?::\S*)?@)?(?:((?:(?:[a-z0-9\u00a1-\u4dff\u9fd0-\uffff][a-z0-9\u00a1-\u4dff\u9fd0-\uffff_-]{0,62})?[a-z0-9\u00a1-\u4dff\u9fd0-\uffff]\.)+(?:[a-z\u00a1-\u4dff\u9fd0-\uffff]{2,}\.?))(?::\d{2,5})?)(?:\/[\w\u00a1-\u4dff\u9fd0-\uffff$-_.+!*'(),%]+)*(?:\?(?:[\w\u00a1-\u4dff\u9fd0-\uffff$-_.+!*(),%:@&=]|(?:[\[\]])|(?:[\u00a1-\u4dff\u9fd0-\uffff]))*)?(?:#(?:[\w\u00a1-\u4dff\u9fd0-\uffff$-_.+!*'(),;:@&=]|(?:[\[\]]))*)?\/?/i
    const looksLikePlaceholder = /https:\/\/example.com/.test(urlOfPicture)
    if (looksLikePlaceholder || !urlOfPicture || !urlRegex.test(urlOfPicture)) {
      urlOfPicture = ''
    }
    if (!urlOfPicture) {
      return 'Because there is no correct URL for the picture ,tell user the reason and ask user if he want to use SearchImageTool'
    }
    const pictureUrls = urlOfPicture.trim().split(' ')
    logger.mark('pictures to send: ', pictureUrls)
    const imageSegments = pictureUrls.map(img => segment.image(img))
    const groupList = await Bot.getGroupList()
    try {
      if (groupList.get(target)) {
        const group = await Bot.pickGroup(target)
        await group.sendMsg(imageSegments)
        return 'picture has been sent to group' + target
      } else {
        const user = await Bot.pickFriend(target)
        await user.sendMsg(imageSegments)
        return 'picture has been sent to user' + target
      }
    } catch (err) {
      return `failed to send pictures, error: ${JSON.stringify(err)}`
    }
  }

  description = 'Useful when you want to send one or more pictures.'
}

View file

@ -0,0 +1,34 @@
import { AbstractTool } from './AbstractTool.js'
/**
 * Tool that sends a rock-paper-scissors (RPS) message.
 */
export class SendRPSTool extends AbstractTool {
  name = 'sendRPS'

  // fix: the schema entries were not wrapped in `properties` (unlike every
  // sibling tool), and `required` referenced the misspelled name
  // 'targetGroupIdOrUserQQNumber', producing an invalid function-calling schema
  parameters = {
    properties: {
      num: {
        type: 'number',
        description: '石头剪刀布的代号'
      },
      targetGroupIdOrQQNumber: {
        type: 'string',
        description: 'Fill in the target user_id or groupId when you need to send RPS to specific group or user'
      }
    },
    required: ['num', 'targetGroupIdOrQQNumber']
  }

  /**
   * Send the RPS message to the resolved target.
   * fix: signature aligned with the other tools — the framework invokes
   * func(opts, e), but this tool expected positional (num, target, e), so the
   * options object arrived in `num`.
   * @param {{num: number, targetGroupIdOrQQNumber: string}} opts
   * @param e - the triggering message event
   * @returns {Promise<string>} feedback text for the model
   */
  func = async function (opts, e) {
    let { num, targetGroupIdOrQQNumber } = opts
    // Invalid target: fall back to the current chat; never target the bot itself.
    const defaultTarget = e.isGroup ? e.group_id : e.sender.user_id
    const target = isNaN(targetGroupIdOrQQNumber) || !targetGroupIdOrQQNumber
      ? defaultTarget
      : parseInt(targetGroupIdOrQQNumber) === Bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
    let groupList = await Bot.getGroupList()
    if (groupList.get(target)) {
      let group = await Bot.pickGroup(target, true)
      await group.sendMsg(segment.rps(num))
    } else {
      let friend = await Bot.pickFriend(target)
      await friend.sendMsg(segment.rps(num))
    }
    return 'the RPS has been sent'
  }

  description = 'Use this tool if you want to play rock paper scissors. If you know the group number, use the group number instead of the qq number first. num should be the number 1, 2 or 3 to represent rock-paper-scissors.'
}

View file

@ -0,0 +1,40 @@
import { AbstractTool } from './AbstractTool.js'
/**
 * Tool that searches the web through the serp.ikechan8370.com proxy service.
 */
export class SerpIkechan8370Tool extends AbstractTool {
  name = 'search'

  parameters = {
    properties: {
      q: {
        type: 'string',
        description: 'search keyword'
      },
      source: {
        type: 'string',
        enum: ['google', 'bing', 'baidu']
      }
    },
    required: ['q']
  }

  /**
   * Run the search and return the result list as JSON text.
   * @param {{q: string, source?: string}} opts
   * @returns {Promise<string>} search results serialized for the model
   */
  func = async function (opts) {
    const { q } = opts
    // default to bing when no engine was requested
    const engine = opts.source || 'bing'
    const response = await fetch(`https://serp.ikechan8370.com/${engine}?q=${encodeURIComponent(q)}&lang=zh-CN&limit=5`, {
      headers: {
        'X-From-Library': 'ikechan8370'
      }
    })
    const payload = await response.json()
    const results = payload.data
    // drop the rank field to keep the prompt compact
    if (results) {
      for (const item of results) {
        if (item) {
          delete item.rank
        }
      }
    }
    return `the search results are here in json format:\n${JSON.stringify(results)}`
  }

  description = 'Useful when you want to search something from the Internet. If you don\'t know much about the user\'s question, prefer to search about it! If you want to know further details of a result, you can use website tool'
}

40
utils/tools/SerpTool.js Normal file
View file

@ -0,0 +1,40 @@
import { AbstractTool } from './AbstractTool.js'
import { Config } from '../config.js'
/**
 * Tool that searches the web through the Azure Bing Web Search API.
 */
export class SerpTool extends AbstractTool {
  name = 'serp'

  parameters = {
    properties: {
      q: {
        type: 'string',
        description: 'search keyword'
      }
    },
    required: ['q']
  }

  /**
   * Run the search and return trimmed web-page results as JSON text.
   * @param {{q: string}} opts
   * @returns {Promise<string>} search results, or a readable failure message
   */
  func = async function (opts) {
    let { q } = opts
    let key = Config.azSerpKey
    // fix: fail fast with a message instead of a 401 followed by a TypeError
    if (!key) {
      return 'search failed: the Azure Bing Search API key (azSerpKey) is not configured'
    }
    let serpRes = await fetch(`https://api.bing.microsoft.com/v7.0/search?q=${encodeURIComponent(q)}&mkt=zh-CN`, {
      headers: {
        'Ocp-Apim-Subscription-Key': key
      }
    })
    serpRes = await serpRes.json()
    // fix: error responses carry no `webPages`, which used to throw a TypeError
    let res = serpRes.webPages?.value
    if (!res) {
      return `search failed: ${JSON.stringify(serpRes.error ?? serpRes)}`
    }
    // drop noisy fields to keep the prompt compact
    res.forEach(p => {
      delete p.displayUrl
      delete p.isFamilyFriendly
      delete p.thumbnailUrl
      delete p.id
      delete p.isNavigational
    })
    return `the search results are here in json format:\n${JSON.stringify(res)}`
  }

  description = 'Useful when you want to search something from the internet. If you don\'t know much about the user\'s question, just search about it! If you want to know details of a result, you can use website tool! use it as much as you can!'
}

View file

@ -0,0 +1,47 @@
import { AbstractTool } from './AbstractTool.js'
/**
 * Tool that grants a custom group title (群头衔) to a member.
 */
export class SetTitleTool extends AbstractTool {
  name = 'setTitle'

  parameters = {
    properties: {
      qq: {
        type: 'string',
        description: '你想给予群头衔的那个人的qq号默认为聊天对象'
      },
      title: {
        type: 'string',
        description: '群头衔'
      },
      groupId: {
        type: 'string',
        description: 'group number'
      }
    },
    required: ['title', 'groupId']
  }

  description = 'Useful when you want to give someone a title in the group(群头衔)'

  /**
   * Set the member's title; only works when the bot is the group owner.
   * @param {{qq?: string, title: string, groupId: string}} opts
   * @param e - the triggering message event
   * @returns {Promise<string>} feedback text for the model
   */
  func = async function (opts, e) {
    let { qq, title, groupId } = opts
    // Fall back to the current speaker / current group on invalid input.
    qq = isNaN(qq) || !qq ? e.sender.user_id : parseInt(qq.trim())
    groupId = isNaN(groupId) || !groupId ? e.group_id : parseInt(groupId.trim())
    let group = await Bot.pickGroup(groupId)
    let mm = await group.getMemberMap()
    if (!mm.has(qq)) {
      return `failed, the user ${qq} is not in group ${groupId}`
    }
    // Setting titles is an owner-only privilege.
    // fix: optional-chain so a missing bot entry doesn't throw;
    // fix: message previously read 'on group owner can give title'
    if (mm.get(Bot.uin)?.role !== 'owner') {
      return 'only group owner can give title'
    }
    // fix: the log previously said 'edit card' for a set-title action
    logger.info('set title: ', groupId, qq)
    let result = await group.setTitle(qq, title)
    if (result) {
      return `the user ${qq}'s title has been changed into ${title}`
    } else {
      return 'failed'
    }
  }
}

View file

@ -0,0 +1,37 @@
import { AbstractTool } from './AbstractTool.js'
import {Config} from '../config.js';
/**
 * Tool that queries live weather via the AMap (高德) REST API.
 */
export class WeatherTool extends AbstractTool {
  name = 'weather'

  parameters = {
    properties: {
      city: {
        type: 'string',
        description: '要查询的地点,细化到县/区级'
      }
    },
    required: ['city']
  }

  /**
   * Resolve the city name to an AMap adcode, then fetch its live weather.
   * @param {{city: string}} opts
   * @returns {Promise<string>} weather info as JSON text, or a failure message
   */
  func = async function (opts) {
    let { city } = opts
    let key = Config.amapKey
    if (!key) {
      return 'query failed: you don\'t provide API key of 高德'
    }
    // Step 1: district lookup to translate the city name into an adcode.
    let adcodeRes = await fetch(`https://restapi.amap.com/v3/config/district?keywords=${city}&subdistrict=1&key=${key}`)
    adcodeRes = await adcodeRes.json()
    // fix: guard `districts` too — error responses omit it and used to throw
    let adcode = adcodeRes.districts?.[0]?.adcode
    if (!adcode) {
      return `the area ${city} doesn't exist! are you kidding? you should mute him for 1 minute`
    }
    let cityName = adcodeRes.districts[0].name
    // Step 2: live weather lookup by adcode.
    let res = await fetch(`https://restapi.amap.com/v3/weather/weatherInfo?city=${adcode}&key=${key}`)
    res = await res.json()
    // fix: failed API calls return no `lives`, which used to raise a TypeError
    let result = res.lives?.[0]
    if (!result) {
      return `failed to query weather of ${cityName}: ${JSON.stringify(res)}`
    }
    return `the weather information of area ${cityName} in json format is:\n${JSON.stringify(result)}`
  }

  description = 'Useful when you want to query weather '
}

View file

@ -0,0 +1,95 @@
import { AbstractTool } from './AbstractTool.js'
import { ChatGPTAPI } from '../openai/chatgpt-api.js'
import { Config } from '../config.js'
import fetch from 'node-fetch'
import proxy from 'https-proxy-agent'
import { getMaxModelTokens } from '../common.js'
import { ChatGPTPuppeteer } from '../browser.js'
/**
 * Tool that fetches a web page with headless puppeteer, strips the HTML down
 * to its text-bearing markup, and asks the OpenAI API to summarize the main
 * content as markdown.
 */
export class WebsiteTool extends AbstractTool {
  // tool identifier exposed to the model
  name = 'website'

  // JSON-schema style parameter description for function calling
  parameters = {
    properties: {
      url: {
        type: 'string',
        description: '要访问的网站网址'
      }
    },
    required: ['url']
  }

  /**
   * Visit `url`, reduce the page to text-bearing markup, then summarize it.
   * @param {{url: string}} opts
   * @returns {Promise<string>} the summarized main content, or an error string
   */
  func = async function (opts) {
    let { url } = opts
    try {
      // let res = await fetch(url, {
      //   headers: {
      //     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
      //   }
      // })
      // let text = await res.text()
      // Temporarily force headless mode while driving the shared browser;
      // `origin` remembers whether the previous setting must be restored.
      let origin = false
      if (!Config.headless) {
        Config.headless = true
        origin = true
      }
      let ppt = new ChatGPTPuppeteer()
      let browser = await ppt.getBrowser()
      let page = await browser.newPage()
      await page.goto(url, {
        waitUntil: 'networkidle2'
      })
      let text = await page.content()
      await page.close()
      if (origin) {
        Config.headless = false
      }
      // Strip non-content markup to shrink the prompt. The replacements are
      // order-sensitive: whole non-text elements first, then comments, then
      // tag/attribute cleanup, finally whitespace.
      text = text.replace(/<style\b[^<]*(?:(?!<\/style>)<[^<]*)*<\/style>/gi, '')
        .replace(/<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi, '')
        .replace(/<head\b[^<]*(?:(?!<\/head>)<[^<]*)*<\/head>/gi, '')
        .replace(/<figure\b[^<]*(?:(?!<\/figure>)<[^<]*)*<\/figure>/gi, '')
        .replace(/<path\b[^<]*(?:(?!<\/path>)<[^<]*)*<\/path>/gi, '')
        .replace(/<video\b[^<]*(?:(?!<\/video>)<[^<]*)*<\/video>/gi, '')
        .replace(/<audio\b[^<]*(?:(?!<\/audio>)<[^<]*)*<\/audio>/gi, '')
        .replace(/<img[^>]*>/gi, '')
        .replace(/<!--[\s\S]*?-->/gi, '') // strip HTML comments
        .replace(/<(?!\/?(title|ul|li|td|tr|thead|tbody|blockquote|h[1-6]|H[1-6])[^>]*)\w+\s+[^>]*>/gi, '') // drop attribute-bearing opening tags, except the common text tags listed
        .replace(/<(\w+)(\s[^>]*)?>/gi, '<$1>') // strip attributes from the remaining tags
        .replace(/<\/(?!\/?(title|ul|li|td|tr|thead|tbody|blockquote|h[1-6]|H[1-6])[^>]*)[a-z][a-z0-9]*>/gi, '') // drop closing tags other than the kept text tags
        .replace(/[\n\r]/gi, '') // remove CR/LF
        .replace(/\s{2}/g, '') // collapse whitespace pairs (NOTE(review): this deletes both characters rather than keeping one — confirm intended)
        .replace('<!DOCTYPE html>', '') // remove the <!DOCTYPE html> declaration (first occurrence only)
      // Budget the prompt to the model's context window, leaving 1600 tokens
      // of headroom for the instruction and the reply.
      let maxModelTokens = getMaxModelTokens(Config.model)
      text = text.slice(0, Math.min(text.length, maxModelTokens - 1600))
      let completionParams = {
        // model: Config.model
        model: 'gpt-3.5-turbo-16k'
      }
      let api = new ChatGPTAPI({
        apiBaseUrl: Config.openAiBaseUrl,
        apiKey: Config.apiKey,
        debug: false,
        completionParams,
        // route OpenAI traffic through the configured proxy when present
        fetch: (url, options = {}) => {
          const defaultOptions = Config.proxy
            ? {
                agent: proxy(Config.proxy)
              }
            : {}
          const mergedOptions = {
            ...defaultOptions,
            ...options
          }
          return fetch(url, mergedOptions)
        },
        maxModelTokens
      })
      const htmlContentSummaryRes = await api.sendMessage(`去除与主体内容无关的部分从中整理出主体内容并转换成md格式不需要主观描述性的语言与冗余的空白行。${text}`, { completionParams })
      let htmlContentSummary = htmlContentSummaryRes.text
      return `this is the main content of website:\n ${htmlContentSummary}`
    } catch (err) {
      return `failed to visit the website, error: ${err.toString()}`
    }
  }

  description = 'Useful when you want to browse a website by url'
}

View file

@ -36,7 +36,18 @@ function randomNum (minNum, maxNum) {
return 0
}
}
export async function generateAudio (text, speaker = '随机', language = '中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)', noiseScale = Config.noiseScale, noiseScaleW = Config.noiseScaleW, lengthScale = Config.lengthScale) {
/**
* 生成VitsTTSMode下的wav音频
* @param text
* @param speaker
* @param language
* @param noiseScale
* @param noiseScaleW
* @param lengthScale
* @returns {Promise<string>}
*/
export async function generateVitsAudio (text, speaker = '随机', language = '中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)', noiseScale = parseFloat(Config.noiseScale), noiseScaleW = parseFloat(Config.noiseScaleW), lengthScale = parseFloat(Config.lengthScale)) {
if (!speaker || speaker === '随机') {
logger.info('随机角色!这次哪个角色这么幸运会被选到呢……')
speaker = speakers[randomNum(0, speakers.length)]

View file

@ -9,7 +9,15 @@ try {
} catch (err) {
logger.warn('未安装microsoft-cognitiveservices-speech-sdk无法使用微软Azure语音源')
}
async function generateAudio (text, option = {}, ssml = '') {
/**
* 生成AzureTTSMode下的wav音频
* @param pendingText - 待处理文本
* @param option
* @param ssml
* @returns {Promise<string>}
*/
async function generateAudio (pendingText, option = {}, ssml = '') {
if (!sdk) {
throw new Error('未安装microsoft-cognitiveservices-speech-sdk无法使用微软Azure语音源')
}
@ -22,7 +30,7 @@ async function generateAudio (text, option = {}, ssml = '') {
let audioConfig = sdk.AudioConfig.fromAudioFileOutput(filename)
let synthesizer
let speaker = option?.speaker || '随机'
let context = text
let context = pendingText
// 打招呼用
if (speaker === '随机') {
speaker = supportConfigurations[Math.floor(Math.random() * supportConfigurations.length)].code
@ -47,9 +55,9 @@ async function generateAudio (text, option = {}, ssml = '') {
return filename
}
async function speakTextAsync (synthesizer, text) {
async function speakTextAsync (synthesizer, pendingText) {
return new Promise((resolve, reject) => {
synthesizer.speakTextAsync(text, result => {
synthesizer.speakTextAsync(pendingText, result => {
if (result.reason === sdk.ResultReason.SynthesizingAudioCompleted) {
logger.info('speakTextAsync: true')
resolve()
@ -82,7 +90,7 @@ async function speakSsmlAsync (synthesizer, ssml) {
})
})
}
async function generateSsml (text, option = {}) {
async function generateSsml (pendingText, option = {}) {
let speaker = option?.speaker || '随机'
let emotionDegree, role, emotion
// 打招呼用
@ -104,7 +112,7 @@ async function generateSsml (text, option = {}) {
return `<speak version="1.0" xmlns="http://www.w3.org/2001/10/synthesis"
xmlns:mstts="https://www.w3.org/2001/mstts" xml:lang="zh-CN">
<voice name="${speaker}">
${expressAs}${text}${expressAs ? '</mstts:express-as>' : ''}
${expressAs}${pendingText}${expressAs ? '</mstts:express-as>' : ''}
</voice>
</speak>`
}

View file

@ -24,6 +24,12 @@ const newFetch = (url, options = {}) => {
return fetch(url, mergedOptions)
}
/**
* 生成voxTTSMode下的wav音频
* @param text
* @param options
* @returns {Promise<Buffer>}
*/
async function generateAudio (text, options = {}) {
let host = Config.voicevoxSpace
let speaker = options.speaker || '随机'

View file

@ -87,7 +87,8 @@ async function uploadRecord (recordUrl, ttsMode = 'vits-uma-genshin-honkai') {
buffer = Buffer.from(arrayBuffer)
formData.append('file', new File([buffer], 'audio.wav'))
}
const resultres = await fetch(`${Config.cloudTranscode}/audio`, {
const cloudUrl = new URL(Config.cloudTranscode)
const resultres = await fetch(`${cloudUrl}audio`, {
method: 'POST',
body: formData
})
@ -103,7 +104,8 @@ async function uploadRecord (recordUrl, ttsMode = 'vits-uma-genshin-honkai') {
throw e
}
} else {
const resultres = await fetch(`${Config.cloudTranscode}/audio`, {
const cloudUrl = new URL(Config.cloudTranscode)
const resultres = await fetch(`${cloudUrl}audio`, {
method: 'POST',
headers: {
'Content-Type': 'application/json'