Merge branch 'ikechan8370:v2' into v2

ifeif 2024-01-29 10:36:31 +08:00 committed by GitHub
commit ca72d76cc7
10 changed files with 353 additions and 60 deletions


@@ -81,6 +81,7 @@ import { getChatHistoryGroup } from '../utils/chat.js'
 import { CustomGoogleGeminiClient } from '../client/CustomGoogleGeminiClient.js'
 import { resizeAndCropImage } from '../utils/dalle.js'
 import fs from 'fs'
+import { ChatGLM4Client } from '../client/ChatGLM4Client.js'
 
 const roleMap = {
   owner: 'group owner',
@@ -106,8 +107,8 @@ try {
 let version = Config.version
 let proxy = getProxy()
-const originalValues = ['星火', '通义千问', '克劳德', '克劳德2', '必应', 'api', 'API', 'api3', 'API3', 'glm', '巴德']
-const correspondingValues = ['xh', 'qwen', 'claude', 'claude2', 'bing', 'api', 'api', 'api3', 'api3', 'chatglm', 'bard']
+const originalValues = ['星火', '通义千问', '克劳德', '克劳德2', '必应', 'api', 'API', 'api3', 'API3', 'glm', '巴德', '双子星', '双子座', '智谱']
+const correspondingValues = ['xh', 'qwen', 'claude', 'claude2', 'bing', 'api', 'api', 'api3', 'api3', 'chatglm', 'bard', 'gemini', 'gemini', 'chatglm4']
 /**
  * 每个对话保留的时长单个对话内ai是保留上下文的超时后销毁对话再次对话创建新的对话
  * 单位
@@ -196,6 +197,12 @@ export class chatgpt extends plugin {
         reg: '^#星火(搜索|查找)助手',
         fnc: 'searchxhBot'
       },
+      {
+        /** 命令正则匹配 */
+        reg: '^#glm4[sS]*',
+        /** 执行方法 */
+        fnc: 'glm4'
+      },
       {
         /** 命令正则匹配 */
         reg: '^#qwen[sS]*',
@@ -221,11 +228,11 @@ export class chatgpt extends plugin {
         permission: 'master'
       },
       {
-        reg: '^#(chatgpt|星火|通义千问|克劳德|克劳德2|必应|api|API|api3|API3|glm|巴德)?(结束|新开|摧毁|毁灭|完结)对话([sS]*)',
+        reg: `^#?(${originalValues.join('|')})?(结束|新开|摧毁|毁灭|完结)对话([sS]*)$`,
         fnc: 'destroyConversations'
       },
       {
-        reg: '^#(chatgpt|星火|通义千问|克劳德|克劳德2|必应|api|API|api3|API3|glm|巴德)?(结束|新开|摧毁|毁灭|完结)全部对话$',
+        reg: `^#?(${originalValues.join('|')})?(结束|新开|摧毁|毁灭|完结)全部对话$`,
         fnc: 'endAllConversations',
         permission: 'master'
       },
@@ -419,6 +426,14 @@ export class chatgpt extends plugin {
         await redis.del(`CHATGPT:CONVERSATIONS_GEMINI:${e.sender.user_id}`)
         await this.reply('已结束当前对话,请@我进行聊天以开启新的对话', true)
       }
+    } else if (use === 'chatglm4') {
+      let c = await redis.get(`CHATGPT:CONVERSATIONS_CHATGLM4:${e.sender.user_id}`)
+      if (!c) {
+        await this.reply('当前没有开启对话', true)
+      } else {
+        await redis.del(`CHATGPT:CONVERSATIONS_CHATGLM4:${e.sender.user_id}`)
+        await this.reply('已结束当前对话,请@我进行聊天以开启新的对话', true)
+      }
     } else if (use === 'bing') {
       let c = await redis.get(`CHATGPT:CONVERSATIONS_BING:${e.sender.user_id}`)
       if (!c) {
@@ -496,6 +511,14 @@ export class chatgpt extends plugin {
         await redis.del(`CHATGPT:CONVERSATIONS_GEMINI:${qq}`)
         await this.reply(`已结束${atUser}的对话TA仍可以@我进行聊天以开启新的对话`, true)
       }
+    } else if (use === 'chatglm4') {
+      let c = await redis.get(`CHATGPT:CONVERSATIONS_CHATGLM4:${qq}`)
+      if (!c) {
+        await this.reply(`当前${atUser}没有开启对话`, true)
+      } else {
+        await redis.del(`CHATGPT:CONVERSATIONS_CHATGLM4:${qq}`)
+        await this.reply(`已结束${atUser}的对话TA仍可以@我进行聊天以开启新的对话`, true)
+      }
     } else if (use === 'bing') {
       let c = await redis.get(`CHATGPT:CONVERSATIONS_BING:${qq}`)
       if (!c) {
@@ -639,6 +662,18 @@ export class chatgpt extends plugin {
         }
         break
       }
+      case 'chatglm4': {
+        let qcs = await redis.keys('CHATGPT:CONVERSATIONS_CHATGLM4:*')
+        for (let i = 0; i < qcs.length; i++) {
+          await redis.del(qcs[i])
+          // todo clean last message id
+          if (Config.debug) {
+            logger.info('delete chatglm4 conversation bind: ' + qcs[i])
+          }
+          deleted++
+        }
+        break
+      }
     }
     await this.reply(`结束了${deleted}个用户的对话。`, true)
   }
@@ -972,24 +1007,8 @@ export class chatgpt extends plugin {
         }
       }
     }
     let userSetting = await getUserReplySetting(this.e)
     let useTTS = !!userSetting.useTTS
-    let speaker
-    if (Config.ttsMode === 'vits-uma-genshin-honkai') {
-      speaker = convertSpeaker(userSetting.ttsRole || Config.defaultTTSRole)
-    } else if (Config.ttsMode === 'azure') {
-      speaker = userSetting.ttsRoleAzure || Config.azureTTSSpeaker
-    } else if (Config.ttsMode === 'voicevox') {
-      speaker = userSetting.ttsRoleVoiceVox || Config.voicevoxTTSSpeaker
-    }
-    // 每个回答可以指定
-    let trySplit = prompt.split('回答:')
-    if (trySplit.length > 1 && speakers.indexOf(convertSpeaker(trySplit[0])) > -1) {
-      useTTS = true
-      speaker = convertSpeaker(trySplit[0])
-      prompt = trySplit[1]
-    }
     const isImg = await getImg(e)
     if (Config.imgOcr && !!isImg) {
       let imgOcrText = await getImageOcrText(e)
@@ -1138,6 +1157,10 @@ export class chatgpt extends plugin {
         key = `CHATGPT:CONVERSATIONS_GEMINI:${(e.isGroup && Config.groupMerge) ? e.group_id.toString() : e.sender.user_id}`
         break
       }
+      case 'chatglm4': {
+        key = `CHATGPT:CONVERSATIONS_CHATGLM4:${(e.isGroup && Config.groupMerge) ? e.group_id.toString() : e.sender.user_id}`
+        break
+      }
     }
     let ctime = new Date()
     previousConversation = (key ? await redis.get(key) : null) || JSON.stringify({
@@ -1177,6 +1200,7 @@ export class chatgpt extends plugin {
           await e.reply([element.tag, segment.image(element.url)])
         })
       }
+      // chatglm4图片调整至sendMessage中处理
       if (use === 'api' && !chatMessage) {
         // 字数超限直接返回
         return false
@@ -1446,7 +1470,11 @@ export class chatgpt extends plugin {
   }
 
   async qwen (e) {
-    return await this.otherMode(e, 'gemini')
+    return await this.otherMode(e, 'qwen')
+  }
+
+  async glm4 (e) {
+    return await this.otherMode(e, 'chatglm4')
   }
 
   async gemini (e) {
@@ -2148,6 +2176,15 @@ export class chatgpt extends plugin {
       }
       option.system = system
       return await client.sendMessage(prompt, option)
+    } else if (use === 'chatglm4') {
+      const client = new ChatGLM4Client({
+        refreshToken: Config.chatglmRefreshToken
+      })
+      let resp = await client.sendMessage(prompt, conversation)
+      if (resp.image) {
+        e.reply(segment.image(resp.image), true)
+      }
+      return resp
     } else {
       // openai api
       let completionParams = {}
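
Read together, these hunks wire the new backend end to end: `#glm4` is routed to `glm4()`, which calls `otherMode(e, 'chatglm4')`; per-user conversation state lives under the `CHATGPT:CONVERSATIONS_CHATGLM4:*` redis keys; and `sendMessage` dispatches to the new `ChatGLM4Client` (added later in this commit). A rough sketch of that flow for a single message, with the plugin scaffolding simplified (this helper is illustrative and not part of the commit; `redis` and `segment` are the Yunzai globals already used in the hunks above):

import { ChatGLM4Client } from '../client/ChatGLM4Client.js'
import { Config } from '../utils/config.js'

// Illustrative only: roughly what one "#glm4 ..." message goes through once CHATGPT:USE is 'chatglm4'.
async function replyWithGlm4 (e, prompt) {
  const key = `CHATGPT:CONVERSATIONS_CHATGLM4:${e.sender.user_id}`
  const previous = JSON.parse(await redis.get(key) || '{}') // conversation state from earlier turns, if any
  const client = new ChatGLM4Client({ refreshToken: Config.chatglmRefreshToken })
  const resp = await client.sendMessage(prompt, { conversationId: previous.conversationId })
  if (resp.image) {
    e.reply(segment.image(resp.image), true) // image answers are sent as a separate segment, as in the diff
  }
  await redis.set(key, JSON.stringify({ conversationId: resp.conversationId }))
  return resp.text
}

The real code also handles group-merged keys, image OCR and TTS before this point; only the ChatGLM4-specific path is sketched here.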


@@ -97,11 +97,11 @@ export class ChatgptManagement extends plugin {
         fnc: 'useOpenAIAPIBasedSolution',
         permission: 'master'
       },
-      {
-        reg: '^#chatgpt切换(ChatGLM|chatglm)$',
-        fnc: 'useChatGLMSolution',
-        permission: 'master'
-      },
+      // {
+      //   reg: '^#chatgpt切换(ChatGLM|chatglm)$',
+      //   fnc: 'useChatGLMSolution',
+      //   permission: 'master'
+      // },
       {
         reg: '^#chatgpt切换API3$',
         fnc: 'useReversedAPIBasedSolution2',
@@ -152,6 +152,11 @@ export class ChatgptManagement extends plugin {
         fnc: 'useQwenSolution',
         permission: 'master'
       },
+      {
+        reg: '^#chatgpt切换(智谱|智谱清言|ChatGLM|ChatGLM4|chatglm)$',
+        fnc: 'useGLM4Solution',
+        permission: 'master'
+      },
       {
         reg: '^#chatgpt(必应|Bing)切换',
         fnc: 'changeBingTone',
@@ -1032,6 +1037,16 @@ azure语音Azure 语音是微软 Azure 平台提供的一项语音服务，
     }
   }
 
+  async useGLM4Solution () {
+    let use = await redis.get('CHATGPT:USE')
+    if (use !== 'chatglm4') {
+      await redis.set('CHATGPT:USE', 'chatglm4')
+      await this.reply('已切换到基于ChatGLM的解决方案')
+    } else {
+      await this.reply('当前已经是ChatGLM模式了')
+    }
+  }
+
   async changeBingTone (e) {
     let tongStyle = e.msg.replace(/^#chatgpt(必应|Bing)切换/, '')
     if (!tongStyle) {

client/ChatGLM4Client.js (new file, 185 lines)

@@ -0,0 +1,185 @@
import { BaseClient } from './BaseClient.js'
import https from 'https'
import { Config } from '../utils/config.js'
import { createParser } from 'eventsource-parser'

const BASEURL = 'https://chatglm.cn/chatglm/backend-api/assistant/stream'

export class ChatGLM4Client extends BaseClient {
  constructor (props) {
    super(props)
    this.baseUrl = props.baseUrl || BASEURL
    this.supportFunction = false
    this.debug = props.debug
    this._refreshToken = props.refreshToken
  }

  async getAccessToken (refreshToken = this._refreshToken) {
    if (redis) {
      let lastToken = await redis.get('CHATGPT:CHATGLM4_ACCESS_TOKEN')
      if (lastToken) {
        this._accessToken = lastToken
        // todo check token through user info endpoint
        return
      }
    }
    let res = await fetch('https://chatglm.cn/chatglm/backend-api/v1/user/refresh', {
      method: 'POST',
      body: '{}',
      headers: {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        Origin: 'https://www.chatglm.cn',
        Referer: 'https://www.chatglm.cn/main/detail',
        Authorization: `Bearer ${refreshToken}`
      }
    })
    let tokenRsp = await res.json()
    let token = tokenRsp?.result?.accessToken
    if (token) {
      this._accessToken = token
      redis && await redis.set('CHATGPT:CHATGLM4_ACCESS_TOKEN', token, { EX: 7000 })
      // accessToken will expire in 2 hours
    }
  }

  // todo https://chatglm.cn/chatglm/backend-api/v3/user/info query remain times

  /**
   *
   * @param text
   * @param {{conversationId: string?, stream: boolean?, onProgress: function?, image: string?}} opt
   * @returns {Promise<{conversationId: string?, parentMessageId: string?, text: string, id: string, image: string?}>}
   */
  async sendMessage (text, opt = {}) {
    await this.getAccessToken()
    if (!this._accessToken) {
      throw new Error('accessToken for www.chatglm.cn not set')
    }
    let { conversationId, onProgress } = opt
    const body = {
      assistant_id: '65940acff94777010aa6b796', // chatglm4
      conversation_id: conversationId || '',
      meta_data: {
        is_test: false,
        input_question_type: 'xxxx',
        channel: ''
      },
      messages: [
        {
          role: 'user',
          content: [
            {
              type: 'text',
              text
            }
          ]
        }
      ]
    }
    let conversationResponse
    let statusCode
    let messageId
    let image
    let requestP = new Promise((resolve, reject) => {
      let option = {
        method: 'POST',
        headers: {
          accept: 'text/event-stream',
          'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
          authorization: `Bearer ${this._accessToken}`,
          'content-type': 'application/json',
          referer: 'https://www.chatglm.cn/main/alltoolsdetail',
          origin: 'https://www.chatglm.cn'
        },
        referrer: 'https://www.chatglm.cn/main/alltoolsdetail',
        timeout: 60000
      }
      const req = https.request(BASEURL, option, (res) => {
        statusCode = res.statusCode
        let response
        function onMessage (data) {
          try {
            const convoResponseEvent = JSON.parse(data)
            conversationResponse = convoResponseEvent
            if (convoResponseEvent.conversation_id) {
              conversationId = convoResponseEvent.conversation_id
            }
            if (convoResponseEvent.id) {
              messageId = convoResponseEvent.id
            }
            const partialResponse = convoResponseEvent?.parts?.[0]
            if (partialResponse) {
              if (Config.debug) {
                logger.info(JSON.stringify(convoResponseEvent))
              }
              response = partialResponse
              if (onProgress && typeof onProgress === 'function') {
                onProgress(partialResponse)
              }
            }
            let content = partialResponse?.content[0]
            if (content?.type === 'image' && content?.status === 'finish') {
              image = content.image[0].image_url
            }
            if (convoResponseEvent.status === 'finish') {
              resolve({
                error: null,
                response,
                conversationId,
                messageId,
                conversationResponse,
                image
              })
            }
          } catch (err) {
            console.warn('fetchSSE onMessage unexpected error', err)
            reject(err)
          }
        }
        const parser = createParser((event) => {
          if (event.type === 'event') {
            onMessage(event.data)
          }
        })
        const errBody = []
        res.on('data', (chunk) => {
          if (statusCode === 200) {
            let str = chunk.toString()
            parser.feed(str)
          }
          errBody.push(chunk)
        })
        // const body = []
        // res.on('data', (chunk) => body.push(chunk))
        res.on('end', () => {
          const resString = Buffer.concat(errBody).toString()
          reject(resString)
        })
      })
      req.on('error', (err) => {
        reject(err)
      })
      req.on('timeout', () => {
        req.destroy()
        reject(new Error('Request time out'))
      })
      req.write(JSON.stringify(body))
      req.end()
    })
    const res = await requestP
    return {
      text: res?.response?.content[0]?.text,
      conversationId: res.conversationId,
      id: res.messageId,
      image,
      raw: res?.response
    }
  }
}
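
The small test file further down in this commit stubs the `redis` and `logger` globals and sends a single message. To keep a multi-turn conversation going, the `conversationId` returned by the first call has to be passed back in, which is what the plugin's redis bookkeeping does. A minimal usage sketch under those assumptions (the refresh token value is a placeholder):

import { ChatGLM4Client } from './ChatGLM4Client.js'

async function demo () {
  const client = new ChatGLM4Client({ refreshToken: '<chatglm refresh token>', debug: true })
  // First turn: no conversationId yet, chatglm.cn creates one and returns it.
  const first = await client.sendMessage('介绍一下你自己', {
    onProgress: part => {} // optional streaming callback, receives partial parts as they arrive
  })
  // Follow-up turn: reuse the conversationId to stay in the same conversation.
  const second = await client.sendMessage('换个说法再讲一次', { conversationId: first.conversationId })
  console.log(second.text)
}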


@@ -71,7 +71,7 @@ export class CustomGoogleGeminiClient extends GoogleGeminiClient {
   * @param {{conversationId: string?, parentMessageId: string?, stream: boolean?, onProgress: function?, functionResponse: FunctionResponse?, system: string?, image: string?}} opt
   * @returns {Promise<{conversationId: string?, parentMessageId: string, text: string, id: string}>}
   */
-  async sendMessage (text, opt) {
+  async sendMessage (text, opt = {}) {
     let history = await this.getHistory(opt.parentMessageId)
     let systemMessage = opt.system
     if (systemMessage) {
@@ -208,9 +208,10 @@ export class CustomGoogleGeminiClient extends GoogleGeminiClient {
       // execute function
       try {
         let args = Object.assign(functionCall.args, {
-          isAdmin: this.e.group.is_admin,
-          isOwner: this.e.group.is_owner,
-          sender: this.e.sender
+          isAdmin: this.e.group?.is_admin,
+          isOwner: this.e.group?.is_owner,
+          sender: this.e.sender,
+          mode: 'gemini'
         })
         functionResponse.response.content = await chosenTool.func(args, this.e)
         if (this.debug) {


@@ -0,0 +1,17 @@
import { ChatGLM4Client } from '../ChatGLM4Client.js'

async function sendMsg () {
  const client = new ChatGLM4Client({
    refreshToken: '',
    debug: true
  })
  let res = await client.sendMessage('你好啊')
  console.log(res)
}

// global.redis = null
// global.logger = {
//   info: console.log,
//   warn: console.warn,
//   error: console.error
// }
// sendMsg()


@@ -1,4 +1,4 @@
-import { GoogleGeminiClient } from './GoogleGeminiClient.js'
+import { GoogleGeminiClient } from '../GoogleGeminiClient.js'
 
 async function test () {
   const client = new GoogleGeminiClient({


@@ -409,6 +409,12 @@ export function supportGuoba () {
         bottomHelpMessage: '加强主人认知。希望机器人认清主人避免NTR可开启。开启后可能会与自设定的内容有部分冲突。sydney模式可以放心开启',
         component: 'Switch'
       },
+      {
+        field: 'sydneyGPT4Turbo',
+        label: '使用GPT4-turbo',
+        bottomHelpMessage: '目前仅Copilot Pro可开启。非pro用户开启会报错。',
+        component: 'Switch'
+      },
       {
         field: 'enableGenerateContents',
         label: '允许生成图像等内容',
@@ -514,6 +520,16 @@ export function supportGuoba () {
         bottomHelpMessage: '使用GPT-4注意试用配额较低如果用不了就关掉',
         component: 'Switch'
       },
+      {
+        label: '以下为智谱清言ChatGLM方式的配置。',
+        component: 'Divider'
+      },
+      {
+        field: 'chatglmRefreshToken',
+        label: 'refresh token',
+        bottomHelpMessage: 'chatglm_refresh_token 6个月有效期',
+        component: 'Input'
+      },
       {
         label: '以下为Slack Claude方式的配置',
         component: 'Divider'


@@ -383,6 +383,9 @@ export default class SydneyAIClient {
     if (!Config.sydneyEnableSearch || toSummaryFileContent?.content) {
       optionsSets.push(...['nosearchall'])
     }
+    if (Config.sydneyGPT4Turbo) {
+      optionsSets.push('gpt4tmnc')
+    }
     let maxConv = Config.maxNumUserMessagesInConversation
     const currentDate = moment().format('YYYY-MM-DDTHH:mm:ssZ')
     const imageDate = await this.kblobImage(opts.imageUrl)
@@ -482,6 +485,7 @@ export default class SydneyAIClient {
         // }
       ]
     }
+
     if (encryptedconversationsignature) {
       delete argument0.conversationSignature
     }


@@ -39,6 +39,7 @@ const defaultConfig = {
   sydneyBrainWashStrength: 15,
   sydneyBrainWashName: 'Sydney',
   sydneyMood: false,
+  sydneyGPT4Turbo: false,
   sydneyImageRecognition: false,
   sydneyMoodTip: 'Your response should be divided into two parts, namely, the text and your mood. The mood available to you can only include: blandness, happy, shy, frustrated, disgusted, and frightened.All content should be replied in this format {"text": "", "mood": ""}.All content except mood should be placed in text, It is important to ensure that the content you reply to can be parsed by json.',
   enableSuggestedResponses: false,
@@ -169,7 +170,8 @@ const defaultConfig = {
   geminiPrompt: 'You are Gemini. Your answer shouldn\'t be too verbose. Prefer to answer in Chinese.',
   // origin: https://generativelanguage.googleapis.com
   geminiBaseUrl: 'https://gemini.ikechan8370.com',
-  version: 'v2.7.8'
+  chatglmRefreshToken: '',
+  version: 'v2.7.9'
 }
 const _path = process.cwd()
 let config = {}


@@ -5,6 +5,7 @@ import fetch from 'node-fetch'
 import proxy from 'https-proxy-agent'
 import { getMaxModelTokens } from '../common.js'
 import { ChatGPTPuppeteer } from '../browser.js'
+import { CustomGoogleGeminiClient } from '../../client/CustomGoogleGeminiClient.js'
 
 export class WebsiteTool extends AbstractTool {
   name = 'website'
@@ -19,7 +20,7 @@ export class WebsiteTool extends AbstractTool {
   }
 
   func = async function (opts) {
-    let { url } = opts
+    let { url, mode, e } = opts
     try {
       // let res = await fetch(url, {
       //   headers: {
@@ -58,34 +59,49 @@ export class WebsiteTool extends AbstractTool {
         .replace(/[\n\r]/gi, '') // 去除回车换行
         .replace(/\s{2}/g, '') // 多个空格只保留一个空格
         .replace('<!DOCTYPE html>', '') // 去除<!DOCTYPE>声明
-      let maxModelTokens = getMaxModelTokens(Config.model)
-      text = text.slice(0, Math.min(text.length, maxModelTokens - 1600))
-      let completionParams = {
-        // model: Config.model
-        model: 'gpt-3.5-turbo-16k'
-      }
-      let api = new ChatGPTAPI({
-        apiBaseUrl: Config.openAiBaseUrl,
-        apiKey: Config.apiKey,
-        debug: false,
-        completionParams,
-        fetch: (url, options = {}) => {
-          const defaultOptions = Config.proxy
-            ? {
-                agent: proxy(Config.proxy)
-              }
-            : {}
-          const mergedOptions = {
-            ...defaultOptions,
-            ...options
-          }
-          return fetch(url, mergedOptions)
-        },
-        maxModelTokens
-      })
-      const htmlContentSummaryRes = await api.sendMessage(`去除与主体内容无关的部分从中整理出主体内容并转换成md格式不需要主观描述性的语言与冗余的空白行。${text}`, { completionParams })
-      let htmlContentSummary = htmlContentSummaryRes.text
-      return `this is the main content of website:\n ${htmlContentSummary}`
+      if (mode === 'gemini') {
+        let client = new CustomGoogleGeminiClient({
+          e,
+          userId: e?.sender?.user_id,
+          key: Config.geminiKey,
+          model: Config.geminiModel,
+          baseUrl: Config.geminiBaseUrl,
+          debug: Config.debug
+        })
+        const htmlContentSummaryRes = await client.sendMessage(`去除与主体内容无关的部分从中整理出主体内容并转换成md格式不需要主观描述性的语言与冗余的空白行。${text}`)
+        let htmlContentSummary = htmlContentSummaryRes.text
+        return `this is the main content of website:\n ${htmlContentSummary}`
+      } else {
+        let maxModelTokens = getMaxModelTokens(Config.model)
+        text = text.slice(0, Math.min(text.length, maxModelTokens - 1600))
+        let completionParams = {
+          // model: Config.model
+          model: 'gpt-3.5-turbo-16k'
+        }
+        let api = new ChatGPTAPI({
+          apiBaseUrl: Config.openAiBaseUrl,
+          apiKey: Config.apiKey,
+          debug: false,
+          completionParams,
+          fetch: (url, options = {}) => {
+            const defaultOptions = Config.proxy
+              ? {
+                  agent: proxy(Config.proxy)
+                }
+              : {}
+            const mergedOptions = {
+              ...defaultOptions,
+              ...options
+            }
+            return fetch(url, mergedOptions)
+          },
+          maxModelTokens
+        })
+        const htmlContentSummaryRes = await api.sendMessage(`去除与主体内容无关的部分从中整理出主体内容并转换成md格式不需要主观描述性的语言与冗余的空白行。${text}`, { completionParams })
+        let htmlContentSummary = htmlContentSummaryRes.text
+        return `this is the main content of website:\n ${htmlContentSummary}`
+      }
     } catch (err) {
       return `failed to visit the website, error: ${err.toString()}`
     }