Merge branch 'ikechan8370:v2' into v2

ifeif committed via GitHub on 2023-11-24 22:11:27 +08:00
commit 9723e6db17
92 changed files with 4289 additions and 1073 deletions


@@ -7,10 +7,11 @@ import fetch, {
import crypto from 'crypto'
import WebSocket from 'ws'
import { Config, pureSydneyInstruction } from './config.js'
import { formatDate, getMasterQQ, isCN, getUserData } from './common.js'
import { formatDate, getMasterQQ, isCN, getUserData, limitString } from './common.js'
import delay from 'delay'
import moment from 'moment'
import { getProxy } from './proxy.js'
import Version from './version.js'
if (!globalThis.fetch) {
globalThis.fetch = fetch
@@ -80,7 +81,7 @@ export default class SydneyAIClient {
// 'x-ms-client-request-id': crypto.randomUUID(),
// 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.3 OS/macOS',
// cookie: this.opts.cookies || `_U=${this.opts.userToken}`,
Referer: 'https://edgeservices.bing.com/edgesvc/chat?udsframed=1&form=SHORUN&clientscopes=chat,noheader,channelstable,',
Referer: 'https://edgeservices.bing.com/edgesvc/chat?udsframed=1&form=SHORUN&clientscopes=chat,noheader,channelstable,'
// 'Referrer-Policy': 'origin-when-cross-origin',
// Workaround for request being blocked due to geolocation
// 'x-forwarded-for': '1.1.1.1'
@@ -100,12 +101,12 @@ export default class SydneyAIClient {
this.opts.host = 'https://edgeservices.bing.com/edgesvc'
}
logger.mark('使用host' + this.opts.host)
let response = await fetch(`${this.opts.host}/turing/conversation/create?bundleVersion=1.1055.6`, fetchOptions)
let response = await fetch(`${this.opts.host}/turing/conversation/create?bundleVersion=1.1055.10`, fetchOptions)
let text = await response.text()
let retry = 10
while (retry >= 0 && response.status === 200 && !text) {
await delay(400)
response = await fetch(`${this.opts.host}/turing/conversation/create`, fetchOptions)
response = await fetch(`${this.opts.host}/turing/conversation/create?bundleVersion=1.1055.10`, fetchOptions)
text = await response.text()
retry--
}
@@ -137,7 +138,11 @@ export default class SydneyAIClient {
agent = proxy(this.opts.proxy)
}
if (Config.sydneyWebsocketUseProxy) {
sydneyHost = Config.sydneyReverseProxy.replace('https://', 'wss://').replace('http://', 'ws://')
if (!Config.sydneyReverseProxy) {
logger.warn('用户开启了对话反代,但是没有配置反代,忽略反代配置')
} else {
sydneyHost = Config.sydneyReverseProxy.replace('https://', 'wss://').replace('http://', 'ws://')
}
}
logger.mark(`use sydney websocket host: ${sydneyHost}`)
let host = sydneyHost + '/sydney/ChatHub'
@@ -221,8 +226,8 @@ export default class SydneyAIClient {
timeout = Config.defaultTimeoutMs,
firstMessageTimeout = Config.sydneyFirstMessageTimeout,
groupId, nickname, qq, groupName, chats, botName, masterName,
messageType = 'Chat'
messageType = 'Chat',
toSummaryFileContent
} = opts
// if (messageType === 'Chat') {
// logger.warn('该Bing账户token已被限流降级至使用非搜索模式。本次对话AI将无法使用Bing搜索返回的内容')
@@ -371,6 +376,10 @@ export default class SydneyAIClient {
let maxConv = Config.maxNumUserMessagesInConversation
const currentDate = moment().format('YYYY-MM-DDTHH:mm:ssZ')
const imageDate = await this.kblobImage(opts.imageUrl)
if (toSummaryFileContent?.content) {
// message = `请不要进行搜索,用户的问题是:"${message}"`
messageType = 'Chat'
}
let argument0 = {
source: 'cib',
optionsSets,
@@ -414,10 +423,12 @@ export default class SydneyAIClient {
text: message,
messageType,
userIpAddress: await generateRandomIP(),
timestamp: currentDate
timestamp: currentDate,
privacy: 'Internal'
// messageType: 'SearchQuery'
},
tone: 'Creative',
privacy: 'Internal',
conversationSignature,
participant: {
id: clientId
@@ -439,7 +450,7 @@ export default class SydneyAIClient {
}
// simulates document summary function on Edge's Bing sidebar
// unknown character limit, at least up to 7k
if (groupId) {
if (groupId && !toSummaryFileContent?.content) {
context += '注意你现在正在一个qq群里和人聊天现在问你问题的人是' + `${nickname}(${qq})。`
if (Config.enforceMaster && master) {
if (qq === master) {
@@ -461,13 +472,11 @@ export default class SydneyAIClient {
admin: '管理员'
}
if (chats) {
context += `以下是一段qq群内的对话提供给你作为上下文你在回答所有问题时必须优先考虑这些信息结合这些上下文进行回答这很重要。"
`
context += '以下是一段qq群内的对话提供给你作为上下文你在回答所有问题时必须优先考虑这些信息结合这些上下文进行回答这很重要。"'
context += chats
.map(chat => {
let sender = chat.sender || {}
// if (sender.user_id === Bot.uin && chat.raw_message.startsWith('建议的回复')) {
if (chat.raw_message.startsWith('建议的回复')) {
let sender = chat.sender || chat || {}
if (chat.raw_message?.startsWith('建议的回复')) {
// Suggested replies pollute the persona too easily and make the conversation rigid and off-topic
return ''
}
@@ -486,7 +495,19 @@ export default class SydneyAIClient {
}).join('\n')
context += '\n'
}
if (context) {
if (toSummaryFileContent?.content) {
// ignore the regular context here, otherwise the prompt could blow up
obj.arguments[0].previousMessages.push({
author: 'user',
description: limitString(toSummaryFileContent?.content, 20000, true),
contextType: 'WebPage',
messageType: 'Context',
sourceName: toSummaryFileContent?.name,
sourceUrl: 'file:///C:/Users/turing/Downloads/Documents/' + toSummaryFileContent?.name || 'file.pdf',
// locale: 'und',
// privacy: 'Internal'
})
} else if (context) {
obj.arguments[0].previousMessages.push({
author: 'user',
description: context,
@@ -655,7 +676,7 @@ export default class SydneyAIClient {
text: replySoFar.join('')
}
// image content was returned
if (messages.some(obj => obj.contentType === "IMAGE")) {
if (messages.some(obj => obj.contentType === 'IMAGE')) {
message.imageTag = messages.filter(m => m.contentType === 'IMAGE').map(m => m.text).join('')
}
message.text = messages.filter(m => m.author === 'bot' && m.contentType != 'IMAGE').map(m => m.text).join('')
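
The new toSummaryFileContent option above is what carries an uploaded document into the prompt as a synthetic 'WebPage' context message. A minimal sketch of how it could be passed in; the client construction, the bingToken placeholder and the sendMessage call site are assumptions, only the option name and its {name, content} shape come from this diff:

const client = new SydneyAIClient({ userToken: bingToken }) // bingToken: placeholder for the configured Bing _U cookie
await client.sendMessage('Please summarize this document.', {
  toSummaryFileContent: {
    name: 'report.pdf',
    content: 'plain text previously extracted from the uploaded file...'
  }
})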

utils/alibaba/qwen-api.js (new file, 394 lines)

@@ -0,0 +1,394 @@
var __assign = (this && this.__assign) || function () {
__assign = Object.assign || function(t) {
for (var s, i = 1, n = arguments.length; i < n; i++) {
s = arguments[i];
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
t[p] = s[p];
}
return t;
};
return __assign.apply(this, arguments);
};
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (g && (g = 0, op[0] && (_ = 0)), _) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
var __spreadArray = (this && this.__spreadArray) || function (to, from, pack) {
if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
if (ar || !(i in from)) {
if (!ar) ar = Array.prototype.slice.call(from, 0, i);
ar[i] = from[i];
}
}
return to.concat(ar || Array.prototype.slice.call(from));
};
import Keyv from 'keyv';
import pTimeout from 'p-timeout';
import QuickLRU from 'quick-lru';
import { v4 as uuidv4 } from 'uuid';
import * as tokenizer from './tokenizer.js';
import * as types from './types.js';
import globalFetch from 'node-fetch';
var CHATGPT_MODEL = 'qwen-turbo'; // qwen-plus
var USER_LABEL_DEFAULT = 'User';
var ASSISTANT_LABEL_DEFAULT = '同义千问';
var QwenApi = /** @class */ (function () {
/**
* Creates a new client wrapper around Qwen's chat completion API, mimicking the official ChatGPT webapp's functionality as closely as possible.
*
* @param opts
*/
function QwenApi(opts) {
var apiKey = opts.apiKey, _a = opts.apiBaseUrl, apiBaseUrl = _a === void 0 ? 'https://dashscope.aliyuncs.com/api/v1' : _a, _b = opts.debug, debug = _b === void 0 ? false : _b, messageStore = opts.messageStore, completionParams = opts.completionParams, parameters = opts.parameters, systemMessage = opts.systemMessage, getMessageById = opts.getMessageById, upsertMessage = opts.upsertMessage, _c = opts.fetch, fetch = _c === void 0 ? globalFetch : _c;
this._apiKey = apiKey;
this._apiBaseUrl = apiBaseUrl;
this._debug = !!debug;
this._fetch = fetch;
this._completionParams = __assign({ model: CHATGPT_MODEL, parameters: __assign({ top_p: 0.5, top_k: 50, temperature: 1.0, seed: 114514, enable_search: true, result_format: "text", incremental_output: false }, parameters) }, completionParams);
this._systemMessage = systemMessage;
if (this._systemMessage === undefined) {
var currentDate = new Date().toISOString().split('T')[0];
this._systemMessage = "You are ChatGPT, a large language model trained by Qwen. Answer as concisely as possible.\nKnowledge cutoff: 2021-09-01\nCurrent date: ".concat(currentDate);
}
this._getMessageById = getMessageById !== null && getMessageById !== void 0 ? getMessageById : this._defaultGetMessageById;
this._upsertMessage = upsertMessage !== null && upsertMessage !== void 0 ? upsertMessage : this._defaultUpsertMessage;
if (messageStore) {
this._messageStore = messageStore;
}
else {
this._messageStore = new Keyv({
store: new QuickLRU({ maxSize: 10000 })
});
}
if (!this._apiKey) {
throw new Error('Qwen missing required apiKey');
}
if (!this._fetch) {
throw new Error('Invalid environment; fetch is not defined');
}
if (typeof this._fetch !== 'function') {
throw new Error('Invalid "fetch" is not a function');
}
}
/**
* Sends a message to the Qwen chat completions endpoint, waits for the response
* to resolve, and returns the response.
*
* If you want your response to have historical context, you must provide a valid `parentMessageId`.
*
* If you want to receive a stream of partial responses, use `opts.onProgress`.
*
* Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the Qwen chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
*
* @param message - The prompt message to send
* @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
* @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)
* @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
* @param opts.systemMessage - Optional override for the chat "system message" which acts as instructions to the model (defaults to the ChatGPT system message)
* @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
* @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
* @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
* @param completionParams - Optional overrides to send to the [Qwen chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
*
* @returns The response from ChatGPT
*/
QwenApi.prototype.sendMessage = function (text, opts, role) {
if (opts === void 0) { opts = {}; }
if (role === void 0) { role = 'user'; }
return __awaiter(this, void 0, void 0, function () {
var parentMessageId, _a, messageId, timeoutMs, completionParams, conversationId, abortSignal, abortController, message, latestQuestion, _b, messages, maxTokens, numTokens, result, responseP;
var _this = this;
return __generator(this, function (_c) {
switch (_c.label) {
case 0:
parentMessageId = opts.parentMessageId, _a = opts.messageId, messageId = _a === void 0 ? uuidv4() : _a, timeoutMs = opts.timeoutMs, completionParams = opts.completionParams, conversationId = opts.conversationId;
abortSignal = opts.abortSignal;
abortController = null;
if (timeoutMs && !abortSignal) {
abortController = new AbortController();
abortSignal = abortController.signal;
}
message = {
role: role,
id: messageId,
conversationId: conversationId,
parentMessageId: parentMessageId,
text: text,
};
latestQuestion = message;
return [4 /*yield*/, this._buildMessages(text, role, opts, completionParams)];
case 1:
_b = _c.sent(), messages = _b.messages, maxTokens = _b.maxTokens, numTokens = _b.numTokens;
console.log("maxTokens: ".concat(maxTokens, ", numTokens: ").concat(numTokens));
result = {
role: 'assistant',
id: uuidv4(),
conversationId: conversationId,
parentMessageId: messageId,
text: undefined,
};
this._completionParams.input = { messages: messages };
responseP = new Promise(function (resolve, reject) { return __awaiter(_this, void 0, void 0, function () {
var url, headers, body, res, reason, msg, error, response, err_1;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
url = "".concat(this._apiBaseUrl, "/services/aigc/text-generation/generation");
headers = {
'Content-Type': 'application/json',
Authorization: "Bearer ".concat(this._apiKey)
};
body = __assign(__assign({}, this._completionParams), completionParams);
if (this._debug) {
console.log(JSON.stringify(body));
}
if (this._debug) {
console.log("sendMessage (".concat(numTokens, " tokens)"), body);
}
_a.label = 1;
case 1:
_a.trys.push([1, 6, , 7]);
return [4 /*yield*/, this._fetch(url, {
method: 'POST',
headers: headers,
body: JSON.stringify(body),
signal: abortSignal
})];
case 2:
res = _a.sent();
if (!!res.ok) return [3 /*break*/, 4];
return [4 /*yield*/, res.text()];
case 3:
reason = _a.sent();
msg = "Qwen error ".concat(res.status || res.statusText, ": ").concat(reason);
error = new types.ChatGPTError(msg, { cause: res });
error.statusCode = res.status;
error.statusText = res.statusText;
return [2 /*return*/, reject(error)];
case 4: return [4 /*yield*/, res.json()];
case 5:
response = _a.sent();
if (this._debug) {
console.log(response);
}
if (response === null || response === void 0 ? void 0 : response.request_id) {
result.id = response.request_id;
}
result.detail = response;
result.text = response.output.text;
return [2 /*return*/, resolve(result)];
case 6:
err_1 = _a.sent();
return [2 /*return*/, reject(err_1)];
case 7: return [2 /*return*/];
}
});
}); }).then(function (message) { return __awaiter(_this, void 0, void 0, function () {
return __generator(this, function (_a) {
return [2 /*return*/, Promise.all([
this._upsertMessage(latestQuestion),
this._upsertMessage(message)
]).then(function () { return message; })];
});
}); });
if (timeoutMs) {
if (abortController) {
// This will be called when a timeout occurs in order for us to forcibly
// ensure that the underlying HTTP request is aborted.
;
responseP.cancel = function () {
abortController.abort();
};
}
return [2 /*return*/, pTimeout(responseP, {
milliseconds: timeoutMs,
message: 'Qwen timed out waiting for response'
})];
}
else {
return [2 /*return*/, responseP];
}
return [2 /*return*/];
}
});
});
};
Object.defineProperty(QwenApi.prototype, "apiKey", {
get: function () {
return this._apiKey;
},
set: function (apiKey) {
this._apiKey = apiKey;
},
enumerable: false,
configurable: true
});
QwenApi.prototype._buildMessages = function (text, role, opts, completionParams) {
return __awaiter(this, void 0, void 0, function () {
var _a, systemMessage, parentMessageId, userLabel, assistantLabel, maxNumTokens, messages, systemMessageOffset, nextMessages, functionToken, numTokens, prompt_1, nextNumTokensEstimate, _i, nextMessages_1, m1, _b, isValidPrompt, parentMessage, parentMessageRole, maxTokens;
return __generator(this, function (_c) {
switch (_c.label) {
case 0:
_a = opts.systemMessage, systemMessage = _a === void 0 ? this._systemMessage : _a;
parentMessageId = opts.parentMessageId;
userLabel = USER_LABEL_DEFAULT;
assistantLabel = ASSISTANT_LABEL_DEFAULT;
maxNumTokens = 6000;
messages = [];
if (systemMessage) {
messages.push({
role: 'system',
content: systemMessage
});
}
systemMessageOffset = messages.length;
nextMessages = text
? messages.concat([
{
role: role,
content: text
}
])
: messages;
functionToken = 0;
numTokens = functionToken;
_c.label = 1;
case 1:
prompt_1 = nextMessages
.reduce(function (prompt, message) {
switch (message.role) {
case 'system':
return prompt.concat(["Instructions:\n".concat(message.content)]);
case 'user':
return prompt.concat(["".concat(userLabel, ":\n").concat(message.content)]);
default:
return message.content ? prompt.concat(["".concat(assistantLabel, ":\n").concat(message.content)]) : prompt;
}
}, [])
.join('\n\n');
return [4 /*yield*/, this._getTokenCount(prompt_1)];
case 2:
nextNumTokensEstimate = _c.sent();
_i = 0, nextMessages_1 = nextMessages;
_c.label = 3;
case 3:
if (!(_i < nextMessages_1.length)) return [3 /*break*/, 6];
m1 = nextMessages_1[_i];
_b = nextNumTokensEstimate;
return [4 /*yield*/, this._getTokenCount('')];
case 4:
nextNumTokensEstimate = _b + _c.sent();
_c.label = 5;
case 5:
_i++;
return [3 /*break*/, 3];
case 6:
isValidPrompt = nextNumTokensEstimate + functionToken <= maxNumTokens;
if (prompt_1 && !isValidPrompt) {
return [3 /*break*/, 9];
}
messages = nextMessages;
numTokens = nextNumTokensEstimate + functionToken;
if (!isValidPrompt) {
return [3 /*break*/, 9];
}
if (!parentMessageId) {
return [3 /*break*/, 9];
}
return [4 /*yield*/, this._getMessageById(parentMessageId)];
case 7:
parentMessage = _c.sent();
if (!parentMessage) {
return [3 /*break*/, 9];
}
parentMessageRole = parentMessage.role || 'user';
nextMessages = nextMessages.slice(0, systemMessageOffset).concat(__spreadArray([
{
role: parentMessageRole,
content: parentMessage.text
}
], nextMessages.slice(systemMessageOffset), true));
parentMessageId = parentMessage.parentMessageId;
_c.label = 8;
case 8:
if (true) return [3 /*break*/, 1];
_c.label = 9;
case 9:
maxTokens = Math.max(1, Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens));
return [2 /*return*/, { messages: messages, maxTokens: maxTokens, numTokens: numTokens }];
}
});
});
};
QwenApi.prototype._getTokenCount = function (text) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
if (!text) {
return [2 /*return*/, 0];
}
// TODO: use a better fix in the tokenizer
text = text.replace(/<\|endoftext\|>/g, '');
return [2 /*return*/, tokenizer.encode(text).length];
});
});
};
QwenApi.prototype._defaultGetMessageById = function (id) {
return __awaiter(this, void 0, void 0, function () {
var res;
return __generator(this, function (_a) {
switch (_a.label) {
case 0: return [4 /*yield*/, this._messageStore.get(id)];
case 1:
res = _a.sent();
return [2 /*return*/, res];
}
});
});
};
QwenApi.prototype._defaultUpsertMessage = function (message) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
switch (_a.label) {
case 0: return [4 /*yield*/, this._messageStore.set(message.request_id, message)];
case 1:
_a.sent();
return [2 /*return*/];
}
});
});
};
return QwenApi;
}());
export { QwenApi };

utils/alibaba/qwen-api.ts (new file, 382 lines)

@@ -0,0 +1,382 @@
import Keyv from 'keyv'
import pTimeout from 'p-timeout'
import QuickLRU from 'quick-lru'
import { v4 as uuidv4 } from 'uuid'
import * as tokenizer from './tokenizer'
import * as types from './types'
import globalFetch from 'node-fetch'
import {qwen, Role} from "./types";
const CHATGPT_MODEL = 'qwen-turbo' // qwen-plus
const USER_LABEL_DEFAULT = 'User'
const ASSISTANT_LABEL_DEFAULT = '同义千问'
export class QwenApi {
protected _apiKey: string
protected _apiBaseUrl: string
protected _debug: boolean
protected _systemMessage: string
protected _completionParams: Omit<
types.qwen.CreateChatCompletionRequest,
'messages' | 'n'
>
protected _maxModelTokens: number
protected _maxResponseTokens: number
protected _fetch: types.FetchFn
protected _getMessageById: types.GetMessageByIdFunction
protected _upsertMessage: types.UpsertMessageFunction
protected _messageStore: Keyv<types.ChatMessage>
/**
* Creates a new client wrapper around Qwen's chat completion API, mimicking the official ChatGPT webapp's functionality as closely as possible.
*
* @param opts
*/
constructor(opts: types.QWenAPIOptions) {
const {
apiKey,
apiBaseUrl = 'https://dashscope.aliyuncs.com/api/v1',
debug = false,
messageStore,
completionParams,
parameters,
systemMessage,
getMessageById,
upsertMessage,
fetch = globalFetch
} = opts
this._apiKey = apiKey
this._apiBaseUrl = apiBaseUrl
this._debug = !!debug
this._fetch = fetch
this._completionParams = {
model: CHATGPT_MODEL,
parameters: {
top_p: 0.5,
top_k: 50,
temperature: 1.0,
seed: 114514,
enable_search: true,
result_format: "text",
incremental_output: false,
...parameters
},
...completionParams
}
this._systemMessage = systemMessage
if (this._systemMessage === undefined) {
const currentDate = new Date().toISOString().split('T')[0]
this._systemMessage = `You are ChatGPT, a large language model trained by Qwen. Answer as concisely as possible.\nKnowledge cutoff: 2021-09-01\nCurrent date: ${currentDate}`
}
this._getMessageById = getMessageById ?? this._defaultGetMessageById
this._upsertMessage = upsertMessage ?? this._defaultUpsertMessage
if (messageStore) {
this._messageStore = messageStore
} else {
this._messageStore = new Keyv<types.ChatMessage, any>({
store: new QuickLRU<string, types.ChatMessage>({ maxSize: 10000 })
})
}
if (!this._apiKey) {
throw new Error('Qwen missing required apiKey')
}
if (!this._fetch) {
throw new Error('Invalid environment; fetch is not defined')
}
if (typeof this._fetch !== 'function') {
throw new Error('Invalid "fetch" is not a function')
}
}
/**
* Sends a message to the Qwen chat completions endpoint, waits for the response
* to resolve, and returns the response.
*
* If you want your response to have historical context, you must provide a valid `parentMessageId`.
*
* If you want to receive a stream of partial responses, use `opts.onProgress`.
*
* Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the Qwen chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
*
* @param message - The prompt message to send
* @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
* @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)
* @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
* @param opts.systemMessage - Optional override for the chat "system message" which acts as instructions to the model (defaults to the ChatGPT system message)
* @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
* @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
* @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
* @param completionParams - Optional overrides to send to the [Qwen chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
*
* @returns The response from ChatGPT
*/
async sendMessage(
text: string,
opts: types.SendMessageOptions = {},
role: Role = 'user',
): Promise<types.ChatMessage> {
const {
parentMessageId,
messageId = uuidv4(),
timeoutMs,
completionParams,
conversationId
} = opts
let { abortSignal } = opts
let abortController: AbortController = null
if (timeoutMs && !abortSignal) {
abortController = new AbortController()
abortSignal = abortController.signal
}
const message: types.ChatMessage = {
role,
id: messageId,
conversationId,
parentMessageId,
text,
}
const latestQuestion = message
const { messages, maxTokens, numTokens } = await this._buildMessages(
text,
role,
opts,
completionParams
)
console.log(`maxTokens: ${maxTokens}, numTokens: ${numTokens}`)
const result: types.ChatMessage = {
role: 'assistant',
id: uuidv4(),
conversationId,
parentMessageId: messageId,
text: undefined,
}
this._completionParams.input = { messages }
const responseP = new Promise<types.ChatMessage>(
async (resolve, reject) => {
const url = `${this._apiBaseUrl}/services/aigc/text-generation/generation`
const headers = {
'Content-Type': 'application/json',
Authorization: `Bearer ${this._apiKey}`
}
const body = {
...this._completionParams,
...completionParams
}
if (this._debug) {
console.log(JSON.stringify(body))
}
if (this._debug) {
console.log(`sendMessage (${numTokens} tokens)`, body)
}
try {
const res = await this._fetch(url, {
method: 'POST',
headers,
body: JSON.stringify(body),
signal: abortSignal
})
if (!res.ok) {
const reason = await res.text()
const msg = `Qwen error ${
res.status || res.statusText
}: ${reason}`
const error = new types.ChatGPTError(msg, { cause: res })
error.statusCode = res.status
error.statusText = res.statusText
return reject(error)
}
const response: types.qwen.CreateChatCompletionResponse =
await res.json()
if (this._debug) {
console.log(response)
}
if (response?.request_id) {
result.id = response.request_id
}
result.detail = response
result.text = response.output.text
return resolve(result)
} catch (err) {
return reject(err)
}
}
).then(async (message) => {
return Promise.all([
this._upsertMessage(latestQuestion),
this._upsertMessage(message)
]).then(() => message)
})
if (timeoutMs) {
if (abortController) {
// This will be called when a timeout occurs in order for us to forcibly
// ensure that the underlying HTTP request is aborted.
;(responseP as any).cancel = () => {
abortController.abort()
}
}
return pTimeout(responseP, {
milliseconds: timeoutMs,
message: 'Qwen timed out waiting for response'
})
} else {
return responseP
}
}
get apiKey(): string {
return this._apiKey
}
set apiKey(apiKey: string) {
this._apiKey = apiKey
}
protected async _buildMessages(text: string, role: Role, opts: types.SendMessageOptions, completionParams: Partial<
Omit<qwen.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
>) {
const { systemMessage = this._systemMessage } = opts
let { parentMessageId } = opts
const userLabel = USER_LABEL_DEFAULT
const assistantLabel = ASSISTANT_LABEL_DEFAULT
// fix number of qwen
const maxNumTokens = 6000
let messages: types.qwen.ChatCompletionRequestMessage[] = []
if (systemMessage) {
messages.push({
role: 'system',
content: systemMessage
})
}
const systemMessageOffset = messages.length
let nextMessages = text
? messages.concat([
{
role,
content: text
}
])
: messages
let functionToken = 0
let numTokens = functionToken
do {
const prompt = nextMessages
.reduce((prompt, message) => {
switch (message.role) {
case 'system':
return prompt.concat([`Instructions:\n${message.content}`])
case 'user':
return prompt.concat([`${userLabel}:\n${message.content}`])
default:
return message.content ? prompt.concat([`${assistantLabel}:\n${message.content}`]) : prompt
}
}, [] as string[])
.join('\n\n')
let nextNumTokensEstimate = await this._getTokenCount(prompt)
for (const m1 of nextMessages) {
nextNumTokensEstimate += await this._getTokenCount('')
}
const isValidPrompt = nextNumTokensEstimate + functionToken <= maxNumTokens
if (prompt && !isValidPrompt) {
break
}
messages = nextMessages
numTokens = nextNumTokensEstimate + functionToken
if (!isValidPrompt) {
break
}
if (!parentMessageId) {
break
}
const parentMessage = await this._getMessageById(parentMessageId)
if (!parentMessage) {
break
}
const parentMessageRole = parentMessage.role || 'user'
nextMessages = nextMessages.slice(0, systemMessageOffset).concat([
{
role: parentMessageRole,
content: parentMessage.text
},
...nextMessages.slice(systemMessageOffset)
])
parentMessageId = parentMessage.parentMessageId
} while (true)
// Use up to 4096 tokens (prompt + response), but try to leave 1000 tokens
// for the response.
const maxTokens = Math.max(
1,
Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens)
)
return { messages, maxTokens, numTokens }
}
protected async _getTokenCount(text: string) {
if (!text) {
return 0
}
// TODO: use a better fix in the tokenizer
text = text.replace(/<\|endoftext\|>/g, '')
return tokenizer.encode(text).length
}
protected async _defaultGetMessageById(
id: string
): Promise<types.ChatMessage> {
const res = await this._messageStore.get(id)
return res
}
protected async _defaultUpsertMessage(
message: types.ChatMessage
): Promise<void> {
await this._messageStore.set(message.request_id, message)
}
}
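
A minimal usage sketch of the QwenApi client defined above; apiKey, sendMessage and timeoutMs are taken from this file, while the environment-variable name for the DashScope key is an assumption:

import { QwenApi } from './qwen-api.js'

const api = new QwenApi({ apiKey: process.env.DASHSCOPE_API_KEY })
const res = await api.sendMessage('Hello, please introduce yourself.', { timeoutMs: 30000 })
console.log(res.text) // plain-text answer taken from response.output.text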


@@ -0,0 +1,6 @@
import { getEncoding } from 'js-tiktoken';
// TODO: make this configurable
var tokenizer = getEncoding('cl100k_base');
export function encode(input) {
return new Uint32Array(tokenizer.encode(input));
}


@@ -0,0 +1,8 @@
import { getEncoding } from 'js-tiktoken'
// TODO: make this configurable
const tokenizer = getEncoding('cl100k_base')
export function encode(input: string): Uint32Array {
return new Uint32Array(tokenizer.encode(input))
}
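
This tokenizer backs QwenApi._getTokenCount for prompt budgeting; a quick, purely illustrative sanity check:

import { encode } from './tokenizer.js'
console.log(encode('hello world').length) // token count under cl100k_base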


@@ -0,0 +1,5 @@
{
"compilerOptions": {
"module": "es2020"
}
}

utils/alibaba/types.js (new file, 26 lines)

@@ -0,0 +1,26 @@
var __extends = (this && this.__extends) || (function () {
var extendStatics = function (d, b) {
extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
return extendStatics(d, b);
};
return function (d, b) {
if (typeof b !== "function" && b !== null)
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
extendStatics(d, b);
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
var ChatGPTError = /** @class */ (function (_super) {
__extends(ChatGPTError, _super);
function ChatGPTError() {
return _super !== null && _super.apply(this, arguments) || this;
}
return ChatGPTError;
}(Error));
export { ChatGPTError };
export var qwen;
(function (qwen) {
})(qwen || (qwen = {}));

utils/alibaba/types.ts (new file, 313 lines)

@@ -0,0 +1,313 @@
import Keyv from 'keyv'
export type Role = 'user' | 'assistant' | 'system'
export type FetchFn = typeof fetch
export type QWenAPIOptions = {
apiKey: string
/** @defaultValue `'https://dashscope.aliyuncs.com/api/v1'` **/
apiBaseUrl?: string
apiOrg?: string
/** @defaultValue `false` **/
debug?: boolean
completionParams?: Partial<
Omit<qwen.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
>
parameters?: qwen.QWenParameters,
systemMessage?: string
messageStore?: Keyv
getMessageById?: GetMessageByIdFunction
upsertMessage?: UpsertMessageFunction
fetch?: FetchFn
}
export type SendMessageOptions = {
/**
* function role name
*/
name?: string
messageId?: string
stream?: boolean
systemMessage?: string
parentMessageId?: string
conversationId?: string
timeoutMs?: number
onProgress?: (partialResponse: ChatMessage) => void
abortSignal?: AbortSignal
completionParams?: Partial<
Omit<qwen.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
>
}
export type MessageActionType = 'next' | 'variant'
export type SendMessageBrowserOptions = {
conversationId?: string
parentMessageId?: string
messageId?: string
action?: MessageActionType
timeoutMs?: number
onProgress?: (partialResponse: ChatMessage) => void
abortSignal?: AbortSignal
}
export interface ChatMessage {
id: string
text: string
role: Role
parentMessageId?: string
conversationId?: string
detail?:
| qwen.CreateChatCompletionResponse
| CreateChatCompletionStreamResponse
}
export class ChatGPTError extends Error {
statusCode?: number
statusText?: string
isFinal?: boolean
accountId?: string
}
/** Returns a chat message from a store by its ID (or null if not found). */
export type GetMessageByIdFunction = (id: string) => Promise<ChatMessage>
/** Upserts a chat message to a store. */
export type UpsertMessageFunction = (message: ChatMessage) => Promise<void>
export interface CreateChatCompletionStreamResponse
extends openai.CreateChatCompletionDeltaResponse {
usage: CreateCompletionStreamResponseUsage
}
export interface CreateCompletionStreamResponseUsage
extends openai.CreateCompletionResponseUsage {
estimated: true
}
/**
* https://chat.openapi.com/backend-api/conversation
*/
export type ConversationJSONBody = {
/**
* The action to take
*/
action: string
/**
* The ID of the conversation
*/
conversation_id?: string
/**
* Prompts to provide
*/
messages: Prompt[]
/**
* The model to use
*/
model: string
/**
* The parent message ID
*/
parent_message_id: string
}
export type Prompt = {
/**
* The content of the prompt
*/
content: PromptContent
/**
* The ID of the prompt
*/
id: string
/**
* The role played in the prompt
*/
role: Role
}
export type ContentType = 'text'
export type PromptContent = {
/**
* The content type of the prompt
*/
content_type: ContentType
/**
* The parts to the prompt
*/
parts: string[]
}
export type ConversationResponseEvent = {
message?: Message
conversation_id?: string
error?: string | null
}
export type Message = {
id: string
content: MessageContent
role: Role
user: string | null
create_time: string | null
update_time: string | null
end_turn: null
weight: number
recipient: string
metadata: MessageMetadata
}
export type MessageContent = {
content_type: string
parts: string[]
}
export type MessageMetadata = any
export namespace qwen {
export interface CreateChatCompletionDeltaResponse {
id: string
object: 'chat.completion.chunk'
created: number
model: string
choices: [
{
delta: {
role: Role
content?: string,
function_call?: {name: string, arguments: string}
}
index: number
finish_reason: string | null
}
]
}
/**
*
* @export
* @interface ChatCompletionRequestMessage
*/
export interface ChatCompletionRequestMessage {
/**
* The role of the author of this message.
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
role: ChatCompletionRequestMessageRoleEnum
/**
* The contents of the message
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
content: string
}
export declare const ChatCompletionRequestMessageRoleEnum: {
readonly System: 'system'
readonly User: 'user'
readonly Assistant: 'assistant'
}
export declare type ChatCompletionRequestMessageRoleEnum =
(typeof ChatCompletionRequestMessageRoleEnum)[keyof typeof ChatCompletionRequestMessageRoleEnum]
export interface QWenInput {
messages: Array<ChatCompletionRequestMessage>
}
export interface QWenParameters {
result_format: string
top_p: number
top_k: number
seed: number
temperature: number
enable_search: boolean
incremental_output: boolean
}
/**
*
* @export
* @interface CreateChatCompletionRequest
*/
export interface CreateChatCompletionRequest {
/**
* ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported.
* @type {string}
* @memberof CreateChatCompletionRequest
*/
model: string
/**
* The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction).
* @type {Array<ChatCompletionRequestMessage>}
* @memberof CreateChatCompletionRequest
*/
input?: QWenInput
parameters: QWenParameters
}
/**
*
* @export
* @interface CreateChatCompletionResponse
*/
export interface CreateChatCompletionResponse {
/**
*
* @type {string}
* @memberof CreateChatCompletionResponse
*/
request_id: string
/**
*
* @type {QWenOutput}
* @memberof CreateChatCompletionResponse
*/
output: QWenOutput
/**
*
* @type {CreateCompletionResponseUsage}
* @memberof CreateChatCompletionResponse
*/
usage?: CreateCompletionResponseUsage
}
export interface QWenOutput {
finish_reason: string
text: string
}
/**
*
* @export
* @interface CreateCompletionResponseUsage
*/
export interface CreateCompletionResponseUsage {
/**
*
* @type {number}
* @memberof CreateCompletionResponseUsage
*/
input_tokens: number
/**
*
* @type {number}
* @memberof CreateCompletionResponseUsage
*/
output_tokens: number
}
}

utils/bilibili/wbi.js (new file, 71 lines)

@@ -0,0 +1,71 @@
import md5 from 'md5'
import fetch from 'node-fetch'
const mixinKeyEncTab = [
46, 47, 18, 2, 53, 8, 23, 32, 15, 50, 10, 31, 58, 3, 45, 35, 27, 43, 5, 49,
33, 9, 42, 19, 29, 28, 14, 39, 12, 38, 41, 13, 37, 48, 7, 16, 24, 55, 40,
61, 26, 17, 0, 1, 60, 51, 30, 4, 22, 25, 54, 21, 56, 59, 6, 63, 57, 62, 11,
36, 20, 34, 44, 52
]
// shuffle the character order of imgKey + subKey to build the mixin key
function getMixinKey (orig) {
let temp = ''
mixinKeyEncTab.forEach((n) => {
temp += orig[n]
})
return temp.slice(0, 32)
}
// apply the wbi signature to the request parameters
function encWbi (params, imgKey, subKey) {
const mixinKey = getMixinKey(imgKey + subKey)
const currTime = Math.round(Date.now() / 1000)
const chrFilter = /[!'()*]/g
let query = []
Object.assign(params, { wts: currTime }) // add the wts field
// re-order the parameters by key
Object.keys(params).sort().forEach((key) => {
query.push(
`${encodeURIComponent(key)}=${encodeURIComponent(
// filter the "!'()*" characters out of the value
params[key].toString().replace(chrFilter, '')
)}`
)
})
query = query.join('&')
const wbiSign = md5(query + mixinKey) // compute w_rid
return query + '&w_rid=' + wbiSign
}
// fetch the latest img_key and sub_key
async function getWbiKeys () {
const resp = await fetch('https://api.bilibili.com/x/web-interface/nav')
const jsonContent = await resp.json() // parse the JSON body (node-fetch responses have no .data)
const imgUrl = jsonContent.data.wbi_img.img_url
const subUrl = jsonContent.data.wbi_img.sub_url
return {
img_key: imgUrl.slice(
imgUrl.lastIndexOf('/') + 1,
imgUrl.lastIndexOf('.')
),
sub_key: subUrl.slice(
subUrl.lastIndexOf('/') + 1,
subUrl.lastIndexOf('.')
)
}
}
// getWbiKeys().then((wbi_keys) => {
// const query = encWbi(
// {
// foo: '114',
// bar: '514',
// baz: 1919810
// },
// wbi_keys.img_key,
// wbi_keys.sub_key
// )
// console.log(query)
// })
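
A sketch of signing a WBI-protected request with the helpers above, assuming encWbi and getWbiKeys are exported from this module; the endpoint and parameters are illustrative only:

import fetch from 'node-fetch'
import { encWbi, getWbiKeys } from './wbi.js' // assumes these helpers are exported

const { img_key, sub_key } = await getWbiKeys()
const query = encWbi({ mid: 114514 }, img_key, sub_key)
const res = await fetch(`https://api.bilibili.com/x/space/wbi/acc/info?${query}`)
console.log((await res.json()).code)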

utils/chat.js (new file, 34 lines)

@@ -0,0 +1,34 @@
export async function getChatHistoryGroup (e, num) {
// if (e.adapter === 'shamrock') {
// return await e.group.getChatHistory(0, num, false)
// } else {
let latestChats = await e.group.getChatHistory(0, 1)
if (latestChats.length > 0) {
let latestChat = latestChats[0]
if (latestChat) {
let seq = latestChat.seq || latestChat.message_id
let chats = []
while (chats.length < num) {
let chatHistory = await e.group.getChatHistory(seq, 20)
chats.push(...chatHistory)
seq = chatHistory[0].seq || chatHistory[0].message_id
}
chats = chats.slice(0, num)
try {
let mm = await e.group.getMemberMap()
chats.forEach(chat => {
let sender = mm.get(chat.sender.user_id)
if (sender) {
chat.sender = sender
}
})
} catch (err) {
logger.warn(err)
}
// console.log(chats)
return chats
}
}
// }
return []
}
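
A sketch of how getChatHistoryGroup could feed the group-context prompt; the handler event `e` comes from Yunzai and the formatting below is illustrative:

import { getChatHistoryGroup } from './chat.js'

async function buildGroupContext (e, length = 50) {
  const chats = await getChatHistoryGroup(e, length)
  return chats
    .map(chat => `${chat.sender?.card || chat.sender?.nickname || chat.sender?.user_id}: ${chat.raw_message}`)
    .join('\n')
}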


@@ -186,6 +186,7 @@ export class ClaudeAIClient {
} else if (streamDataRes.status === 408) {
throw new Error('claude.ai响应超时可能是回复文本太多请调高超时时间重试')
} else {
logger.error(streamDataRes.status, streamDataRes.body)
throw new Error('unknown error')
}
}


@@ -1,24 +1,38 @@
// import { remark } from 'remark'
// import stripMarkdown from 'strip-markdown'
import {exec} from 'child_process'
import { exec } from 'child_process'
import lodash from 'lodash'
import fs from 'node:fs'
import path from 'node:path'
import buffer from 'buffer'
import yaml from 'yaml'
import puppeteer from '../../../lib/puppeteer/puppeteer.js'
import {Config} from './config.js'
import {convertSpeaker, generateVitsAudio, speakers as vitsRoleList} from './tts.js'
import VoiceVoxTTS, {supportConfigurations as voxRoleList} from './tts/voicevox.js'
import AzureTTS, {supportConfigurations as azureRoleList} from './tts/microsoft-azure.js'
import {translate} from './translate.js'
import common from '../../../lib/common/common.js'
import { Config } from './config.js'
import { convertSpeaker, generateVitsAudio, speakers as vitsRoleList } from './tts.js'
import VoiceVoxTTS, { supportConfigurations as voxRoleList } from './tts/voicevox.js'
import AzureTTS, { supportConfigurations as azureRoleList } from './tts/microsoft-azure.js'
import { translate } from './translate.js'
import uploadRecord from './uploadRecord.js'
// export function markdownToText (markdown) {
// return remark()
// .use(stripMarkdown)
// .processSync(markdown ?? '')
// .toString()
// }
import Version from './version.js'
import fetch from 'node-fetch'
let pdfjsLib
try {
pdfjsLib = (await import('pdfjs-dist')).default
} catch (err) {}
let mammoth
try {
mammoth = (await import('mammoth')).default
} catch (err) {}
let XLSX
try {
XLSX = (await import('xlsx')).default
} catch (err) {}
let PPTX
try {
PPTX = (await import('nodejs-pptx')).default
} catch (err) {}
let _puppeteer
try {
@@ -59,12 +73,18 @@ export function randomString (length = 5) {
return str.substr(0, length)
}
export async function upsertMessage (message) {
await redis.set(`CHATGPT:MESSAGE:${message.id}`, JSON.stringify(message))
export async function upsertMessage (message, suffix = '') {
if (suffix) {
suffix = '_' + suffix
}
await redis.set(`CHATGPT:MESSAGE${suffix}:${message.id}`, JSON.stringify(message))
}
export async function getMessageById (id) {
let messageStr = await redis.get(`CHATGPT:MESSAGE:${id}`)
export async function getMessageById (id, suffix = '') {
if (suffix) {
suffix = '_' + suffix
}
let messageStr = await redis.get(`CHATGPT:MESSAGE${suffix}:${id}`)
return JSON.parse(messageStr)
}
@@ -81,17 +101,20 @@ export async function tryTimes (promiseFn, maxTries = 10) {
}
export async function makeForwardMsg (e, msg = [], dec = '') {
let nickname = Bot.nickname
if (Version.isTrss) {
return common.makeForwardMsg(e, msg, dec)
}
let nickname = e.bot.nickname
if (e.isGroup) {
try {
let info = await Bot.getGroupMemberInfo(e.group_id, Bot.uin)
let info = await e.bot.getGroupMemberInfo(e.group_id, getUin(e))
nickname = info.card || info.nickname
} catch (err) {
console.error(`Failed to get group member info: ${err}`)
}
}
let userInfo = {
user_id: Bot.uin,
user_id: getUin(e),
nickname
}
@@ -109,7 +132,7 @@ export async function makeForwardMsg (e, msg = [], dec = '') {
} else if (e.friend) {
forwardMsg = await e.friend.makeForwardMsg(forwardMsg)
} else {
return false
return msg.join('\n')
}
let forwardMsg_json = forwardMsg.data
if (typeof (forwardMsg_json) === 'object') {
@@ -127,9 +150,9 @@ export async function makeForwardMsg (e, msg = [], dec = '') {
}
}
forwardMsg.data = forwardMsg.data
.replace(/\n/g, '')
.replace(/<title color="#777777" size="26">(.+?)<\/title>/g, '___')
.replace(/___+/, `<title color="#777777" size="26">${dec}</title>`)
.replace(/\n/g, '')
.replace(/<title color="#777777" size="26">(.+?)<\/title>/g, '___')
.replace(/___+/, `<title color="#777777" size="26">${dec}</title>`)
if (!is_sign) {
forwardMsg.data = forwardMsg.data
.replace('转发的', '不可转发的')
@@ -787,7 +810,7 @@ export async function getImageOcrText (e) {
let resultArr = []
let eachImgRes = ''
for (let i in img) {
const imgOCR = await Bot.imageOcr(img[i])
const imgOCR = await e.bot.imageOcr(img[i])
for (let text of imgOCR.wordslist) {
eachImgRes += (`${text?.words} \n`)
}
@@ -797,6 +820,7 @@ export async function getImageOcrText (e) {
// logger.warn('resultArr', resultArr)
return resultArr
} catch (err) {
logger.warn('OCR失败可能使用的适配器不支持OCR')
return false
// logger.error(err)
}
@@ -809,8 +833,10 @@ export function getMaxModelTokens (model = 'gpt-3.5-turbo') {
if (model.startsWith('gpt-3.5-turbo')) {
if (model.includes('16k')) {
return 16000
} else {
} else if (model.includes('0613') || model.includes('0314')) {
return 4000
} else {
return 16000
}
} else {
if (model.includes('32k')) {
@@ -821,6 +847,20 @@ export function getMaxModelTokens (model = 'gpt-3.5-turbo') {
}
}
export function getUin (e) {
if (e?.bot?.uin) return e.bot.uin
if (Array.isArray(Bot.uin)) {
if (Config.trssBotUin && Bot.uin.indexOf(Config.trssBotUin) > -1) { return Config.trssBotUin } else {
Bot.uin.forEach((u) => {
if (Bot[u].self_id) {
return Bot[u].self_id
}
})
return Bot.uin[Bot.uin.length - 1]
}
} else return Bot.uin
}
/**
* Generate the audio message that can be sent under the current voice mode
* @param e - context object
@@ -833,6 +873,7 @@ export async function generateAudio (e, pendingText, speakingEmotion, emotionDeg
if (!Config.ttsSpace && !Config.azureTTSKey && !Config.voicevoxSpace) return false
let wav
const speaker = getUserSpeaker(await getUserReplySetting(e))
let ignoreEncode = e.adapter === 'shamrock'
try {
if (Config.ttsMode === 'vits-uma-genshin-honkai' && Config.ttsSpace) {
if (Config.autoJapanese) {
@@ -845,7 +886,7 @@ export async function generateAudio (e, pendingText, speakingEmotion, emotionDeg
}
wav = await generateVitsAudio(pendingText, speaker, '中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)')
} else if (Config.ttsMode === 'azure' && Config.azureTTSKey) {
return await generateAzureAudio(pendingText, speaker, speakingEmotion, emotionDegree)
return await generateAzureAudio(pendingText, speaker, speakingEmotion, emotionDegree, ignoreEncode)
} else if (Config.ttsMode === 'voicevox' && Config.voicevoxSpace) {
pendingText = (await translate(pendingText, '日')).replace('\n', '')
wav = await VoiceVoxTTS.generateAudio(pendingText, {
@@ -859,7 +900,7 @@ export async function generateAudio (e, pendingText, speakingEmotion, emotionDeg
let sendable
try {
try {
sendable = await uploadRecord(wav, Config.ttsMode)
sendable = await uploadRecord(wav, Config.ttsMode, ignoreEncode)
if (!sendable) {
// if synthesis fails, fall back to ffmpeg via segment.record
sendable = segment.record(wav)
@@ -889,9 +930,10 @@ export async function generateAudio (e, pendingText, speakingEmotion, emotionDeg
* @param role - speaker
* @param speakingEmotion - the speaker's emotion
* @param emotionDegree - intensity of the speaker's emotion
* @param ignoreEncode - do not handle audio encoding on the client side
* @returns {Promise<{file: string, type: string}|boolean>}
*/
export async function generateAzureAudio (pendingText, role = '随机', speakingEmotion, emotionDegree = 1) {
export async function generateAzureAudio (pendingText, role = '随机', speakingEmotion, emotionDegree = 1, ignoreEncode = false) {
if (!Config.azureTTSKey) return false
let speaker
try {
@@ -911,7 +953,6 @@ export async function generateAzureAudio (pendingText, role = '随机', speaking
let languagePrefix = azureRoleList.find(config => config.code === speaker).languageDetail.charAt(0)
languagePrefix = languagePrefix.startsWith('E') ? '英' : languagePrefix
pendingText = (await translate(pendingText, languagePrefix)).replace('\n', '')
} else {
let role, languagePrefix
role = azureRoleList[Math.floor(Math.random() * azureRoleList.length)]
@@ -933,11 +974,13 @@ export async function generateAzureAudio (pendingText, role = '随机', speaking
pendingText,
emotionDegree
})
let record = await AzureTTS.generateAudio(pendingText, {
speaker
}, await ssml)
return await uploadRecord(
await AzureTTS.generateAudio(pendingText, {
speaker
}, await ssml)
, Config.ttsMode
record
, Config.ttsMode,
ignoreEncode
)
} catch (err) {
logger.error(err)
@@ -954,3 +997,208 @@ export function getUserSpeaker (userSetting) {
}
}
/**
*
* @param url URL of the file to download
* @param destPath target path, e.g. received/abc.pdf; for now a file with the same name is overwritten
* @param absolute whether destPath is an absolute path; defaults to false, in which case it is resolved under data/chatgpt
* @returns {Promise<string>} final location of the downloaded file
*/
export async function downloadFile (url, destPath, absolute = false) {
let response = await fetch(url)
if (!response.ok) {
throw new Error(`download file http error: status: ${response.status}`)
}
let dest = destPath
if (!absolute) {
const _path = process.cwd()
dest = path.join(_path, 'data', 'chatgpt', dest)
const lastLevelDirPath = path.dirname(dest)
mkdirs(lastLevelDirPath)
}
const fileStream = fs.createWriteStream(dest)
await new Promise((resolve, reject) => {
response.body.pipe(fileStream)
response.body.on('error', err => {
reject(err)
})
fileStream.on('finish', function () {
resolve()
})
})
logger.info(`File downloaded successfully! URL: ${url}, Destination: ${dest}`)
return dest
}
export function isPureText (filename) {
const ext = path.extname(filename).toLowerCase()
// List of file extensions that can be treated as pure text
const textFileExtensions = ['.txt', '.log', '.md', '.csv', '.html', '.css', '.js', '.json', '.xml', '.py', '.java', '.cpp', '.c', '.rb', '.php', '.sql', '.sh', '.pl', '.r', '.swift', '.go', '.ts', '.htm', '.yaml', '.yml', '.ini', '.properties', '.tsv']
// File types that require additional processing
const processingExtensions = ['.docx', '.pptx', '.xlsx', '.pdf', '.epub']
if (textFileExtensions.includes(ext)) {
return 'text'
} else if (processingExtensions.includes(ext)) {
// Return the file extension if additional processing is needed
return ext.replace('.', '')
} else {
return false
}
}
/**
* Extract the text content of a file
* @param fileMsgElem MessageElem
* @param e
* @returns {Promise<{}>} the extracted text content and file name
*/
export async function extractContentFromFile (fileMsgElem, e) {
logger.info('filename: ' + fileMsgElem.name)
let fileType = isPureText(fileMsgElem.name)
if (fileType) {
// a readable file type
let fileUrl = e.isGroup ? await e.group.getFileUrl(fileMsgElem.fid) : await e.friend.getFileUrl(fileMsgElem.fid)
let filePath = await downloadFile(fileUrl, path.join('received', fileMsgElem.name))
switch (fileType) {
case 'pdf': {
if (!pdfjsLib) {
return {}
}
const data = new Uint8Array(fs.readFileSync(filePath))
let loadingTask = pdfjsLib.getDocument(data)
try {
const pdfDocument = await loadingTask.promise
const numPages = pdfDocument.numPages
let pdfText = ''
// limit the number of pages to avoid OOM or overwhelming the LLM
let maxPage = 100
// Iterate through each page and extract text
for (let pageNum = 1; pageNum <= Math.min(numPages, maxPage); ++pageNum) {
const page = await pdfDocument.getPage(pageNum)
const textContent = await page.getTextContent()
const pageText = textContent.items.map(item => item.str).join(' ')
pdfText += pageText
}
return {
content: pdfText,
name: fileMsgElem.name
}
} catch (error) {
console.error('Error reading PDF file:', error)
return {}
}
}
case 'doc': {
logger.error('not supported file type now')
return ''
}
case 'docx': {
if (!mammoth) {
return {}
}
try {
const { value } = await mammoth.extractRawText({ path: filePath })
return {
content: value,
name: fileMsgElem.name
}
} catch (error) {
logger.error('Error reading .docx file:', error)
return {}
}
}
case 'xls': {
logger.error('not supported file type now')
return {}
}
case 'xlsx': {
if (!XLSX) {
return {}
}
try {
const workbook = XLSX.readFile(filePath)
const sheetName = workbook.SheetNames[0] // Assuming the first sheet is the one you want to read
const sheet = workbook.Sheets[sheetName]
const data = XLSX.utils.sheet_to_json(sheet, { header: 1 })
// Convert the 2D array to plain text
return {
content: data.map(row => row.join('\t')).join('\n'),
name: fileMsgElem.name
}
} catch (error) {
console.error('Error reading .xlsx file:', error)
return {}
}
}
case 'ppt': {
logger.error('not supported file type now')
return {}
}
case 'pptx': {
if (!PPTX) {
return {}
}
try {
let pptx = new PPTX.Composer()
await pptx.load(filePath)
let presentationContent = []
let slideNumber = 1
let maxSlideNumber = 60
while (slideNumber <= maxSlideNumber) {
let slide
try {
slide = pptx.getSlide(slideNumber)
} catch (error) {
// Slide number out of range, break the loop
break
}
let slideContent = []
// Iterate through slide elements and extract text content
slide.elements.forEach(element => {
if (element.text) {
slideContent.push(element.text)
}
})
// Add slide content to the presentation content array
presentationContent.push(slideContent.join('\n'))
// Move to the next slide
slideNumber++
}
return {
content: presentationContent.join('\n'),
name: fileMsgElem.name
}
} catch (error) {
console.error('Error reading .pptx file:', error)
return {}
}
}
case 'epub': {
logger.error('not supported file type now')
return {}
}
default: {
// text type
const data = fs.readFileSync(filePath)
let text = String(data)
if (text) {
return {
content: text,
name: fileMsgElem.name
}
}
}
}
return {}
}
}
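
A sketch of the intended wiring between extractContentFromFile and the Sydney client's new toSummaryFileContent option; the handler shape, the file message element and the sydneyClient variable are assumptions, only the two functions and the option name come from this commit:

const fileElem = e.message.find(seg => seg.type === 'file') // assumes the adapter exposes a file element with name/fid
if (fileElem) {
  const toSummaryFileContent = await extractContentFromFile(fileElem, e)
  if (toSummaryFileContent?.content) {
    await sydneyClient.sendMessage('Please read and summarize this document.', { toSummaryFileContent })
  }
}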


@@ -30,11 +30,11 @@ const defaultConfig = {
drawCD: 30,
model: '',
temperature: 0.8,
toneStyle: 'balanced', // or creative, precise
toneStyle: 'Sydney', // or creative, precise
sydney: pureSydneyInstruction,
sydneyReverseProxy: 'https://666102.201666.xyz',
sydneyForceUseReverse: false,
sydneyWebsocketUseProxy: false,
sydneyWebsocketUseProxy: true,
sydneyBrainWash: true,
sydneyBrainWashStrength: 15,
sydneyBrainWashName: 'Sydney',
@@ -93,9 +93,10 @@ const defaultConfig = {
groupContextTip: '你看看我们群里的聊天记录吧,回答问题的时候要主动参考我们的聊天记录进行回答或提问。但要看清楚哦,不要把我和其他人弄混啦,也不要把自己看晕啦~~',
groupContextLength: 50,
enableRobotAt: true,
maxNumUserMessagesInConversation: 20,
maxNumUserMessagesInConversation: 30,
sydneyApologyIgnored: true,
enforceMaster: false,
bingAPDraw: false,
serverPort: 3321,
serverHost: '',
viewHost: '',
@@ -144,7 +145,7 @@ const defaultConfig = {
serpSource: 'ikechan8370',
extraUrl: 'https://cpe.ikechan8370.com',
smartMode: false,
bingCaptchaOneShotUrl: 'http://bingcaptcha.ikechan8370.com/bing',
bingCaptchaOneShotUrl: '',
// claude2
claudeAIOrganizationId: '',
claudeAISessionKey: '',
@@ -152,7 +153,17 @@ const defaultConfig = {
claudeAITimeout: 120,
claudeAIJA3: '772,4865-4866-4867-49195-49199-49196-49200-52393-52392-49171-49172-156-157-47-53,27-5-65281-13-35-0-51-18-16-43-10-45-11-17513-23,29-23-24,0',
claudeAIUA: 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
version: 'v2.7.5'
// TRSS configuration
trssBotUin: '',
// Tongyi Qianwen (Qwen)
qwenApiKey: '',
qwenModel: 'qwen-turbo',
qwenTopP: 0.5,
qwenTopK: 50,
qwenSeed: 0,
qwenTemperature: 1,
qwenEnableSearch: true,
version: 'v2.7.7'
}
const _path = process.cwd()
let config = {}
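
The new qwen* defaults above are presumably consumed by the QwenApi client added in utils/alibaba/qwen-api.js; a sketch of how a consumer module could map them onto that client (the wiring itself is an assumption and is not shown in this diff, but the option names come from qwen-api.js):

import { Config } from './config.js'
import { QwenApi } from './alibaba/qwen-api.js'

const qwenClient = new QwenApi({
  apiKey: Config.qwenApiKey,
  completionParams: { model: Config.qwenModel },
  parameters: {
    top_p: Config.qwenTopP,
    top_k: Config.qwenTopK,
    seed: Config.qwenSeed,
    temperature: Config.qwenTemperature,
    enable_search: Config.qwenEnableSearch
  }
})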


@@ -1,278 +0,0 @@
import { readFileSync } from 'fs'
import { scrape } from './credential.js'
import fetch from 'node-fetch'
import crypto from 'crypto'
// used when test as a single file
// const _path = process.cwd()
const _path = process.cwd() + '/plugins/chatgpt-plugin/utils/poe'
const gqlDir = `${_path}/graphql`
const queries = {
// chatViewQuery: readFileSync(gqlDir + '/ChatViewQuery.graphql', 'utf8'),
addMessageBreakMutation: readFileSync(gqlDir + '/AddMessageBreakMutation.graphql', 'utf8'),
chatPaginationQuery: readFileSync(gqlDir + '/ChatPaginationQuery.graphql', 'utf8'),
addHumanMessageMutation: readFileSync(gqlDir + '/AddHumanMessageMutation.graphql', 'utf8'),
loginMutation: readFileSync(gqlDir + '/LoginWithVerificationCodeMutation.graphql', 'utf8'),
signUpWithVerificationCodeMutation: readFileSync(gqlDir + '/SignupWithVerificationCodeMutation.graphql', 'utf8'),
sendVerificationCodeMutation: readFileSync(gqlDir + '/SendVerificationCodeForLoginMutation.graphql', 'utf8')
}
const optionMap = [
{ title: 'Claude (Powered by Anthropic)', value: 'a2' },
{ title: 'Sage (Powered by OpenAI - logical)', value: 'capybara' },
{ title: 'Dragonfly (Powered by OpenAI - simpler)', value: 'nutria' },
{ title: 'ChatGPT (Powered by OpenAI - current)', value: 'chinchilla' },
{ title: 'Claude+', value: 'a2_2' },
{ title: 'GPT-4', value: 'beaver' }
]
export class PoeClient {
constructor (props) {
this.config = props
}
headers = {
'Content-Type': 'application/json',
Referrer: 'https://poe.com/',
Origin: 'https://poe.com',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
}
chatId = 0
bot = ''
reConnectWs = false
async setCredentials () {
let result = await scrape(this.config.quora_cookie)
console.log(result)
this.config.quora_formkey = result.appSettings.formkey
this.config.channel_name = result.channelName
this.config.app_settings = result.appSettings
// set value
this.headers['poe-formkey'] = this.config.quora_formkey
this.headers['poe-tchannel'] = this.config.channel_name
this.headers.Cookie = this.config.quora_cookie
console.log(this.headers)
}
async subscribe () {
const query = {
queryName: 'subscriptionsMutation',
variables: {
subscriptions: [
{
subscriptionName: 'messageAdded',
query: 'subscription subscriptions_messageAdded_Subscription(\n $chatId: BigInt!\n) {\n messageAdded(chatId: $chatId) {\n id\n messageId\n creationTime\n state\n ...ChatMessage_message\n ...chatHelpers_isBotMessage\n }\n}\n\nfragment ChatMessageDownvotedButton_message on Message {\n ...MessageFeedbackReasonModal_message\n ...MessageFeedbackOtherModal_message\n}\n\nfragment ChatMessageDropdownMenu_message on Message {\n id\n messageId\n vote\n text\n ...chatHelpers_isBotMessage\n}\n\nfragment ChatMessageFeedbackButtons_message on Message {\n id\n messageId\n vote\n voteReason\n ...ChatMessageDownvotedButton_message\n}\n\nfragment ChatMessageOverflowButton_message on Message {\n text\n ...ChatMessageDropdownMenu_message\n ...chatHelpers_isBotMessage\n}\n\nfragment ChatMessageSuggestedReplies_SuggestedReplyButton_message on Message {\n messageId\n}\n\nfragment ChatMessageSuggestedReplies_message on Message {\n suggestedReplies\n ...ChatMessageSuggestedReplies_SuggestedReplyButton_message\n}\n\nfragment ChatMessage_message on Message {\n id\n messageId\n text\n author\n linkifiedText\n state\n ...ChatMessageSuggestedReplies_message\n ...ChatMessageFeedbackButtons_message\n ...ChatMessageOverflowButton_message\n ...chatHelpers_isHumanMessage\n ...chatHelpers_isBotMessage\n ...chatHelpers_isChatBreak\n ...chatHelpers_useTimeoutLevel\n ...MarkdownLinkInner_message\n}\n\nfragment MarkdownLinkInner_message on Message {\n messageId\n}\n\nfragment MessageFeedbackOtherModal_message on Message {\n id\n messageId\n}\n\nfragment MessageFeedbackReasonModal_message on Message {\n id\n messageId\n}\n\nfragment chatHelpers_isBotMessage on Message {\n ...chatHelpers_isHumanMessage\n ...chatHelpers_isChatBreak\n}\n\nfragment chatHelpers_isChatBreak on Message {\n author\n}\n\nfragment chatHelpers_isHumanMessage on Message {\n author\n}\n\nfragment chatHelpers_useTimeoutLevel on Message {\n id\n state\n text\n messageId\n}\n'
},
{
subscriptionName: 'viewerStateUpdated',
query: 'subscription subscriptions_viewerStateUpdated_Subscription {\n viewerStateUpdated {\n id\n ...ChatPageBotSwitcher_viewer\n }\n}\n\nfragment BotHeader_bot on Bot {\n displayName\n ...BotImage_bot\n}\n\nfragment BotImage_bot on Bot {\n profilePicture\n displayName\n}\n\nfragment BotLink_bot on Bot {\n displayName\n}\n\nfragment ChatPageBotSwitcher_viewer on Viewer {\n availableBots {\n id\n ...BotLink_bot\n ...BotHeader_bot\n }\n}\n'
}
]
},
query: 'mutation subscriptionsMutation(\n $subscriptions: [AutoSubscriptionQuery!]!\n) {\n autoSubscribe(subscriptions: $subscriptions) {\n viewer {\n id\n }\n }\n}\n'
}
await this.makeRequest(query)
}
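// Every GraphQL request is signed: poe-tag-id is the MD5 of the JSON payload
// concatenated with the formkey and a fixed salt string.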
async makeRequest (request) {
let payload = JSON.stringify(request)
let baseString = payload + this.headers['poe-formkey'] + 'WpuLMiXEKKE98j56k'
const md5 = crypto.createHash('md5').update(baseString).digest('hex')
const response = await fetch('https://poe.com/api/gql_POST', {
method: 'POST',
headers: Object.assign(this.headers, {
'poe-tag-id': md5,
'content-type': 'application/json'
}),
body: payload
})
let text = await response.text()
try {
let result = JSON.parse(text)
console.log({ result })
return result
} catch (e) {
console.error(text)
throw e
}
}
async getBot (displayName) {
let lastError
let retry = 10
while (retry >= 0) {
let url = `https://poe.com/_next/data/${this.nextData.buildId}/${displayName}.json`
let r = await fetch(url, {
headers: this.headers
})
let res = await r.text()
try {
let chatData = (JSON.parse(res)).pageProps.payload.chatOfBotDisplayName
return chatData
} catch (e) {
lastError = res
retry--
}
}
throw new Error(lastError)
}
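// Parses the __NEXT_DATA__ blob embedded in the poe.com homepage to obtain the
// viewer, formkey and build id, then resolves chat metadata for every available bot.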
async getChatId () {
let r = await fetch('https://poe.com', {
headers: this.headers
})
let text = await r.text()
const jsonRegex = /<script id="__NEXT_DATA__" type="application\/json">(.+?)<\/script>/
const jsonText = text.match(jsonRegex)[1]
const nextData = JSON.parse(jsonText)
this.nextData = nextData
this.viewer = nextData.props.pageProps.payload.viewer
this.formkey = nextData.props.formkey
let bots = this.viewer.availableBots
this.bots = {}
for (let i = 0; i < bots.length; i++) {
let bot = bots[i]
let chatData = await this.getBot(bot.displayName)
this.bots[chatData.defaultBotObject.nickname] = chatData
}
console.log(this.bots)
}
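// Inserts a chat break to reset the bot's context; on failure reConnectWs is
// flipped so the caller knows to re-login.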
async clearContext (bot) {
try {
const data = await this.makeRequest({
query: `${queries.addMessageBreakMutation}`,
variables: { chatId: this.config.chat_ids[bot] }
})
if (!data.data) {
this.reConnectWs = true // for websocket purpose
console.log('ON TRY! Could not clear context! Trying to reLogin..')
}
return data
} catch (e) {
this.reConnectWs = true // for websocket purpose
console.log('ON CATCH! Could not clear context! Trying to reLogin..')
return e
}
}
async sendMsg (bot, query) {
try {
const data = await this.makeRequest({
query: `${queries.addHumanMessageMutation}`,
variables: {
bot,
chatId: this.bots[bot].chatId,
query,
source: null,
withChatBreak: false
}
})
console.log(data)
if (!data.data) {
this.reConnectWs = true // for cli websocket purpose
console.log('Could not send message! Trying to reLogin..')
}
return data
} catch (e) {
this.reConnectWs = true // for cli websocket purpose
console.error(e)
return e
}
}
async getHistory (bot) {
try {
let response = await this.makeRequest({
query: `${queries.chatPaginationQuery}`,
variables: {
before: null,
bot,
last: 25
}
})
return response.data.chatOfBot.messagesConnection.edges
.map(({ node: { messageId, text, authorNickname } }) => ({
messageId,
text,
authorNickname
}))
} catch (e) {
console.log('There has been an error while fetching your history!')
}
}
async deleteMessages (msgIds) {
await this.makeRequest({
queryName: 'MessageDeleteConfirmationModal_deleteMessageMutation_Mutation',
variables: {
messageIds: msgIds
},
query: 'mutation MessageDeleteConfirmationModal_deleteMessageMutation_Mutation(\n $messageIds: [BigInt!]!\n){\n messagesDelete(messageIds: $messageIds) {\n edgeIds\n }\n}\n'
})
}
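// Polls the newest message every 2 seconds until it is in the 'complete' state
// and authored by the requested bot, then returns its text.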
async getResponse (bot) {
let text
let state
let authorNickname
try {
while (true) {
await new Promise((resolve) => setTimeout(resolve, 2000))
let response = await this.makeRequest({
query: `${queries.chatPaginationQuery}`,
variables: {
before: null,
bot,
last: 1
}
})
let base = response.data.chatOfBot.messagesConnection.edges
let lastEdgeIndex = base.length - 1
text = base[lastEdgeIndex].node.text
authorNickname = base[lastEdgeIndex].node.authorNickname
state = base[lastEdgeIndex].node.state
if (state === 'complete' && authorNickname === bot) {
break
}
}
} catch (e) {
console.log('Could not get response!')
return {
status: false,
message: 'failed',
data: null
}
}
return {
status: true,
message: 'success',
data: text
}
}
}
async function testPoe () {
// const key = 'deb04db9f2332a3287b7d2545061af62'
// const channel = 'poe-chan55-8888-ujygckefewomybvkqfrp'
const cookie = 'p-b=WSvmyvjHVJoMtQVkirtn-A%3D%3D'
let client = new PoeClient({
// quora_formkey: key,
// channel_name: channel,
quora_cookie: cookie
})
await client.setCredentials()
await client.getChatId()
let ai = 'a2'
await client.sendMsg(ai, '你说话不是很通顺啊')
const response = await client.getResponse(ai)
return response
}
// testPoe().then(res => {
// console.log(res)
// })

View file

@ -17,7 +17,7 @@ export class APTool extends AbstractTool {
func = async function (opts, e) {
let { prompt } = opts
if (e.at === Bot.uin) {
if (e.at === e.bot.uin) {
e.at = null
}
e.atBot = false

View file

@ -28,13 +28,17 @@ export class EditCardTool extends AbstractTool {
qq = isNaN(qq) || !qq ? e.sender.user_id : parseInt(qq.trim())
groupId = isNaN(groupId) || !groupId ? e.group_id : parseInt(groupId.trim())
let group = await Bot.pickGroup(groupId)
let mm = await group.getMemberMap()
if (!mm.has(qq)) {
return `failed, the user ${qq} is not in group ${groupId}`
}
if (mm.get(Bot.uin).role === 'member') {
return `failed, you, not user, don't have permission to edit card in group ${groupId}`
let group = await e.bot.pickGroup(groupId)
try {
let mm = await group.getMemberMap()
if (!mm.has(qq)) {
return `failed, the user ${qq} is not in group ${groupId}`
}
if (mm.get(e.bot.uin) && mm.get(e.bot.uin).role === 'member') {
return `failed, you, not user, don't have permission to edit card in group ${groupId}`
}
} catch (err) {
logger.error('获取群信息失败,可能使用的底层协议不完善')
}
logger.info('edit card: ', groupId, qq)
await group.setCard(qq, card)

View file

@ -20,7 +20,7 @@ export class EliMovieTool extends AbstractTool {
if (yesOrNo === 'no') {
return 'tell user why you don\'t want to check'
}
if (e.at === Bot.uin) {
if (e.at === e.bot.uin) {
e.at = null
}
e.atBot = false

View file

@ -27,11 +27,11 @@ export class HandleMessageMsgTool extends AbstractTool {
break
}
case 'essence': {
await Bot.setEssenceMessage(messageId)
await e.bot.setEssenceMessage(messageId)
break
}
case 'un-essence': {
await Bot.removeEssenceMessage(messageId)
await e.bot.removeEssenceMessage(messageId)
break
}
}

View file

@ -31,13 +31,13 @@ export class JinyanTool extends AbstractTool {
qq = qq !== 'all'
? isNaN(qq) || !qq ? e.sender.user_id : parseInt(qq.trim())
: 'all'
let group = await Bot.pickGroup(groupId)
let group = await e.bot.pickGroup(groupId)
if (qq !== 'all') {
let m = await group.getMemberMap()
if (!m.has(qq)) {
return `failed, the user ${qq} is not in group ${groupId}`
}
if (m.get(Bot.uin).role === 'member') {
if (m.get(e.bot.uin).role === 'member') {
return `failed, you, not user, don't have permission to mute other in group ${groupId}`
}
}

View file

@ -30,7 +30,7 @@ export class KickOutTool extends AbstractTool {
return 'the user is not admin, he cannot kickout other people. he should be punished'
}
console.log('kickout', groupId, qq)
let group = await Bot.pickGroup(groupId)
let group = await e.bot.pickGroup(groupId)
await group.kickMember(qq)
if (isPunish === 'true') {
return `the user ${qq} has been kicked out from group ${groupId} as punishment because of his 不正当行为`

View file

@ -24,7 +24,7 @@ export class QueryGenshinTool extends AbstractTool {
func = async function (opts, e) {
let { qq, uid = '', character = '' } = opts
qq = isNaN(qq) || !qq ? e.sender.user_id : parseInt(qq.trim())
if (e.at === Bot.uin) {
if (e.at === e.bot.uin) {
e.at = null
}
e.atBot = false

View file

@ -24,7 +24,7 @@ export class QueryStarRailTool extends AbstractTool {
func = async function (opts, e) {
let { qq, uid, character } = opts
qq = isNaN(qq) || !qq ? e.sender.user_id : parseInt(qq.trim())
if (e.at === Bot.uin) {
if (e.at === e.bot.uin) {
e.at = null
}
e.atBot = false

View file

@ -70,7 +70,7 @@ export class SendAudioMessageTool extends AbstractTool {
const defaultTarget = e.isGroup ? e.group_id : e.sender.user_id
const target = isNaN(targetGroupIdOrQQNumber) || !targetGroupIdOrQQNumber
? defaultTarget
: parseInt(targetGroupIdOrQQNumber) === Bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
: parseInt(targetGroupIdOrQQNumber) === e.bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
try {
switch (ttsMode) {
case 1:
@ -102,14 +102,19 @@ export class SendAudioMessageTool extends AbstractTool {
return `audio generation failed, error: ${JSON.stringify(err)}`
}
if (sendable) {
let groupList = await Bot.getGroupList()
let groupList
try {
groupList = await e.bot.getGroupList()
} catch (err) {
groupList = e.bot.gl
}
try {
if (groupList.get(target)) {
let group = await Bot.pickGroup(target)
let group = await e.bot.pickGroup(target)
await group.sendMsg(sendable)
return 'audio has been sent to group' + target
} else {
let user = await Bot.pickFriend(target)
let user = await e.bot.pickFriend(target)
await user.sendMsg(sendable)
return 'audio has been sent to user' + target
}

View file

@ -26,12 +26,16 @@ export class SendAvatarTool extends AbstractTool {
const defaultTarget = e.isGroup ? e.group_id : e.sender.user_id
const target = isNaN(targetGroupIdOrQQNumber) || !targetGroupIdOrQQNumber
? defaultTarget
: parseInt(targetGroupIdOrQQNumber) === Bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
let groupList = await Bot.getGroupList()
: parseInt(targetGroupIdOrQQNumber) === e.bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
let groupList
try {
groupList = await e.bot.getGroupList()
} catch (err) {
groupList = e.bot.gl
}
console.log('sendAvatar', target, pictures)
if (groupList.get(target)) {
let group = await Bot.pickGroup(target)
let group = await e.bot.pickGroup(target)
await group.sendMsg(pictures)
}
return `the ${pictures.length > 1 ? 'users: ' + qq + '\'s avatar' : 'avatar'} has been sent to group ${target}`

View file

@ -26,12 +26,12 @@ export class SendVideoTool extends AbstractTool {
const defaultTarget = e.isGroup ? e.group_id : e.sender.user_id
const target = isNaN(targetGroupIdOrQQNumber) || !targetGroupIdOrQQNumber
? defaultTarget
: parseInt(targetGroupIdOrQQNumber) === Bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
: parseInt(targetGroupIdOrQQNumber) === e.bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
let msg = []
try {
let { arcurl, title, pic, description, videoUrl, headers, bvid, author, play, pubdate, like, honor } = await getBilibili(id)
let group = await Bot.pickGroup(target)
let group = await e.bot.pickGroup(target)
msg.push(title.replace(/(<([^>]+)>)/ig, '') + '\n')
msg.push(`UP主${author} 发布日期:${formatDate(new Date(pubdate * 1000))} 播放量:${play} 点赞:${like}\n`)
msg.push(arcurl + '\n')

View file

@ -23,16 +23,21 @@ export class SendDiceTool extends AbstractTool {
const defaultTarget = e.isGroup ? e.group_id : e.sender.user_id
const target = isNaN(targetGroupIdOrQQNumber) || !targetGroupIdOrQQNumber
? defaultTarget
: parseInt(targetGroupIdOrQQNumber) === Bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
let groupList = await Bot.getGroupList()
: parseInt(targetGroupIdOrQQNumber) === e.bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
let groupList
try {
groupList = await e.bot.getGroupList()
} catch (err) {
groupList = e.bot.gl
}
num = isNaN(num) || !num ? 1 : num > 5 ? 5 : num
if (groupList.get(target)) {
let group = await Bot.pickGroup(target, true)
let group = await e.bot.pickGroup(target, true)
for (let i = 0; i < num; i++) {
await group.sendMsg(segment.dice())
}
} else {
let friend = await Bot.pickFriend(target)
let friend = await e.bot.pickFriend(target)
await friend.sendMsg(segment.dice())
}
if (num === 5) {

View file

@ -23,16 +23,21 @@ export class SendMessageToSpecificGroupOrUserTool extends AbstractTool {
const defaultTarget = e.isGroup ? e.group_id : e.sender.user_id
const target = isNaN(targetGroupIdOrQQNumber) || !targetGroupIdOrQQNumber
? defaultTarget
: parseInt(targetGroupIdOrQQNumber) === Bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
: parseInt(targetGroupIdOrQQNumber) === e.bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
let groupList = await Bot.getGroupList()
let groupList
try {
groupList = await e.bot.getGroupList()
} catch (err) {
groupList = e.bot.gl
}
try {
if (groupList.get(target)) {
let group = await Bot.pickGroup(target)
let group = await e.bot.pickGroup(target)
await group.sendMsg(await convertFaces(msg, true, e))
return 'msg has been sent to group' + target
} else {
let user = await Bot.pickFriend(target)
let user = await e.bot.pickFriend(target)
await user.sendMsg(msg)
return 'msg has been sent to user' + target
}

View file

@ -23,10 +23,10 @@ export class SendMusicTool extends AbstractTool {
const defaultTarget = e.isGroup ? e.group_id : e.sender.user_id
const target = isNaN(targetGroupIdOrQQNumber) || !targetGroupIdOrQQNumber
? defaultTarget
: parseInt(targetGroupIdOrQQNumber) === Bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
: parseInt(targetGroupIdOrQQNumber) === e.bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
try {
let group = await Bot.pickGroup(target)
let group = await e.bot.pickGroup(target)
await group.shareMusic('163', id)
return `the music has been shared to ${target}`
} catch (e) {

View file

@ -22,7 +22,7 @@ export class SendPictureTool extends AbstractTool {
const defaultTarget = e.isGroup ? e.group_id : e.sender.user_id
const target = isNaN(targetGroupIdOrQQNumber) || !targetGroupIdOrQQNumber
? defaultTarget
: parseInt(targetGroupIdOrQQNumber) === Bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
: parseInt(targetGroupIdOrQQNumber) === e.bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
// 处理错误url和picture留空的情况
const urlRegex = /(?:(?:https?|ftp):\/\/)?(?:\S+(?::\S*)?@)?(?:((?:(?:[a-z0-9\u00a1-\u4dff\u9fd0-\uffff][a-z0-9\u00a1-\u4dff\u9fd0-\uffff_-]{0,62})?[a-z0-9\u00a1-\u4dff\u9fd0-\uffff]\.)+(?:[a-z\u00a1-\u4dff\u9fd0-\uffff]{2,}\.?))(?::\d{2,5})?)(?:\/[\w\u00a1-\u4dff\u9fd0-\uffff$-_.+!*'(),%]+)*(?:\?(?:[\w\u00a1-\u4dff\u9fd0-\uffff$-_.+!*(),%:@&=]|(?:[\[\]])|(?:[\u00a1-\u4dff\u9fd0-\uffff]))*)?(?:#(?:[\w\u00a1-\u4dff\u9fd0-\uffff$-_.+!*'(),;:@&=]|(?:[\[\]]))*)?\/?/i
if (/https:\/\/example.com/.test(urlOfPicture) || !urlOfPicture || !urlRegex.test(urlOfPicture)) urlOfPicture = ''
@ -32,14 +32,19 @@ export class SendPictureTool extends AbstractTool {
let pictures = urlOfPicture.trim().split(' ')
logger.mark('pictures to send: ', pictures)
pictures = pictures.map(img => segment.image(img))
let groupList = await Bot.getGroupList()
let groupList
try {
groupList = await e.bot.getGroupList()
} catch (err) {
groupList = e.bot.gl
}
try {
if (groupList.get(target)) {
let group = await Bot.pickGroup(target)
let group = await e.bot.pickGroup(target)
await group.sendMsg(pictures)
return 'picture has been sent to group' + target
} else {
let user = await Bot.pickFriend(target)
let user = await e.bot.pickFriend(target)
await user.sendMsg(pictures)
return 'picture has been sent to user' + target
}

View file

@ -19,13 +19,18 @@ export class SendRPSTool extends AbstractTool {
const defaultTarget = e.isGroup ? e.group_id : e.sender.user_id
const target = isNaN(targetGroupIdOrQQNumber) || !targetGroupIdOrQQNumber
? defaultTarget
: parseInt(targetGroupIdOrQQNumber) === Bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
let groupList = await Bot.getGroupList()
: parseInt(targetGroupIdOrQQNumber) === e.bot.uin ? defaultTarget : parseInt(targetGroupIdOrQQNumber)
let groupList
try {
groupList = await e.bot.getGroupList()
} catch (err) {
groupList = e.bot.gl
}
if (groupList.get(target)) {
let group = await Bot.pickGroup(target, true)
let group = await e.bot.pickGroup(target, true)
await group.sendMsg(segment.rps(num))
} else {
let friend = await Bot.pickFriend(target)
let friend = await e.bot.pickFriend(target)
await friend.sendMsg(segment.rps(num))
}
}

View file

@ -28,12 +28,12 @@ export class SetTitleTool extends AbstractTool {
qq = isNaN(qq) || !qq ? e.sender.user_id : parseInt(qq.trim())
groupId = isNaN(groupId) || !groupId ? e.group_id : parseInt(groupId.trim())
let group = await Bot.pickGroup(groupId)
let group = await e.bot.pickGroup(groupId)
let mm = await group.getMemberMap()
if (!mm.has(qq)) {
return `failed, the user ${qq} is not in group ${groupId}`
}
if (mm.get(Bot.uin).role !== 'owner') {
if (mm.get(e.bot.uin).role !== 'owner') {
return 'only group owner can give title'
}
logger.info('set title: ', groupId, qq)

View file

@ -2,14 +2,8 @@ import { Config } from './config.js'
import fetch from 'node-fetch'
import _ from 'lodash'
import { wrapTextByLanguage } from './common.js'
let proxy
if (Config.proxy) {
try {
proxy = (await import('https-proxy-agent')).default
} catch (e) {
console.warn('未安装https-proxy-agent请在插件目录下执行pnpm add https-proxy-agent')
}
}
import { getProxy } from './proxy.js'
let proxy = getProxy()
const newFetch = (url, options = {}) => {
const defaultOptions = Config.proxy

View file

@ -9,7 +9,7 @@ import crypto from 'crypto'
import child_process from 'child_process'
import { Config } from './config.js'
import path from 'path'
import { mkdirs } from './common.js'
import { mkdirs, getUin } from './common.js'
let module
try {
module = await import('oicq')
@ -41,7 +41,7 @@ if (module) {
// import { pcm2slk } from 'node-silk'
let errors = {}
async function uploadRecord (recordUrl, ttsMode = 'vits-uma-genshin-honkai') {
async function uploadRecord (recordUrl, ttsMode = 'vits-uma-genshin-honkai', ignoreEncode = false) {
let recordType = 'url'
let tmpFile = ''
if (ttsMode === 'azure') {
@ -50,6 +50,9 @@ async function uploadRecord (recordUrl, ttsMode = 'vits-uma-genshin-honkai') {
recordType = 'buffer'
tmpFile = `data/chatgpt/tts/tmp/${crypto.randomUUID()}.wav`
}
if (ignoreEncode) {
return segment.record(recordUrl)
}
let result
if (Config.ttsHD) {
result = await getPttBuffer(recordUrl, Bot.config.ffmpeg_path, false)
@ -144,7 +147,7 @@ async function uploadRecord (recordUrl, ttsMode = 'vits-uma-genshin-honkai') {
2: 3,
5: {
1: Contactable.target,
2: Bot.uin,
2: getUin(),
3: 0,
4: hash,
5: buf.length,
@ -188,7 +191,7 @@ async function uploadRecord (recordUrl, ttsMode = 'vits-uma-genshin-honkai') {
const fid = rsp[11].toBuffer()
const b = core.pb.encode({
1: 4,
2: Bot.uin,
2: getUin(),
3: fid,
4: hash,
5: hash.toString('hex') + '.amr',

utils/version.js Normal file (33 lines)
View file

@ -0,0 +1,33 @@
import fs from 'fs'
/**
* from miao-plugin
*
* @type {any}
*/
let packageJson = JSON.parse(fs.readFileSync('package.json', 'utf8'))
const yunzaiVersion = packageJson.version
const isV3 = yunzaiVersion[0] === '3'
let isMiao = false; let isTrss = false
let name = 'Yunzai-Bot'
if (packageJson.name === 'miao-yunzai') {
isMiao = true
name = 'Miao-Yunzai'
} else if (packageJson.name === 'trss-yunzai') {
isMiao = true
isTrss = true
name = 'TRSS-Yunzai'
}
let Version = {
isV3,
isMiao,
isTrss,
name,
get yunzai () {
return yunzaiVersion
}
}
export default Version

View file

@ -10,11 +10,11 @@ try {
}
export class Tokenizer {
async getHistory (groupId, date = new Date(), duration = 0) {
async getHistory (e, groupId, date = new Date(), duration = 0, userId) {
if (!groupId) {
throw new Error('no valid group id')
}
let group = Bot.pickGroup(groupId, true)
let group = e.bot.pickGroup(groupId, true)
let latestChat = await group.getChatHistory(0, 1)
let seq = latestChat[0].seq
let chats = latestChat
@ -41,14 +41,15 @@ export class Tokenizer {
let startOfSpecifiedDate = date.getTime()
// if duration > 0, go back to the specified number of hours
if (duration > 0) {
// duration should be in range [0, 24]
duration = Math.min(duration, 24)
startOfSpecifiedDate = currentTime - (duration * 60 * 60 * 1000)
// duration should be in range [0, 24]
// duration = Math.min(duration, 24)
startOfSpecifiedDate = currentTime - (duration * 60 * 60 * 1000)
}
// Step 4: Get the end of the specified date by adding 24 hours (in milliseconds)
const endOfSpecifiedDate = startOfSpecifiedDate + (24 * 60 * 60 * 1000)
while (isTimestampInDateRange(chats[0]?.time, startOfSpecifiedDate, endOfSpecifiedDate) && isTimestampInDateRange(chats[chats.length - 1]?.time, startOfSpecifiedDate, endOfSpecifiedDate)) {
// Step 4: Get the end of the specified date by current time
const endOfSpecifiedDate = currentTime
while (isTimestampInDateRange(chats[0]?.time, startOfSpecifiedDate, endOfSpecifiedDate) &&
isTimestampInDateRange(chats[chats.length - 1]?.time, startOfSpecifiedDate, endOfSpecifiedDate)) {
let chatHistory = await group.getChatHistory(seq, 20)
if (chatHistory.length === 1) {
if (chats[0].seq === chatHistory[0].seq) {
@ -58,45 +59,51 @@ export class Tokenizer {
}
chats.push(...chatHistory)
chats.sort(compareByTime)
seq = chatHistory[0].seq
seq = chatHistory?.[0]?.seq
if (!seq) {
break
}
if (Config.debug) {
logger.info(`拉取到${chatHistory.length}条聊天记录,当前已累计获取${chats.length}条聊天记录,继续拉...`)
}
}
chats = chats.filter(chat => isTimestampInDateRange(chat.time, startOfSpecifiedDate, endOfSpecifiedDate))
if (userId) {
chats = chats.filter(chat => chat.sender.user_id === userId)
}
return chats
}
async getKeywordTopK (groupId, topK = 100, duration = 0) {
async getKeywordTopK (e, groupId, topK = 100, duration = 0, userId) {
if (!nodejieba) {
throw new Error('未安装node-rs/jieba娱乐功能-词云统计不可用')
}
// duration represents the number of hours to go back, should in range [0, 24]
let chats = await this.getHistory(groupId, new Date(), duration)
let duration_str = duration > 0 ? `${duration}小时` : '今日'
logger.mark(`聊天记录拉取完成,获取到${duration_str}${chats.length}条聊天记录,准备分词中`)
let chats = await this.getHistory(e, groupId, new Date(), duration, userId)
let durationStr = duration > 0 ? `${duration}小时` : '今日'
logger.mark(`聊天记录拉取完成,获取到${durationStr}${chats.length}条聊天记录,准备分词中`)
const _path = process.cwd()
let stopWordsPath = `${_path}/plugins/chatgpt-plugin/utils/wordcloud/cn_stopwords.txt`
const data = fs.readFileSync(stopWordsPath)
const stopWords = String(data)?.split('\n') || []
let chatContent = chats
.map(c => c.message
//只统计文本内容
.filter(item => item.type == 'text')
.map(textItem => `${textItem.text}`)
.join("").trim()
// 只统计文本内容
.filter(item => item.type == 'text')
.map(textItem => `${textItem.text}`)
.join('').trim()
)
.map(c => {
let length = c.length
let threshold = 10
if (length < 100 && length > 50) {
threshold = 6
} else if (length <= 50 && length > 25) {
threshold = 3
} else if (length <= 25) {
threshold = 2
}
// let length = c.length
let threshold = 2
// if (length < 100 && length > 50) {
// threshold = 6
// } else if (length <= 50 && length > 25) {
// threshold = 3
// } else if (length <= 25) {
// threshold = 2
// }
return nodejieba.extract(c, threshold)
})
.reduce((acc, curr) => acc.concat(curr), [])
@ -132,6 +139,85 @@ export class Tokenizer {
}
}
export class ShamrockTokenizer extends Tokenizer {
async getHistory (e, groupId, date = new Date(), duration = 0, userId) {
logger.mark('当前使用Shamrock适配器')
if (!groupId) {
throw new Error('no valid group id')
}
let group = e.bot.pickGroup(groupId, true)
// 直接加大力度
let pageSize = 500
let chats = (await group.getChatHistory(0, pageSize, false)) || []
// Get the current timestamp
let currentTime = date.getTime()
// Step 2: Set the hours, minutes, seconds, and milliseconds to 0
date.setHours(0, 0, 0, 0)
// Step 3: Calculate the timestamp representing the start of the specified date
// duration represents the number of hours to go back
// if duration is 0, keeping the original date (start of today)
let startOfSpecifiedDate = date.getTime()
// if duration > 0, go back to the specified number of hours
if (duration > 0) {
// duration should be in range [0, 24]
// duration = Math.min(duration, 24)
startOfSpecifiedDate = currentTime - (duration * 60 * 60 * 1000)
}
// Step 4: Get the end of the specified date by currentTime
const endOfSpecifiedDate = currentTime
let cursor = chats.length
// -------------------------------------------------------
// | | |
// -------------------------------------------------------
// ^ ^
// long ago cursor+pageSize cursor current
while (isTimestampInDateRange(chats[0]?.time, startOfSpecifiedDate, endOfSpecifiedDate)) {
// 由于Shamrock消息是从最新的开始拉结束时由于动态更新一旦有人发送消息就会立刻停止所以不判断结束时间
// 拉到后面会巨卡所以增大page减少次数
pageSize = Math.floor(Math.max(cursor / 2, pageSize))
cursor = cursor + pageSize
let retries = 3
let chatHistory
while (retries >= 0) {
try {
chatHistory = await group.getChatHistory(0, cursor, false)
break
} catch (err) {
if (retries === 0) {
logger.error(err)
}
retries--
}
}
if (retries < 0) {
logger.warn('拉不动了,就这样吧')
break
}
if (chatHistory.length === 1) {
break
}
if (chatHistory.length === chats.length) {
// 没有了!再拉也没有了
break
}
let oldLength = chats.length
chats = chatHistory
// chats.sort(compareByTime)
if (Config.debug) {
logger.info(`拉取到${chats.length - oldLength}条聊天记录,当前已累计获取${chats.length}条聊天记录,继续拉...`)
}
}
chats = chats.filter(chat => isTimestampInDateRange(chat.time, startOfSpecifiedDate, endOfSpecifiedDate))
if (userId) {
chats = chats.filter(chat => chat.sender.user_id === userId)
}
return chats
}
}
function isTimestampInDateRange (timestamp, startOfSpecifiedDate, endOfSpecifiedDate) {
if (!timestamp) {
return false

View file

@ -1,11 +1,19 @@
import { Tokenizer } from './tokenizer.js'
import { ShamrockTokenizer, Tokenizer } from './tokenizer.js'
import { render } from '../common.js'
export async function makeWordcloud (e, groupId, duration = 0) {
let tokenizer = new Tokenizer()
let topK = await tokenizer.getKeywordTopK(groupId, 100, duration)
export async function makeWordcloud (e, groupId, duration = 0, userId) {
let tokenizer = getTokenizer(e)
let topK = await tokenizer.getKeywordTopK(e, groupId, 100, duration, userId)
let list = JSON.stringify(topK)
// let list = topK
console.log(list)
await render(e, 'chatgpt-plugin', 'wordcloud/index', { list })
logger.info(list)
let img = await render(e, 'chatgpt-plugin', 'wordcloud/index', { list }, { retType: 'base64' })
await e.reply(img, true)
}
function getTokenizer (e) {
if (e.adapter === 'shamrock') {
return new ShamrockTokenizer()
} else {
return new Tokenizer()
}
}
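// Usage sketch (assumed caller context, not part of this commit): a command handler
// could pass the event, the group, a 6-hour window and an optional user, e.g.
// await makeWordcloud(e, e.group_id, 6, e.sender.user_id)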

View file

@ -3,6 +3,7 @@ import { Config } from '../config.js'
import { createParser } from 'eventsource-parser'
import https from 'https'
import WebSocket from 'ws'
import { createHmac } from 'crypto'
const referer = atob('aHR0cHM6Ly94aW5naHVvLnhmeXVuLmNuL2NoYXQ/aWQ9')
const origin = atob('aHR0cHM6Ly94aW5naHVvLnhmeXVuLmNu')
@ -14,13 +15,7 @@ try {
} catch (err) {
logger.warn('未安装form-data无法使用星火模式')
}
let crypto
try {
crypto = (await import('crypto')).default
} catch (err) {
logger.warn('未安装crypto无法使用星火api模式')
}
async function getKeyv() {
async function getKeyv () {
let Keyv
try {
Keyv = (await import('keyv')).default
@ -30,7 +25,7 @@ async function getKeyv() {
return Keyv
}
export default class XinghuoClient {
constructor(opts) {
constructor (opts) {
this.cache = opts.cache
this.ssoSessionId = opts.ssoSessionId
this.headers = {
@ -41,7 +36,7 @@ export default class XinghuoClient {
}
}
apiErrorInfo(code) {
apiErrorInfo (code) {
switch (code) {
case 10000: return '升级为ws出现错误'
case 10001: return '通过ws读取用户的消息出错'
@ -74,7 +69,7 @@ export default class XinghuoClient {
}
}
async initCache() {
async initCache () {
if (!this.conversationsCache) {
const cacheOptions = this.cache || {}
cacheOptions.namespace = cacheOptions.namespace || 'xh'
@ -83,36 +78,37 @@ export default class XinghuoClient {
}
}
async getWsUrl() {
if (!crypto) return false
async getWsUrl () {
const APISecret = Config.xhAPISecret
const APIKey = Config.xhAPIKey
let APILink = '/v1.1/chat'
if (Config.xhmode == 'apiv2') {
if (Config.xhmode === 'apiv2') {
APILink = '/v2.1/chat'
} else if (Config.xhmode === 'apiv3') {
APILink = '/v3.1/chat'
}
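// Spark API auth: HMAC-SHA256 of "host / date / GET <APILink> HTTP/1.1" with the API
// secret, base64-encoded and wrapped into an authorization string that is appended
// to the wss URL as query parameters.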
const date = new Date().toGMTString()
const algorithm = 'hmac-sha256'
const headers = 'host date request-line'
const signatureOrigin = `host: spark-api.xf-yun.com\ndate: ${date}\nGET ${APILink} HTTP/1.1`
const hmac = crypto.createHmac('sha256', APISecret)
const hmac = createHmac('sha256', APISecret)
hmac.update(signatureOrigin)
const signature = hmac.digest('base64')
const authorizationOrigin = `api_key="${APIKey}", algorithm="${algorithm}", headers="${headers}", signature="${signature}"`
const authorization = Buffer.from(authorizationOrigin).toString('base64')
const v = {
authorization: authorization,
date: date,
host: "spark-api.xf-yun.com"
authorization,
date,
host: 'spark-api.xf-yun.com'
}
const url = `wss://spark-api.xf-yun.com${APILink}?${Object.keys(v).map(key => `${key}=${v[key]}`).join('&')}`
return url
}
async uploadImage(url) {
async uploadImage (url) {
// 获取图片
let response = await fetch(url, {
method: 'GET',
method: 'GET'
})
const blob = await response.blob()
const arrayBuffer = await blob.arrayBuffer()
@ -123,7 +119,7 @@ export default class XinghuoClient {
const respOss = await fetch('https://xinghuo.xfyun.cn/iflygpt/oss/sign', {
method: 'POST',
headers: {
Cookie: 'ssoSessionId=' + this.ssoSessionId + ';',
Cookie: 'ssoSessionId=' + this.ssoSessionId + ';'
},
body: formData
})
@ -165,7 +161,7 @@ export default class XinghuoClient {
}
}
async apiMessage(prompt, chatId, ePrompt = []) {
async apiMessage (prompt, chatId, ePrompt = []) {
if (!chatId) chatId = (Math.floor(Math.random() * 1000000) + 100000).toString()
// 初始化缓存
@ -178,8 +174,9 @@ export default class XinghuoClient {
// 获取ws链接
const wsUrl = Config.xhmode == 'assistants' ? Config.xhAssistants : await this.getWsUrl()
if (!wsUrl) throw new Error('缺少依赖crypto。请安装依赖后重试')
if (!wsUrl) throw new Error('获取ws链接失败')
let domain = 'general'
if (Config.xhmode == 'apiv2') { domain = 'generalv2' } else if (Config.xhmode == 'apiv3') { domain = 'generalv3' }
// 编写消息内容
const wsSendData = {
header: {
@ -188,7 +185,7 @@ export default class XinghuoClient {
},
parameter: {
chat: {
domain: Config.xhmode == 'api' ? "general" : "generalv2",
domain,
temperature: Config.xhTemperature, // 核采样阈值
max_tokens: Config.xhMaxTokens, // tokens最大长度
chat_id: chatId,
@ -197,10 +194,10 @@ export default class XinghuoClient {
},
payload: {
message: {
"text": [
text: [
...ePrompt,
...conversation.messages,
{ "role": "user", "content": prompt }
{ role: 'user', content: prompt }
]
}
}
@ -223,8 +220,8 @@ export default class XinghuoClient {
const half = Math.floor(conversation.messages.length / 2)
conversation.messages.splice(0, half)
await this.conversationsCache.set(conversationKey, conversation)
resolve({
id: (Math.floor(Math.random() * 1000000) + 100000).toString() ,
resolve({
id: (Math.floor(Math.random() * 1000000) + 100000).toString(),
response: '对话已达到上限，已自动清理对话，请重试'
})
} else {
@ -250,8 +247,8 @@ export default class XinghuoClient {
conversation.messages.splice(0, half)
}
await this.conversationsCache.set(conversationKey, conversation)
resolve({
id: chatId ,
resolve({
id: chatId,
response: resMessage
})
}
@ -265,7 +262,7 @@ export default class XinghuoClient {
})
}
async webMessage(prompt, chatId, botId) {
async webMessage (prompt, chatId, botId) {
if (!FormData) {
throw new Error('缺少依赖form-data。请安装依赖后重试')
}
@ -275,7 +272,7 @@ export default class XinghuoClient {
formData.append('clientType', '2')
formData.append('chatId', chatId)
if (prompt.image) {
prompt.text = prompt.text.replace("[图片]", "") // 清理消息中中首个被使用的图片
prompt.text = prompt.text.replace('[图片]', '') // 清理消息中首个被使用的图片
const imgdata = await this.uploadImage(prompt.image)
if (imgdata) {
formData.append('fileUrl', imgdata.url)
@ -307,7 +304,7 @@ export default class XinghuoClient {
logger.error('星火statusCode' + statusCode)
}
let response = ''
function onMessage(data) {
function onMessage (data) {
// console.log(data)
if (data === '<end>') {
return resolve({
@ -374,11 +371,11 @@ export default class XinghuoClient {
})
}
async sendMessage(prompt, option) {
async sendMessage (prompt, option) {
let chatId = option?.chatId
let image = option?.image
if (Config.xhmode == 'api' || Config.xhmode == 'apiv2' || Config.xhmode == 'assistants') {
if (Config.xhmode == 'api' || Config.xhmode == 'apiv2' || Config.xhmode == 'apiv3' || Config.xhmode == 'assistants') {
if (!Config.xhAppId || !Config.xhAPISecret || !Config.xhAPIKey) throw new Error('未配置api')
let Prompt = []
// 设定
@ -390,9 +387,9 @@ export default class XinghuoClient {
logger.warn('星火设定序列化失败,本次对话不附带设定')
}
} else {
Prompt = Config.xhPrompt ? [{ "role": "user", "content": Config.xhPrompt }] : []
Prompt = Config.xhPrompt ? [{ role: 'user', content: Config.xhPrompt }] : []
}
if(Config.xhPromptEval) {
if (Config.xhPromptEval) {
Prompt.forEach(obj => {
try {
obj.content = obj.content.replace(/{{(.*?)}}/g, (match, variable) => {
@ -421,7 +418,7 @@ export default class XinghuoClient {
if (!chatId) {
chatId = (await this.createChatList()).chatListId
}
let { response } = await this.webMessage({ text: prompt, image: image }, chatId, botId)
let { response } = await this.webMessage({ text: prompt, image }, chatId, botId)
// logger.info(response)
// let responseText = atob(response)
// 处理图片
@ -445,14 +442,14 @@ export default class XinghuoClient {
return {
conversationId: chatId,
text: response,
images: images
images
}
} else {
throw new Error('星火模式错误')
}
}
async createChatList(bot = false) {
async createChatList (bot = false) {
let createChatListRes = await fetch(createChatUrl, {
method: 'POST',
headers: Object.assign(this.headers, {
@ -481,6 +478,6 @@ export default class XinghuoClient {
}
}
function atob(s) {
function atob (s) {
return Buffer.from(s, 'base64').toString()
}