feat: preliminary support for function call (WIP)

ikechan8370 2023-06-23 01:09:12 +08:00
parent 4a4dceec18
commit 97b3acbf3b
24 changed files with 13607 additions and 841 deletions

apps/chat.js

@@ -1,9 +1,9 @@
import plugin from '../../../lib/plugins/plugin.js'
import _ from 'lodash'
import { Config, defaultOpenAIAPI } from '../utils/config.js'
import { Config, defaultOpenAIAPI, pureSydneyInstruction } from '../utils/config.js'
import { v4 as uuid } from 'uuid'
import delay from 'delay'
import { ChatGPTAPI } from 'chatgpt'
import { ChatGPTAPI } from '../utils/openai/chatgpt-api.js'
import { BingAIClient } from '@waylaidwanderer/chatgpt-api'
import SydneyAIClient from '../utils/SydneyAIClient.js'
import { PoeClient } from '../utils/poe/index.js'
@@ -12,7 +12,8 @@ import VoiceVoxTTS from '../utils/tts/voicevox.js'
import { translate } from '../utils/translate.js'
import fs from 'fs'
import {
render, renderUrl,
render,
renderUrl,
getMessageById,
makeForwardMsg,
upsertMessage,
@@ -20,7 +21,14 @@ import {
completeJSON,
isImage,
getUserData,
getDefaultReplySetting, isCN, getMasterQQ, getUserReplySetting, getImageOcrText, getImg, processList
getDefaultReplySetting,
isCN,
getMasterQQ,
getUserReplySetting,
getImageOcrText,
getImg,
processList,
getMaxModelTokens
} from '../utils/common.js'
import { ChatGPTPuppeteer } from '../utils/browser.js'
import { KeyvFile } from 'keyv-file'
@@ -36,6 +44,13 @@ import { ChatgptManagement } from './management.js'
import { getPromptByName } from '../utils/prompts.js'
import BingDrawClient from '../utils/BingDraw.js'
import XinghuoClient from '../utils/xinghuo/xinghuo.js'
import { JinyanTool } from '../utils/tools/JinyanTool.js'
import { SendMusicTool } from '../utils/tools/SendMusicTool.js'
import { SendVideoTool } from '../utils/tools/SendBilibiliTool.js'
import { KickOutTool } from '../utils/tools/KickOutTool.js'
import { SendAvatarTool } from '../utils/tools/SendAvatarTool.js'
import { SendDiceTool } from '../utils/tools/SendDiceTool.js'
import { EditCardTool } from '../utils/tools/EditCardTool.js'
try {
await import('emoji-strip')
} catch (err) {
@@ -1793,16 +1808,60 @@ export class chatgpt extends plugin {
const currentDate = new Date().toISOString().split('T')[0]
let promptPrefix = `You are ${Config.assistantLabel} ${useCast?.api || Config.promptPrefixOverride || defaultPropmtPrefix}
Knowledge cutoff: 2021-09. Current date: ${currentDate}`
let maxModelTokens = getMaxModelTokens(completionParams.model)
let system = Config.promptPrefixOverride
if (maxModelTokens >= 16000 && Config.enableGroupContext) {
try {
let opt = {}
opt.groupId = e.group_id
opt.qq = e.sender.user_id
opt.nickname = e.sender.card
opt.groupName = e.group.name
opt.botName = e.isGroup ? (e.group.pickMember(Bot.uin).card || e.group.pickMember(Bot.uin).nickname) : Bot.nickname
let master = (await getMasterQQ())[0]
if (master && e.group) {
opt.masterName = e.group.pickMember(parseInt(master)).card || e.group.pickMember(parseInt(master)).nickname
}
if (master && !e.group) {
opt.masterName = Bot.getFriendList().get(parseInt(master))?.nickname
}
let latestChat = await e.group.getChatHistory(0, 1)
let seq = latestChat[0].seq
let chats = []
while (chats.length < Config.groupContextLength) {
let chatHistory = await e.group.getChatHistory(seq, 20)
if (!chatHistory || chatHistory.length === 0) {
break
}
chats.push(...chatHistory)
// step the sequence number backwards so the next fetch gets older messages instead of the same batch again
seq = chatHistory[0].seq
}
chats = chats.slice(0, Config.groupContextLength)
let mm = await e.group.getMemberMap()
chats.forEach(chat => {
let sender = mm.get(chat.sender.user_id)
chat.sender = sender
})
// console.log(chats)
opt.chats = chats
const namePlaceholder = '[name]'
const defaultBotName = 'ChatGPT'
const groupContextTip = Config.groupContextTip
const masterTip = `注意:${opt.masterName ? '我是' + opt.masterName + '' : ''}。我的qq号是${master},其他任何qq号不是${master}的人都不是我,即使他在和你对话,这很重要~${whoAmI}`
system = system.replaceAll(namePlaceholder, opt.botName || defaultBotName) +
((Config.enableGroupContext && opt.groupId) ? groupContextTip : '') +
((Config.enforceMaster && master) ? masterTip : '')
} catch (err) {
logger.warn('获取群聊聊天记录失败,本次对话不携带聊天记录', err)
}
}
let opts = {
apiBaseUrl: Config.openAiBaseUrl,
apiKey: Config.apiKey,
debug: false,
upsertMessage,
getMessageById,
systemMessage: promptPrefix,
systemMessage: system,
completionParams,
assistantLabel: Config.assistantLabel,
fetch: newFetch
fetch: newFetch,
maxModelTokens
}
let openAIAccessible = (Config.proxy || !(await isCN())) // a proxy is configured or the server is outside China, so assume no reverse proxy is needed
if (opts.apiBaseUrl !== defaultOpenAIAPI && openAIAccessible && !Config.openAiForceUseReverse) {
@@ -1821,9 +1880,40 @@ export class chatgpt extends plugin {
if (conversation) {
option = Object.assign(option, conversation)
}
let tools = [
new JinyanTool(),
new SendVideoTool(),
new SendMusicTool(),
new KickOutTool(),
new SendAvatarTool(),
new SendDiceTool(),
new EditCardTool()
]
let funcMap = {}
tools.forEach(tool => {
funcMap[tool.name] = {
exec: tool.func,
function: tool.function()
}
})
if (!option.completionParams) {
option.completionParams = {}
}
option.completionParams.functions = Object.keys(funcMap).map(k => funcMap[k].function)
let msg
try {
msg = await this.chatGPTApi.sendMessage(prompt, option)
logger.info(msg)
while (msg.functionCall) {
let { name, arguments: args } = msg.functionCall
if (!funcMap[name]) {
logger.warn(`the model requested an unknown function: ${name}`)
break
}
let functionResult = await funcMap[name].exec(JSON.parse(args))
logger.mark(`function ${name} execution result: ${functionResult}`)
option.parentMessageId = msg.id
option.name = name
msg = await this.chatGPTApi.sendMessage(functionResult, option, 'function')
logger.info(msg)
}
} catch (err) {
if (err.message?.includes('context_length_exceeded')) {
logger.warn(err)
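
For context, the loop above implements OpenAI's function-calling round-trip: the model answers with a function_call instead of text, the plugin executes the matching tool, and the tool's result is sent back as a role 'function' message. Roughly what travels over the wire (illustrative sketch only; the schema objects come from AbstractTool.function(), and all argument values here are made up):

const requestBody = {
  model: 'gpt-3.5-turbo-0613',
  messages: [/* chat history */],
  functions: tools.map(tool => tool.function()) // [{ name, description, parameters }, ...]
}
// a possible assistant response choosing a tool; note that arguments is a JSON *string*
const assistantMessage = {
  role: 'assistant',
  content: null,
  function_call: { name: 'jinyan', arguments: '{"qq":"10001","groupId":"20002","time":"60"}' }
}
// chat.js JSON.parses arguments, awaits funcMap[name].exec(args), then calls
// sendMessage(functionResult, option, 'function') so the model can write the final reply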

index.js

@@ -36,4 +36,5 @@ logger.info(`当前版本${Config.version}`)
logger.info('仓库地址 https://github.com/ikechan8370/chatgpt-plugin')
logger.info('插件群号 559567232')
logger.info('**************************************')
export { apps }

package-lock.json (generated, new file, 6931 lines)

File diff suppressed because it is too large

utils/common.js

@@ -786,3 +786,19 @@ export function processList (whitelist, blacklist) {
blacklist = Array.from(new Set(blacklist)).filter(value => /^\^?[1-9]\d{5,9}$/.test(value))
return [whitelist, blacklist]
}
export function getMaxModelTokens (model = 'gpt-3.5-turbo') {
if (model.startsWith('gpt-3.5-turbo')) {
if (model.includes('16k')) {
return 16000
} else {
return 4000
}
} else {
if (model.includes('32k')) {
return 32000
} else {
// plain gpt-4 has an 8k context window, so returning 16000 would overrun it
return 8000
}
}
}
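
A quick check of the budget helper above (the returns are deliberately rounded below the models' true context sizes):

getMaxModelTokens('gpt-3.5-turbo')     // 4000
getMaxModelTokens('gpt-3.5-turbo-16k') // 16000
getMaxModelTokens('gpt-4')             // 8000
getMaxModelTokens('gpt-4-32k')         // 32000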

utils/openai/chatgpt-api.js (new file, 495 lines)

@@ -0,0 +1,495 @@
var __assign = (this && this.__assign) || function () {
__assign = Object.assign || function(t) {
for (var s, i = 1, n = arguments.length; i < n; i++) {
s = arguments[i];
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
t[p] = s[p];
}
return t;
};
return __assign.apply(this, arguments);
};
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (g && (g = 0, op[0] && (_ = 0)), _) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
var __spreadArray = (this && this.__spreadArray) || function (to, from, pack) {
if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
if (ar || !(i in from)) {
if (!ar) ar = Array.prototype.slice.call(from, 0, i);
ar[i] = from[i];
}
}
return to.concat(ar || Array.prototype.slice.call(from));
};
import Keyv from 'keyv';
import pTimeout from 'p-timeout';
import QuickLRU from 'quick-lru';
import { v4 as uuidv4 } from 'uuid';
import * as tokenizer from './tokenizer.js';
import * as types from './types.js';
import globalFetch from 'node-fetch';
import { fetchSSE } from './fetch-sse.js';
var CHATGPT_MODEL = 'gpt-3.5-turbo-0613';
var USER_LABEL_DEFAULT = 'User';
var ASSISTANT_LABEL_DEFAULT = 'ChatGPT';
var ChatGPTAPI = /** @class */ (function () {
/**
* Creates a new client wrapper around OpenAI's chat completion API, mimicking the official ChatGPT webapp's functionality as closely as possible.
*
* @param apiKey - OpenAI API key (required).
* @param apiOrg - Optional OpenAI API organization (optional).
* @param apiBaseUrl - Optional override for the OpenAI API base URL.
* @param debug - Optional enables logging debugging info to stdout.
* @param completionParams - Param overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
* @param maxModelTokens - Optional override for the maximum number of tokens allowed by the model's context. Defaults to 4000.
* @param maxResponseTokens - Optional override for the maximum number of tokens allowed for the model's response. Defaults to 1000.
* @param messageStore - Optional [Keyv](https://github.com/jaredwray/keyv) store to persist chat messages to. If not provided, messages will be lost when the process exits.
* @param getMessageById - Optional function to retrieve a message by its ID. If not provided, the default implementation will be used (using an in-memory `messageStore`).
* @param upsertMessage - Optional function to insert or update a message. If not provided, the default implementation will be used (using an in-memory `messageStore`).
* @param fetch - Optional override for the `fetch` implementation to use. Defaults to the global `fetch` function.
*/
function ChatGPTAPI(opts) {
var apiKey = opts.apiKey, apiOrg = opts.apiOrg, _a = opts.apiBaseUrl, apiBaseUrl = _a === void 0 ? 'https://api.openai.com/v1' : _a, _b = opts.debug, debug = _b === void 0 ? false : _b, messageStore = opts.messageStore, completionParams = opts.completionParams, systemMessage = opts.systemMessage, _c = opts.maxModelTokens, maxModelTokens = _c === void 0 ? 4000 : _c, _d = opts.maxResponseTokens, maxResponseTokens = _d === void 0 ? 1000 : _d, getMessageById = opts.getMessageById, upsertMessage = opts.upsertMessage, _e = opts.fetch, fetch = _e === void 0 ? globalFetch : _e;
this._apiKey = apiKey;
this._apiOrg = apiOrg;
this._apiBaseUrl = apiBaseUrl;
this._debug = !!debug;
this._fetch = fetch;
this._completionParams = __assign({ model: CHATGPT_MODEL, temperature: 0.8, top_p: 1.0, presence_penalty: 1.0 }, completionParams);
this._systemMessage = systemMessage;
if (this._systemMessage === undefined) {
var currentDate = new Date().toISOString().split('T')[0];
this._systemMessage = "You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible.\nKnowledge cutoff: 2021-09-01\nCurrent date: ".concat(currentDate);
}
this._maxModelTokens = maxModelTokens;
this._maxResponseTokens = maxResponseTokens;
this._getMessageById = getMessageById !== null && getMessageById !== void 0 ? getMessageById : this._defaultGetMessageById;
this._upsertMessage = upsertMessage !== null && upsertMessage !== void 0 ? upsertMessage : this._defaultUpsertMessage;
if (messageStore) {
this._messageStore = messageStore;
}
else {
this._messageStore = new Keyv({
store: new QuickLRU({ maxSize: 10000 })
});
}
if (!this._apiKey) {
throw new Error('OpenAI missing required apiKey');
}
if (!this._fetch) {
throw new Error('Invalid environment; fetch is not defined');
}
if (typeof this._fetch !== 'function') {
throw new Error('Invalid "fetch" is not a function');
}
}
/**
* Sends a message to the OpenAI chat completions endpoint, waits for the response
* to resolve, and returns the response.
*
* If you want your response to have historical context, you must provide a valid `parentMessageId`.
*
* If you want to receive a stream of partial responses, use `opts.onProgress`.
*
* Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the OpenAI chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
*
* @param message - The prompt message to send
* @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
* @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)
* @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
* @param opts.systemMessage - Optional override for the chat "system message" which acts as instructions to the model (defaults to the ChatGPT system message)
* @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
* @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
* @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
* @param completionParams - Optional overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
*
* @returns The response from ChatGPT
*/
ChatGPTAPI.prototype.sendMessage = function (text, opts, role) {
if (opts === void 0) { opts = {}; }
if (role === void 0) { role = 'user'; }
return __awaiter(this, void 0, void 0, function () {
var parentMessageId, _a, messageId, timeoutMs, onProgress, _b, stream, completionParams, conversationId, abortSignal, abortController, message, latestQuestion, _c, messages, maxTokens, numTokens, result, responseP;
var _this = this;
return __generator(this, function (_d) {
switch (_d.label) {
case 0:
parentMessageId = opts.parentMessageId, _a = opts.messageId, messageId = _a === void 0 ? uuidv4() : _a, timeoutMs = opts.timeoutMs, onProgress = opts.onProgress, _b = opts.stream, stream = _b === void 0 ? onProgress ? true : false : _b, completionParams = opts.completionParams, conversationId = opts.conversationId;
abortSignal = opts.abortSignal;
abortController = null;
if (timeoutMs && !abortSignal) {
abortController = new AbortController();
abortSignal = abortController.signal;
}
message = {
role: role,
id: messageId,
conversationId: conversationId,
parentMessageId: parentMessageId,
text: text,
name: opts.name
};
latestQuestion = message;
return [4 /*yield*/, this._buildMessages(text, role, opts)];
case 1:
_c = _d.sent(), messages = _c.messages, maxTokens = _c.maxTokens, numTokens = _c.numTokens;
result = {
role: 'assistant',
id: uuidv4(),
conversationId: conversationId,
parentMessageId: messageId,
text: '',
functionCall: null
};
responseP = new Promise(function (resolve, reject) { return __awaiter(_this, void 0, void 0, function () {
var url, headers, body, res, reason, msg, error, response, message_1, res_1, err_1;
var _a, _b;
return __generator(this, function (_c) {
switch (_c.label) {
case 0:
url = "".concat(this._apiBaseUrl, "/chat/completions");
headers = {
'Content-Type': 'application/json',
Authorization: "Bearer ".concat(this._apiKey)
};
body = __assign(__assign(__assign({ max_tokens: maxTokens }, this._completionParams), completionParams), { messages: messages, stream: stream });
// Support multiple organizations
// See https://platform.openai.com/docs/api-reference/authentication
if (this._apiOrg) {
headers['OpenAI-Organization'] = this._apiOrg;
}
if (this._debug) {
// console.log(JSON.stringify(body))
console.log("sendMessage (".concat(numTokens, " tokens)"), body);
}
if (!stream) return [3 /*break*/, 1];
fetchSSE(url, {
method: 'POST',
headers: headers,
body: JSON.stringify(body),
signal: abortSignal,
onMessage: function (data) {
var _a;
if (data === '[DONE]') {
result.text = result.text.trim();
return resolve(result);
}
try {
var response = JSON.parse(data);
if (response.id) {
result.id = response.id;
}
if ((_a = response.choices) === null || _a === void 0 ? void 0 : _a.length) {
var delta = response.choices[0].delta;
if (delta.function_call) {
if (delta.function_call.name) {
result.functionCall = {
name: delta.function_call.name,
arguments: delta.function_call.arguments
};
}
else {
result.functionCall.arguments = (result.functionCall.arguments || '') + delta.function_call.arguments;
}
}
else {
result.delta = delta.content;
if (delta === null || delta === void 0 ? void 0 : delta.content)
result.text += delta.content;
}
if (delta.role) {
result.role = delta.role;
}
result.detail = response;
onProgress === null || onProgress === void 0 ? void 0 : onProgress(result);
}
}
catch (err) {
console.warn('OpenAI stream SSE event unexpected error', err);
return reject(err);
}
}
}, this._fetch).catch(reject);
return [3 /*break*/, 7];
case 1:
_c.trys.push([1, 6, , 7]);
return [4 /*yield*/, this._fetch(url, {
method: 'POST',
headers: headers,
body: JSON.stringify(body),
signal: abortSignal
})];
case 2:
res = _c.sent();
if (!!res.ok) return [3 /*break*/, 4];
return [4 /*yield*/, res.text()];
case 3:
reason = _c.sent();
msg = "OpenAI error ".concat(res.status || res.statusText, ": ").concat(reason);
error = new types.ChatGPTError(msg, { cause: res });
error.statusCode = res.status;
error.statusText = res.statusText;
return [2 /*return*/, reject(error)];
case 4: return [4 /*yield*/, res.json()];
case 5:
response = _c.sent();
if (this._debug) {
console.log(response);
}
if (response === null || response === void 0 ? void 0 : response.id) {
result.id = response.id;
}
if ((_a = response === null || response === void 0 ? void 0 : response.choices) === null || _a === void 0 ? void 0 : _a.length) {
message_1 = response.choices[0].message;
if (message_1.content) {
result.text = message_1.content;
}
else if (message_1.function_call) {
result.functionCall = message_1.function_call;
}
if (message_1.role) {
result.role = message_1.role;
}
}
else {
res_1 = response;
return [2 /*return*/, reject(new Error("OpenAI error: ".concat(((_b = res_1 === null || res_1 === void 0 ? void 0 : res_1.detail) === null || _b === void 0 ? void 0 : _b.message) || (res_1 === null || res_1 === void 0 ? void 0 : res_1.detail) || 'unknown')))];
}
result.detail = response;
return [2 /*return*/, resolve(result)];
case 6:
err_1 = _c.sent();
return [2 /*return*/, reject(err_1)];
case 7: return [2 /*return*/];
}
});
}); }).then(function (message) { return __awaiter(_this, void 0, void 0, function () {
var promptTokens, completionTokens, err_2;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
if (!(message.detail && !message.detail.usage)) return [3 /*break*/, 4];
_a.label = 1;
case 1:
_a.trys.push([1, 3, , 4]);
promptTokens = numTokens;
return [4 /*yield*/, this._getTokenCount(message.text)];
case 2:
completionTokens = _a.sent();
message.detail.usage = {
prompt_tokens: promptTokens,
completion_tokens: completionTokens,
total_tokens: promptTokens + completionTokens,
estimated: true
};
return [3 /*break*/, 4];
case 3:
err_2 = _a.sent();
return [3 /*break*/, 4];
case 4: return [2 /*return*/, Promise.all([
this._upsertMessage(latestQuestion),
this._upsertMessage(message)
]).then(function () { return message; })];
}
});
}); });
if (timeoutMs) {
if (abortController) {
// This will be called when a timeout occurs in order for us to forcibly
// ensure that the underlying HTTP request is aborted.
;
responseP.cancel = function () {
abortController.abort();
};
}
return [2 /*return*/, pTimeout(responseP, {
milliseconds: timeoutMs,
message: 'OpenAI timed out waiting for response'
})];
}
else {
return [2 /*return*/, responseP];
}
return [2 /*return*/];
}
});
});
};
Object.defineProperty(ChatGPTAPI.prototype, "apiKey", {
get: function () {
return this._apiKey;
},
set: function (apiKey) {
this._apiKey = apiKey;
},
enumerable: false,
configurable: true
});
Object.defineProperty(ChatGPTAPI.prototype, "apiOrg", {
get: function () {
return this._apiOrg;
},
set: function (apiOrg) {
this._apiOrg = apiOrg;
},
enumerable: false,
configurable: true
});
ChatGPTAPI.prototype._buildMessages = function (text, role, opts) {
return __awaiter(this, void 0, void 0, function () {
var _a, systemMessage, parentMessageId, userLabel, assistantLabel, maxNumTokens, messages, systemMessageOffset, nextMessages, numTokens, prompt_1, nextNumTokensEstimate, isValidPrompt, parentMessage, parentMessageRole, maxTokens;
return __generator(this, function (_b) {
switch (_b.label) {
case 0:
_a = opts.systemMessage, systemMessage = _a === void 0 ? this._systemMessage : _a;
parentMessageId = opts.parentMessageId;
userLabel = USER_LABEL_DEFAULT;
assistantLabel = ASSISTANT_LABEL_DEFAULT;
maxNumTokens = this._maxModelTokens - this._maxResponseTokens;
messages = [];
if (systemMessage) {
messages.push({
role: 'system',
content: systemMessage
});
}
systemMessageOffset = messages.length;
nextMessages = text
? messages.concat([
{
role: role,
content: text,
name: opts.name
}
])
: messages;
numTokens = 0;
_b.label = 1;
case 1:
prompt_1 = nextMessages
.reduce(function (prompt, message) {
switch (message.role) {
case 'system':
return prompt.concat(["Instructions:\n".concat(message.content)]);
case 'user':
return prompt.concat(["".concat(userLabel, ":\n").concat(message.content)]);
case 'function':
return prompt.concat(["Function:\n".concat(message.content)]);
default:
return message.content ? prompt.concat(["".concat(assistantLabel, ":\n").concat(message.content)]) : prompt;
}
}, [])
.join('\n\n');
return [4 /*yield*/, this._getTokenCount(prompt_1)];
case 2:
nextNumTokensEstimate = _b.sent();
isValidPrompt = nextNumTokensEstimate <= maxNumTokens;
if (prompt_1 && !isValidPrompt) {
return [3 /*break*/, 5];
}
messages = nextMessages;
numTokens = nextNumTokensEstimate;
if (!isValidPrompt) {
return [3 /*break*/, 5];
}
if (!parentMessageId) {
return [3 /*break*/, 5];
}
return [4 /*yield*/, this._getMessageById(parentMessageId)];
case 3:
parentMessage = _b.sent();
if (!parentMessage) {
return [3 /*break*/, 5];
}
parentMessageRole = parentMessage.role || 'user';
nextMessages = nextMessages.slice(0, systemMessageOffset).concat(__spreadArray([
{
role: parentMessageRole,
content: parentMessage.text,
name: parentMessage.name,
function_call: parentMessage.functionCall ? parentMessage.functionCall : undefined,
}
], nextMessages.slice(systemMessageOffset), true));
parentMessageId = parentMessage.parentMessageId;
_b.label = 4;
case 4:
if (true) return [3 /*break*/, 1];
_b.label = 5;
case 5:
maxTokens = Math.max(1, Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens));
return [2 /*return*/, { messages: messages, maxTokens: maxTokens, numTokens: numTokens }];
}
});
});
};
ChatGPTAPI.prototype._getTokenCount = function (text) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
// TODO: use a better fix in the tokenizer
text = text.replace(/<\|endoftext\|>/g, '');
return [2 /*return*/, tokenizer.encode(text).length];
});
});
};
ChatGPTAPI.prototype._defaultGetMessageById = function (id) {
return __awaiter(this, void 0, void 0, function () {
var res;
return __generator(this, function (_a) {
switch (_a.label) {
case 0: return [4 /*yield*/, this._messageStore.get(id)];
case 1:
res = _a.sent();
return [2 /*return*/, res];
}
});
});
};
ChatGPTAPI.prototype._defaultUpsertMessage = function (message) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
switch (_a.label) {
case 0: return [4 /*yield*/, this._messageStore.set(message.id, message)];
case 1:
_a.sent();
return [2 /*return*/];
}
});
});
};
return ChatGPTAPI;
}());
export { ChatGPTAPI };
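
The vendored client persists history through the pluggable getMessageById/upsertMessage hooks seen in the constructor; a minimal standalone wiring (sketch: a plain Map here instead of the plugin's own Redis-backed helpers):

import { ChatGPTAPI } from './chatgpt-api.js'

const store = new Map()
const api = new ChatGPTAPI({
  apiKey: process.env.OPENAI_API_KEY,
  maxModelTokens: 16000,
  getMessageById: async id => store.get(id),
  upsertMessage: async msg => { store.set(msg.id, msg) }
})
const first = await api.sendMessage('hello')
const followUp = await api.sendMessage('what did I just say?', { parentMessageId: first.id })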


@@ -7,8 +7,9 @@ import * as tokenizer from './tokenizer'
import * as types from './types'
import globalFetch from 'node-fetch'
import { fetchSSE } from './fetch-sse'
import { Role } from './types'
const CHATGPT_MODEL = 'gpt-3.5-turbo'
const CHATGPT_MODEL = 'gpt-3.5-turbo-0613'
const USER_LABEL_DEFAULT = 'User'
const ASSISTANT_LABEL_DEFAULT = 'ChatGPT'
@@ -136,7 +137,8 @@ export class ChatGPTAPI {
*/
async sendMessage(
text: string,
opts: types.SendMessageOptions = {}
opts: types.SendMessageOptions = {},
role: Role = 'user',
): Promise<types.ChatMessage> {
const {
parentMessageId,
@@ -157,17 +159,19 @@
}
const message: types.ChatMessage = {
role: 'user',
role,
id: messageId,
conversationId,
parentMessageId,
text
text,
name: opts.name
}
const latestQuestion = message
const { messages, maxTokens, numTokens } = await this._buildMessages(
text,
role,
opts
)
@@ -176,7 +180,8 @@
id: uuidv4(),
conversationId,
parentMessageId: messageId,
text: ''
text: '',
functionCall: undefined
}
const responseP = new Promise<types.ChatMessage>(
@@ -228,9 +233,20 @@
if (response.choices?.length) {
const delta = response.choices[0].delta
result.delta = delta.content
if (delta?.content) result.text += delta.content
if (delta.function_call) {
if (delta.function_call.name) {
result.functionCall = {
name: delta.function_call.name,
arguments: delta.function_call.arguments
}
} else {
result.functionCall.arguments = (result.functionCall.arguments || '') + delta.function_call.arguments
}
} else {
result.delta = delta.content
if (delta?.content) result.text += delta.content
}
if (delta.role) {
result.role = delta.role
}
@@ -278,7 +294,11 @@
if (response?.choices?.length) {
const message = response.choices[0].message
result.text = message.content
if (message.content) {
result.text = message.content
} else if (message.function_call) {
result.functionCall = message.function_call
}
if (message.role) {
result.role = message.role
}
@@ -358,7 +378,7 @@
this._apiOrg = apiOrg
}
protected async _buildMessages(text: string, opts: types.SendMessageOptions) {
protected async _buildMessages(text: string, role: Role, opts: types.SendMessageOptions) {
const { systemMessage = this._systemMessage } = opts
let { parentMessageId } = opts
@@ -379,7 +399,7 @@
let nextMessages = text
? messages.concat([
{
role: 'user',
role,
content: text,
name: opts.name
}
@@ -395,11 +415,13 @@
return prompt.concat([`Instructions:\n${message.content}`])
case 'user':
return prompt.concat([`${userLabel}:\n${message.content}`])
case 'function':
return prompt.concat([`Function:\n${message.content}`])
default:
return prompt.concat([`${assistantLabel}:\n${message.content}`])
return message.content ? prompt.concat([`${assistantLabel}:\n${message.content}`]) : prompt
}
}, [] as string[])
.join('\n\n')
.join('\n\n')
const nextNumTokensEstimate = await this._getTokenCount(prompt)
const isValidPrompt = nextNumTokensEstimate <= maxNumTokens
@@ -430,7 +452,8 @@
{
role: parentMessageRole,
content: parentMessage.text,
name: parentMessage.name
name: parentMessage.name,
function_call: parentMessage.functionCall ? parentMessage.functionCall : undefined
},
...nextMessages.slice(systemMessageOffset)
])

utils/openai/fetch-sse.js (new file, 170 lines)

@@ -0,0 +1,170 @@
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (g && (g = 0, op[0] && (_ = 0)), _) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
var __rest = (this && this.__rest) || function (s, e) {
var t = {};
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)
t[p] = s[p];
if (s != null && typeof Object.getOwnPropertySymbols === "function")
for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {
if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))
t[p[i]] = s[p[i]];
}
return t;
};
var __asyncValues = (this && this.__asyncValues) || function (o) {
if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
var m = o[Symbol.asyncIterator], i;
return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
};
import { createParser } from 'eventsource-parser';
import * as types from './types.js';
import fetch from 'node-fetch';
import { streamAsyncIterable } from './stream-async-iterable.js';
export function fetchSSE(url, options, fetchFn) {
var _a, e_1, _b, _c;
if (fetchFn === void 0) { fetchFn = fetch; }
return __awaiter(this, void 0, void 0, function () {
var onMessage, onError, fetchOptions, res, reason, err_1, msg, error, parser, feed, body_1, _d, _e, _f, chunk, str, e_1_1;
return __generator(this, function (_g) {
switch (_g.label) {
case 0:
onMessage = options.onMessage, onError = options.onError, fetchOptions = __rest(options, ["onMessage", "onError"]);
return [4 /*yield*/, fetchFn(url, fetchOptions)];
case 1:
res = _g.sent();
if (!!res.ok) return [3 /*break*/, 6];
reason = void 0;
_g.label = 2;
case 2:
_g.trys.push([2, 4, , 5]);
return [4 /*yield*/, res.text()];
case 3:
reason = _g.sent();
return [3 /*break*/, 5];
case 4:
err_1 = _g.sent();
reason = res.statusText;
return [3 /*break*/, 5];
case 5:
msg = "ChatGPT error ".concat(res.status, ": ").concat(reason);
error = new types.ChatGPTError(msg, { cause: res });
error.statusCode = res.status;
error.statusText = res.statusText;
throw error;
case 6:
parser = createParser(function (event) {
if (event.type === 'event') {
onMessage(event.data);
}
});
feed = function (chunk) {
var _a;
var response = null;
try {
response = JSON.parse(chunk);
}
catch (_b) {
// ignore
}
if (((_a = response === null || response === void 0 ? void 0 : response.detail) === null || _a === void 0 ? void 0 : _a.type) === 'invalid_request_error') {
var msg = "ChatGPT error ".concat(response.detail.message, ": ").concat(response.detail.code, " (").concat(response.detail.type, ")");
var error = new types.ChatGPTError(msg, { cause: response });
error.statusCode = response.detail.code;
error.statusText = response.detail.message;
if (onError) {
onError(error);
}
else {
console.error(error);
}
// don't feed to the event parser
return;
}
parser.feed(chunk);
};
if (!!res.body.getReader) return [3 /*break*/, 7];
body_1 = res.body;
if (!body_1.on || !body_1.read) {
throw new types.ChatGPTError('unsupported "fetch" implementation');
}
body_1.on('readable', function () {
var chunk;
while (null !== (chunk = body_1.read())) {
feed(chunk.toString());
}
});
return [3 /*break*/, 18];
case 7:
_g.trys.push([7, 12, 13, 18]);
_d = true, _e = __asyncValues(streamAsyncIterable(res.body));
_g.label = 8;
case 8: return [4 /*yield*/, _e.next()];
case 9:
if (!(_f = _g.sent(), _a = _f.done, !_a)) return [3 /*break*/, 11];
_c = _f.value;
_d = false;
chunk = _c;
str = new TextDecoder().decode(chunk);
feed(str);
_g.label = 10;
case 10:
_d = true;
return [3 /*break*/, 8];
case 11: return [3 /*break*/, 18];
case 12:
e_1_1 = _g.sent();
e_1 = { error: e_1_1 };
return [3 /*break*/, 18];
case 13:
_g.trys.push([13, , 16, 17]);
if (!(!_d && !_a && (_b = _e.return))) return [3 /*break*/, 15];
return [4 /*yield*/, _b.call(_e)];
case 14:
_g.sent();
_g.label = 15;
case 15: return [3 /*break*/, 17];
case 16:
if (e_1) throw e_1.error;
return [7 /*endfinally*/];
case 17: return [7 /*endfinally*/];
case 18: return [2 /*return*/];
}
});
});
}
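
A minimal fetchSSE call against the chat completions endpoint (sketch; apiKey and messages are assumed to be defined by the caller):

import { fetchSSE } from './fetch-sse.js'

await fetchSSE('https://api.openai.com/v1/chat/completions', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${apiKey}` },
  body: JSON.stringify({ model: 'gpt-3.5-turbo-0613', messages, stream: true }),
  onMessage: data => {
    if (data === '[DONE]') return
    const delta = JSON.parse(data).choices[0].delta
    // delta carries either content fragments or function_call fragments
  }
})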

utils/openai/stream-async-iterable.js (new file, 14 lines)

@@ -0,0 +1,14 @@
export async function * streamAsyncIterable (stream) {
const reader = stream.getReader()
try {
while (true) {
const { done, value } = await reader.read()
if (done) {
return
}
yield value
}
} finally {
reader.releaseLock()
}
}
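
Usage is plain async iteration over a WHATWG ReadableStream, e.g. the body of a Node 18+ global fetch response (sketch):

const res = await fetch(url)
for await (const chunk of streamAsyncIterable(res.body)) {
  process.stdout.write(new TextDecoder().decode(chunk))
}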

utils/openai/tokenizer.js (new file, 6 lines)

@@ -0,0 +1,6 @@
import { getEncoding } from 'js-tiktoken';
// TODO: make this configurable
var tokenizer = getEncoding('cl100k_base');
export function encode(input) {
return new Uint32Array(tokenizer.encode(input));
}
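
Token counting elsewhere in this commit then reduces to (sketch):

import * as tokenizer from './tokenizer.js'
tokenizer.encode('hello world').length // 2 tokens under cl100k_base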


@@ -0,0 +1,5 @@
{
"compilerOptions": {
"module": "es2020"
}
}

utils/openai/types.js (new file, 26 lines)

@@ -0,0 +1,26 @@
var __extends = (this && this.__extends) || (function () {
var extendStatics = function (d, b) {
extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
return extendStatics(d, b);
};
return function (d, b) {
if (typeof b !== "function" && b !== null)
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
extendStatics(d, b);
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
var ChatGPTError = /** @class */ (function (_super) {
__extends(ChatGPTError, _super);
function ChatGPTError() {
return _super !== null && _super.apply(this, arguments) || this;
}
return ChatGPTError;
}(Error));
export { ChatGPTError };
export var openai;
(function (openai) {
})(openai || (openai = {}));


@@ -1,6 +1,6 @@
import Keyv from 'keyv'
export type Role = 'user' | 'assistant' | 'system'
export type Role = 'user' | 'assistant' | 'system' | 'function'
export type FetchFn = typeof fetch
@@ -35,7 +35,9 @@ export type ChatGPTAPIOptions = {
}
export type SendMessageOptions = {
/** The name of a user in a multi-user chat. */
/**
* function role name
*/
name?: string
parentMessageId?: string
conversationId?: string
@@ -77,6 +79,7 @@ export interface ChatMessage {
// only relevant for ChatGPTUnofficialProxyAPI (optional for ChatGPTAPI)
conversationId?: string
functionCall?: openai.FunctionCall
}
export class ChatGPTError extends Error {
@@ -199,7 +202,8 @@ export namespace openai {
{
delta: {
role: Role
content?: string
content?: string,
function_call?: {name: string, arguments: string}
}
index: number
finish_reason: string | null
@@ -231,11 +235,20 @@
* @memberof ChatCompletionRequestMessage
*/
name?: string
function_call?: FunctionCall
}
export interface FunctionCall {
name: string
arguments: string
}
export declare const ChatCompletionRequestMessageRoleEnum: {
readonly System: 'system'
readonly User: 'user'
readonly Assistant: 'assistant'
readonly Function: 'function'
}
export declare type ChatCompletionRequestMessageRoleEnum =
(typeof ChatCompletionRequestMessageRoleEnum)[keyof typeof ChatCompletionRequestMessageRoleEnum]
@@ -257,6 +270,8 @@
* @memberof ChatCompletionResponseMessage
*/
content: string
function_call: FunctionCall
}
export declare const ChatCompletionResponseMessageRoleEnum: {
readonly System: 'system'
@@ -343,6 +358,18 @@
* @memberof CreateChatCompletionRequest
*/
user?: string
functions?: Function[]
}
export interface Function {
name: string
description: string
parameters: FunctionParameters
}
export interface FunctionParameters {
type: string
properties: Record<string, Record<string, any>>
required: string[]
}
/**
* @type CreateChatCompletionRequestStop

utils/tools/AbstractTool.js (new file, 20 lines)

@@ -0,0 +1,20 @@
export class AbstractTool {
name = ''
parameters = {}
description = ''
func = async function () {}
function () {
if (!this.parameters.type) {
this.parameters.type = 'object'
}
return {
name: this.name,
description: this.description,
parameters: this.parameters
}
}
}
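
A subclass only fills in name/description/parameters/func; function() then emits the JSON-schema object that goes into the OpenAI functions parameter. Sketch with a hypothetical EchoTool:

import { AbstractTool } from './AbstractTool.js'

class EchoTool extends AbstractTool {
  name = 'echo'
  description = 'Repeats the given text back'
  parameters = {
    properties: { text: { type: 'string', description: 'the text to repeat' } },
    required: ['text']
  }
  func = async function ({ text }) { return `echo: ${text}` }
}

new EchoTool().function()
// => { name: 'echo', description: 'Repeats the given text back',
//      parameters: { type: 'object', properties: { text: {...} }, required: ['text'] } }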

utils/tools/EditCardTool.js (new file, 35 lines)

@@ -0,0 +1,35 @@
import { AbstractTool } from './AbstractTool.js'
export class EditCardTool extends AbstractTool {
name = 'editCard'
parameters = {
properties: {
qq: {
type: 'string',
description: '你想改名片的那个人的qq号'
},
card: {
type: 'string',
description: '你想给他改的新名片'
},
groupId: {
type: 'string',
description: '群号'
}
},
required: ['qq', 'card', 'groupId']
}
description = '当你想要修改某个群员的群名片时有用。输入应该是群号、qq号和群名片用空格隔开。'
func = async function (opts) {
let {qq, card, groupId} = opts
groupId = parseInt(groupId.trim())
qq = parseInt(qq.trim())
logger.info('edit card: ', groupId, qq)
let group = await Bot.pickGroup(groupId)
await group.setCard(qq, card)
return `the user ${qq}'s card has been changed into ${card}`
}
}

utils/tools/JinyanTool.js (new file, 38 lines)

@@ -0,0 +1,38 @@
import { AbstractTool } from './AbstractTool.js'
export class JinyanTool extends AbstractTool {
name = 'jinyan'
parameters = {
properties: {
qq: {
type: 'string',
description: '你想禁言的那个人的qq号'
},
groupId: {
type: 'string',
description: '群号'
},
time: {
type: 'string',
description: '禁言时长,单位为秒'
}
},
required: ['qq', 'groupId']
}
func = async function (opts) {
let { qq, groupId, time = '600' } = opts
groupId = parseInt(groupId.trim())
let group = await Bot.pickGroup(groupId)
time = parseInt(time.trim())
if (qq.trim() === 'all') {
await group.muteAll(time > 0)
} else {
qq = parseInt(qq.trim())
await group.muteMember(qq, time)
}
return `the user ${qq} has been muted for ${time} seconds`
}
description = 'Useful when you want to ban someone. The input to this tool should be the group number, the qq number of the one who should be banned and the mute duration in seconds (at least 60, at most 180, the number should be an integer multiple of 60); these three numbers should be concatenated with a space. If you want to mute all, just replace the qq number with \'all\''
}
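
When the model picks this tool, chat.js hands func the parsed arguments object; an illustrative call (values made up):

await new JinyanTool().func({ qq: '10001', groupId: '20002', time: '120' })
// mutes member 10001 in group 20002 for 120 seconds; qq 'all' would toggle
// whole-group mute via group.muteAll(true) instead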

utils/tools/KickOutTool.js (new file, 31 lines)

@@ -0,0 +1,31 @@
import { AbstractTool } from './AbstractTool.js'
export class KickOutTool extends AbstractTool {
name = 'kickOut'
parameters = {
properties: {
qq: {
type: 'string',
description: '你想踢出的那个人的qq号'
},
groupId: {
type: 'string',
description: '群号'
}
},
required: ['qq', 'groupId']
}
func = async function (opts) {
let { qq, groupId } = opts
groupId = parseInt(groupId.trim())
qq = parseInt(qq.trim())
console.log('kickout', groupId, qq)
let group = await Bot.pickGroup(groupId)
await group.kickMember(qq)
return `the user ${qq} has been kicked out from group ${groupId}`
}
description = 'Useful when you want to kick someone out of the group. The input to this tool should be the group number and the qq number of the one who should be kicked out; these two numbers should be concatenated with a space.'
}

utils/tools/SendAvatarTool.js (new file, 33 lines)

@@ -0,0 +1,33 @@
import { AbstractTool } from './AbstractTool.js'
export class SendAvatarTool extends AbstractTool {
name = 'sendAvatar'
parameters = {
properties: {
qq: {
type: 'string',
description: '要发头像的人的qq号'
},
groupId: {
type: 'string',
description: '群号或qq号发送目标'
}
},
required: ['qq', 'groupId']
}
func = async function (opts) {
let {qq, groupId} = opts
let groupList = await Bot.getGroupList()
groupId = parseInt(groupId.trim())
console.log('sendAvatar', groupId, qq)
if (groupList.get(groupId)) {
let group = await Bot.pickGroup(groupId)
await group.sendMsg(segment.image('https://q1.qlogo.cn/g?b=qq&s=0&nk=' + qq))
} else {
let friend = await Bot.pickFriend(groupId)
await friend.sendMsg(segment.image('https://q1.qlogo.cn/g?b=qq&s=0&nk=' + qq))
}
return `the user ${qq}'s avatar has been sent to group ${groupId}`
}
description = 'Useful when you want to send the user avatar picture to the group. The input to this tool should be the user\'s qq number and the target group number, and they should be concatenated with a space. 如果是在群聊中,优先选择群号发送。'
}

utils/tools/SendBilibiliTool.js (new file, 134 lines)

@@ -0,0 +1,134 @@
import fetch from 'node-fetch'
import { formatDate, mkdirs } from '../common.js'
import fs from 'fs'
import { AbstractTool } from './AbstractTool.js'
export class SendVideoTool extends AbstractTool {
name = 'sendVideo'
parameters = {
properties: {
keyword: {
type: 'string',
description: '要发的视频的标题或关键词,用于搜索'
},
groupId: {
type: 'string',
description: '群号或qq号发送目标'
}
},
required: ['keyword', 'groupId']
}
func = async function (opts) {
let { keyword, groupId } = opts
groupId = parseInt(groupId.trim())
let msg = []
try {
let { arcurl, title, pic, description, videoUrl, headers, bvid, author, play, pubdate, like } = await searchBilibili(keyword)
let group = await Bot.pickGroup(groupId)
console.log({ arcurl, title, pic, description, videoUrl })
msg.push(title.replace(/(<([^>]+)>)/ig, '') + '\n')
msg.push(`UP主${author} 发布日期:${formatDate(new Date(pubdate * 1000))} 播放量:${play} 点赞:${like}\n`)
msg.push(arcurl + '\n')
msg.push(segment.image('https:' + pic))
msg.push('\n' + description)
msg.push('\n视频在路上啦')
await group.sendMsg(msg)
const videoResponse = await fetch(videoUrl, { headers })
const fileType = videoResponse.headers.get('Content-Type').split('/')[1]
let fileLoc = `data/chatgpt/videos/${bvid}.${fileType}`
mkdirs('data/chatgpt/videos')
videoResponse.blob().then(async blob => {
const arrayBuffer = await blob.arrayBuffer()
const buffer = Buffer.from(arrayBuffer)
await fs.writeFileSync(fileLoc, buffer)
await group.sendMsg(segment.video(fileLoc))
})
return `the video ${title.replace(/(<([^>]+)>)/ig, '')} will be shared to ${groupId} after a while, please wait`
} catch (err) {
logger.error(err)
if (msg.length > 0) {
return `fail to share video, but the video msg is found: ${msg}, you can just tell the information of this video`
} else {
return `fail to share video, error: ${err.toString()}`
}
}
}
description = 'Useful when you want to share a video. The input should be the group number and the keywords that can find the video, connected with a space. If you want to send a specific video, you can give more detailed keywords'
}
export async function searchBilibili (name) {
let biliRes = await fetch('https://www.bilibili.com',
{
// headers: {
// accept: 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
// Accept: '*/*',
// 'Accept-Encoding': 'gzip, deflate, br',
// 'accept-language': 'en-US,en;q=0.9',
// Connection: 'keep-alive',
// 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
// }
})
const headers = biliRes.headers.raw()
const setCookieHeaders = headers['set-cookie']
if (setCookieHeaders) {
const cookies = []
setCookieHeaders.forEach(header => {
const cookie = header.split(';')[0]
cookies.push(cookie)
})
const cookieHeader = cookies.join('; ')
let headers = {
accept: 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
'accept-language': 'en-US,en;q=0.9',
Referer: 'https://www.bilibili.com',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
cookie: cookieHeader
}
let response = await fetch(`https://api.bilibili.com/x/web-interface/search/type?keyword=${name}&search_type=video`,
{
headers
})
let json = await response.json()
if (json.data?.numResults > 0) {
let index = randomIndex()
let { arcurl, title, pic, description, bvid, author, play, pubdate, like } = json.data.result[Math.min(index, json.data.result.length - 1)]
let videoInfo = await fetch(`https://api.bilibili.com/x/web-interface/view?bvid=${bvid}`, {
headers
})
videoInfo = await videoInfo.json()
let cid = videoInfo.data.cid
let downloadInfo = await fetch(`https://api.bilibili.com/x/player/playurl?bvid=${bvid}&cid=${cid}`, { headers })
let videoUrl = (await downloadInfo.json()).data.durl[0].url
return {
arcurl, title, pic, description, videoUrl, headers, bvid, author, play, pubdate, like
}
}
}
return {}
}
function randomIndex () {
// Define weights for each index
const weights = [5, 4, 3, 2, 1, 1, 1, 1, 1, 1, 1]
// Compute the total weight
const totalWeight = weights.reduce((sum, weight) => sum + weight, 0)
// Generate a random number between 0 and the total weight
const randomNumber = Math.floor(Math.random() * totalWeight)
// Choose the index based on the random number and weights
let weightSum = 0
for (let i = 0; i < weights.length; i++) {
weightSum += weights[i]
if (randomNumber < weightSum) {
return i
}
}
}

utils/tools/SendDiceTool.js (new file, 35 lines)

@@ -0,0 +1,35 @@
import { AbstractTool } from './AbstractTool.js'
export class SendDiceTool extends AbstractTool {
name = 'sendDice'
parameters = {
properties: {
num: {
type: 'number',
description: '骰子的数量'
},
groupId: {
type: 'string',
description: '群号或qq号发送目标'
}
},
required: ['num', 'groupId']
}
func = async function (opts) {
let { num, groupId } = opts
groupId = parseInt(groupId.trim())
let groupList = await Bot.getGroupList()
if (groupList.get(groupId)) {
let group = await Bot.pickGroup(groupId, true)
await group.sendMsg(segment.dice(num))
} else {
let friend = await Bot.pickFriend(groupId)
await friend.sendMsg(segment.dice(num))
}
return `the dice has been sent`
}
description = 'If you want to roll dice, use this tool. If you know the group number, use the group number instead of the qq number first. The input should be the number of dice to be cast (1-6) and the target group number or qq number, and they should be concatenated with a space'
}

utils/tools/SendMusicTool.js (new file, 46 lines)

@@ -0,0 +1,46 @@
import fetch from 'node-fetch'
import { AbstractTool } from './AbstractTool.js'
export class SendMusicTool extends AbstractTool {
name = 'sendMusic'
parameters = {
properties: {
keyword: {
type: 'string',
description: '音乐的标题或关键词'
},
groupId: {
type: 'string',
description: '群号或qq号发送目标'
}
},
required: ['keyword', 'groupId']
}
func = async function (opts) {
let { keyword, groupId } = opts
groupId = parseInt(groupId.trim())
try {
let { id, name } = await searchMusic163(keyword)
let group = await Bot.pickGroup(groupId)
await group.shareMusic('163', id)
return `the music ${name} has been shared to ${groupId}`
} catch (e) {
return `music share failed: ${e}`
}
}
description = 'Useful when you want to share music. The input should be the group number and the name of the music to be sent or the keywords that can find the music, connected with a space'
}
export async function searchMusic163 (name) {
let response = await fetch(`http://music.163.com/api/search/get/web?s=${name}&type=1&offset=0&total=true&limit=20`)
let json = await response.json()
if (json.result?.songCount > 0) {
let id = json.result.songs[0].id
let name = json.result.songs[0].name
return { id, name }
}
return null
}
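
searchMusic163 also works standalone against the public 163 web search API (sketch):

const hit = await searchMusic163('Lemon')
// => { id, name } of the first match, or null if nothing was found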

utils/tools/SendPictureTool.js (new file, 33 lines)

@@ -0,0 +1,33 @@
import { AbstractTool } from './AbstractTool.js'
export class SendPictureTool extends AbstractTool {
name = 'sendPicture'
parameters = {
properties: {
picture: {
type: 'string',
description: '图片的url,多个用空格隔开'
},
groupId: {
type: 'string',
description: '群号或qq号发送目标'
}
},
required: ['picture', 'groupId']
}
func = async function (opts) {
let { picture, groupId } = opts
groupId = parseInt(groupId.trim())
let pictures = picture.trim().split(' ')
pictures = pictures.map(img => segment.image(img))
let groupList = await Bot.getGroupList()
if (groupList.get(groupId)) {
let group = await Bot.pickGroup(groupId)
await group.sendMsg(pictures)
} else {
let user = await Bot.pickFriend(groupId)
await user.sendMsg(pictures)
}
return `the pictures have been sent to ${groupId}`
}
description = 'Useful when you want to send some pictures. The input to this tool should be the urls of the pictures and the group number or the user\'s qq number; each url and the group number or qq number should be concatenated with a space, and the group number or qq number should be last. 如果是在群聊中,优先选择群号发送。'
}

utils/tools/SendRPSTool.js (new file, 30 lines)

@@ -0,0 +1,30 @@
import { AbstractTool } from './AbstractTool.js'
export class SendRPSTool extends AbstractTool {
name = 'sendRPS'
parameters = {
properties: {
num: {
type: 'number',
description: '石头剪刀布的代号'
},
groupId: {
type: 'string',
description: '群号或qq号发送目标'
}
},
required: ['num', 'groupId']
}
func = async function (opts) {
let { num, groupId } = opts
groupId = parseInt(groupId.trim())
let groupList = await Bot.getGroupList()
if (groupList.get(groupId)) {
let group = await Bot.pickGroup(groupId, true)
await group.sendMsg(segment.rps(num))
} else {
let friend = await Bot.pickFriend(groupId)
await friend.sendMsg(segment.rps(num))
}
return `the rock-paper-scissors gesture has been sent`
}
description = 'Use this tool if you want to play rock paper scissors. If you know the group number, use the group number instead of the qq number first. The input should be the number 1, 2 or 3 to represent rock-paper-scissors and the target group number or qq number, and they should be concatenated with a space'
}

yarn-error.log (new file, 4512 lines)

File diff suppressed because it is too large

yarn.lock (1653 lines changed)

File diff suppressed because it is too large