feat: smart mode, adding group management, image recognition, web search, sending images, music, videos and more (#488)

* fix: 2.7 dev start

* feat: initial function call support (WIP)

* fix: syntax error

* fix: syntax error

* feat: group chat context

* fix: temporarily remove the mute-all feature

* fix: adjust the mute duration range

* fix: improve usability of several features

* fix: only admins and the group owner can use jinyan and kickout

* fix: bring back mute and kick-out

* fix: fix the admin permission check (probably)

* fix: try to optimize the logic

* fix: fuck openai documents

* fix: remove master recognition, otherwise it keeps muting me and it's driving me crazy

* fix: broken bilibili cover images

* fix: add a small weather tool

* fix: handle nonexistent cities in the weather tool

* fix: use the browser for the website tool

* feat: serp tool

* feat: add a Google search source

* fix: add a line of description

* feat: add a search source option

* feat: image search and image sending

* fix: groupId format error

* fix: add an image caption tool

* fix: tweak some prompts; with too many tools the bot was getting confused

* fix: some drastic measures

* fix: add some prompts and a temporary public endpoint

* fix: tidy things up

* fix: adjust the command regex

* fix: tweak some prompts

* fix: move send avatar into send picture tool

* fix: fix the unmute bug
ikechan8370 committed 2023-06-25 01:09:29 +08:00 (committed by GitHub)
parent 2c5b084b04
commit b7427e74c4
42 changed files with 18987 additions and 58 deletions

@@ -786,3 +786,19 @@ export function processList (whitelist, blacklist) {
blacklist = Array.from(new Set(blacklist)).filter(value => /^\^?[1-9]\d{5,9}$/.test(value))
return [whitelist, blacklist]
}
export function getMaxModelTokens (model = 'gpt-3.5-turbo') {
if (model.startsWith('gpt-3.5-turbo')) {
if (model.includes('16k')) {
return 16000
} else {
return 4000
}
} else {
if (model.includes('32k')) {
return 32000
} else {
return 8000 // gpt-4 base models have an 8k context window; returning 16000 would overrun it
}
}
}
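
For illustration, a minimal sketch of how this helper plugs into the new OpenAI client added by this commit (the model string is illustrative, and the helper's import path is an assumption since the hunk does not name its file):

import { ChatGPTAPI } from './openai/chatgpt-api.js'
import { getMaxModelTokens } from './common.js' // assumed location of the helper above

const model = 'gpt-3.5-turbo-16k'
const api = new ChatGPTAPI({
  apiKey: process.env.OPENAI_API_KEY,
  completionParams: { model },
  // budget the context window conservatively for the chosen model
  maxModelTokens: getMaxModelTokens(model), // 16000 for 16k variants
  maxResponseTokens: 1000
})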

@@ -125,7 +125,12 @@ const defaultConfig = {
enhanceAzureTTSEmotion: false,
autoJapanese: false,
enableGenerateContents: false,
version: 'v2.6.2'
amapKey: '',
azSerpKey: '',
serpSource: 'ikechan8370',
extraUrl: 'https://cpe.ikechan8370.com',
smartMode: false,
version: 'v2.7.0'
}
const _path = process.cwd()
let config = {}

utils/openai/chatgpt-api.js (new file, 495 lines)

@@ -0,0 +1,495 @@
var __assign = (this && this.__assign) || function () {
__assign = Object.assign || function(t) {
for (var s, i = 1, n = arguments.length; i < n; i++) {
s = arguments[i];
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
t[p] = s[p];
}
return t;
};
return __assign.apply(this, arguments);
};
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (g && (g = 0, op[0] && (_ = 0)), _) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
var __spreadArray = (this && this.__spreadArray) || function (to, from, pack) {
if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
if (ar || !(i in from)) {
if (!ar) ar = Array.prototype.slice.call(from, 0, i);
ar[i] = from[i];
}
}
return to.concat(ar || Array.prototype.slice.call(from));
};
import Keyv from 'keyv';
import pTimeout from 'p-timeout';
import QuickLRU from 'quick-lru';
import { v4 as uuidv4 } from 'uuid';
import * as tokenizer from './tokenizer.js';
import * as types from './types.js';
import globalFetch from 'node-fetch';
import { fetchSSE } from './fetch-sse.js';
var CHATGPT_MODEL = 'gpt-3.5-turbo-0613';
var USER_LABEL_DEFAULT = 'User';
var ASSISTANT_LABEL_DEFAULT = 'ChatGPT';
var ChatGPTAPI = /** @class */ (function () {
/**
* Creates a new client wrapper around OpenAI's chat completion API, mimicking the official ChatGPT webapp's functionality as closely as possible.
*
* @param apiKey - OpenAI API key (required).
* @param apiOrg - Optional OpenAI API organization.
* @param apiBaseUrl - Optional override for the OpenAI API base URL.
* @param debug - Optional enables logging debugging info to stdout.
* @param completionParams - Param overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
* @param maxModelTokens - Optional override for the maximum number of tokens allowed by the model's context. Defaults to 4000.
* @param maxResponseTokens - Optional override for the maximum number of tokens allowed for the model's response. Defaults to 1000.
* @param messageStore - Optional [Keyv](https://github.com/jaredwray/keyv) store to persist chat messages to. If not provided, messages will be lost when the process exits.
* @param getMessageById - Optional function to retrieve a message by its ID. If not provided, the default implementation will be used (using an in-memory `messageStore`).
* @param upsertMessage - Optional function to insert or update a message. If not provided, the default implementation will be used (using an in-memory `messageStore`).
* @param fetch - Optional override for the `fetch` implementation to use. Defaults to the global `fetch` function.
*/
function ChatGPTAPI(opts) {
var apiKey = opts.apiKey, apiOrg = opts.apiOrg, _a = opts.apiBaseUrl, apiBaseUrl = _a === void 0 ? 'https://api.openai.com/v1' : _a, _b = opts.debug, debug = _b === void 0 ? false : _b, messageStore = opts.messageStore, completionParams = opts.completionParams, systemMessage = opts.systemMessage, _c = opts.maxModelTokens, maxModelTokens = _c === void 0 ? 4000 : _c, _d = opts.maxResponseTokens, maxResponseTokens = _d === void 0 ? 1000 : _d, getMessageById = opts.getMessageById, upsertMessage = opts.upsertMessage, _e = opts.fetch, fetch = _e === void 0 ? globalFetch : _e;
this._apiKey = apiKey;
this._apiOrg = apiOrg;
this._apiBaseUrl = apiBaseUrl;
this._debug = !!debug;
this._fetch = fetch;
this._completionParams = __assign({ model: CHATGPT_MODEL, temperature: 0.8, top_p: 1.0, presence_penalty: 1.0 }, completionParams);
this._systemMessage = systemMessage;
if (this._systemMessage === undefined) {
var currentDate = new Date().toISOString().split('T')[0];
this._systemMessage = "You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible.\nKnowledge cutoff: 2021-09-01\nCurrent date: ".concat(currentDate);
}
this._maxModelTokens = maxModelTokens;
this._maxResponseTokens = maxResponseTokens;
this._getMessageById = getMessageById !== null && getMessageById !== void 0 ? getMessageById : this._defaultGetMessageById;
this._upsertMessage = upsertMessage !== null && upsertMessage !== void 0 ? upsertMessage : this._defaultUpsertMessage;
if (messageStore) {
this._messageStore = messageStore;
}
else {
this._messageStore = new Keyv({
store: new QuickLRU({ maxSize: 10000 })
});
}
if (!this._apiKey) {
throw new Error('OpenAI missing required apiKey');
}
if (!this._fetch) {
throw new Error('Invalid environment; fetch is not defined');
}
if (typeof this._fetch !== 'function') {
throw new Error('Invalid "fetch" is not a function');
}
}
/**
* Sends a message to the OpenAI chat completions endpoint, waits for the response
* to resolve, and returns the response.
*
* If you want your response to have historical context, you must provide a valid `parentMessageId`.
*
* If you want to receive a stream of partial responses, use `opts.onProgress`.
*
* Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the OpenAI chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
*
* @param message - The prompt message to send
* @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
* @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)
* @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
* @param opts.systemMessage - Optional override for the chat "system message" which acts as instructions to the model (defaults to the ChatGPT system message)
* @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
* @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
* @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
* @param completionParams - Optional overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
*
* @returns The response from ChatGPT
*/
ChatGPTAPI.prototype.sendMessage = function (text, opts, role) {
if (opts === void 0) { opts = {}; }
if (role === void 0) { role = 'user'; }
return __awaiter(this, void 0, void 0, function () {
var parentMessageId, _a, messageId, timeoutMs, onProgress, _b, stream, completionParams, conversationId, abortSignal, abortController, message, latestQuestion, _c, messages, maxTokens, numTokens, result, responseP;
var _this = this;
return __generator(this, function (_d) {
switch (_d.label) {
case 0:
parentMessageId = opts.parentMessageId, _a = opts.messageId, messageId = _a === void 0 ? uuidv4() : _a, timeoutMs = opts.timeoutMs, onProgress = opts.onProgress, _b = opts.stream, stream = _b === void 0 ? onProgress ? true : false : _b, completionParams = opts.completionParams, conversationId = opts.conversationId;
abortSignal = opts.abortSignal;
abortController = null;
if (timeoutMs && !abortSignal) {
abortController = new AbortController();
abortSignal = abortController.signal;
}
message = {
role: role,
id: messageId,
conversationId: conversationId,
parentMessageId: parentMessageId,
text: text,
name: opts.name
};
latestQuestion = message;
return [4 /*yield*/, this._buildMessages(text, role, opts)];
case 1:
_c = _d.sent(), messages = _c.messages, maxTokens = _c.maxTokens, numTokens = _c.numTokens;
result = {
role: 'assistant',
id: uuidv4(),
conversationId: conversationId,
parentMessageId: messageId,
text: '',
functionCall: null
};
responseP = new Promise(function (resolve, reject) { return __awaiter(_this, void 0, void 0, function () {
var url, headers, body, res, reason, msg, error, response, message_1, res_1, err_1;
var _a, _b;
return __generator(this, function (_c) {
switch (_c.label) {
case 0:
url = "".concat(this._apiBaseUrl, "/chat/completions");
headers = {
'Content-Type': 'application/json',
Authorization: "Bearer ".concat(this._apiKey)
};
body = __assign(__assign(__assign({ max_tokens: maxTokens }, this._completionParams), completionParams), { messages: messages, stream: stream });
// Support multiple organizations
// See https://platform.openai.com/docs/api-reference/authentication
if (this._apiOrg) {
headers['OpenAI-Organization'] = this._apiOrg;
}
if (this._debug) {
// console.log(JSON.stringify(body))
console.log("sendMessage (".concat(numTokens, " tokens)"), body);
}
if (!stream) return [3 /*break*/, 1];
fetchSSE(url, {
method: 'POST',
headers: headers,
body: JSON.stringify(body),
signal: abortSignal,
onMessage: function (data) {
var _a;
if (data === '[DONE]') {
result.text = result.text.trim();
return resolve(result);
}
try {
var response = JSON.parse(data);
if (response.id) {
result.id = response.id;
}
if ((_a = response.choices) === null || _a === void 0 ? void 0 : _a.length) {
var delta = response.choices[0].delta;
if (delta.function_call) {
if (delta.function_call.name) {
result.functionCall = {
name: delta.function_call.name,
arguments: delta.function_call.arguments
};
}
else {
result.functionCall.arguments = (result.functionCall.arguments || '') + delta.function_call.arguments;
}
}
else {
result.delta = delta.content;
if (delta === null || delta === void 0 ? void 0 : delta.content)
result.text += delta.content;
}
if (delta.role) {
result.role = delta.role;
}
result.detail = response;
onProgress === null || onProgress === void 0 ? void 0 : onProgress(result);
}
}
catch (err) {
console.warn('OpenAI stream SSE event unexpected error', err);
return reject(err);
}
}
}, this._fetch).catch(reject);
return [3 /*break*/, 7];
case 1:
_c.trys.push([1, 6, , 7]);
return [4 /*yield*/, this._fetch(url, {
method: 'POST',
headers: headers,
body: JSON.stringify(body),
signal: abortSignal
})];
case 2:
res = _c.sent();
if (!!res.ok) return [3 /*break*/, 4];
return [4 /*yield*/, res.text()];
case 3:
reason = _c.sent();
msg = "OpenAI error ".concat(res.status || res.statusText, ": ").concat(reason);
error = new types.ChatGPTError(msg, { cause: res });
error.statusCode = res.status;
error.statusText = res.statusText;
return [2 /*return*/, reject(error)];
case 4: return [4 /*yield*/, res.json()];
case 5:
response = _c.sent();
if (this._debug) {
console.log(response);
}
if (response === null || response === void 0 ? void 0 : response.id) {
result.id = response.id;
}
if ((_a = response === null || response === void 0 ? void 0 : response.choices) === null || _a === void 0 ? void 0 : _a.length) {
message_1 = response.choices[0].message;
if (message_1.content) {
result.text = message_1.content;
}
else if (message_1.function_call) {
result.functionCall = message_1.function_call;
}
if (message_1.role) {
result.role = message_1.role;
}
}
else {
res_1 = response;
return [2 /*return*/, reject(new Error("OpenAI error: ".concat(((_b = res_1 === null || res_1 === void 0 ? void 0 : res_1.detail) === null || _b === void 0 ? void 0 : _b.message) || (res_1 === null || res_1 === void 0 ? void 0 : res_1.detail) || 'unknown')))];
}
result.detail = response;
return [2 /*return*/, resolve(result)];
case 6:
err_1 = _c.sent();
return [2 /*return*/, reject(err_1)];
case 7: return [2 /*return*/];
}
});
}); }).then(function (message) { return __awaiter(_this, void 0, void 0, function () {
var promptTokens, completionTokens, err_2;
return __generator(this, function (_a) {
switch (_a.label) {
case 0:
if (!(message.detail && !message.detail.usage)) return [3 /*break*/, 4];
_a.label = 1;
case 1:
_a.trys.push([1, 3, , 4]);
promptTokens = numTokens;
return [4 /*yield*/, this._getTokenCount(message.text)];
case 2:
completionTokens = _a.sent();
message.detail.usage = {
prompt_tokens: promptTokens,
completion_tokens: completionTokens,
total_tokens: promptTokens + completionTokens,
estimated: true
};
return [3 /*break*/, 4];
case 3:
err_2 = _a.sent();
return [3 /*break*/, 4];
case 4: return [2 /*return*/, Promise.all([
this._upsertMessage(latestQuestion),
this._upsertMessage(message)
]).then(function () { return message; })];
}
});
}); });
if (timeoutMs) {
if (abortController) {
// This will be called when a timeout occurs in order for us to forcibly
// ensure that the underlying HTTP request is aborted.
;
responseP.cancel = function () {
abortController.abort();
};
}
return [2 /*return*/, pTimeout(responseP, {
milliseconds: timeoutMs,
message: 'OpenAI timed out waiting for response'
})];
}
else {
return [2 /*return*/, responseP];
}
return [2 /*return*/];
}
});
});
};
Object.defineProperty(ChatGPTAPI.prototype, "apiKey", {
get: function () {
return this._apiKey;
},
set: function (apiKey) {
this._apiKey = apiKey;
},
enumerable: false,
configurable: true
});
Object.defineProperty(ChatGPTAPI.prototype, "apiOrg", {
get: function () {
return this._apiOrg;
},
set: function (apiOrg) {
this._apiOrg = apiOrg;
},
enumerable: false,
configurable: true
});
ChatGPTAPI.prototype._buildMessages = function (text, role, opts) {
return __awaiter(this, void 0, void 0, function () {
var _a, systemMessage, parentMessageId, userLabel, assistantLabel, maxNumTokens, messages, systemMessageOffset, nextMessages, numTokens, prompt_1, nextNumTokensEstimate, isValidPrompt, parentMessage, parentMessageRole, maxTokens;
return __generator(this, function (_b) {
switch (_b.label) {
case 0:
_a = opts.systemMessage, systemMessage = _a === void 0 ? this._systemMessage : _a;
parentMessageId = opts.parentMessageId;
userLabel = USER_LABEL_DEFAULT;
assistantLabel = ASSISTANT_LABEL_DEFAULT;
maxNumTokens = this._maxModelTokens - this._maxResponseTokens;
messages = [];
if (systemMessage) {
messages.push({
role: 'system',
content: systemMessage
});
}
systemMessageOffset = messages.length;
nextMessages = text
? messages.concat([
{
role: role,
content: text,
name: opts.name
}
])
: messages;
numTokens = 0;
_b.label = 1;
case 1:
prompt_1 = nextMessages
.reduce(function (prompt, message) {
switch (message.role) {
case 'system':
return prompt.concat(["Instructions:\n".concat(message.content)]);
case 'user':
return prompt.concat(["".concat(userLabel, ":\n").concat(message.content)]);
case 'function':
return prompt.concat(["Function:\n".concat(message.content)]);
default:
return message.content ? prompt.concat(["".concat(assistantLabel, ":\n").concat(message.content)]) : prompt;
}
}, [])
.join('\n\n');
return [4 /*yield*/, this._getTokenCount(prompt_1)];
case 2:
nextNumTokensEstimate = _b.sent();
isValidPrompt = nextNumTokensEstimate <= maxNumTokens;
if (prompt_1 && !isValidPrompt) {
return [3 /*break*/, 5];
}
messages = nextMessages;
numTokens = nextNumTokensEstimate;
if (!isValidPrompt) {
return [3 /*break*/, 5];
}
if (!parentMessageId) {
return [3 /*break*/, 5];
}
return [4 /*yield*/, this._getMessageById(parentMessageId)];
case 3:
parentMessage = _b.sent();
if (!parentMessage) {
return [3 /*break*/, 5];
}
parentMessageRole = parentMessage.role || 'user';
nextMessages = nextMessages.slice(0, systemMessageOffset).concat(__spreadArray([
{
role: parentMessageRole,
content: parentMessage.text,
name: parentMessage.name,
function_call: parentMessage.functionCall ? parentMessage.functionCall : undefined,
}
], nextMessages.slice(systemMessageOffset), true));
parentMessageId = parentMessage.parentMessageId;
_b.label = 4;
case 4:
if (true) return [3 /*break*/, 1];
_b.label = 5;
case 5:
maxTokens = Math.max(1, Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens));
return [2 /*return*/, { messages: messages, maxTokens: maxTokens, numTokens: numTokens }];
}
});
});
};
ChatGPTAPI.prototype._getTokenCount = function (text) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
// TODO: use a better fix in the tokenizer
text = text.replace(/<\|endoftext\|>/g, '');
return [2 /*return*/, tokenizer.encode(text).length];
});
});
};
ChatGPTAPI.prototype._defaultGetMessageById = function (id) {
return __awaiter(this, void 0, void 0, function () {
var res;
return __generator(this, function (_a) {
switch (_a.label) {
case 0: return [4 /*yield*/, this._messageStore.get(id)];
case 1:
res = _a.sent();
return [2 /*return*/, res];
}
});
});
};
ChatGPTAPI.prototype._defaultUpsertMessage = function (message) {
return __awaiter(this, void 0, void 0, function () {
return __generator(this, function (_a) {
switch (_a.label) {
case 0: return [4 /*yield*/, this._messageStore.set(message.id, message)];
case 1:
_a.sent();
return [2 /*return*/];
}
});
});
};
return ChatGPTAPI;
}());
export { ChatGPTAPI };
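
A hedged sketch of the function-call round trip this wrapper enables, reusing the api client from the sketch above: sendMessage's optional third role argument and the functionCall field on the result come from the code above, and the jinyan schema mirrors the JinyanTool added later in this commit:

const res = await api.sendMessage('把QQ 12345 禁言十分钟', {
  completionParams: {
    functions: [{
      name: 'jinyan',
      description: 'mute a group member',
      parameters: {
        type: 'object',
        properties: {
          qq: { type: 'string' },
          groupId: { type: 'string' },
          time: { type: 'string' }
        },
        required: ['qq', 'groupId']
      }
    }]
  }
})
if (res.functionCall) {
  const args = JSON.parse(res.functionCall.arguments)
  // ...execute the matching tool with args, then hand its output back with role 'function'
  await api.sendMessage('the user 12345 has been muted for 600 seconds', {
    parentMessageId: res.id,
    name: res.functionCall.name
  }, 'function')
}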

utils/openai/chatgpt-api.ts (new file, 493 lines)

@@ -0,0 +1,493 @@
import Keyv from 'keyv'
import pTimeout from 'p-timeout'
import QuickLRU from 'quick-lru'
import { v4 as uuidv4 } from 'uuid'
import * as tokenizer from './tokenizer'
import * as types from './types'
import globalFetch from 'node-fetch'
import { fetchSSE } from './fetch-sse'
import { Role } from './types'
const CHATGPT_MODEL = 'gpt-3.5-turbo-0613'
const USER_LABEL_DEFAULT = 'User'
const ASSISTANT_LABEL_DEFAULT = 'ChatGPT'
export class ChatGPTAPI {
protected _apiKey: string
protected _apiBaseUrl: string
protected _apiOrg?: string
protected _debug: boolean
protected _systemMessage: string
protected _completionParams: Omit<
types.openai.CreateChatCompletionRequest,
'messages' | 'n'
>
protected _maxModelTokens: number
protected _maxResponseTokens: number
protected _fetch: types.FetchFn
protected _getMessageById: types.GetMessageByIdFunction
protected _upsertMessage: types.UpsertMessageFunction
protected _messageStore: Keyv<types.ChatMessage>
/**
* Creates a new client wrapper around OpenAI's chat completion API, mimicking the official ChatGPT webapp's functionality as closely as possible.
*
* @param apiKey - OpenAI API key (required).
* @param apiOrg - Optional OpenAI API organization.
* @param apiBaseUrl - Optional override for the OpenAI API base URL.
* @param debug - Optional enables logging debugging info to stdout.
* @param completionParams - Param overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
* @param maxModelTokens - Optional override for the maximum number of tokens allowed by the model's context. Defaults to 4000.
* @param maxResponseTokens - Optional override for the maximum number of tokens allowed for the model's response. Defaults to 1000.
* @param messageStore - Optional [Keyv](https://github.com/jaredwray/keyv) store to persist chat messages to. If not provided, messages will be lost when the process exits.
* @param getMessageById - Optional function to retrieve a message by its ID. If not provided, the default implementation will be used (using an in-memory `messageStore`).
* @param upsertMessage - Optional function to insert or update a message. If not provided, the default implementation will be used (using an in-memory `messageStore`).
* @param fetch - Optional override for the `fetch` implementation to use. Defaults to the global `fetch` function.
*/
constructor(opts: types.ChatGPTAPIOptions) {
const {
apiKey,
apiOrg,
apiBaseUrl = 'https://api.openai.com/v1',
debug = false,
messageStore,
completionParams,
systemMessage,
maxModelTokens = 4000,
maxResponseTokens = 1000,
getMessageById,
upsertMessage,
fetch = globalFetch
} = opts
this._apiKey = apiKey
this._apiOrg = apiOrg
this._apiBaseUrl = apiBaseUrl
this._debug = !!debug
this._fetch = fetch
this._completionParams = {
model: CHATGPT_MODEL,
temperature: 0.8,
top_p: 1.0,
presence_penalty: 1.0,
...completionParams
}
this._systemMessage = systemMessage
if (this._systemMessage === undefined) {
const currentDate = new Date().toISOString().split('T')[0]
this._systemMessage = `You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible.\nKnowledge cutoff: 2021-09-01\nCurrent date: ${currentDate}`
}
this._maxModelTokens = maxModelTokens
this._maxResponseTokens = maxResponseTokens
this._getMessageById = getMessageById ?? this._defaultGetMessageById
this._upsertMessage = upsertMessage ?? this._defaultUpsertMessage
if (messageStore) {
this._messageStore = messageStore
} else {
this._messageStore = new Keyv<types.ChatMessage, any>({
store: new QuickLRU<string, types.ChatMessage>({ maxSize: 10000 })
})
}
if (!this._apiKey) {
throw new Error('OpenAI missing required apiKey')
}
if (!this._fetch) {
throw new Error('Invalid environment; fetch is not defined')
}
if (typeof this._fetch !== 'function') {
throw new Error('Invalid "fetch" is not a function')
}
}
/**
* Sends a message to the OpenAI chat completions endpoint, waits for the response
* to resolve, and returns the response.
*
* If you want your response to have historical context, you must provide a valid `parentMessageId`.
*
* If you want to receive a stream of partial responses, use `opts.onProgress`.
*
* Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the OpenAI chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.
*
* @param message - The prompt message to send
* @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)
* @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)
* @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)
* @param opts.systemMessage - Optional override for the chat "system message" which acts as instructions to the model (defaults to the ChatGPT system message)
* @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)
* @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated
* @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)
* @param completionParams - Optional overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.
*
* @returns The response from ChatGPT
*/
async sendMessage(
text: string,
opts: types.SendMessageOptions = {},
role: Role = 'user',
): Promise<types.ChatMessage> {
const {
parentMessageId,
messageId = uuidv4(),
timeoutMs,
onProgress,
stream = onProgress ? true : false,
completionParams,
conversationId
} = opts
let { abortSignal } = opts
let abortController: AbortController = null
if (timeoutMs && !abortSignal) {
abortController = new AbortController()
abortSignal = abortController.signal
}
const message: types.ChatMessage = {
role,
id: messageId,
conversationId,
parentMessageId,
text,
name: opts.name
}
const latestQuestion = message
const { messages, maxTokens, numTokens } = await this._buildMessages(
text,
role,
opts
)
const result: types.ChatMessage = {
role: 'assistant',
id: uuidv4(),
conversationId,
parentMessageId: messageId,
text: '',
functionCall: undefined
}
const responseP = new Promise<types.ChatMessage>(
async (resolve, reject) => {
const url = `${this._apiBaseUrl}/chat/completions`
const headers = {
'Content-Type': 'application/json',
Authorization: `Bearer ${this._apiKey}`
}
const body = {
max_tokens: maxTokens,
...this._completionParams,
...completionParams,
messages,
stream
}
// Support multiple organizations
// See https://platform.openai.com/docs/api-reference/authentication
if (this._apiOrg) {
headers['OpenAI-Organization'] = this._apiOrg
}
if (this._debug) {
console.log(`sendMessage (${numTokens} tokens)`, body)
}
if (stream) {
fetchSSE(
url,
{
method: 'POST',
headers,
body: JSON.stringify(body),
signal: abortSignal,
onMessage: (data: string) => {
if (data === '[DONE]') {
result.text = result.text.trim()
return resolve(result)
}
try {
const response: types.openai.CreateChatCompletionDeltaResponse =
JSON.parse(data)
if (response.id) {
result.id = response.id
}
if (response.choices?.length) {
const delta = response.choices[0].delta
if (delta.function_call) {
if (delta.function_call.name) {
result.functionCall = {
name: delta.function_call.name,
arguments: delta.function_call.arguments
}
} else {
result.functionCall.arguments = (result.functionCall.arguments || '') + delta.function_call.arguments
}
} else {
result.delta = delta.content
if (delta?.content) result.text += delta.content
}
if (delta.role) {
result.role = delta.role
}
result.detail = response
onProgress?.(result)
}
} catch (err) {
console.warn('OpenAI stream SSE event unexpected error', err)
return reject(err)
}
}
},
this._fetch
).catch(reject)
} else {
try {
const res = await this._fetch(url, {
method: 'POST',
headers,
body: JSON.stringify(body),
signal: abortSignal
})
if (!res.ok) {
const reason = await res.text()
const msg = `OpenAI error ${
res.status || res.statusText
}: ${reason}`
const error = new types.ChatGPTError(msg, { cause: res })
error.statusCode = res.status
error.statusText = res.statusText
return reject(error)
}
const response: types.openai.CreateChatCompletionResponse =
await res.json()
if (this._debug) {
console.log(response)
}
if (response?.id) {
result.id = response.id
}
if (response?.choices?.length) {
const message = response.choices[0].message
if (message.content) {
result.text = message.content
} else if (message.function_call) {
result.functionCall = message.function_call
}
if (message.role) {
result.role = message.role
}
} else {
const res = response as any
return reject(
new Error(
`OpenAI error: ${
res?.detail?.message || res?.detail || 'unknown'
}`
)
)
}
result.detail = response
return resolve(result)
} catch (err) {
return reject(err)
}
}
}
).then(async (message) => {
if (message.detail && !message.detail.usage) {
try {
const promptTokens = numTokens
const completionTokens = await this._getTokenCount(message.text)
message.detail.usage = {
prompt_tokens: promptTokens,
completion_tokens: completionTokens,
total_tokens: promptTokens + completionTokens,
estimated: true
}
} catch (err) {
// TODO: this should really never happen, but if it does,
// we should handle it and notify the user gracefully
}
}
return Promise.all([
this._upsertMessage(latestQuestion),
this._upsertMessage(message)
]).then(() => message)
})
if (timeoutMs) {
if (abortController) {
// This will be called when a timeout occurs in order for us to forcibly
// ensure that the underlying HTTP request is aborted.
;(responseP as any).cancel = () => {
abortController.abort()
}
}
return pTimeout(responseP, {
milliseconds: timeoutMs,
message: 'OpenAI timed out waiting for response'
})
} else {
return responseP
}
}
get apiKey(): string {
return this._apiKey
}
set apiKey(apiKey: string) {
this._apiKey = apiKey
}
get apiOrg(): string {
return this._apiOrg
}
set apiOrg(apiOrg: string) {
this._apiOrg = apiOrg
}
protected async _buildMessages(text: string, role: Role, opts: types.SendMessageOptions) {
const { systemMessage = this._systemMessage } = opts
let { parentMessageId } = opts
const userLabel = USER_LABEL_DEFAULT
const assistantLabel = ASSISTANT_LABEL_DEFAULT
const maxNumTokens = this._maxModelTokens - this._maxResponseTokens
let messages: types.openai.ChatCompletionRequestMessage[] = []
if (systemMessage) {
messages.push({
role: 'system',
content: systemMessage
})
}
const systemMessageOffset = messages.length
let nextMessages = text
? messages.concat([
{
role,
content: text,
name: opts.name
}
])
: messages
let numTokens = 0
do {
const prompt = nextMessages
.reduce((prompt, message) => {
switch (message.role) {
case 'system':
return prompt.concat([`Instructions:\n${message.content}`])
case 'user':
return prompt.concat([`${userLabel}:\n${message.content}`])
case 'function':
return prompt.concat([`Function:\n${message.content}`])
default:
return message.content ? prompt.concat([`${assistantLabel}:\n${message.content}`]) : prompt
}
}, [] as string[])
.join('\n\n')
const nextNumTokensEstimate = await this._getTokenCount(prompt)
const isValidPrompt = nextNumTokensEstimate <= maxNumTokens
if (prompt && !isValidPrompt) {
break
}
messages = nextMessages
numTokens = nextNumTokensEstimate
if (!isValidPrompt) {
break
}
if (!parentMessageId) {
break
}
const parentMessage = await this._getMessageById(parentMessageId)
if (!parentMessage) {
break
}
const parentMessageRole = parentMessage.role || 'user'
nextMessages = nextMessages.slice(0, systemMessageOffset).concat([
{
role: parentMessageRole,
content: parentMessage.text,
name: parentMessage.name,
function_call: parentMessage.functionCall ? parentMessage.functionCall : undefined
},
...nextMessages.slice(systemMessageOffset)
])
parentMessageId = parentMessage.parentMessageId
} while (true)
// Use up to 4096 tokens (prompt + response), but try to leave 1000 tokens
// for the response.
const maxTokens = Math.max(
1,
Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens)
)
return { messages, maxTokens, numTokens }
}
protected async _getTokenCount(text: string) {
// TODO: use a better fix in the tokenizer
text = text.replace(/<\|endoftext\|>/g, '')
return tokenizer.encode(text).length
}
protected async _defaultGetMessageById(
id: string
): Promise<types.ChatMessage> {
const res = await this._messageStore.get(id)
return res
}
protected async _defaultUpsertMessage(
message: types.ChatMessage
): Promise<void> {
await this._messageStore.set(message.id, message)
}
}

utils/openai/fetch-sse.js (new file, 170 lines)

@@ -0,0 +1,170 @@
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
return new (P || (P = Promise))(function (resolve, reject) {
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
step((generator = generator.apply(thisArg, _arguments || [])).next());
});
};
var __generator = (this && this.__generator) || function (thisArg, body) {
var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
function verb(n) { return function (v) { return step([n, v]); }; }
function step(op) {
if (f) throw new TypeError("Generator is already executing.");
while (g && (g = 0, op[0] && (_ = 0)), _) try {
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
if (y = 0, t) op = [op[0] & 2, t.value];
switch (op[0]) {
case 0: case 1: t = op; break;
case 4: _.label++; return { value: op[1], done: false };
case 5: _.label++; y = op[1]; op = [0]; continue;
case 7: op = _.ops.pop(); _.trys.pop(); continue;
default:
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
if (t[2]) _.ops.pop();
_.trys.pop(); continue;
}
op = body.call(thisArg, _);
} catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
}
};
var __rest = (this && this.__rest) || function (s, e) {
var t = {};
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)
t[p] = s[p];
if (s != null && typeof Object.getOwnPropertySymbols === "function")
for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {
if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))
t[p[i]] = s[p[i]];
}
return t;
};
var __asyncValues = (this && this.__asyncValues) || function (o) {
if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
var m = o[Symbol.asyncIterator], i;
return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
};
import { createParser } from 'eventsource-parser';
import * as types from './types.js';
import fetch from 'node-fetch';
import { streamAsyncIterable } from './stream-async-iterable.js';
export function fetchSSE(url, options, fetchFn) {
var _a, e_1, _b, _c;
if (fetchFn === void 0) { fetchFn = fetch; }
return __awaiter(this, void 0, void 0, function () {
var onMessage, onError, fetchOptions, res, reason, err_1, msg, error, parser, feed, body_1, _d, _e, _f, chunk, str, e_1_1;
return __generator(this, function (_g) {
switch (_g.label) {
case 0:
onMessage = options.onMessage, onError = options.onError, fetchOptions = __rest(options, ["onMessage", "onError"]);
return [4 /*yield*/, fetchFn(url, fetchOptions)];
case 1:
res = _g.sent();
if (!!res.ok) return [3 /*break*/, 6];
reason = void 0;
_g.label = 2;
case 2:
_g.trys.push([2, 4, , 5]);
return [4 /*yield*/, res.text()];
case 3:
reason = _g.sent();
return [3 /*break*/, 5];
case 4:
err_1 = _g.sent();
reason = res.statusText;
return [3 /*break*/, 5];
case 5:
msg = "ChatGPT error ".concat(res.status, ": ").concat(reason);
error = new types.ChatGPTError(msg, { cause: res });
error.statusCode = res.status;
error.statusText = res.statusText;
throw error;
case 6:
parser = createParser(function (event) {
if (event.type === 'event') {
onMessage(event.data);
}
});
feed = function (chunk) {
var _a;
var response = null;
try {
response = JSON.parse(chunk);
}
catch (_b) {
// ignore
}
if (((_a = response === null || response === void 0 ? void 0 : response.detail) === null || _a === void 0 ? void 0 : _a.type) === 'invalid_request_error') {
var msg = "ChatGPT error ".concat(response.detail.message, ": ").concat(response.detail.code, " (").concat(response.detail.type, ")");
var error = new types.ChatGPTError(msg, { cause: response });
error.statusCode = response.detail.code;
error.statusText = response.detail.message;
if (onError) {
onError(error);
}
else {
console.error(error);
}
// don't feed to the event parser
return;
}
parser.feed(chunk);
};
if (!!res.body.getReader) return [3 /*break*/, 7];
body_1 = res.body;
if (!body_1.on || !body_1.read) {
throw new types.ChatGPTError('unsupported "fetch" implementation');
}
body_1.on('readable', function () {
var chunk;
while (null !== (chunk = body_1.read())) {
feed(chunk.toString());
}
});
return [3 /*break*/, 18];
case 7:
_g.trys.push([7, 12, 13, 18]);
_d = true, _e = __asyncValues(streamAsyncIterable(res.body));
_g.label = 8;
case 8: return [4 /*yield*/, _e.next()];
case 9:
if (!(_f = _g.sent(), _a = _f.done, !_a)) return [3 /*break*/, 11];
_c = _f.value;
_d = false;
chunk = _c;
str = new TextDecoder().decode(chunk);
feed(str);
_g.label = 10;
case 10:
_d = true;
return [3 /*break*/, 8];
case 11: return [3 /*break*/, 18];
case 12:
e_1_1 = _g.sent();
e_1 = { error: e_1_1 };
return [3 /*break*/, 18];
case 13:
_g.trys.push([13, , 16, 17]);
if (!(!_d && !_a && (_b = _e.return))) return [3 /*break*/, 15];
return [4 /*yield*/, _b.call(_e)];
case 14:
_g.sent();
_g.label = 15;
case 15: return [3 /*break*/, 17];
case 16:
if (e_1) throw e_1.error;
return [7 /*endfinally*/];
case 17: return [7 /*endfinally*/];
case 18: return [2 /*return*/];
}
});
});
}

utils/openai/fetch-sse.ts (new file, 89 lines)

@@ -0,0 +1,89 @@
import { createParser } from 'eventsource-parser'
import * as types from './types'
import nodefetch from 'node-fetch'
import { streamAsyncIterable } from './stream-async-iterable'
export async function fetchSSE(
url: string,
options: Parameters<typeof fetch>[1] & {
onMessage: (data: string) => void
onError?: (error: any) => void
},
fetch: types.FetchFn = nodefetch
) {
const { onMessage, onError, ...fetchOptions } = options
const res = await fetch(url, fetchOptions)
if (!res.ok) {
let reason: string
try {
reason = await res.text()
} catch (err) {
reason = res.statusText
}
const msg = `ChatGPT error ${res.status}: ${reason}`
const error = new types.ChatGPTError(msg, { cause: res })
error.statusCode = res.status
error.statusText = res.statusText
throw error
}
const parser = createParser((event) => {
if (event.type === 'event') {
onMessage(event.data)
}
})
// handle special response errors
const feed = (chunk: string) => {
let response = null
try {
response = JSON.parse(chunk)
} catch {
// ignore
}
if (response?.detail?.type === 'invalid_request_error') {
const msg = `ChatGPT error ${response.detail.message}: ${response.detail.code} (${response.detail.type})`
const error = new types.ChatGPTError(msg, { cause: response })
error.statusCode = response.detail.code
error.statusText = response.detail.message
if (onError) {
onError(error)
} else {
console.error(error)
}
// don't feed to the event parser
return
}
parser.feed(chunk)
}
if (!res.body.getReader) {
// Vercel polyfills `fetch` with `node-fetch`, which doesn't conform to
// web standards, so this is a workaround...
const body: NodeJS.ReadableStream = res.body as any
if (!body.on || !body.read) {
throw new types.ChatGPTError('unsupported "fetch" implementation')
}
body.on('readable', () => {
let chunk: string | Buffer
while (null !== (chunk = body.read())) {
feed(chunk.toString())
}
})
} else {
for await (const chunk of streamAsyncIterable(res.body)) {
const str = new TextDecoder().decode(chunk)
feed(str)
}
}
}
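
For reference, a minimal sketch of driving fetchSSE directly, mirroring the call site in chatgpt-api above (apiKey and messages are assumed to be in scope):

await fetchSSE('https://api.openai.com/v1/chat/completions', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${apiKey}`
  },
  body: JSON.stringify({ model: 'gpt-3.5-turbo-0613', stream: true, messages }),
  onMessage: (data) => {
    if (data === '[DONE]') return
    const chunk = JSON.parse(data)
    // each SSE event carries a partial delta of the completion
    process.stdout.write(chunk.choices?.[0]?.delta?.content ?? '')
  }
})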

@@ -0,0 +1,14 @@
export async function * streamAsyncIterable (stream) {
const reader = stream.getReader()
try {
while (true) {
const { done, value } = await reader.read()
if (done) {
return
}
yield value
}
} finally {
reader.releaseLock()
}
}
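
A small usage sketch, assuming a WHATWG ReadableStream such as a fetch response body (this is exactly how fetch-sse consumes it):

const res = await fetch(url) // url assumed in scope
for await (const chunk of streamAsyncIterable(res.body)) {
  console.log(new TextDecoder().decode(chunk))
}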

@@ -0,0 +1,6 @@
import { getEncoding } from 'js-tiktoken';
// TODO: make this configurable
var tokenizer = getEncoding('cl100k_base');
export function encode(input) {
return new Uint32Array(tokenizer.encode(input));
}

@@ -0,0 +1,8 @@
import { getEncoding } from 'js-tiktoken'
// TODO: make this configurable
const tokenizer = getEncoding('cl100k_base')
export function encode(input: string): Uint32Array {
return new Uint32Array(tokenizer.encode(input))
}
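
A quick check of what the encoder yields; _getTokenCount in chatgpt-api uses the length of this array as the token count:

const tokens = encode('Hello world')
console.log(tokens.length) // number of cl100k_base tokens in the input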

@@ -0,0 +1,5 @@
{
"compilerOptions": {
"module": "es2020"
}
}

utils/openai/types.js (new file, 26 lines)

@@ -0,0 +1,26 @@
var __extends = (this && this.__extends) || (function () {
var extendStatics = function (d, b) {
extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
return extendStatics(d, b);
};
return function (d, b) {
if (typeof b !== "function" && b !== null)
throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
extendStatics(d, b);
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
var ChatGPTError = /** @class */ (function (_super) {
__extends(ChatGPTError, _super);
function ChatGPTError() {
return _super !== null && _super.apply(this, arguments) || this;
}
return ChatGPTError;
}(Error));
export { ChatGPTError };
export var openai;
(function (openai) {
})(openai || (openai = {}));

utils/openai/types.ts (new file, 473 lines)

@@ -0,0 +1,473 @@
import Keyv from 'keyv'
export type Role = 'user' | 'assistant' | 'system' | 'function'
export type FetchFn = typeof fetch
export type ChatGPTAPIOptions = {
apiKey: string
/** @defaultValue `'https://api.openai.com'` **/
apiBaseUrl?: string
apiOrg?: string
/** @defaultValue `false` **/
debug?: boolean
completionParams?: Partial<
Omit<openai.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
>
systemMessage?: string
/** @defaultValue `4096` **/
maxModelTokens?: number
/** @defaultValue `1000` **/
maxResponseTokens?: number
messageStore?: Keyv
getMessageById?: GetMessageByIdFunction
upsertMessage?: UpsertMessageFunction
fetch?: FetchFn
}
export type SendMessageOptions = {
/**
* function role name
*/
name?: string
parentMessageId?: string
conversationId?: string
messageId?: string
stream?: boolean
systemMessage?: string
timeoutMs?: number
onProgress?: (partialResponse: ChatMessage) => void
abortSignal?: AbortSignal
completionParams?: Partial<
Omit<openai.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>
>
}
export type MessageActionType = 'next' | 'variant'
export type SendMessageBrowserOptions = {
conversationId?: string
parentMessageId?: string
messageId?: string
action?: MessageActionType
timeoutMs?: number
onProgress?: (partialResponse: ChatMessage) => void
abortSignal?: AbortSignal
}
export interface ChatMessage {
id: string
text: string
role: Role
name?: string
delta?: string
detail?:
| openai.CreateChatCompletionResponse
| CreateChatCompletionStreamResponse
// relevant for both ChatGPTAPI and ChatGPTUnofficialProxyAPI
parentMessageId?: string
// only relevant for ChatGPTUnofficialProxyAPI (optional for ChatGPTAPI)
conversationId?: string
functionCall?: openai.FunctionCall
}
export class ChatGPTError extends Error {
statusCode?: number
statusText?: string
isFinal?: boolean
accountId?: string
}
/** Returns a chat message from a store by its ID (or null if not found). */
export type GetMessageByIdFunction = (id: string) => Promise<ChatMessage>
/** Upserts a chat message to a store. */
export type UpsertMessageFunction = (message: ChatMessage) => Promise<void>
export interface CreateChatCompletionStreamResponse
extends openai.CreateChatCompletionDeltaResponse {
usage: CreateCompletionStreamResponseUsage
}
export interface CreateCompletionStreamResponseUsage
extends openai.CreateCompletionResponseUsage {
estimated: true
}
/**
* https://chat.openapi.com/backend-api/conversation
*/
export type ConversationJSONBody = {
/**
* The action to take
*/
action: string
/**
* The ID of the conversation
*/
conversation_id?: string
/**
* Prompts to provide
*/
messages: Prompt[]
/**
* The model to use
*/
model: string
/**
* The parent message ID
*/
parent_message_id: string
}
export type Prompt = {
/**
* The content of the prompt
*/
content: PromptContent
/**
* The ID of the prompt
*/
id: string
/**
* The role played in the prompt
*/
role: Role
}
export type ContentType = 'text'
export type PromptContent = {
/**
* The content type of the prompt
*/
content_type: ContentType
/**
* The parts to the prompt
*/
parts: string[]
}
export type ConversationResponseEvent = {
message?: Message
conversation_id?: string
error?: string | null
}
export type Message = {
id: string
content: MessageContent
role: Role
user: string | null
create_time: string | null
update_time: string | null
end_turn: null
weight: number
recipient: string
metadata: MessageMetadata
}
export type MessageContent = {
content_type: string
parts: string[]
}
export type MessageMetadata = any
export namespace openai {
export interface CreateChatCompletionDeltaResponse {
id: string
object: 'chat.completion.chunk'
created: number
model: string
choices: [
{
delta: {
role: Role
content?: string,
function_call?: {name: string, arguments: string}
}
index: number
finish_reason: string | null
}
]
}
/**
*
* @export
* @interface ChatCompletionRequestMessage
*/
export interface ChatCompletionRequestMessage {
/**
* The role of the author of this message.
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
role: ChatCompletionRequestMessageRoleEnum
/**
* The contents of the message
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
content: string
/**
* The name of the user in a multi-user chat
* @type {string}
* @memberof ChatCompletionRequestMessage
*/
name?: string
function_call?: FunctionCall
}
export interface FunctionCall {
name: string
arguments: string
}
export declare const ChatCompletionRequestMessageRoleEnum: {
readonly System: 'system'
readonly User: 'user'
readonly Assistant: 'assistant'
readonly Function: 'function'
}
export declare type ChatCompletionRequestMessageRoleEnum =
(typeof ChatCompletionRequestMessageRoleEnum)[keyof typeof ChatCompletionRequestMessageRoleEnum]
/**
*
* @export
* @interface ChatCompletionResponseMessage
*/
export interface ChatCompletionResponseMessage {
/**
* The role of the author of this message.
* @type {string}
* @memberof ChatCompletionResponseMessage
*/
role: ChatCompletionResponseMessageRoleEnum
/**
* The contents of the message
* @type {string}
* @memberof ChatCompletionResponseMessage
*/
content: string
function_call: FunctionCall
}
export declare const ChatCompletionResponseMessageRoleEnum: {
readonly System: 'system'
readonly User: 'user'
readonly Assistant: 'assistant'
}
export declare type ChatCompletionResponseMessageRoleEnum =
(typeof ChatCompletionResponseMessageRoleEnum)[keyof typeof ChatCompletionResponseMessageRoleEnum]
/**
*
* @export
* @interface CreateChatCompletionRequest
*/
export interface CreateChatCompletionRequest {
/**
* ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported.
* @type {string}
* @memberof CreateChatCompletionRequest
*/
model: string
/**
* The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction).
* @type {Array<ChatCompletionRequestMessage>}
* @memberof CreateChatCompletionRequest
*/
messages: Array<ChatCompletionRequestMessage>
/**
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
* @type {number}
* @memberof CreateChatCompletionRequest
*/
temperature?: number | null
/**
* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
* @type {number}
* @memberof CreateChatCompletionRequest
*/
top_p?: number | null
/**
* How many chat completion choices to generate for each input message.
* @type {number}
* @memberof CreateChatCompletionRequest
*/
n?: number | null
/**
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
* @type {boolean}
* @memberof CreateChatCompletionRequest
*/
stream?: boolean | null
/**
*
* @type {CreateChatCompletionRequestStop}
* @memberof CreateChatCompletionRequest
*/
stop?: CreateChatCompletionRequestStop
/**
* The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
* @type {number}
* @memberof CreateChatCompletionRequest
*/
max_tokens?: number
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
* @type {number}
* @memberof CreateChatCompletionRequest
*/
presence_penalty?: number | null
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
* @type {number}
* @memberof CreateChatCompletionRequest
*/
frequency_penalty?: number | null
/**
* Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
* @type {object}
* @memberof CreateChatCompletionRequest
*/
logit_bias?: object | null
/**
* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
* @type {string}
* @memberof CreateChatCompletionRequest
*/
user?: string
functions?: Function[]
}
export interface Function {
name: string
description: string
parameters: FunctionParameters
}
export interface FunctionParameters {
type: string
properties: Record<string, Record<string, any>>
required: string[]
}
/**
* @type CreateChatCompletionRequestStop
* Up to 4 sequences where the API will stop generating further tokens.
* @export
*/
export declare type CreateChatCompletionRequestStop = Array<string> | string
/**
*
* @export
* @interface CreateChatCompletionResponse
*/
export interface CreateChatCompletionResponse {
/**
*
* @type {string}
* @memberof CreateChatCompletionResponse
*/
id: string
/**
*
* @type {string}
* @memberof CreateChatCompletionResponse
*/
object: string
/**
*
* @type {number}
* @memberof CreateChatCompletionResponse
*/
created: number
/**
*
* @type {string}
* @memberof CreateChatCompletionResponse
*/
model: string
/**
*
* @type {Array<CreateChatCompletionResponseChoicesInner>}
* @memberof CreateChatCompletionResponse
*/
choices: Array<CreateChatCompletionResponseChoicesInner>
/**
*
* @type {CreateCompletionResponseUsage}
* @memberof CreateChatCompletionResponse
*/
usage?: CreateCompletionResponseUsage
}
/**
*
* @export
* @interface CreateChatCompletionResponseChoicesInner
*/
export interface CreateChatCompletionResponseChoicesInner {
/**
*
* @type {number}
* @memberof CreateChatCompletionResponseChoicesInner
*/
index?: number
/**
*
* @type {ChatCompletionResponseMessage}
* @memberof CreateChatCompletionResponseChoicesInner
*/
message?: ChatCompletionResponseMessage
/**
*
* @type {string}
* @memberof CreateChatCompletionResponseChoicesInner
*/
finish_reason?: string
}
/**
*
* @export
* @interface CreateCompletionResponseUsage
*/
export interface CreateCompletionResponseUsage {
/**
*
* @type {number}
* @memberof CreateCompletionResponseUsage
*/
prompt_tokens: number
/**
*
* @type {number}
* @memberof CreateCompletionResponseUsage
*/
completion_tokens: number
/**
*
* @type {number}
* @memberof CreateCompletionResponseUsage
*/
total_tokens: number
}
}

@@ -0,0 +1,20 @@
export class AbstractTool {
name = ''
parameters = {}
description = ''
func = async function () {}
// serialize this tool into the function definition format expected by the OpenAI API
function () {
if (!this.parameters.type) {
this.parameters.type = 'object'
}
return {
name: this.name,
description: this.description,
parameters: this.parameters
}
}
}
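
A hedged sketch of how concrete tools plug in, reusing the api client from the earlier sketch: each subclass fills in name, description and parameters (see EditCardTool below), function() serializes that into the schema OpenAI expects, and a dispatcher runs func with the parsed arguments. The tool list and dispatch here are illustrative:

const tools = [new EditCardTool(), new JinyanTool()]
const res = await api.sendMessage(userText, { // userText assumed in scope
  completionParams: { functions: tools.map(t => t.function()) }
})
if (res.functionCall) {
  const tool = tools.find(t => t.name === res.functionCall.name)
  const toolResult = await tool.func(JSON.parse(res.functionCall.arguments))
}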

@@ -0,0 +1,35 @@
import { AbstractTool } from './AbstractTool.js'
export class EditCardTool extends AbstractTool {
name = 'editCard'
parameters = {
properties: {
qq: {
type: 'string',
description: '你想改名片的那个人的qq号'
},
card: {
type: 'string',
description: '你想给他改的新名片'
},
groupId: {
type: 'string',
description: '群号'
}
},
required: ['qq', 'card', 'groupId']
}
description = '当你想要修改某个群员的群名片时有用。输入应该是群号、qq号和群名片用空格隔开。'
func = async function (opts) {
let {qq, card, groupId} = opts
groupId = parseInt(groupId.trim())
qq = parseInt(qq.trim())
logger.info('edit card: ', groupId, qq)
let group = await Bot.pickGroup(groupId)
await group.setCard(qq, card)
return `the user ${qq}'s card has been changed into ${card}`
}
}

@@ -0,0 +1,49 @@
import { AbstractTool } from './AbstractTool.js'
import fetch, { File, FormData } from 'node-fetch'
import { Config } from '../config.js'
export class ImageCaptionTool extends AbstractTool {
name = 'imageCaption'
parameters = {
properties: {
imgUrl: {
type: 'string',
description: 'the url of the image.'
},
qq: {
type: 'string',
description: 'if the picture is an avatar of a user, just give his qq number'
}
},
required: []
}
description = 'useful when you want to know what is inside a photo, such as user\'s avatar or other pictures'
func = async function (opts) {
let { imgUrl, qq } = opts
if (qq) {
imgUrl = `https://q1.qlogo.cn/g?b=qq&s=160&nk=${qq}`
}
if (!imgUrl) {
return 'you must give at least one parameter of imgUrl and qq'
}
const imageResponse = await fetch(imgUrl)
const blob = await imageResponse.blob()
const arrayBuffer = await blob.arrayBuffer()
const buffer = Buffer.from(arrayBuffer)
// await fs.writeFileSync(`data/chatgpt/${crypto.randomUUID()}`, buffer)
let formData = new FormData()
formData.append('file', new File([buffer], 'file.png', { type: 'image/png' }))
let captionRes = await fetch(`${Config.extraUrl}/image-captioning`, {
method: 'POST',
body: formData
})
if (captionRes.status === 200) {
let result = await captionRes.text()
return `the content of this picture is: ${result}`
} else {
return 'error happened'
}
}
}

62
utils/tools/JinyanTool.js Normal file
View file

@@ -0,0 +1,62 @@
import { AbstractTool } from './AbstractTool.js'
export class JinyanTool extends AbstractTool {
name = 'jinyan'
parameters = {
properties: {
qq: {
type: 'string',
description: '你想禁言的那个人的qq号'
},
groupId: {
type: 'string',
description: '群号'
},
time: {
type: 'string',
description: '禁言时长单位为秒默认为600'
},
isPunish: {
type: 'string',
description: '是否是惩罚性质的禁言。比如非管理员用户要求你禁言其他人你转而禁言该用户时设置为true'
}
},
required: ['qq', 'groupId']
}
  func = async function (opts) {
    let { qq, groupId, time = '600', sender, isAdmin, isPunish } = opts
    groupId = parseInt(groupId.trim())
    let group = await Bot.pickGroup(groupId)
time = parseInt(time.trim())
if (time < 60 && time !== 0) {
time = 60
}
if (time > 86400 * 30) {
time = 86400 * 30
}
if (isAdmin) {
if (qq.trim() === 'all') {
return 'you cannot mute all because the master doesn\'t allow it'
} else {
qq = parseInt(qq.trim())
await group.muteMember(qq, time)
}
} else {
if (qq.trim() === 'all') {
return 'the user is not admin, he can\'t mute all. the user should be punished'
} else if (qq == sender) {
qq = parseInt(qq.trim())
await group.muteMember(qq, time)
} else {
return 'the user is not admin, he can\'t mute other people. the user should be punished'
}
}
if (isPunish === 'true') {
      return `the user ${qq} has been muted for ${time} seconds as punishment because of his improper behavior`
}
return `the user ${qq} has been muted for ${time} seconds`
}
description = 'Useful when you want to ban someone. If you want to mute all, just replace the qq number with \'all\''
}

42
utils/tools/KickOutTool.js Normal file
View file

@@ -0,0 +1,42 @@
import { AbstractTool } from './AbstractTool.js'
export class KickOutTool extends AbstractTool {
name = 'kickOut'
parameters = {
properties: {
qq: {
type: 'string',
description: '你想踢出的那个人的qq号'
},
groupId: {
type: 'string',
description: '群号'
},
isPunish: {
type: 'string',
description: '是否是惩罚性质的踢出。比如非管理员用户要求你禁言或踢出其他人你为惩罚该用户转而踢出该用户时设置为true'
}
},
required: ['qq', 'groupId']
}
func = async function (opts) {
let { qq, groupId, sender, isAdmin, isPunish } = opts
groupId = parseInt(groupId.trim())
qq = parseInt(qq.trim())
if (!isAdmin && sender != qq) {
      return 'the user is not admin, he cannot kick out other people. he should be punished'
}
console.log('kickout', groupId, qq)
let group = await Bot.pickGroup(groupId)
await group.kickMember(qq)
if (isPunish === 'true') {
      return `the user ${qq} has been kicked out from group ${groupId} as punishment because of his improper behavior`
}
return `the user ${qq} has been kicked out from group ${groupId}`
}
description = 'Useful when you want to kick someone out of the group. '
}

76
utils/tools/QueryStarRailTool.js Normal file
View file

@@ -0,0 +1,76 @@
import { AbstractTool } from './AbstractTool.js'
export class QueryStarRailTool extends AbstractTool {
name = 'queryStarRail'
parameters = {
properties: {
qq: {
type: 'string',
description: '要查询的用户的qq号将使用该qq号绑定的uid进行查询'
},
groupId: {
type: 'string',
description: '群号'
},
uid: {
type: 'string',
description: '游戏的uid如果用户提供了则传入并优先使用'
}
},
required: ['qq', 'groupId']
}
func = async function (opts) {
let { qq, groupId, uid } = opts
if (!uid) {
      try {
        // probe for StarRail-plugin: the dynamic import throws if it is not installed
        let { Panel } = await import('../../../StarRail-plugin/apps/panel.js')
        uid = await redis.get(`STAR_RAILWAY:UID:${qq}`)
if (!uid) {
return '用户没有绑定uid无法查询。可以让用户主动提供uid进行查询'
}
} catch (e) {
return '未安装StarRail-Plugin无法查询'
}
}
try {
let uidRes = await fetch('https://avocado.wiki/v1/info/' + uid)
uidRes = await uidRes.json()
let { assistAvatar, displayAvatars } = uidRes.playerDetailInfo
function dealAvatar (avatar) {
delete avatar.position
delete avatar.vo_tag
delete avatar.desc
delete avatar.promption
delete avatar.relics
delete avatar.behaviorList
delete avatar.images
delete avatar.ranks
if (avatar.equipment) {
avatar.equipment = {
level: avatar.equipment.level,
rank: avatar.equipment.rank,
name: avatar.equipment.name,
skill_desc: avatar.equipment.skill_desc
}
}
}
dealAvatar(assistAvatar)
if (displayAvatars) {
displayAvatars.forEach(avatar => {
dealAvatar(avatar)
})
}
uidRes.playerDetailInfo.assistAvatar = assistAvatar
uidRes.playerDetailInfo.displayAvatars = displayAvatars
delete uidRes.repository
delete uidRes.version
return `the player info in json format is: \n${JSON.stringify(uidRes)}`
} catch (err) {
return `failed to query, error: ${err.toString()}`
}
}
description = 'Useful when you want to query player information of Honkai Star Rail(崩坏:星穹铁道). '
}

76
utils/tools/SearchVideoTool.js Normal file
View file

@@ -0,0 +1,76 @@
import fetch from 'node-fetch'
import { formatDate } from '../common.js'
import { AbstractTool } from './AbstractTool.js'
export class SearchVideoTool extends AbstractTool {
name = 'searchVideo'
parameters = {
properties: {
keyword: {
type: 'string',
description: '要搜索的视频的标题或关键词'
}
},
required: ['keyword']
}
func = async function (opts) {
let { keyword } = opts
try {
return await searchBilibili(keyword)
} catch (err) {
logger.error(err)
return `fail to search video, error: ${err.toString()}`
}
}
description = 'Useful when you want to search a video by keywords. you should remember the id of the video if you want to share it'
}
export async function searchBilibili (name) {
let biliRes = await fetch('https://www.bilibili.com',
{
// headers: {
// accept: 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
// Accept: '*/*',
// 'Accept-Encoding': 'gzip, deflate, br',
// 'accept-language': 'en-US,en;q=0.9',
// Connection: 'keep-alive',
// 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
// }
})
const headers = biliRes.headers.raw()
const setCookieHeaders = headers['set-cookie']
if (setCookieHeaders) {
const cookies = []
setCookieHeaders.forEach(header => {
const cookie = header.split(';')[0]
cookies.push(cookie)
})
const cookieHeader = cookies.join('; ')
let headers = {
accept: 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
'accept-language': 'en-US,en;q=0.9',
Referer: 'https://www.bilibili.com',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
cookie: cookieHeader
}
let response = await fetch(`https://api.bilibili.com/x/web-interface/search/type?keyword=${name}&search_type=video`,
{
headers
})
let json = await response.json()
if (json.data?.numResults > 0) {
let result = json.data.result.map(r => {
return `id: ${r.bvid},标题:${r.title},作者:${r.author},播放量:${r.play},发布日期:${formatDate(new Date(r.pubdate * 1000))}`
}).slice(0, Math.min(json.data?.numResults, 5)).join('\n')
return `这些是关键词“${name}”的搜索结果:\n${result}`
} else {
return `没有找到关键词“${name}”的搜索结果`
}
}
  return 'failed to search video: could not get cookies from bilibili'
}
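A hedged usage sketch of the exported helper; the keyword is arbitrary and the line format comes from the mapping code above:

// Returns up to five lines such as
// "id: BV1xx411c7mD标题...作者...播放量...发布日期2023-06-24 12:00:00"
let summary = await searchBilibili('星穹铁道')
console.log(summary)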

30
utils/tools/SerpImageTool.js Normal file
View file

@@ -0,0 +1,30 @@
import fetch from 'node-fetch'
import { AbstractTool } from './AbstractTool.js'
export class SerpImageTool extends AbstractTool {
name = 'searchImage'
parameters = {
properties: {
q: {
type: 'string',
description: 'search keyword'
}
},
required: ['q']
}
func = async function (opts) {
let { q } = opts
let serpRes = await fetch(`https://serp.ikechan8370.com/image/bing?q=${encodeURIComponent(q)}`, {
headers: {
'X-From-Library': 'ikechan8370'
}
})
serpRes = await serpRes.json()
let res = serpRes.data
    return `the images search results are here in json format:\n${JSON.stringify(res)}. the murl field is the real picture url. You should use sendPicture to send them`
}
description = 'Useful when you want to search images from the internet. '
}

39
utils/tools/SearchMusicTool.js Normal file
View file

@@ -0,0 +1,39 @@
import fetch from 'node-fetch'
import { AbstractTool } from './AbstractTool.js'
export class SearchMusicTool extends AbstractTool {
name = 'searchMusic'
parameters = {
properties: {
keyword: {
type: 'string',
description: '音乐的标题或关键词'
}
},
required: ['keyword']
}
func = async function (opts) {
let { keyword } = opts
try {
let result = await searchMusic163(keyword)
return `search result: ${result}`
} catch (e) {
return `music search failed: ${e}`
}
}
description = 'Useful when you want to search music by keyword.'
}
export async function searchMusic163 (name) {
let response = await fetch(`http://music.163.com/api/search/get/web?s=${name}&type=1&offset=0&total=true&limit=6`)
let json = await response.json()
if (json.result?.songCount > 0) {
return json.result.songs.map(song => {
return `id: ${song.id}, name: ${song.name}, artists: ${song.artists.map(a => a.name).join('&')}, alias: ${song.alias || 'none'}`
}).join('\n')
}
  return 'no songs found for this keyword'
}

33
utils/tools/SendAvatarTool.js Normal file
View file

@@ -0,0 +1,33 @@
import { AbstractTool } from './AbstractTool.js'
export class SendAvatarTool extends AbstractTool {
name = 'sendAvatar'
parameters = {
properties: {
qq: {
type: 'string',
description: '要发头像的人的qq号'
},
groupId: {
type: 'string',
description: '群号或qq号发送目标'
}
},
required: ['qq', 'groupId']
}
func = async function (opts) {
let { qq, groupId } = opts
let groupList = await Bot.getGroupList()
groupId = parseInt(groupId.trim())
console.log('sendAvatar', groupId, qq)
    if (groupList.get(groupId)) {
      let group = await Bot.pickGroup(groupId)
      await group.sendMsg(segment.image('https://q1.qlogo.cn/g?b=qq&s=0&nk=' + qq))
      return `the user ${qq}'s avatar has been sent to group ${groupId}`
    }
    return `failed to send avatar: group ${groupId} not found`
}
  description = 'Useful when you want to send the user avatar picture to the group. The input to this tool should be the user\'s qq number and the target group number, and they should be concatenated with a space. 如果是在群聊中,优先选择群号发送。'
}

136
utils/tools/SendVideoTool.js Normal file
View file

@@ -0,0 +1,136 @@
import fetch from 'node-fetch'
import { formatDate, mkdirs } from '../common.js'
import fs from 'fs'
import { AbstractTool } from './AbstractTool.js'
export class SendVideoTool extends AbstractTool {
name = 'sendVideo'
parameters = {
properties: {
id: {
type: 'string',
description: '要发的视频的id'
},
groupId: {
type: 'string',
description: '群号或qq号发送目标'
}
},
required: ['id', 'groupId']
}
func = async function (opts) {
let { id, groupId } = opts
groupId = parseInt(groupId.trim())
let msg = []
try {
let { arcurl, title, pic, description, videoUrl, headers, bvid, author, play, pubdate, like, honor } = await getBilibili(id)
let group = await Bot.pickGroup(groupId)
msg.push(title.replace(/(<([^>]+)>)/ig, '') + '\n')
msg.push(`UP主${author} 发布日期:${formatDate(new Date(pubdate * 1000))} 播放量:${play} 点赞:${like}\n`)
msg.push(arcurl + '\n')
msg.push(segment.image(pic))
msg.push('\n' + description)
if (honor) {
msg.push(`本视频曾获得过${honor}称号`)
}
msg.push('\n视频在路上啦')
await group.sendMsg(msg)
const videoResponse = await fetch(videoUrl, { headers })
const fileType = videoResponse.headers.get('Content-Type').split('/')[1]
let fileLoc = `data/chatgpt/videos/${bvid}.${fileType}`
mkdirs('data/chatgpt/videos')
      // fire-and-forget: download the video and send it after the text reply has gone out
      videoResponse.blob().then(async blob => {
        const arrayBuffer = await blob.arrayBuffer()
        const buffer = Buffer.from(arrayBuffer)
        fs.writeFileSync(fileLoc, buffer)
        await group.sendMsg(segment.video(fileLoc))
      })
return `the video ${title.replace(/(<([^>]+)>)/ig, '')} was shared to ${groupId}. the video information: ${msg}`
} catch (err) {
logger.error(err)
if (msg.length > 0) {
return `fail to share video, but the video msg is found: ${msg}, you can just tell the information of this video`
} else {
return `fail to share video, error: ${err.toString()}`
}
}
}
description = 'Useful when you want to share a video. You must use searchVideo to get search result and choose one video and get its id'
}
export async function getBilibili (bvid) {
let biliRes = await fetch('https://www.bilibili.com',
{
// headers: {
// accept: 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
// Accept: '*/*',
// 'Accept-Encoding': 'gzip, deflate, br',
// 'accept-language': 'en-US,en;q=0.9',
// Connection: 'keep-alive',
// 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
// }
})
const headers = biliRes.headers.raw()
const setCookieHeaders = headers['set-cookie']
if (setCookieHeaders) {
const cookies = []
setCookieHeaders.forEach(header => {
const cookie = header.split(';')[0]
cookies.push(cookie)
})
const cookieHeader = cookies.join('; ')
let headers = {
accept: 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
'accept-language': 'en-US,en;q=0.9',
Referer: 'https://www.bilibili.com',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
cookie: cookieHeader
}
let videoInfo = await fetch(`https://api.bilibili.com/x/web-interface/view?bvid=${bvid}`, {
headers
})
videoInfo = await videoInfo.json()
let cid = videoInfo.data.cid
let arcurl = `http://www.bilibili.com/video/av${videoInfo.data.aid}`
let title = videoInfo.data.title
let pic = videoInfo.data.pic
let description = videoInfo.data.desc
let author = videoInfo.data.owner.name
let play = videoInfo.data.stat.view
let pubdate = videoInfo.data.pubdate
let like = videoInfo.data.stat.like
let honor = videoInfo.data.honor_reply?.honor?.map(h => h.desc)?.join('、')
let downloadInfo = await fetch(`https://api.bilibili.com/x/player/playurl?bvid=${bvid}&cid=${cid}`, {headers})
let videoUrl = (await downloadInfo.json()).data.durl[0].url
return {
arcurl, title, pic, description, videoUrl, headers, bvid, author, play, pubdate, like, honor
}
} else {
return {}
}
}
// weighted random index helper (currently unused)
function randomIndex () {
// Define weights for each index
const weights = [5, 4, 3, 2, 1, 1, 1, 1, 1, 1, 1]
// Compute the total weight
const totalWeight = weights.reduce((sum, weight) => sum + weight, 0)
// Generate a random number between 0 and the total weight
const randomNumber = Math.floor(Math.random() * totalWeight)
// Choose the index based on the random number and weights
let weightSum = 0
for (let i = 0; i < weights.length; i++) {
weightSum += weights[i]
if (randomNumber < weightSum) {
return i
}
}
}

35
utils/tools/SendDiceTool.js Normal file
View file

@@ -0,0 +1,35 @@
import {AbstractTool} from "./AbstractTool.js";
export class SendDiceTool extends AbstractTool {
name = 'sendDice'
parameters = {
properties: {
num: {
type: 'number',
description: '骰子的数量'
},
groupId: {
type: 'string',
description: '群号或qq号发送目标'
}
},
required: ['num', 'groupId']
}
func = async function (opts) {
    let { num, groupId } = opts
    groupId = parseInt(groupId.trim())
    let groupList = await Bot.getGroupList()
    if (groupList.get(groupId)) {
let group = await Bot.pickGroup(groupId, true)
await group.sendMsg(segment.dice(num))
} else {
let friend = await Bot.pickFriend(groupId)
await friend.sendMsg(segment.dice(num))
}
return `the dice has been sent`
}
  description = 'If you want to roll dice, use this tool. If you know the group number, use the group number instead of the qq number first. The input should be the number of dice to be cast (1-6) and the target group number or qq number, and they should be concatenated with a space'
}

33
utils/tools/SendMusicTool.js Normal file
View file

@@ -0,0 +1,33 @@
import { AbstractTool } from './AbstractTool.js'
export class SendMusicTool extends AbstractTool {
name = 'sendMusic'
parameters = {
properties: {
id: {
type: 'string',
description: '音乐的id'
},
groupId: {
type: 'string',
description: '群号或qq号发送目标'
}
},
    required: ['id', 'groupId']
}
func = async function (opts) {
let { id, groupId } = opts
groupId = parseInt(groupId.trim())
try {
let group = await Bot.pickGroup(groupId)
await group.shareMusic('163', id)
return `the music has been shared to ${groupId}`
} catch (e) {
return `music share failed: ${e}`
}
}
description = 'Useful when you want to share music. You must use searchMusic first to get the music id'
}

50
utils/tools/SendPictureTool.js Normal file
View file

@@ -0,0 +1,50 @@
import { AbstractTool } from './AbstractTool.js'
export class SendPictureTool extends AbstractTool {
name = 'sendPicture'
parameters = {
properties: {
picture: {
type: 'string',
description: 'the url of the pictures, split with space if more than one.'
},
qq: {
type: 'string',
description: 'if you want to send avatar of a user, input his qq number.'
},
groupId: {
type: 'string',
description: '群号或qq号发送目标'
}
},
required: ['picture', 'groupId']
}
func = async function (opt) {
let { picture, groupId, qq } = opt
if (qq) {
let avatar = `https://q1.qlogo.cn/g?b=qq&s=0&nk=${qq}`
picture += ' ' + avatar
}
let pictures = picture.trim().split(' ')
pictures = pictures.map(img => segment.image(img))
let groupList = await Bot.getGroupList()
groupId = parseInt(groupId)
try {
if (groupList.get(groupId)) {
let group = await Bot.pickGroup(groupId)
await group.sendMsg(pictures)
return `picture has been sent to group ${groupId}`
} else {
let user = await Bot.pickFriend(groupId)
await user.sendMsg(pictures)
return `picture has been sent to user ${groupId}`
}
} catch (err) {
      return `failed to send pictures, error: ${err.toString()}`
}
}
description = 'Useful when you want to send one or more pictures. '
}

30
utils/tools/SendRPSTool.js Normal file
View file

@@ -0,0 +1,30 @@
import {AbstractTool} from "./AbstractTool.js";
export class SendRPSTool extends AbstractTool {
name = 'sendRPS'
parameters = {
num: {
type: 'number',
description: '石头剪刀布的代号'
},
groupId: {
type: 'string',
description: '群号或qq号发送目标'
},
required: ['num', 'groupId']
}
func = async function (num, groupId) {
let groupList = await Bot.getGroupList()
if (groupList.get(groupId)) {
let group = await Bot.pickGroup(groupId, true)
await group.sendMsg(segment.rps(num))
} else {
let friend = await Bot.pickFriend(groupId)
await friend.sendMsg(segment.rps(num))
}
}
description = 'Use this tool if you want to play rock paper scissors. If you know the group number, use the group number instead of the qq number first. The input should be the number 1, 2 or 3 to represent rock-paper-scissors and the target group number or qq numberand they should be concat with a space'
}

37
utils/tools/SerpIkechan8370Tool.js Normal file
View file

@@ -0,0 +1,37 @@
import fetch from 'node-fetch'
import { AbstractTool } from './AbstractTool.js'
export class SerpIkechan8370Tool extends AbstractTool {
name = 'search'
parameters = {
properties: {
q: {
type: 'string',
description: 'search keyword'
},
      source: {
        type: 'string',
        enum: ['google', 'bing', 'baidu'],
        description: 'the search engine to use; google by default'
      }
},
required: ['q']
}
func = async function (opts) {
let { q, source } = opts
if (!source) {
source = 'google'
}
let serpRes = await fetch(`https://serp.ikechan8370.com/${source}?q=${encodeURIComponent(q)}&lang=zh-CN&limit=10`, {
headers: {
'X-From-Library': 'ikechan8370'
}
})
serpRes = await serpRes.json()
let res = serpRes.data
return `the search results are here in json format:\n${JSON.stringify(res)}`
}
description = 'Useful when you want to search something from the internet. If you don\'t know much about the user\'s question, just search about it! If you want to know details of a result, you can use website tool'
}

40
utils/tools/SerpTool.js Normal file
View file

@@ -0,0 +1,40 @@
import fetch from 'node-fetch'
import { AbstractTool } from './AbstractTool.js'
import { Config } from '../config.js'
export class SerpTool extends AbstractTool {
name = 'serp'
parameters = {
properties: {
q: {
type: 'string',
description: 'search keyword'
}
},
required: ['q']
}
func = async function (opts) {
let { q } = opts
let key = Config.azSerpKey
let serpRes = await fetch(`https://api.bing.microsoft.com/v7.0/search?q=${encodeURIComponent(q)}&mkt=zh-CN`, {
headers: {
'Ocp-Apim-Subscription-Key': key
}
})
serpRes = await serpRes.json()
    let res = serpRes.webPages?.value || []
res.forEach(p => {
delete p.displayUrl
delete p.isFamilyFriendly
delete p.thumbnailUrl
delete p.id
delete p.isNavigational
})
return `the search results are here in json format:\n${JSON.stringify(res)}`
}
description = 'Useful when you want to search something from the internet. If you don\'t know much about the user\'s question, just search about it! If you want to know details of a result, you can use website tool'
}

35
utils/tools/WeatherTool.js Normal file
View file

@@ -0,0 +1,35 @@
import fetch from 'node-fetch'
import { AbstractTool } from './AbstractTool.js'
import { Config } from '../config.js'
export class WeatherTool extends AbstractTool {
name = 'weather'
parameters = {
properties: {
city: {
type: 'string',
description: '要查询的地点,细化到县/区级'
}
},
required: ['city']
}
func = async function (opts) {
let { city } = opts
let key = Config.amapKey
let adcodeRes = await fetch(`https://restapi.amap.com/v3/config/district?keywords=${city}&subdistrict=1&key=${key}`)
adcodeRes = await adcodeRes.json()
let adcode = adcodeRes.districts[0]?.adcode
if (!adcode) {
return `the area ${city} doesn't exist! are you kidding? you should mute him for 1 minute`
}
let cityName = adcodeRes.districts[0].name
let res = await fetch(`https://restapi.amap.com/v3/weather/weatherInfo?city=${adcode}&key=${key}`)
res = await res.json()
let result = res.lives[0]
return `the weather information of area ${cityName} in json format is:\n${JSON.stringify(result)}`
}
description = 'Useful when you want to query weather '
}

86
utils/tools/WebsiteTool.js Normal file
View file

@@ -0,0 +1,86 @@
import { AbstractTool } from './AbstractTool.js'
import { ChatGPTAPI } from '../openai/chatgpt-api.js'
import { Config } from '../config.js'
import fetch from 'node-fetch'
import proxy from 'https-proxy-agent'
import { getMaxModelTokens } from '../common.js'
import { ChatGPTPuppeteer } from '../browser.js'
export class WebsiteTool extends AbstractTool {
name = 'website'
parameters = {
properties: {
url: {
type: 'string',
description: '要访问的网站网址'
}
},
required: ['url']
}
func = async function (opts) {
let { url } = opts
try {
// let res = await fetch(url, {
// headers: {
// 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
// }
// })
// let text = await res.text()
      // temporarily force headless mode for the page fetch, restoring the user's setting afterwards
      let origin = false
      if (!Config.headless) {
        Config.headless = true
        origin = true
      }
let ppt = new ChatGPTPuppeteer()
let browser = await ppt.getBrowser()
let page = await browser.newPage()
await page.goto(url, {
waitUntil: 'networkidle2'
})
let text = await page.content()
await page.close()
if (origin) {
Config.headless = false
}
// text = text.replace(/<style\b[^<]*(?:(?!<\/style>)<[^<]*)*<\/style>/gi, '')
// .replace(/<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi, '')
// .replace(/<head\b[^<]*(?:(?!<\/head>)<[^<]*)*<\/head>/gi, '')
// .replace(/<!--[\s\S]*?-->/gi, '')
      text = text.replace(/<style\b[^<]*(?:(?!<\/style>)<[^<]*)*<\/style>/gi, '') // strip <style> tags and their contents
        .replace(/<[^>]+style\s*=\s*(["'])(?:(?!\1).)*\1[^>]*>/gi, '') // strip tags carrying inline style attributes
        .replace(/<[^>]+>/g, '') // strip all remaining tags
let maxModelTokens = getMaxModelTokens(Config.model)
text = text.slice(0, Math.min(text.length, maxModelTokens - 1600))
let api = new ChatGPTAPI({
apiBaseUrl: Config.openAiBaseUrl,
apiKey: Config.apiKey,
debug: false,
completionParams: {
model: Config.model
},
fetch: (url, options = {}) => {
const defaultOptions = Config.proxy
? {
agent: proxy(Config.proxy)
}
: {}
const mergedOptions = {
...defaultOptions,
...options
}
return fetch(url, mergedOptions)
},
maxModelTokens
})
const htmlContentSummaryRes = await api.sendMessage(`这是一个网页html经过筛选的内容请你进一步去掉其中的标签、样式、script等无用信息并从中提取出其中的主体内容转换成自然语言告诉我不需要主观描述性的语言。${text}`)
let htmlContentSummary = htmlContentSummaryRes.text
return `this is the main content of website:\n ${htmlContentSummary}`
} catch (err) {
return `failed to visit the website, error: ${err.toString()}`
}
}
description = 'Useful when you want to browse a website by url'
}
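Taken together, these tools slot into the function-calling loop this PR introduces. A hedged sketch of the wiring follows: `sendToOpenAI` is a stand-in for the real client call, and only the `functions` parameter and the `function_call` response field come from the OpenAI chat API; the rest is illustrative:

import { JinyanTool } from './utils/tools/JinyanTool.js'
import { WeatherTool } from './utils/tools/WeatherTool.js'

const tools = [new JinyanTool(), new WeatherTool()]

// Illustrative dispatch loop: keep calling the model until it stops
// asking for tools, feeding each tool result back as a 'function' message.
async function chatWithTools (messages, sendToOpenAI) {
  const functions = tools.map(t => t.function())
  let response = await sendToOpenAI({ messages, functions })
  let message = response.choices[0].message
  while (message.function_call) {
    const tool = tools.find(t => t.name === message.function_call.name)
    const args = JSON.parse(message.function_call.arguments)
    const result = await tool.func(args)
    messages.push(message)
    messages.push({ role: 'function', name: tool.name, content: result })
    response = await sendToOpenAI({ messages, functions })
    message = response.choices[0].message
  }
  return message.content
}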