mirror of
https://github.com/ikechan8370/chatgpt-plugin.git
synced 2025-12-16 13:27:08 +00:00
Api3 (#149)
* feat: api3 WIP * feat: add support for proxy of openai * fix: if no proxy
This commit is contained in:
parent
ab021d0caa
commit
62c8d56e76
4 changed files with 115 additions and 4 deletions
13
apps/chat.js
13
apps/chat.js
|
|
@ -10,6 +10,7 @@ import { ChatGPTClient, BingAIClient } from '@waylaidwanderer/chatgpt-api'
|
|||
import { getMessageById, tryTimes, upsertMessage } from '../utils/common.js'
|
||||
import { ChatGPTPuppeteer } from '../utils/browser.js'
|
||||
import { KeyvFile } from 'keyv-file'
|
||||
import {OfficialChatGPTClient} from "../utils/message.js";
|
||||
// import puppeteer from '../utils/browser.js'
|
||||
// import showdownKatex from 'showdown-katex'
|
||||
const blockWords = Config.blockWords
|
||||
|
|
@ -402,6 +403,18 @@ export class chatgpt extends plugin {
|
|||
invocationId: response.invocationId,
|
||||
conversationSignature: response.conversationSignature
|
||||
}
|
||||
} else if (use === 'api3') {
|
||||
// official without cloudflare
|
||||
let accessToken = await redis.get('CHATGPT:TOKEN')
|
||||
if (!accessToken) {
|
||||
throw new Error('未绑定ChatGPT AccessToken,请使用#chatgpt设置token命令绑定token')
|
||||
}
|
||||
this.chatGPTApi = new OfficialChatGPTClient({
|
||||
accessToken,
|
||||
apiReverseUrl: Config.api,
|
||||
timeoutMs: 120000
|
||||
})
|
||||
return await this.chatGPTApi.sendMessage(prompt, conversation)
|
||||
} else {
|
||||
let completionParams = {}
|
||||
if (Config.model) {
|
||||
|
|
|
|||
|
|
@ -43,6 +43,11 @@ export class ChatgptManagement extends plugin {
|
|||
fnc: 'useReversedAPIBasedSolution',
|
||||
permission: 'master'
|
||||
},
|
||||
{
|
||||
reg: '^#chatgpt切换API3$',
|
||||
fnc: 'useReversedAPIBasedSolution2',
|
||||
permission: 'master'
|
||||
},
|
||||
{
|
||||
reg: '^#chatgpt切换(必应|Bing)$',
|
||||
fnc: 'useReversedBingSolution',
|
||||
|
|
@ -118,7 +123,12 @@ export class ChatgptManagement extends plugin {
|
|||
|
||||
async useReversedAPIBasedSolution (e) {
|
||||
await redis.set('CHATGPT:USE', 'apiReverse')
|
||||
await this.reply('已切换到基于第三方Reversed API的解决方案,如果已经对话过建议执行`#结束对话`避免引起404错误')
|
||||
await this.reply('已切换到基于第三方Reversed CompletionAPI的解决方案,如果已经对话过建议执行`#结束对话`避免引起404错误')
|
||||
}
|
||||
|
||||
async useReversedAPIBasedSolution2 (e) {
|
||||
await redis.set('CHATGPT:USE', 'api3')
|
||||
await this.reply('已切换到基于第三方Reversed ConversastionAPI的解决方案,如果已经对话过建议执行`#结束对话`避免引起404错误')
|
||||
}
|
||||
|
||||
async useReversedBingSolution (e) {
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
// 例如http://127.0.0.1:7890
|
||||
const PROXY = ''
|
||||
const PROXY = 'http://127.0.0.1:7890'
|
||||
const API_KEY = ''
|
||||
|
||||
export const Config = {
|
||||
|
|
@ -20,8 +20,12 @@ export const Config = {
|
|||
apiKey: API_KEY,
|
||||
// 模型名称,选填。如无特殊需求保持默认即可,会使用chatgpt-api库提供的当前可用的最适合的默认值。保底可用的是 text-davinci-003。当发现新的可用的chatGPT模型会更新这里的值
|
||||
// 20230211: text-chat-davinci-002-sh-alpha-aoruigiofdj83 中午存活了几分钟
|
||||
model: 'text-davinci-002-render',
|
||||
api: '',
|
||||
model: '',
|
||||
// ***********************************************************************************************************************************
|
||||
// 以下为API3方式的配置 *
|
||||
// ***********************************************************************************************************************************
|
||||
// from https://github.com/acheong08/ChatGPT
|
||||
api: 'https://chat.duti.tech/api/conversation',
|
||||
// ***********************************************************************************************************************************
|
||||
// 以下为API2方式的配置 *
|
||||
// ***********************************************************************************************************************************
|
||||
|
|
|
|||
84
utils/message.js
Normal file
84
utils/message.js
Normal file
|
|
@ -0,0 +1,84 @@
|
|||
import { v4 as uuidv4 } from 'uuid'
|
||||
import { Config } from '../config/index.js'
|
||||
import HttpsProxyAgent from 'https-proxy-agent'
|
||||
import _ from 'lodash'
|
||||
import fetch from 'node-fetch'
|
||||
/**
 * Minimal client for the official ChatGPT conversation API, optionally
 * routed through a reverse proxy (Config.api) and an HTTPS proxy
 * (Config.proxy). Authenticates with a user-supplied access token.
 */
export class OfficialChatGPTClient {
  /**
   * @param {Object} opts
   * @param {string} opts.accessToken - ChatGPT access token, sent as Bearer auth.
   * @param {string} [opts.apiReverseUrl] - reverse-proxy conversation endpoint;
   *   falls back to the official backend-api URL when empty.
   * @param {number} [opts.timeoutMs] - default per-request timeout in milliseconds.
   */
  constructor (opts = {}) {
    const {
      accessToken,
      apiReverseUrl,
      timeoutMs
    } = opts
    this._accessToken = accessToken
    this._apiReverseUrl = apiReverseUrl
    this._timeoutMs = timeoutMs
  }

  /**
   * Send one prompt and return the assistant's reply.
   *
   * @param {string} prompt - user message text.
   * @param {Object} [opts]
   * @param {number} [opts.timeoutMs] - per-call timeout override.
   * @param {string} [opts.conversationId] - existing conversation to continue;
   *   omitted from the request when absent so the server starts a new one.
   * @param {string} [opts.parentMessageId] - id of the message being replied to.
   * @param {string} [opts.messageId] - id to assign to the outgoing message.
   * @param {string} [opts.action] - API action, normally 'next'.
   * @returns {Promise<{text: string, conversationId: string, id: string, parentMessageId: string}>}
   * @throws {Error} when the API response carries no message (uses `detail` if present).
   */
  async sendMessage (prompt, opts = {}) {
    const {
      timeoutMs = this._timeoutMs,
      // FIX: do not default conversationId to a random uuid — the server
      // rejects unknown conversation ids; omit the field for new conversations.
      conversationId,
      parentMessageId = uuidv4(),
      messageId = uuidv4(),
      action = 'next'
    } = opts
    // FIX: the original created an AbortController but never armed it, so
    // timeoutMs was silently ignored. Arm a timer that aborts the fetch.
    let abortController = null
    let timer = null
    if (timeoutMs) {
      abortController = new AbortController()
      timer = setTimeout(() => abortController.abort(), timeoutMs)
    }
    const url = this._apiReverseUrl || 'https://chat.openai.com/backend-api/conversation'
    const body = {
      action,
      messages: [
        {
          id: messageId,
          role: 'user',
          content: {
            content_type: 'text',
            parts: [prompt]
          }
        }
      ],
      // NOTE(review): the original ternary on Config.plus had two identical
      // branches; collapsed to one value until the intended Plus model name
      // is confirmed.
      model: 'text-davinci-002-render-sha',
      parent_message_id: parentMessageId
    }
    if (conversationId) {
      // FIX: the backend expects snake_case `conversation_id` (compare
      // `parent_message_id` above); the original sent camelCase
      // `conversationId`, which the server ignored, breaking continuity.
      body.conversation_id = conversationId
    }
    const option = {
      method: 'POST',
      body: JSON.stringify(body),
      signal: abortController?.signal,
      headers: {
        accept: 'text/event-stream',
        'x-openai-assistant-app-id': '',
        authorization: `Bearer ${this._accessToken}`,
        'content-type': 'application/json',
        referer: 'https://chat.openai.com/chat'
      },
      referrer: 'https://chat.openai.com/chat'
    }
    if (Config.proxy) {
      option.agent = new HttpsProxyAgent(Config.proxy)
    }
    try {
      const res = await fetch(url, option)
      const decoder = new TextDecoder('utf-8')
      const bodyBytes = await res.arrayBuffer()
      const bodyText = decoder.decode(bodyBytes)
      // The reply is an SSE stream; the final event is '[DONE]', so the
      // complete message lives in the second-to-last non-empty event.
      const events = bodyText.split('\n\n').filter(item => !_.isEmpty(item))
      let fullResponse = events[events.length - 2]
      // FIX: lodash trimStart(str, chars) treats chars as a character *set*,
      // not a prefix — use an explicit prefix strip instead.
      if (typeof fullResponse === 'string' && fullResponse.startsWith('data: ')) {
        fullResponse = fullResponse.slice('data: '.length)
      }
      if (Config.debug) {
        logger.debug(fullResponse)
      }
      fullResponse = JSON.parse(fullResponse)
      if (!fullResponse.message) {
        // FIX: typo 'unkown' -> 'unknown'
        throw new Error(fullResponse.detail || 'unknown error')
      }
      return {
        text: fullResponse.message.content.parts[0],
        conversationId: fullResponse.conversation_id,
        id: fullResponse.message.id,
        parentMessageId
      }
    } finally {
      // Always release the timeout timer so it cannot fire after completion
      // or keep the event loop alive.
      if (timer) {
        clearTimeout(timer)
      }
    }
  }
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue