Mirror of https://github.com/ikechan8370/chatgpt-plugin.git
Synced 2025-12-17 05:47:11 +00:00
feat: officially add voice mode support (正式增加语音模式支持)
parent 41ca23dc85
commit 5ddeebfcc0
6 changed files with 124 additions and 25 deletions
apps/chat.js (90 changed lines)
@@ -6,7 +6,15 @@ import delay from 'delay'
 import { ChatGPTAPI } from 'chatgpt'
 import { BingAIClient } from '@waylaidwanderer/chatgpt-api'
 import SydneyAIClient from '../utils/SydneyAIClient.js'
-import { render, getMessageById, makeForwardMsg, tryTimes, upsertMessage, randomString } from '../utils/common.js'
+import {
+  render,
+  getMessageById,
+  makeForwardMsg,
+  tryTimes,
+  upsertMessage,
+  randomString,
+  getDefaultUserSetting
+} from '../utils/common.js'
 import { ChatGPTPuppeteer } from '../utils/browser.js'
 import { KeyvFile } from 'keyv-file'
 import { OfficialChatGPTClient } from '../utils/message.js'
@@ -96,6 +104,14 @@ export class chatgpt extends plugin {
          reg: '^#chatgpt文本模式$',
          fnc: 'switch2Text'
        },
+        {
+          reg: '^#chatgpt语音模式$',
+          fnc: 'switch2Audio'
+        },
+        {
+          reg: '^#chatgpt设置语音角色',
+          fnc: 'setDefaultRole'
+        },
        {
          reg: '^#(chatgpt)清空(chat)?队列$',
          fnc: 'emptyQueue',
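Note (not part of the diff): the entries above follow the Yunzai plugin rule format, where reg is a regular expression matched against the incoming message and fnc names the class method to invoke. Below is a rough, hypothetical sketch of that dispatch for readers unfamiliar with the framework; the real framework also handles priority, permissions and event filtering.

// Hypothetical mini-dispatcher illustrating how the rule entries behave.
// The actual dispatch is done by the Yunzai framework, not by this commit.
const rules = [
  { reg: '^#chatgpt语音模式$', fnc: 'switch2Audio' },
  { reg: '^#chatgpt设置语音角色', fnc: 'setDefaultRole' }
]

function dispatch (pluginInstance, e) {
  for (const r of rules) {
    if (new RegExp(r.reg).test(e.msg)) {
      return pluginInstance[r.fnc](e) // e.g. "#chatgpt语音模式" -> switch2Audio(e)
    }
  }
}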
@@ -175,7 +191,7 @@ export class chatgpt extends plugin {
         await this.reply('依赖keyv未安装,请执行pnpm install keyv', true)
       }
       const conversationsCache = new Keyv(conversation)
-      console.log(`SydneyUser_${e.sender.user_id}`, await conversationsCache.get(`SydneyUser_${e.sender.user_id}`))
+      logger.info(`SydneyUser_${e.sender.user_id}`, await conversationsCache.get(`SydneyUser_${e.sender.user_id}`))
       await conversationsCache.delete(`SydneyUser_${e.sender.user_id}`)
       await this.reply('已退出当前对话,该对话仍然保留。请@我进行聊天以开启新的对话', true)
     } else {
@@ -313,11 +329,12 @@ export class chatgpt extends plugin {
   async switch2Picture (e) {
     let userSetting = await redis.get(`CHATGPT:USER:${e.sender.user_id}`)
     if (!userSetting) {
-      userSetting = { usePicture: true }
+      userSetting = getDefaultUserSetting()
     } else {
       userSetting = JSON.parse(userSetting)
     }
     userSetting.usePicture = true
+    userSetting.useTTS = false
     await redis.set(`CHATGPT:USER:${e.sender.user_id}`, JSON.stringify(userSetting))
     await this.reply('ChatGPT回复已转换为图片模式')
   }
@@ -325,15 +342,49 @@ export class chatgpt extends plugin {
   async switch2Text (e) {
     let userSetting = await redis.get(`CHATGPT:USER:${e.sender.user_id}`)
     if (!userSetting) {
-      userSetting = { usePicture: false }
+      userSetting = getDefaultUserSetting()
     } else {
       userSetting = JSON.parse(userSetting)
     }
     userSetting.usePicture = false
+    userSetting.useTTS = false
     await redis.set(`CHATGPT:USER:${e.sender.user_id}`, JSON.stringify(userSetting))
     await this.reply('ChatGPT回复已转换为文字模式')
   }
 
+  async switch2Audio (e) {
+    if (!Config.ttsSpace) {
+      await this.reply('您没有配置VITS API,请前往锅巴面板进行配置')
+      return
+    }
+    let userSetting = await redis.get(`CHATGPT:USER:${e.sender.user_id}`)
+    if (!userSetting) {
+      userSetting = getDefaultUserSetting()
+    } else {
+      userSetting = JSON.parse(userSetting)
+    }
+    userSetting.useTTS = true
+    await redis.set(`CHATGPT:USER:${e.sender.user_id}`, JSON.stringify(userSetting))
+    await this.reply('ChatGPT回复已转换为语音模式')
+  }
+
+  async setDefaultRole (e) {
+    if (!Config.ttsSpace) {
+      await this.reply('您没有配置VITS API,请前往锅巴面板进行配置')
+      return
+    }
+    let userSetting = await redis.get(`CHATGPT:USER:${e.sender.user_id}`)
+    if (!userSetting) {
+      userSetting = getDefaultUserSetting()
+    } else {
+      userSetting = JSON.parse(userSetting)
+    }
+    let speaker = _.trimStart(e.msg, '#chatgpt设置语音角色') || '随机'
+    userSetting.ttsRole = convertSpeaker(speaker)
+    await redis.set(`CHATGPT:USER:${e.sender.user_id}`, JSON.stringify(userSetting))
+    await this.reply(`您的默认语音角色已被设置为”${userSetting.ttsRole}“`)
+  }
+
   /**
    * #chatgpt
    * @param e oicq传递的事件参数e
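The new handlers rely on getDefaultUserSetting() from utils/common.js, whose hunk is not shown on this page. Below is a minimal sketch of what it plausibly returns, assuming it simply seeds a per-user settings object from the global Config defaults referenced elsewhere in this commit (defaultUsePicture, defaultUseTTS, defaultTTSRole); the import path and exact shape are assumptions, not the commit's actual code.

// utils/common.js (sketch, assumed): default per-user settings.
// Field names come from what this diff reads and writes; the implementation
// itself is a guess.
import { Config } from './config.js' // path assumed

export function getDefaultUserSetting () {
  return {
    usePicture: Config.defaultUsePicture, // reply as rendered image
    useTTS: Config.defaultUseTTS,         // reply as synthesized voice
    ttsRole: Config.defaultTTSRole        // default VITS speaker
  }
}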
@@ -354,8 +405,18 @@ export class chatgpt extends plugin {
           return false
         }
       }
-      let useTTS = false
-      let speaker = ''
+      let userSetting = await redis.get(`CHATGPT:USER:${e.sender.user_id}`)
+      if (userSetting) {
+        userSetting = JSON.parse(userSetting)
+        if (Object.keys(userSetting).indexOf('useTTS') < 0) {
+          userSetting.useTTS = Config.defaultUseTTS
+        }
+      } else {
+        userSetting = getDefaultUserSetting()
+      }
+      let useTTS = !!userSetting.useTTS
+      let speaker = convertSpeaker(userSetting.ttsRole || Config.defaultTTSRole)
+      // 每个回答可以指定
       let trySplit = prompt.split('回答:')
       if (trySplit.length > 1 && speakers.indexOf(convertSpeaker(trySplit[0])) > -1) {
         useTTS = true
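The '回答:' split above lets a single message pick its own voice, e.g. '纳西妲回答:今天天气怎么样'. Below is a self-contained illustration of that prefix convention; the speakers list and convertSpeaker() used by the plugin are replaced by a toy list here, so this is an example of the idea rather than the plugin's code.

// Toy illustration of the "<speaker>回答:<question>" prefix convention.
const knownSpeakers = ['纳西妲', '派蒙'] // example values, not from the diff

function parseSpeakerPrefix (prompt) {
  const trySplit = prompt.split('回答:')
  if (trySplit.length > 1 && knownSpeakers.includes(trySplit[0])) {
    // first segment names a known speaker: force TTS with that voice
    return { speaker: trySplit[0], text: trySplit.slice(1).join('回答:') }
  }
  return { speaker: null, text: prompt }
}

console.log(parseSpeakerPrefix('纳西妲回答:今天天气怎么样'))
// -> { speaker: '纳西妲', text: '今天天气怎么样' }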
@@ -445,11 +506,6 @@ export class chatgpt extends plugin {
         }
       }
       logger.info(`chatgpt prompt: ${prompt}`)
-      // try {
-      //   await this.chatGPTApi.init()
-      // } catch (e) {
-      //   await this.reply('chatgpt初始化出错:' + e.msg, true)
-      // }
       let previousConversation
       let conversation = {}
       if (use === 'api3') {
@@ -537,14 +593,7 @@ export class chatgpt extends plugin {
         await this.reply('返回内容存在敏感词,我不想回答你', true)
         return false
       }
-      let userSetting = await redis.get(`CHATGPT:USER:${e.sender.user_id}`)
-      if (userSetting) {
-        userSetting = JSON.parse(userSetting)
-      } else {
-        userSetting = {
-          usePicture: Config.defaultUsePicture
-        }
-      }
+
       let quotemessage = []
       if (chatMessage?.quote) {
         chatMessage.quote.forEach(function (item, index) {
@@ -554,7 +603,7 @@ export class chatgpt extends plugin {
         })
       }
       if (useTTS) {
-        if (Config.ttsSpace && response.length <= 99) {
+        if (Config.ttsSpace && response.length <= 299) {
           let wav = await generateAudio(response, speaker, '中文')
           e.reply(segment.record(wav))
         } else {
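With this change a voice reply is only attempted when a VITS space is configured and the response fits the raised 299-character cap; longer answers fall back to the existing text/image path. Below is a condensed sketch of that decision with the fallback simplified; generateAudio and segment.record are the plugin's own helpers and are only assumed to behave as they are used above.

// Condensed sketch of the reply-mode decision; the real fallback branch is
// more involved (text vs. rendered image) and is not shown in this hunk.
async function deliverReply (e, response, useTTS, speaker) {
  if (useTTS && Config.ttsSpace && response.length <= 299) {
    const wav = await generateAudio(response, speaker, '中文') // VITS synthesis helper
    return e.reply(segment.record(wav))                        // send as a voice record
  }
  return e.reply(response) // simplified fallback: plain text
}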
@@ -885,7 +934,6 @@ export class chatgpt extends plugin {
       .then(response => response.json())
       .then(data => {
         if (data.error) {
-          // console.log(data.error)
           this.reply('获取失败:' + data.error.code)
           return false
         } else {
apps/help.js (10 changed lines)
@@ -26,6 +26,16 @@ let helpData = [
       title: '#chatgpt文本模式',
       desc: '机器人以文本形式回答,默认选项'
     },
+    {
+      icon: 'text',
+      title: '#chatgpt语音模式',
+      desc: '机器人以语音形式回答'
+    },
+    {
+      icon: 'text',
+      title: '#chatgpt设置语音角色',
+      desc: '设置语音模式下回复的角色音色'
+    },
     {
       icon: 'text',
       title: '#chatgpt画图+prompt(/张数/图片大小)',