Mirror of https://github.com/ikechan8370/chatgpt-plugin.git (synced 2025-12-17 22:07:10 +00:00)
Added: HD voice; reply "#chatgpt开启高清语音" to enable it
1. Added: HD voice; reply "#chatgpt开启高清语音" to enable it. 2. Optimized: in voice mode, overly long replies fall back to picture mode.
This commit is contained in:
parent 15f579aab8
commit 3364dbd89d
4 changed files with 73 additions and 14 deletions
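
At a glance, the commit wires a new Config.ttsHD flag from a chat command down to the voice-upload path, and gates voice replies on a length threshold. The sketch below is an editor's illustration of that flow using only names visible in the diff (Config.ttsHD, autoUsePictureThreshold, getPttBuffer); the literal values and standalone variables are invented for the example and are not code from the repository.

// Editor's sketch (illustrative, not part of the commit): how the new switches interact.
const Config = { ttsHD: false, autoUsePictureThreshold: 300 }   // values invented for the example

// "#chatgpt开启高清语音" → enableTtsHD flips the flag based on the message text:
const msg = '#chatgpt开启高清语音'
Config.ttsHD = msg.indexOf('开启') > -1                          // true

// Voice is only used for short enough replies; longer text falls back to picture mode:
const useTTS = true
const response = 'some reply text'
const useVoice = useTTS && response.length <= Config.autoUsePictureThreshold

// HD voice calls getPttBuffer with transcoding = false, i.e. the audio is not
// converted to silk, which is why PC clients cannot play the result:
const transcoding = !Config.ttsHD
console.log({ hd: Config.ttsHD, useVoice, transcoding })
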
@@ -1113,7 +1113,7 @@ export class chatgpt extends plugin {
       for (let quote of quotemessage) {
         if (quote.imageLink) imgUrls.push(quote.imageLink)
       }
-      if (useTTS) {
+      if (useTTS && response.length <= Config.autoUsePictureThreshold) {
         // cache the data
         this.cacheContent(e, use, response, prompt, quotemessage, mood, chatMessage.suggestedResponses, imgUrls)
         if (response === 'Thanks for this conversation! I\'ve reached my limit, will you hit “New topic,” please?') {
@@ -184,6 +184,11 @@ export class ChatgptManagement extends plugin {
           fnc: 'setDefaultReplySetting',
           permission: 'master'
         },
+        {
+          reg: '^#chatgpt(开启|关闭)高清语音',
+          fnc: 'enableTtsHD',
+          permission: 'master'
+        },
         {
           /** command regex */
           reg: '^#(关闭|打开)群聊上下文$',
@@ -483,7 +488,14 @@ export class ChatgptManagement extends plugin {
     await this.reply('设置成功', e.isGroup)
     return false
   }

+  async enableTtsHD (e) {
+    Config.ttsHD = e.msg.indexOf('开启') > -1
+    if (Config.ttsHD) {
+      await this.reply('已开启高清语音,电脑端将无法播放语音', true)
+    } else {
+      await this.reply('已关闭高清语音', true)
+    }
+  }
   async enableGroupContext (e) {
     const reg = /(关闭|打开)/
     const match = e.msg.match(reg)
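
As a usage note (an editor's sketch, not taken from the commit): the rule registered above routes both the enable and disable forms of the command to enableTtsHD, which simply checks whether the message contains 开启 (enable). The reply strings mean roughly "HD voice enabled; PC clients will not be able to play voice messages" and "HD voice disabled".

// Editor's illustration of the command routing; inputs are hard-coded examples.
const rule = /^#chatgpt(开启|关闭)高清语音/
console.log(rule.test('#chatgpt开启高清语音'))            // true → enableTtsHD sets Config.ttsHD = true
console.log(rule.test('#chatgpt关闭高清语音'))            // true → enableTtsHD sets Config.ttsHD = false
console.log('#chatgpt关闭高清语音'.indexOf('开启') > -1)   // false, so the flag is cleared
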
@@ -114,6 +114,7 @@ const defaultConfig = {
   cloudMode: 'url',
   cloudDPR: 1,
   ttsMode: 'vits-uma-genshin-honkai', // or azure
+  ttsHD: false,
   azureTTSKey: '',
   azureTTSRegion: '',
   azureTTSSpeaker: 'zh-CN-XiaochenNeural',
@@ -27,7 +27,9 @@ if (module) {
   try {
     pcm2slk = (await import('node-silk')).pcm2slk
   } catch (e) {
-    if (Config.cloudTranscode) {
+    if (Config.ttsHD) {
+      logger.info('已开启高清语音,电脑端将无法播放语音')
+    } else if (Config.cloudTranscode) {
       logger.warn('未安装node-silk,将尝试使用云转码服务进行合成')
     } else {
       Config.debug && logger.error(e)
@@ -49,8 +51,10 @@ async function uploadRecord (recordUrl, ttsMode = 'vits-uma-genshin-honkai') {
     tmpFile = `data/chatgpt/tts/tmp/${crypto.randomUUID()}.wav`
   }
   let result
-  if (pcm2slk) {
-    result = await getPttBuffer(recordUrl, Bot.config.ffmpeg_path)
+  if (Config.ttsHD) {
+    result = await getPttBuffer(recordUrl, Bot.config.ffmpeg_path, false)
+  } else if (pcm2slk) {
+    result = await getPttBuffer(recordUrl, Bot.config.ffmpeg_path, true)
   } else if (Config.cloudTranscode) {
     logger.mark('使用云转码silk进行高清语音生成:"')
     try {
@@ -196,21 +200,30 @@ async function uploadRecord (recordUrl, ttsMode = 'vits-uma-genshin-honkai') {

 export default uploadRecord

-async function getPttBuffer (file, ffmpeg = 'ffmpeg') {
+async function getPttBuffer (file, ffmpeg = 'ffmpeg', transcoding = true) {
   let buffer
+  let time
   if (file instanceof Buffer || file.startsWith('base64://')) {
     // Buffer or base64
     const buf = file instanceof Buffer ? file : Buffer.from(file.slice(9), 'base64')
     const head = buf.slice(0, 7).toString()
-    if (head.includes('SILK') || head.includes('AMR')) {
-      return buf
-    } else {
-      const tmpfile = TMP_DIR + '/' + (0, uuid)()
+    if (head.includes('SILK') || head.includes('AMR') || !transcoding) {
+      const tmpfile = path.join(TMP_DIR, (0, uuid)())
       await fs.promises.writeFile(tmpfile, buf)
-      return audioTrans(tmpfile, ffmpeg)
+      let result = await getAudioTime(tmpfile, ffmpeg)
+      if (result.code == 1) time = result.data
+      fs.unlink(tmpfile, NOOP)
+      buffer = buf
+    } else {
+      const tmpfile = path.join(TMP_DIR, (0, uuid)())
+      let result = await getAudioTime(tmpfile, ffmpeg)
+      if (result.code == 1) time = result.data
+      await fs.promises.writeFile(tmpfile, buf)
+      buffer = await audioTrans(tmpfile, ffmpeg)
     }
   } else if (file.startsWith('http://') || file.startsWith('https://')) {
     // network file
     // const readable = (await axios.get(file, { responseType: "stream" })).data;
     try {
       const headers = {
         'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 12; MI 9 Build/SKQ1.211230.001)'
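
The 7-byte head check in the hunk above is what the new transcoding flag piggybacks on: audio that is already silk/AMR-encoded, or any audio when transcoding is false (HD voice), is kept as-is instead of being converted. A small self-contained illustration follows; the buffer contents are fabricated, and only the slice/includes logic mirrors the code above.

// Editor's sketch: the first 7 bytes identify already-encoded audio.
const buf = Buffer.from('#!SILK_V3 fake payload for illustration only')
const head = buf.slice(0, 7).toString()                       // '#!SILK_'
const transcoding = false                                     // Config.ttsHD === true
const keepOriginal = head.includes('SILK') || head.includes('AMR') || !transcoding
console.log({ head, keepOriginal })                           // keepOriginal: true
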
@@ -220,11 +233,14 @@ async function getPttBuffer (file, ffmpeg = 'ffmpeg') {
         headers
       })
       const buf = Buffer.from(await response.arrayBuffer())
-      const tmpfile = TMP_DIR + '/' + (0, uuid)()
+      const tmpfile = path.join(TMP_DIR, (0, uuid)())
       await fs.promises.writeFile(tmpfile, buf)
       // await (0, pipeline)(readable.pipe(new DownloadTransform), fs.createWriteStream(tmpfile));
       const head = await read7Bytes(tmpfile)
-      if (head.includes('SILK') || head.includes('AMR')) {
+      let result = await getAudioTime(tmpfile, ffmpeg)
+      if (result.code == 1) time = result.data
+      if (head.includes('SILK') || head.includes('AMR') || !transcoding) {
         // const buf = await fs.promises.readFile(tmpfile);
         fs.unlink(tmpfile, NOOP)
         buffer = buf
       } else {
@@ -236,7 +252,9 @@ async function getPttBuffer (file, ffmpeg = 'ffmpeg') {
     file = String(file).replace(/^file:\/{2}/, '')
     IS_WIN && file.startsWith('/') && (file = file.slice(1))
     const head = await read7Bytes(file)
-    if (head.includes('SILK') || head.includes('AMR')) {
+    let result = await getAudioTime(file, ffmpeg)
+    if (result.code == 1) time = result.data
+    if (head.includes('SILK') || head.includes('AMR') || !transcoding) {
       buffer = await fs.promises.readFile(file)
     } else {
       buffer = await audioTrans(file, ffmpeg)
@@ -245,6 +263,34 @@ async function getPttBuffer (file, ffmpeg = 'ffmpeg') {
   return { buffer, time }
 }

+async function getAudioTime (file, ffmpeg = 'ffmpeg') {
+  return new Promise((resolve, _reject) => {
+    (0, child_process.exec)(`${ffmpeg} -i "${file}"`, async (_error, _stdout, stderr) => {
+      try {
+        let time = stderr.split('Duration:')[1]?.split(',')[0].trim()
+        let arr = time?.split(':')
+        arr.reverse()
+        let n = 1
+        let s = 0
+        for (let val of arr) {
+          if (parseInt(val) > 0) s += parseInt(val) * n
+          n *= 60
+        }
+        resolve({
+          code: 1,
+          data: {
+            time,
+            seconds: s,
+            exec_text: stderr
+          }
+        })
+      } catch {
+        resolve({ code: -1 })
+      }
+    })
+  })
+}
+
 async function audioTrans (file, ffmpeg = 'ffmpeg') {
   const tmpfile = path.join(TMP_DIR, uuid())
   const cmd = IS_WIN
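
getAudioTime shells out to ffmpeg -i and scrapes the Duration: field from stderr; reversing the HH:MM:SS parts and multiplying by growing powers of 60 yields the length in seconds. Below is a standalone illustration of that arithmetic; the stderr line is fabricated and no ffmpeg call is made.

// Editor's sketch of the duration parsing used in getAudioTime above.
const stderr = '  Duration: 00:01:23.45, start: 0.000000, bitrate: 64 kb/s'
const time = stderr.split('Duration:')[1]?.split(',')[0].trim()   // '00:01:23.45'
const arr = time.split(':')
arr.reverse()                                                     // ['23.45', '01', '00'] → seconds, minutes, hours
let n = 1
let s = 0
for (const val of arr) {
  if (parseInt(val) > 0) s += parseInt(val) * n
  n *= 60
}
console.log(s)                                                    // 83 (1 min 23 s, fraction truncated)
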