diff --git a/dist/buildinfo.json b/dist/buildinfo.json index a7e47d52..2391f786 100644 --- a/dist/buildinfo.json +++ b/dist/buildinfo.json @@ -1 +1 @@ -{"sha": "9273c89", "timestamp": 1705300786} +{"sha": "4d3903e", "timestamp": 1707027852} diff --git a/dist/index.js b/dist/index.js index 82aeb624..e55e654a 100644 --- a/dist/index.js +++ b/dist/index.js @@ -3,9 +3,9 @@ var Environment = class { // -- 版本数据 -- // // 当前版本 - BUILD_TIMESTAMP = 1705300786; + BUILD_TIMESTAMP = 1707027852; // 当前版本 commit id - BUILD_VERSION = "9273c89"; + BUILD_VERSION = "4d3903e"; // -- 基础配置 -- /** * @type {I18n | null} @@ -899,6 +899,12 @@ async function makeResponse200(resp) { }); } } +function isJsonResponse(resp) { + return resp.headers.get("content-type").indexOf("json") !== -1; +} +function isEventStreamResponse(resp) { + return resp.headers.get("content-type").indexOf("text/event-stream") !== -1; +} // src/vendors/stream.js var Stream = class { @@ -1064,8 +1070,8 @@ var LineDecoder = class { return lines; } }; -LineDecoder.NEWLINE_CHARS = /* @__PURE__ */ new Set(["\n", "\r", "\v", "\f", "", "", "", "\x85", "\u2028", "\u2029"]); -LineDecoder.NEWLINE_REGEXP = /\r\n|[\n\r\x0b\x0c\x1c\x1d\x1e\x85\u2028\u2029]/g; +LineDecoder.NEWLINE_CHARS = /* @__PURE__ */ new Set(["\n", "\r"]); +LineDecoder.NEWLINE_REGEXP = /\r\n|[\n\r]/g; function partition(str, delimiter) { const index = str.indexOf(delimiter); if (index !== -1) { @@ -1125,14 +1131,14 @@ async function requestCompletionsFromOpenAI(message, history, context, onStream) body: JSON.stringify(body), signal }); - if (onStream && resp.ok && resp.headers.get("content-type").indexOf("text/event-stream") !== -1) { + if (onStream && resp.ok && isEventStreamResponse(resp)) { const stream = new Stream(resp, controller); let contentFull = ""; let lengthDelta = 0; let updateStep = 20; try { for await (const data of stream) { - const c = data.choices[0].delta?.content || ""; + const c = data?.choices?.[0]?.delta?.content || ""; lengthDelta += 
c.length; contentFull = contentFull + c; if (lengthDelta > updateStep) { @@ -1148,7 +1154,20 @@ ERROR: ${e.message}`; } return contentFull; } + if (!isJsonResponse(resp)) { + if (ENV.DEBUG_MODE || ENV.DEV_MODE) { + throw new Error(`OpenAI API Error +> ${resp.statusText} +Body: ${await resp.text()}`); + } else { + throw new Error(`OpenAI API Error +> ${resp.statusText}`); + } + } const result = await resp.json(); + if (!result) { + throw new Error("Empty response"); + } if (result.error?.message) { if (ENV.DEBUG_MODE || ENV.DEV_MODE) { throw new Error(`OpenAI API Error @@ -1159,13 +1178,10 @@ Body: ${JSON.stringify(body)}`); > ${result.error.message}`); } } - setTimeout(() => updateBotUsage(result.usage, context).catch(console.error), 0); try { + setTimeout(() => updateBotUsage(result?.usage, context).catch(console.error), 0); return result.choices[0].message.content; } catch (e) { - if (!result) { - throw new Error("Empty response"); - } throw Error(result?.error?.message || JSON.stringify(result)); } } @@ -1231,7 +1247,7 @@ async function updateBotUsage(usage, context) { await DATABASE.put(context.SHARE_CONTEXT.usageKey, JSON.stringify(dbValue)); } -// src/workers-ai.js +// src/workersai.js async function run(model, body) { const id = ENV.CLOUDFLARE_ACCOUNT_ID; const token = ENV.CLOUDFLARE_TOKEN; @@ -1255,7 +1271,7 @@ async function requestCompletionsFromWorkersAI(message, history, context, onStre }; const resp = await run(model, request); const controller = new AbortController(); - if (onStream && resp.ok && resp.headers.get("content-type").indexOf("text/event-stream") !== -1) { + if (onStream && resp.ok && isEventStreamResponse(resp)) { const stream = new Stream(resp, controller); let contentFull = ""; let lengthDelta = 0; @@ -1522,7 +1538,12 @@ async function chatWithLLM(text, context, modifier) { } return sendMessageToTelegramWithContext(context)(answer); } catch (e) { - return sendMessageToTelegramWithContext(context)(`Error: ${e.message}`); + let errMsg = 
`Error: ${e.message}`; + if (errMsg.length > 2048) { + errMsg = errMsg.substring(0, 2048); + } + context.CURRENT_CHAT_CONTEXT.disable_web_page_preview = true; + return sendMessageToTelegramWithContext(context)(errMsg); } } diff --git a/dist/timestamp b/dist/timestamp index 6e157c85..d1ef4ae6 100644 --- a/dist/timestamp +++ b/dist/timestamp @@ -1 +1 @@ -1705300786 +1707027852 diff --git a/doc/cn/CONFIG.md b/doc/cn/CONFIG.md index a76c3845..5bb418b6 100644 --- a/doc/cn/CONFIG.md +++ b/doc/cn/CONFIG.md @@ -48,7 +48,7 @@ | API_KEY | OpenAI API Key | `null` | 可以同时使用多个key,使用的时候会随机选择一个 | | CHAT_MODEL | open ai 模型选择 | `gpt-3.5-turbo` | | | OPENAI_API_DOMAIN | OPENAI API Domain [废弃: 使用 OPENAI_API_BASE] | `https://api.openai.com` | 可以替换为其他与OpenAI API兼容的其他服务商的域名 | -| OPENAI_API_DOMAIN | OPENAI API Base URL | `https://api.openai.com/v1` | 兼容Cloudflare AI 网关 | +| OPENAI_API_BASE | OPENAI API Base URL | `https://api.openai.com/v1` | 兼容Cloudflare AI 网关 | | - | - | - | - | | AZURE_API_KEY | azure api key | `null` | 支持azure的API,两个密钥随便选一个就可以。如果你要默认使用azure,你可以设置`AI_PROVIDER`为`azure` | | AZURE_COMPLETIONS_API | azure api url | `null` | 格式`https://YOUR_RESOURCE_NAME.openai.azure.com/openai/deployments/YOUR_DEPLOYMENT_NAME/chat/completions?api-version=2023-05-15` | diff --git a/src/llm.js b/src/llm.js index c7543dd2..fed628c4 100644 --- a/src/llm.js +++ b/src/llm.js @@ -8,7 +8,7 @@ import {DATABASE, ENV} from './env.js'; import {Context} from './context.js'; import {isAzureEnable, isOpenAIEnable, requestCompletionsFromOpenAI, requestImageFromOpenAI} from './openai.js'; import {tokensCounter} from './utils.js'; -import {isWorkersAIEnable, requestCompletionsFromWorkersAI, requestImageFromWorkersAI} from './workers-ai.js'; +import {isWorkersAIEnable, requestCompletionsFromWorkersAI, requestImageFromWorkersAI} from './workersai.js'; import {isGeminiAIEnable, requestCompletionsFromGeminiAI} from './gemini.js'; @@ -242,6 +242,11 @@ export async function chatWithLLM(text, context, modifier) { 
} return sendMessageToTelegramWithContext(context)(answer); } catch (e) { - return sendMessageToTelegramWithContext(context)(`Error: ${e.message}`); + let errMsg = `Error: ${e.message}`; + if (errMsg.length > 2048) { // 裁剪错误信息 最长2048 + errMsg = errMsg.substring(0, 2048); + } + context.CURRENT_CHAT_CONTEXT.disable_web_page_preview = true; + return sendMessageToTelegramWithContext(context)(errMsg); } } diff --git a/src/openai.js b/src/openai.js index aafc0ab0..a8010ef6 100644 --- a/src/openai.js +++ b/src/openai.js @@ -1,6 +1,7 @@ /* eslint-disable no-unused-vars */ import {Context} from './context.js'; import {DATABASE, ENV} from './env.js'; +import {isEventStreamResponse, isJsonResponse} from './utils.js'; import {Stream} from './vendors/stream.js'; @@ -89,14 +90,14 @@ export async function requestCompletionsFromOpenAI(message, history, context, on body: JSON.stringify(body), signal, }); - if (onStream && resp.ok && resp.headers.get('content-type').indexOf('text/event-stream') !== -1) { + if (onStream && resp.ok && isEventStreamResponse(resp)) { const stream = new Stream(resp, controller); let contentFull = ''; let lengthDelta = 0; let updateStep = 20; try { for await (const data of stream) { - const c = data.choices[0].delta?.content || ''; + const c = data?.choices?.[0]?.delta?.content || ''; lengthDelta += c.length; contentFull = contentFull + c; if (lengthDelta > updateStep) { @@ -110,8 +111,17 @@ export async function requestCompletionsFromOpenAI(message, history, context, on } return contentFull; } - + if (!isJsonResponse(resp)) { + if (ENV.DEBUG_MODE || ENV.DEV_MODE) { + throw new Error(`OpenAI API Error\n> ${resp.statusText}\nBody: ${await resp.text()}`); + } else { + throw new Error(`OpenAI API Error\n> ${resp.statusText}`); + } + } const result = await resp.json(); + if (!result) { + throw new Error('Empty response'); + } if (result.error?.message) { if (ENV.DEBUG_MODE || ENV.DEV_MODE) { throw new Error(`OpenAI API Error\n> ${result.error.message}\nBody: 
${JSON.stringify(body)}`); @@ -119,13 +129,10 @@ export async function requestCompletionsFromOpenAI(message, history, context, on throw new Error(`OpenAI API Error\n> ${result.error.message}`); } } - setTimeout(() => updateBotUsage(result.usage, context).catch(console.error), 0); try { + setTimeout(() => updateBotUsage(result?.usage, context).catch(console.error), 0); return result.choices[0].message.content; } catch (e) { - if (!result) { - throw new Error('Empty response'); - } throw Error(result?.error?.message || JSON.stringify(result)); } } diff --git a/src/utils.js b/src/utils.js index e0256d5d..7e70e7c5 100644 --- a/src/utils.js +++ b/src/utils.js @@ -184,3 +184,21 @@ export async function makeResponse200(resp) { }}); } } + +/** + * Check whether the response's Content-Type indicates a JSON body. + * @param {Response} resp + * @return {boolean} + */ +export function isJsonResponse(resp) { + return (resp.headers.get('content-type') || '').indexOf('json') !== -1; +} + +/** + * Check whether the response's Content-Type indicates a server-sent event stream. + * @param {Response} resp + * @return {boolean} + */ +export function isEventStreamResponse(resp) { + return (resp.headers.get('content-type') || '').indexOf('text/event-stream') !== -1; +} diff --git a/src/vendors/stream.js b/src/vendors/stream.js index 8daee40e..cba57edc 100644 --- a/src/vendors/stream.js +++ b/src/vendors/stream.js @@ -180,8 +180,8 @@ class LineDecoder { } } // prettier-ignore -LineDecoder.NEWLINE_CHARS = new Set(['\n', '\r', '\x0b', '\x0c', '\x1c', '\x1d', '\x1e', '\x85', '\u2028', '\u2029']); -LineDecoder.NEWLINE_REGEXP = /\r\n|[\n\r\x0b\x0c\x1c\x1d\x1e\x85\u2028\u2029]/g; +LineDecoder.NEWLINE_CHARS = new Set(['\n', '\r']); +LineDecoder.NEWLINE_REGEXP = /\r\n|[\n\r]/g; function partition(str, delimiter) { const index = str.indexOf(delimiter); if (index !== -1) { diff --git a/src/workers-ai.js b/src/workersai.js similarity index 96% rename from src/workers-ai.js rename to src/workersai.js index 93c6e632..4453eed6 100644 --- a/src/workers-ai.js +++ b/src/workersai.js @@ -1,6 +1,7 @@ /* eslint-disable no-unused-vars */ import {Context} from
'./context.js'; import {ENV} from './env.js'; +import {isEventStreamResponse} from './utils.js'; import {Stream} from './vendors/stream.js'; /** @@ -49,7 +50,7 @@ export async function requestCompletionsFromWorkersAI(message, history, context, const resp = await run(model, request); const controller = new AbortController(); - if (onStream && resp.ok && resp.headers.get('content-type').indexOf('text/event-stream') !== -1) { + if (onStream && resp.ok && isEventStreamResponse(resp)) { const stream = new Stream(resp, controller); let contentFull = ''; let lengthDelta = 0;