From d28beba32aadeacd2a8addec9333e9cf5b01ff97 Mon Sep 17 00:00:00 2001
From: tbxark
Date: Thu, 1 Aug 2024 09:49:21 +0800
Subject: [PATCH 01/24] feat: minimum interval between stream-mode messages, disabled by default
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/agent/request.js | 8 ++++++++
 src/config/env.js    | 2 ++
 2 files changed, 10 insertions(+)

diff --git a/src/agent/request.js b/src/agent/request.js
index 7a3cf5c0..ed732411 100644
--- a/src/agent/request.js
+++ b/src/agent/request.js
@@ -92,6 +92,7 @@ export async function requestChatCompletions(url, header, body, context, onStrea
     const {signal} = controller;

     let timeoutID = null;
+    let lastUpdateTime = Date.now();
     if (ENV.CHAT_COMPLETE_API_TIMEOUT > 0) {
         timeoutID = setTimeout(() => controller.abort(), ENV.CHAT_COMPLETE_API_TIMEOUT);
     }
@@ -123,6 +124,13 @@ export async function requestChatCompletions(url, header, body, context, onStrea
             lengthDelta += c.length;
             contentFull = contentFull + c;
             if (lengthDelta > updateStep) {
+                if (ENV.TELEGRAM_MIN_STREAM_INTERVAL > 0 ){
+                    const delta = Date.now() - lastUpdateTime;
+                    if (delta < ENV.TELEGRAM_MIN_STREAM_INTERVAL) {
+                        continue;
+                    }
+                    lastUpdateTime = Date.now();
+                }
                 lengthDelta = 0;
                 updateStep += 20;
                 await onStream(`${contentFull}\n...`);
diff --git a/src/config/env.js b/src/config/env.js
index 86498d68..75463088 100644
--- a/src/config/env.js
+++ b/src/config/env.js
@@ -132,6 +132,8 @@ class Environment {
     TELEGRAM_AVAILABLE_TOKENS = [];
     // 默认消息模式
     DEFAULT_PARSE_MODE = 'Markdown';
+    // 最小stream模式消息间隔,小于等于0则不限制
+    TELEGRAM_MIN_STREAM_INTERVAL = 0;

     // -- 权限相关 --
     //

From 0db94bbca43aaa728ec6ff70bb5cc433492e6370 Mon Sep 17 00:00:00 2001
From: tbxark
Date: Thu, 1 Aug 2024 10:57:35 +0800
Subject: [PATCH 02/24] perf: wrap the llm function arguments into LlmRequestParams
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 dist/buildinfo.json     |   2 +-
 dist/index.js           | 163 +++++++++++++++++++++++-----------------
 dist/timestamp          |   2 +-
 src/agent/agents.js     |  18 +----
 src/agent/anthropic.js  |   8 +-
 src/agent/azure.js      |   8 +-
 src/agent/cohere.js     |   7 +-
 src/agent/gemini.js     |   7 +-
 src/agent/llm.js        |  46 +++++++-----
 src/agent/mistralai.js  |   7 +-
 src/agent/openai.js     |   7 +-
 src/agent/workersai.js  |   7 +-
 src/telegram/command.js |   4 +-
 src/telegram/message.js | 112 ++++++++++++++-------------
 src/types/agent.js      |  35 +++++++++
 src/types/context.js    |   1 +
 16 files changed, 249 insertions(+), 185 deletions(-)
 create mode 100644 src/types/agent.js

diff --git a/dist/buildinfo.json b/dist/buildinfo.json
index 44bd908e..f2f8c3ac 100644
--- a/dist/buildinfo.json
+++ b/dist/buildinfo.json
@@ -1 +1 @@
-{"sha": "de66f3b", "timestamp": 1722427564}
+{"sha": "d28beba", "timestamp": 1722481015}
diff --git a/dist/index.js b/dist/index.js
index f7196956..730ef707 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -89,9 +89,9 @@ var Environment = class {
   // -- 版本数据 --
   //
   // 当前版本
-  BUILD_TIMESTAMP = 1722427564;
+  BUILD_TIMESTAMP = 1722481015;
   // 当前版本 commit id
-  BUILD_VERSION = "de66f3b";
+  BUILD_VERSION = "d28beba";
   // -- 基础配置 --
   /**
    * @type {I18n | null}
@@ -111,6 +111,8 @@ var Environment = class {
   TELEGRAM_AVAILABLE_TOKENS = [];
   // 默认消息模式
   DEFAULT_PARSE_MODE = "Markdown";
+  // 最小stream模式消息间隔,小于等于0则不限制
+  TELEGRAM_MIN_STREAM_INTERVAL = 0;
   // -- 权限相关 --
   //
   // 允许所有人使用
@@ -964,6 +966,7 @@ async function requestChatCompletions(url, header, body, context, onStream, onRe
   const controller = new AbortController();
   const { signal } = controller;
   let timeoutID = null;
+  let lastUpdateTime = Date.now();
   if (ENV.CHAT_COMPLETE_API_TIMEOUT > 0) {
     timeoutID = setTimeout(() => controller.abort(), ENV.CHAT_COMPLETE_API_TIMEOUT);
   }
@@ -991,6 +994,13 @@ async function requestChatCompletions(url, header, body, context, onStream, onRe
       lengthDelta += c.length;
       contentFull = contentFull + c;
       if (lengthDelta > updateStep) {
+        if (ENV.TELEGRAM_MIN_STREAM_INTERVAL > 0) {
+          const delta = Date.now() - lastUpdateTime;
+          if (delta < ENV.TELEGRAM_MIN_STREAM_INTERVAL) {
+            continue;
+          }
+          lastUpdateTime = Date.now();
+        }
         lengthDelta = 0;
         updateStep += 20;
         await onStream(`${contentFull}
@@ -1030,7 +1040,8 @@ function openAIKeyFromContext(context) {
 function isOpenAIEnable(context) {
   return context.USER_CONFIG.OPENAI_API_KEY.length > 0;
 }
-async function requestCompletionsFromOpenAI(message, prompt, history, context, onStream) {
+async function requestCompletionsFromOpenAI(params, context, onStream) {
+  const { message, prompt, history } = params;
   const url = `${context.USER_CONFIG.OPENAI_API_BASE}/chat/completions`;
   const messages = [...history || [], { role: "user", content: message }];
   if (prompt) {
@@ -1089,7 +1100,8 @@ async function run(model, body, id, token) {
 function isWorkersAIEnable(context) {
   return !!(context.USER_CONFIG.CLOUDFLARE_ACCOUNT_ID && context.USER_CONFIG.CLOUDFLARE_TOKEN);
 }
-async function requestCompletionsFromWorkersAI(message, prompt, history, context, onStream) {
+async function requestCompletionsFromWorkersAI(params, context, onStream) {
+  const { message, prompt, history } = params;
   const id = context.USER_CONFIG.CLOUDFLARE_ACCOUNT_ID;
   const token = context.USER_CONFIG.CLOUDFLARE_TOKEN;
   const model = context.USER_CONFIG.WORKERS_CHAT_MODEL;
@@ -1128,7 +1140,8 @@ async function requestImageFromWorkersAI(prompt, context) {
 function isGeminiAIEnable(context) {
   return !!context.USER_CONFIG.GOOGLE_API_KEY;
 }
-async function requestCompletionsFromGeminiAI(message, prompt, history, context, onStream) {
+async function requestCompletionsFromGeminiAI(params, context, onStream) {
+  const { message, prompt, history } = params;
   onStream = null;
   const url = `${context.USER_CONFIG.GOOGLE_COMPLETIONS_API}${context.USER_CONFIG.GOOGLE_COMPLETIONS_MODEL}:${onStream ? "streamGenerateContent" : "generateContent"}?key=${context.USER_CONFIG.GOOGLE_API_KEY}`;
   const contentsTemp = [...history || [], { role: "user", content: message }];
@@ -1179,7 +1192,8 @@ async function requestCompletionsFromGeminiAI(message, prompt, history, context,
 function isMistralAIEnable(context) {
   return !!context.USER_CONFIG.MISTRAL_API_KEY;
 }
-async function requestCompletionsFromMistralAI(message, prompt, history, context, onStream) {
+async function requestCompletionsFromMistralAI(params, context, onStream) {
+  const { message, prompt, history } = params;
   const url = `${context.USER_CONFIG.MISTRAL_API_BASE}/chat/completions`;
   const messages = [...history || [], { role: "user", content: message }];
   if (prompt) {
@@ -1201,7 +1215,8 @@ async function requestCompletionsFromMistralAI(message, prompt, history, context
 function isCohereAIEnable(context) {
   return !!context.USER_CONFIG.COHERE_API_KEY;
 }
-async function requestCompletionsFromCohereAI(message, prompt, history, context, onStream) {
+async function requestCompletionsFromCohereAI(params, context, onStream) {
+  const { message, prompt, history } = params;
   const url = `${context.USER_CONFIG.COHERE_API_BASE}/chat`;
   const header = {
     "Authorization": `Bearer ${context.USER_CONFIG.COHERE_API_KEY}`,
@@ -1247,7 +1262,8 @@ async function requestCompletionsFromCohereAI(message, prompt, history, context,
 function isAnthropicAIEnable(context) {
   return !!context.USER_CONFIG.ANTHROPIC_API_KEY;
 }
-async function requestCompletionsFromAnthropicAI(message, prompt, history, context, onStream) {
+async function requestCompletionsFromAnthropicAI(params, context, onStream) {
+  const { message, prompt, history } = params;
   const url = `${context.USER_CONFIG.ANTHROPIC_API_BASE}/messages`;
   const header = {
     "x-api-key": context.USER_CONFIG.ANTHROPIC_API_KEY,
@@ -1290,7 +1306,8 @@ function isAzureEnable(context) {
 function isAzureImageEnable(context) {
   return !!(context.USER_CONFIG.AZURE_API_KEY && context.USER_CONFIG.AZURE_DALLE_API);
 }
-async function requestCompletionsFromAzureOpenAI(message, prompt, history, context, onStream) {
+async function requestCompletionsFromAzureOpenAI(params, context, onStream) {
+  const { message, prompt, history } = params;
   const url = context.USER_CONFIG.AZURE_COMPLETIONS_API;
   const messages = [...history || [], { role: "user", content: message }];
   if (prompt) {
@@ -1542,24 +1559,30 @@ async function loadHistory(key) {
   }
   return history;
 }
-async function requestCompletionsFromLLM(text, prompt, context, llm, modifier, onStream) {
+async function requestCompletionsFromLLM(params, context, llm, modifier, onStream) {
   const historyDisable = ENV.AUTO_TRIM_HISTORY && ENV.MAX_HISTORY_LENGTH <= 0;
   const historyKey = context.SHARE_CONTEXT.chatHistoryKey;
+  const { message } = params;
   let history = await loadHistory(historyKey);
   if (modifier) {
-    const modifierData = modifier(history, text);
+    const modifierData = modifier(history, message);
     history = modifierData.history;
-    text = modifierData.text;
+    params.message = modifierData.message;
   }
-  const answer = await llm(text, prompt, history, context, onStream);
+  const llmParams = {
+    ...params,
+    history,
+    prompt: context.USER_CONFIG.SYSTEM_INIT_MESSAGE
+  };
+  const answer = await llm(llmParams, context, onStream);
   if (!historyDisable) {
-    history.push({ role: "user", content: text || "" });
+    history.push({ role: "user", content: message || "" });
     history.push({ role: "assistant", content: answer });
     await DATABASE.put(historyKey, JSON.stringify(history)).catch(console.error);
   }
   return answer;
 }
-async function chatWithLLM(text, context, modifier) {
+async function chatWithLLM(params, context, modifier) {
   try {
     try {
       const msg = await sendMessageToTelegramWithContext(context)("...").then((r) => r.json());
@@ -1574,12 +1597,12 @@ async function chatWithLLM(text, context, modifier) {
     let nextEnableTime = null;
     if (ENV.STREAM_MODE) {
       context.CURRENT_CHAT_CONTEXT.parse_mode = null;
-      onStream = async (text2) => {
+      onStream = async (text) => {
         try {
           if (nextEnableTime && nextEnableTime > Date.now()) {
             return;
           }
-          const resp = await sendMessageToTelegramWithContext(context)(text2);
+          const resp = await sendMessageToTelegramWithContext(context)(text);
           if (resp.status === 429) {
             const retryAfter = parseInt(resp.headers.get("Retry-After"));
             if (retryAfter) {
@@ -1600,8 +1623,7 @@ async function chatWithLLM(text, context, modifier) {
     if (llm === null) {
       return sendMessageToTelegramWithContext(context)(`LLM is not enable`);
     }
-    const prompt = context.USER_CONFIG.SYSTEM_INIT_MESSAGE;
-    const answer = await requestCompletionsFromLLM(text, prompt, context, llm, modifier, onStream);
+    const answer = await requestCompletionsFromLLM(params, context, llm, modifier, onStream);
     context.CURRENT_CHAT_CONTEXT.parse_mode = parseMode;
     if (ENV.SHOW_REPLY_BUTTON && context.CURRENT_CHAT_CONTEXT.message_id) {
       try {
@@ -1923,9 +1945,9 @@ async function commandRegenerate(message, command, subcommand, context) {
     if (subcommand) {
       nextText = subcommand;
     }
-    return { history: historyCopy, text: nextText };
+    return { history: historyCopy, message: nextText };
   };
-  return chatWithLLM(null, context, mf);
+  return chatWithLLM({ message: null }, context, mf);
 }
 async function commandEcho(message, command, subcommand, context) {
   let msg = "<pre>";
@@ -2185,63 +2207,70 @@ async function msgHandleGroupMessage(message, context) {
     context.SHARE_CONTEXT.currentBotName = res.info.bot_name;
     botName = res.info.bot_name;
   }
-  if (botName) {
-    let mentioned = false;
-    if (message.entities) {
-      let content = "";
-      let offset = 0;
-      message.entities.forEach((entity) => {
-        switch (entity.type) {
-          case "bot_command":
-            if (!mentioned) {
-              const mention = message.text.substring(
-                entity.offset,
-                entity.offset + entity.length
-              );
-              if (mention.endsWith(botName)) {
-                mentioned = true;
-              }
-              const cmd = mention.replaceAll("@" + botName, "").replaceAll(botName, "").trim();
-              content += cmd;
-              offset = entity.offset + entity.length;
-            }
-            break;
-          case "mention":
-          case "text_mention":
-            if (!mentioned) {
-              const mention = message.text.substring(
-                entity.offset,
-                entity.offset + entity.length
-              );
-              if (mention === botName || mention === "@" + botName) {
-                mentioned = true;
-              }
-            }
-            content += message.text.substring(offset, entity.offset);
-            offset = entity.offset + entity.length;
-            break;
+  if (!botName) {
+    throw new Error("Not set bot name");
+  }
+  if (!message.entities) {
+    throw new Error("No entities");
+  }
+  let { text } = message;
+  if (!text) {
+    throw new Error("Empty message");
+  }
+  let content = "";
+  let offset = 0;
+  let mentioned = false;
+  for (const entity of message.entities) {
+    switch (entity.type) {
+      case "bot_command":
+        if (!mentioned) {
+          const mention = text.substring(
+            entity.offset,
+            entity.offset + entity.length
+          );
+          if (mention.endsWith(botName)) {
+            mentioned = true;
+          }
+          const cmd = mention.replaceAll("@" + botName, "").replaceAll(botName, "").trim();
+          content += cmd;
+          offset = entity.offset + entity.length;
         }
-      });
-      content += message.text.substring(offset, message.text.length);
-      message.text = content.trim();
-    }
-    if (!mentioned) {
-      throw new Error("No mentioned");
-    } else {
-      return null;
+        break;
+      case "mention":
+      case "text_mention":
+        if (!mentioned) {
+          const mention = text.substring(
+            entity.offset,
+            entity.offset + entity.length
+          );
+          if (mention === botName || mention === "@" + botName) {
+            mentioned = true;
+          }
+        }
+        content += text.substring(offset, entity.offset);
+        offset = entity.offset + entity.length;
+        break;
     }
   }
-  throw new Error("Not set bot name");
+  content += text.substring(offset, text.length);
+  message.text = content.trim();
+  if (!mentioned) {
+    throw new Error("No mentioned");
+  }
+  return null;
 }
 async function msgHandleCommand(message, context) {
+  if (!message.text) {
+    return null;
+  }
   return await handleCommandMessage(message, context);
 }
 async function msgChatWithLLM(message, context) {
-  let text = message.text;
+  let { text } = message;
   if (ENV.EXTRA_MESSAGE_CONTEXT && context.SHARE_CONTEXT.extraMessageContext && context.SHARE_CONTEXT.extraMessageContext.text) {
     text = context.SHARE_CONTEXT.extraMessageContext.text + "\n" + text;
   }
-  return chatWithLLM(text, context, null);
+  return chatWithLLM({ message: text }, context, null);
 }
 async function loadMessage(request, context) {
   const raw = await request.json();
diff --git a/dist/timestamp b/dist/timestamp
index 56066760..b7ff02e0 100644
--- a/dist/timestamp
+++ b/dist/timestamp
@@ -1 +1 @@
-1722427564
+1722481015
diff --git a/src/agent/agents.js b/src/agent/agents.js
index c2876199..665724fd 100644
--- a/src/agent/agents.js
+++ b/src/agent/agents.js
@@ -11,23 +11,9 @@ import {
     requestImageFromAzureOpenAI
 } from "./azure.js";
 import "../types/context.js";
+import "../types/agent.js";
+
 
-/**
- *
- * @typedef {function} ChatAgentRequest
- * @param {string} message
- * @param {string} prompt
- * @param {Array} history
- * @param {ContextType} context
- * @param {function} onStream
- * @return {Promise}
- * */
-/**
- * @typedef {object} ChatAgent
- * @property {string} name
- * @property {function} enable
- * @property {ChatAgentRequest} request
- */
 /**
  * @type {ChatAgent[]}
  */
diff --git a/src/agent/anthropic.js b/src/agent/anthropic.js
index d549ec1e..d330b881 100644
--- a/src/agent/anthropic.js
+++ b/src/agent/anthropic.js
@@ -1,4 +1,5 @@
 import "../types/context.js";
+import "../types/agent.js";
 import {anthropicSseJsonParser, Stream} from "./stream.js";
 import {ENV} from "../config/env.js";
 import {requestChatCompletions} from "./request.js";
@@ -16,14 +17,13 @@ export function isAnthropicAIEnable(context) {
 /**
  * 发送消息到Anthropic AI
  *
- * @param {string} message
- * @param {string} prompt
- * @param {Array} history
+ * @param {LlmParams} params
  * @param {ContextType} context
  * @param {function} onStream
  * @return {Promise}
  */
-export async function requestCompletionsFromAnthropicAI(message, prompt, history, context, onStream) {
+export async function requestCompletionsFromAnthropicAI(params, context, onStream) {
+    const { message, prompt, history } = params;
     const url = `${context.USER_CONFIG.ANTHROPIC_API_BASE}/messages`;
     const header = {
         'x-api-key': context.USER_CONFIG.ANTHROPIC_API_KEY,
diff --git a/src/agent/azure.js b/src/agent/azure.js
index 4a829c93..4ebd872d 100644
--- a/src/agent/azure.js
+++ b/src/agent/azure.js
@@ -1,4 +1,5 @@
 import "../types/context.js";
+import "../types/agent.js";
 import {requestChatCompletions} from "./request.js";
 
 /**
@@ -30,14 +31,13 @@ export function isAzureImageEnable(context) {
 /**
  * 发送消息到Azure ChatGPT
  *
- * @param {string} message
- * @param {string} prompt
- * @param {Array} history
+ * @param {LlmParams} params
  * @param {ContextType} context
  * @param {function} onStream
  * @return {Promise}
  */
-export async function requestCompletionsFromAzureOpenAI(message, prompt, history, context, onStream) {
+export async function requestCompletionsFromAzureOpenAI(params, context, onStream) {
+    const { message, prompt, history } = params;
     const url = context.USER_CONFIG.AZURE_COMPLETIONS_API;
 
     const messages = [...(history || []), {role: 'user', content: message}];
diff --git a/src/agent/cohere.js b/src/agent/cohere.js
index c8a54757..f0d56ead 100644
--- a/src/agent/cohere.js
+++ b/src/agent/cohere.js
@@ -15,14 +15,13 @@ export function isCohereAIEnable(context) {
 /**
  * 发送消息到Cohere AI
  *
- * @param {string} message
- * @param {string} prompt
- * @param {Array} history
+ * @param {LlmParams} params
  * @param {ContextType} context
  * @param {function} onStream
  * @return {Promise}
  */
-export async function requestCompletionsFromCohereAI(message, prompt, history, context, onStream) {
+export async function requestCompletionsFromCohereAI(params, context, onStream) {
+    const { message, prompt, history } = params;
     const url = `${context.USER_CONFIG.COHERE_API_BASE}/chat`;
     const header = {
         'Authorization': `Bearer ${context.USER_CONFIG.COHERE_API_KEY}`,
diff --git a/src/agent/gemini.js b/src/agent/gemini.js
index f6f18fdc..7ad9cae2 100644
--- a/src/agent/gemini.js
+++ b/src/agent/gemini.js
@@ -11,14 +11,13 @@ export function isGeminiAIEnable(context) {
 /**
  * 发送消息到Gemini
  *
- * @param {string} message
- * @param {string} prompt
- * @param {Array} history
+ * @param {LlmParams} params
  * @param {ContextType} context
  * @param {function} onStream
  * @return {Promise}
  */
-export async function requestCompletionsFromGeminiAI(message, prompt, history, context, onStream) {
+export async function requestCompletionsFromGeminiAI(params, context, onStream) {
+    const { message, prompt, history } = params;
     onStream = null; // 暂时不支持stream模式
     const url = `${context.USER_CONFIG.GOOGLE_COMPLETIONS_API}${context.USER_CONFIG.GOOGLE_COMPLETIONS_MODEL}:${
         onStream ? 'streamGenerateContent' : 'generateContent'
diff --git a/src/agent/llm.js b/src/agent/llm.js
index 1f43f662..05aae6b8 100644
--- a/src/agent/llm.js
+++ b/src/agent/llm.js
@@ -5,6 +5,7 @@ import {
 } from '../telegram/telegram.js';
 import {DATABASE, ENV} from '../config/env.js';
 import {loadChatLLM} from "./agents.js";
+import "../types/agent.js";
 
 /**
  * @return {(function(string): number)}
@@ -15,11 +16,7 @@ function tokensCounter() {
     };
 }
 
-/**
- * @typedef {object} HistoryItem
- * @property {string} role
- * @property {string} content
- */
+
 /**
  * 加载历史TG消息
  *
@@ -82,29 +79,41 @@ async function loadHistory(key) {
     return history;
 }
 
+/**
+ * @typedef {object} LlmModifierResult
+ * @property {HistoryItem[]} history
+ * @property {string} message
+ *
+ * @typedef {function(HistoryItem[], string): LlmModifierResult} LlmModifier
+ */
 
 /**
  *
- * @param {string} text
- * @param {string | null} prompt
+ * @param {LlmRequestParams} params
  * @param {ContextType} context
- * @param {function(string, string, HistoryItem[], ContextType, function)} llm
- * @param {function(HistoryItem[], string)} modifier
+ * @param {ChatAgentRequest} llm
+ * @param {LlmModifier} modifier
  * @param {function(string)} onStream
  * @return {Promise}
  */
-async function requestCompletionsFromLLM(text, prompt, context, llm, modifier, onStream) {
+async function requestCompletionsFromLLM(params, context, llm, modifier, onStream) {
     const historyDisable = ENV.AUTO_TRIM_HISTORY && ENV.MAX_HISTORY_LENGTH <= 0;
     const historyKey = context.SHARE_CONTEXT.chatHistoryKey;
+    const { message } = params;
     let history = await loadHistory(historyKey);
     if (modifier) {
-        const modifierData = modifier(history, text);
+        const modifierData = modifier(history, message);
         history = modifierData.history;
-        text = modifierData.text;
+        params.message = modifierData.message;
     }
-    const answer = await llm(text, prompt, history, context, onStream);
+    const llmParams = {
+        ...params,
+        history: history,
+        prompt: context.USER_CONFIG.SYSTEM_INIT_MESSAGE,
+    };
+    const answer = await llm(llmParams, context, onStream);
     if (!historyDisable) {
-        history.push({role: 'user', content: text || ''});
+        history.push({role: 'user', content: message || ''});
         history.push({role: 'assistant', content: answer});
         await DATABASE.put(historyKey, JSON.stringify(history)).catch(console.error);
     }
@@ -114,12 +123,12 @@ async function requestCompletionsFromLLM(text, prompt, context, llm, modifier, o
 /**
  * 与LLM聊天
  *
- * @param {string|null} text
+ * @param {LlmRequestParams} params
  * @param {ContextType} context
- * @param {function} modifier
+ * @param {LlmModifier} modifier
  * @return {Promise}
  */
-export async function chatWithLLM(text, context, modifier) {
+export async function chatWithLLM(params, context, modifier) {
     try {
         try {
             const msg = await sendMessageToTelegramWithContext(context)('...').then((r) => r.json());
@@ -164,8 +173,7 @@ export async function chatWithLLM(text, context, modifier) {
         if (llm === null) {
             return sendMessageToTelegramWithContext(context)(`LLM is not enable`);
         }
-        const prompt = context.USER_CONFIG.SYSTEM_INIT_MESSAGE;
-        const answer = await requestCompletionsFromLLM(text, prompt, context, llm, modifier, onStream);
+        const answer = await requestCompletionsFromLLM(params, context, llm, modifier, onStream);
         context.CURRENT_CHAT_CONTEXT.parse_mode = parseMode;
         if (ENV.SHOW_REPLY_BUTTON && context.CURRENT_CHAT_CONTEXT.message_id) {
             try {
diff --git a/src/agent/mistralai.js b/src/agent/mistralai.js
index 94105f93..8e7e7c40 100644
--- a/src/agent/mistralai.js
+++ b/src/agent/mistralai.js
@@ -12,14 +12,13 @@ export function isMistralAIEnable(context) {
 /**
  * 发送消息到Mistral AI
  *
- * @param {string} message
- * @param {string} prompt
- * @param {Array} history
+ * @param {LlmParams} params
  * @param {ContextType} context
  * @param {function} onStream
  * @return {Promise}
  */
-export async function requestCompletionsFromMistralAI(message, prompt, history, context, onStream) {
+export async function requestCompletionsFromMistralAI(params, context, onStream) {
+    const {message, prompt, history} = params;
     const url = `${context.USER_CONFIG.MISTRAL_API_BASE}/chat/completions`;
     const messages = [...(history || []), {role: 'user', content: message}];
     if (prompt) {
diff --git a/src/agent/openai.js b/src/agent/openai.js
index c58e58a6..f18983c9 100644
--- a/src/agent/openai.js
+++ b/src/agent/openai.js
@@ -24,14 +24,13 @@ export function isOpenAIEnable(context) {
 /**
  * 发送消息到ChatGPT
  *
- * @param {string} message
- * @param {string} prompt
- * @param {Array} history
+ * @param {LlmParams} params
  * @param {ContextType} context
  * @param {function} onStream
  * @return {Promise}
  */
-export async function requestCompletionsFromOpenAI(message, prompt, history, context, onStream) {
+export async function requestCompletionsFromOpenAI(params, context, onStream) {
+    const { message, prompt, history } = params;
     const url = `${context.USER_CONFIG.OPENAI_API_BASE}/chat/completions`;
     const messages = [...(history || []), {role: 'user', content: message}];
     if (prompt) {
diff --git a/src/agent/workersai.js b/src/agent/workersai.js
index 9e944501..a91c67a7 100644
--- a/src/agent/workersai.js
+++ b/src/agent/workersai.js
@@ -32,15 +32,14 @@ export function isWorkersAIEnable(context) {
 /**
  * 发送消息到Workers AI
  *
- * @param {string} message
- * @param {string} prompt
- * @param {Array} history
+ * @param {LlmParams} params
  * @param {ContextType} context
  * @param {function} onStream
  * @return {Promise}
  */
-export async function requestCompletionsFromWorkersAI(message, prompt, history, context, onStream) {
+export async function requestCompletionsFromWorkersAI(params, context, onStream) {
 
+    const {message, prompt, history} = params;
     const id = context.USER_CONFIG.CLOUDFLARE_ACCOUNT_ID;
     const token = context.USER_CONFIG.CLOUDFLARE_TOKEN;
     const model = context.USER_CONFIG.WORKERS_CHAT_MODEL;
diff --git a/src/telegram/command.js b/src/telegram/command.js
index ccc281ff..49772d47 100644
--- a/src/telegram/command.js
+++ b/src/telegram/command.js
@@ -428,9 +428,9 @@ async function commandRegenerate(message, command, subcommand, context) {
         if (subcommand) {
             nextText = subcommand;
         }
-        return {history: historyCopy, text: nextText};
+        return {history: historyCopy, message: nextText};
     };
-    return chatWithLLM(null, context, mf);
+    return chatWithLLM({message: null}, context, mf);
 }
 
 /**
diff --git a/src/telegram/message.js b/src/telegram/message.js
index ee83ab71..7bfbe0fc 100644
--- a/src/telegram/message.js
+++ b/src/telegram/message.js
@@ -163,58 +163,64 @@ async function msgHandleGroupMessage(message, context) {
         context.SHARE_CONTEXT.currentBotName = res.info.bot_name;
         botName = res.info.bot_name;
     }
-    if (botName) {
-        let mentioned = false;
-        // Reply消息
-        if (message.entities) {
-            let content = '';
-            let offset = 0;
-            message.entities.forEach((entity) => {
-                switch (entity.type) {
-                    case 'bot_command':
-                        if (!mentioned) {
-                            const mention = message.text.substring(
-                                entity.offset,
-                                entity.offset + entity.length,
-                            );
-                            if (mention.endsWith(botName)) {
-                                mentioned = true;
-                            }
-                            const cmd = mention
-                                .replaceAll('@' + botName, '')
-                                .replaceAll(botName, '')
-                                .trim();
-                            content += cmd;
-                            offset = entity.offset + entity.length;
-                        }
-                        break;
-                    case 'mention':
-                    case 'text_mention':
-                        if (!mentioned) {
-                            const mention = message.text.substring(
-                                entity.offset,
-                                entity.offset + entity.length,
-                            );
-                            if (mention === botName || mention === '@' + botName) {
-                                mentioned = true;
-                            }
-                        }
-                        content += message.text.substring(offset, entity.offset);
-                        offset = entity.offset + entity.length;
-                        break;
+    if (!botName) {
+        throw new Error('Not set bot name');
+    }
+    if (!message.entities) {
+       throw new Error('No entities');
+    }
+
+    let { text } = message;
+    if (!text) {
+        throw new Error('Empty message');
+    }
+
+    let content = '';
+    let offset = 0;
+    let mentioned = false;
+
+    for (const entity of message.entities) {
+        switch (entity.type) {
+            case 'bot_command':
+                if (!mentioned) {
+                    const mention = text.substring(
+                        entity.offset,
+                        entity.offset + entity.length,
+                    );
+                    if (mention.endsWith(botName)) {
+                        mentioned = true;
+                    }
+                    const cmd = mention
+                        .replaceAll('@' + botName, '')
+                        .replaceAll(botName, '')
+                        .trim();
+                    content += cmd;
+                    offset = entity.offset + entity.length;
                 }
-            });
-            content += message.text.substring(offset, message.text.length);
-            message.text = content.trim();
-        }
-        // 未AT机器人的消息不作处理
-        if (!mentioned) {
-            throw new Error('No mentioned');
-        } else {
-            return null;
+                break;
+            case 'mention':
+            case 'text_mention':
+                if (!mentioned) {
+                    const mention = text.substring(
+                        entity.offset,
+                        entity.offset + entity.length,
+                    );
+                    if (mention === botName || mention === '@' + botName) {
+                        mentioned = true;
+                    }
+                }
+                content += text.substring(offset, entity.offset);
+                offset = entity.offset + entity.length;
+                break;
         }
     }
-    throw new Error('Not set bot name');
+    content += text.substring(offset, text.length);
+    message.text = content.trim();
+    // 未AT机器人的消息不作处理
+    if (!mentioned) {
+        throw new Error('No mentioned');
+    }
+    return null;
 }
 
 
@@ -226,6 +232,10 @@ async function msgHandleGroupMessage(message, context) {
  * @return {Promise}
  */
 async function msgHandleCommand(message, context) {
+    if (!message.text) {
+        // 非文本消息不作处理
+        return null;
+    }
     return await handleCommandMessage(message, context);
 }
 
@@ -237,11 +247,11 @@ async function msgHandleCommand(message, context) {
  * @return {Promise}
  */
 async function msgChatWithLLM(message, context) {
-    let text = message.text;
+    let { text } = message;
     if (ENV.EXTRA_MESSAGE_CONTEXT && context.SHARE_CONTEXT.extraMessageContext && context.SHARE_CONTEXT.extraMessageContext.text) {
         text = context.SHARE_CONTEXT.extraMessageContext.text + '\n' + text;
     }
-    return chatWithLLM(text, context, null);
+    return chatWithLLM({message: text}, context, null);
 }
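
To make the entity walk above concrete: given a group message that @-mentions the bot, msgHandleGroupMessage strips the mention and keeps the rest as message.text. A minimal, self-contained sketch of that behaviour (the bot name and sample message are invented for illustration, and only the mention case is shown):

    const botName = 'my_bot';
    const message = {
        text: '@my_bot what is the weather?',
        entities: [{type: 'mention', offset: 0, length: 7}],
    };

    let content = '';
    let offset = 0;
    let mentioned = false;
    for (const entity of message.entities) {
        if (entity.type === 'mention' || entity.type === 'text_mention') {
            const mention = message.text.substring(entity.offset, entity.offset + entity.length);
            if (mention === botName || mention === '@' + botName) {
                mentioned = true;
            }
            // keep the text before the mention, then skip over the mention itself
            content += message.text.substring(offset, entity.offset);
            offset = entity.offset + entity.length;
        }
    }
    content += message.text.substring(offset);
    console.log(mentioned, content.trim()); // true 'what is the weather?'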
 
 
diff --git a/src/types/agent.js b/src/types/agent.js
new file mode 100644
index 00000000..bb134d7a
--- /dev/null
+++ b/src/types/agent.js
@@ -0,0 +1,35 @@
+/**
+ * @typedef {object} LlmRequestParams
+ * @property {string | null | undefined} message - 输入文本
+ * @property {string | null | undefined} image - 图片
+ * @property {string | null | undefined} audio - 音频
+ */
+
+/**
+ * @typedef {LlmRequestParams} LlmParams
+ * @property {string | null | undefined} prompt - 提示
+ * @property {HistoryItem[] | null | undefined} history - 历史记录
+ */
+
+/**
+ *
+ * @typedef {function} ChatAgentRequest
+ * @param {LlmParams} params
+ * @param {ContextType} context
+ * @param {function} onStream
+ * @return {Promise}
+ *
+ */
+
+/**
+ * @typedef {object} ChatAgent
+ * @property {string} name
+ * @property {function} enable
+ * @property {ChatAgentRequest} request
+ */
+
+/**
+ * @typedef {object} HistoryItem
+ * @property {string} role
+ * @property {string} content
+ */
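
For orientation, an object that satisfies these typedefs has the shape below; the agent itself is hypothetical and only illustrates what agents.js expects from a ChatAgent (name, enable, request):

    /** @type {ChatAgent} */
    const echoAgent = {
        name: 'echo', // illustrative name, not a real provider
        enable: (context) => true,
        /** @type {ChatAgentRequest} */
        request: async (params, context, onStream) => {
            const {message, prompt} = params;
            // a real agent would call its HTTP API here and feed partial text to onStream
            const answer = `${prompt ? `[${prompt}] ` : ''}${message ?? ''}`;
            if (onStream) {
                await onStream(answer);
            }
            return answer;
        },
    };
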
diff --git a/src/types/context.js b/src/types/context.js
index 9eff81c8..c7cf3b7a 100644
--- a/src/types/context.js
+++ b/src/types/context.js
@@ -84,4 +84,5 @@
  * @property {UserConfigType} USER_CONFIG - 用户配置
  * @property {CurrentChatContextType} CURRENT_CHAT_CONTEXT - 当前聊天上下文
  * @property {ShareContextType} SHARE_CONTEXT - 共享上下文
+ * @property {function(TelegramMessage)} initContext
  */
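
The TELEGRAM_MIN_STREAM_INTERVAL option introduced in PATCH 01 (and mirrored in dist/index.js above) simply drops a streaming edit when the previous one happened too recently. A stripped-down sketch of that logic, with the SSE loop replaced by a plain function (names other than TELEGRAM_MIN_STREAM_INTERVAL are illustrative):

    const TELEGRAM_MIN_STREAM_INTERVAL = 2000; // ms; values <= 0 disable the throttle
    let lastUpdateTime = Date.now();

    async function sendPartialUpdate(contentFull, onStream) {
        if (TELEGRAM_MIN_STREAM_INTERVAL > 0) {
            if (Date.now() - lastUpdateTime < TELEGRAM_MIN_STREAM_INTERVAL) {
                return; // too soon since the last Telegram edit, skip this chunk
            }
            lastUpdateTime = Date.now();
        }
        await onStream(`${contentFull}\n...`);
    }

    // usage: await sendPartialUpdate('partial answer so far', async (text) => console.log(text));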

From b24c8b5e498d1f229e8c4733b348ae7349515462 Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Thu, 1 Aug 2024 13:41:21 +0800
Subject: [PATCH 03/24] doc: improve JSDoc documentation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/agent/agents.js     |  4 +--
 src/agent/request.js    |  2 +-
 src/agent/workersai.js  |  4 +--
 src/telegram/message.js | 29 +++++++++++--------
 src/types/agent.js      | 10 +++----
 src/types/context.js    | 62 ++++++++++++++++++++++-------------------
 src/types/telegram.js   |  3 +-
 7 files changed, 62 insertions(+), 52 deletions(-)

diff --git a/src/agent/agents.js b/src/agent/agents.js
index 665724fd..0bc07d12 100644
--- a/src/agent/agents.js
+++ b/src/agent/agents.js
@@ -116,7 +116,7 @@ export function chatModelKey(agentName) {
  * 加载聊天AI
  *
  * @param {ContextType} context
- * @return {ChatAgent | null}
+ * @return {?ChatAgent}
  */
 export function loadChatLLM(context) {
     for (const llm of chatLlmAgents) {
@@ -172,7 +172,7 @@ export const imageGenAgents = [
  * 加载图片AI
  *
  * @param {ContextType} context
- * @return {ImageAgent | null}
+ * @return {?ImageAgent}
  */
 export function loadImageGen(context) {
     for (const imgGen of imageGenAgents) {
diff --git a/src/agent/request.js b/src/agent/request.js
index ed732411..7fe26c29 100644
--- a/src/agent/request.js
+++ b/src/agent/request.js
@@ -78,7 +78,7 @@ export function isEventStreamResponse(resp) {
 /**
  * 发送请求到支持sse的聊天接口
  *
- * @param {string | null} url
+ * @param {string} url
  * @param {object} header
  * @param {object} body
  * @param {ContextType} context
diff --git a/src/agent/workersai.js b/src/agent/workersai.js
index a91c67a7..56cc2556 100644
--- a/src/agent/workersai.js
+++ b/src/agent/workersai.js
@@ -6,8 +6,8 @@ import {requestChatCompletions} from "./request.js";
  *
  * @param {string} model - The AI model to run.
  * @param {Object} body - The data to provide to the AI model.
- * @param {string | null} id
- * @param {string | null} token
+ * @param {string} id
+ * @param {string} token
  * @return {Promise} The response from the AI model.
  */
 async function run(model, body, id, token) {
diff --git a/src/telegram/message.js b/src/telegram/message.js
index 7bfbe0fc..f584d0b7 100644
--- a/src/telegram/message.js
+++ b/src/telegram/message.js
@@ -130,10 +130,13 @@ async function msgFilterWhiteList(message, context) {
  */
 // eslint-disable-next-line no-unused-vars
 async function msgFilterUnsupportedMessage(message, context) {
-    if (!message.text) {
-        throw new Error('Not supported message type');
+    if (message.text) {
+        return null;// 纯文本消息
     }
-    return null;
+    if (message.caption) {
+        return null;// 图片中的文本消息
+    }
+    throw new Error('Not supported message type');
 }
 
 /**
@@ -170,8 +173,9 @@ async function msgHandleGroupMessage(message, context) {
        throw new Error('No entities');
     }
 
-    let { text } = message;
-    if (!text) {
+    const { text, caption } = message;
+    let originContent = text || caption || '';
+    if (!originContent) {
         throw new Error('Empty message');
     }
 
@@ -183,7 +187,7 @@ async function msgHandleGroupMessage(message, context) {
         switch (entity.type) {
             case 'bot_command':
                 if (!mentioned) {
-                    const mention = text.substring(
+                    const mention = originContent.substring(
                         entity.offset,
                         entity.offset + entity.length,
                     );
@@ -201,7 +205,7 @@ async function msgHandleGroupMessage(message, context) {
             case 'mention':
             case 'text_mention':
                 if (!mentioned) {
-                    const mention = text.substring(
+                    const mention = originContent.substring(
                         entity.offset,
                         entity.offset + entity.length,
                     );
@@ -209,12 +213,12 @@ async function msgHandleGroupMessage(message, context) {
                         mentioned = true;
                     }
                 }
-                content += text.substring(offset, entity.offset);
+                content += originContent.substring(offset, entity.offset);
                 offset = entity.offset + entity.length;
                 break;
         }
     }
-    content += text.substring(offset, text.length);
+    content += originContent.substring(offset, originContent.length);
     message.text = content.trim();
     // 未AT机器人的消息不作处理
     if (!mentioned) {
@@ -247,11 +251,12 @@ async function msgHandleCommand(message, context) {
  * @return {Promise}
  */
 async function msgChatWithLLM(message, context) {
-    let { text } = message;
+    const { text, caption } = message;
+    let content = text || caption;
     if (ENV.EXTRA_MESSAGE_CONTEXT && context.SHARE_CONTEXT.extraMessageContext && context.SHARE_CONTEXT.extraMessageContext.text) {
-        text = context.SHARE_CONTEXT.extraMessageContext.text + '\n' + text;
+        content = context.SHARE_CONTEXT.extraMessageContext.text + '\n' + text;
     }
-    return chatWithLLM({message: text}, context, null);
+    return chatWithLLM({message: content}, context, null);
 }
 
 
diff --git a/src/types/agent.js b/src/types/agent.js
index bb134d7a..b576f986 100644
--- a/src/types/agent.js
+++ b/src/types/agent.js
@@ -1,14 +1,14 @@
 /**
  * @typedef {object} LlmRequestParams
- * @property {string | null | undefined} message - 输入文本
- * @property {string | null | undefined} image - 图片
- * @property {string | null | undefined} audio - 音频
+ * @property {?string} [message] - 输入文本
+ * @property {string} [image] - 图片
+ * @property {string} [audio] - 音频
  */
 
 /**
  * @typedef {LlmRequestParams} LlmParams
- * @property {string | null | undefined} prompt - 提示
- * @property {HistoryItem[] | null | undefined} history - 历史记录
+ * @property {?string} [prompt] - 提示
+ * @property {HistoryItem[]} [history] - 历史记录
  */
 
 /**
diff --git a/src/types/context.js b/src/types/context.js
index c7cf3b7a..b9caa03c 100644
--- a/src/types/context.js
+++ b/src/types/context.js
@@ -1,3 +1,7 @@
+/**
+ * @typedef {(string|number)} TelegramID
+ */
+
 /**
  * 用于保存用户配置
  *
@@ -7,7 +11,7 @@
  *
  * @property {string} AI_PROVIDER
  * @property {string} AI_IMAGE_PROVIDER
- * @property {string | null} SYSTEM_INIT_MESSAGE
+ * @property {string} SYSTEM_INIT_MESSAGE
  * @property {string} SYSTEM_INIT_MESSAGE_ROLE
  *
  * @property {string[]} OPENAI_API_KEY
@@ -20,28 +24,28 @@
  * @property {string} DALL_E_IMAGE_QUALITY
  * @property {string} DALL_E_IMAGE_STYLE
  *
- * @property {string | null} AZURE_API_KEY
- * @property {string | null} AZURE_COMPLETIONS_API
- * @property {string | null} AZURE_DALLE_API
+ * @property {?string} AZURE_API_KEY
+ * @property {?string} AZURE_COMPLETIONS_API
+ * @property {?string} AZURE_DALLE_API
  *
- * @property {string | null} CLOUDFLARE_ACCOUNT_ID
- * @property {string | null} CLOUDFLARE_TOKEN
+ * @property {?string} CLOUDFLARE_ACCOUNT_ID
+ * @property {?string} CLOUDFLARE_TOKEN
  * @property {string} WORKERS_CHAT_MODEL
  * @property {string} WORKERS_IMAGE_MODEL
  *
- * @property {string | null} GOOGLE_API_KEY
+ * @property {?string} GOOGLE_API_KEY
  * @property {string} GOOGLE_COMPLETIONS_API
  * @property {string} GOOGLE_COMPLETIONS_MODEL
  *
- * @property {string | null} MISTRAL_API_KEY
+ * @property {?string} MISTRAL_API_KEY
  * @property {string} MISTRAL_API_BASE
  * @property {string} MISTRAL_CHAT_MODEL
  *
- * @property {string | null} COHERE_API_KEY
+ * @property {?string} COHERE_API_KEY
  * @property {string} COHERE_API_BASE
  * @property {string} COHERE_CHAT_MODEL
  *
- * @property {string | null} ANTHROPIC_API_KEY
+ * @property {?string} ANTHROPIC_API_KEY
  * @property {string} ANTHROPIC_API_BASE
  * @property {string} ANTHROPIC_CHAT_MODEL
  *
@@ -51,31 +55,31 @@
  * 用于保存全局使用的临时变量
  *
  * @typedef {object} ShareContextType
- * @property {string | null} currentBotId - 当前机器人 ID
- * @property {string | null} currentBotToken - 当前机器人 Token
- * @property {string | null} currentBotName - 当前机器人名称: xxx_bot
- * @property {string | null} chatHistoryKey - history:chat_id:bot_id:$from_id
- * @property {string | null} chatLastMessageIdKey - last_message_id:$chatHistoryKey
- * @property {string | null} configStoreKey - user_config:chat_id:bot_id:$from_id
- * @property {string | null} groupAdminKey - group_admin:group_id
- * @property {string | null} usageKey - usage:bot_id
- * @property {string | null} chatType - 会话场景, private/group/supergroup 等, 来源 message.chat.type
- * @property {string | number | null} chatId - 会话 id, private 场景为发言人 id, group/supergroup 场景为群组 id
- * @property {string | number | null} speakerId - 发言人 id
- * @property {object | null} extraMessageContext - 额外消息上下文
+ * @property {?string} currentBotId - 当前机器人 ID
+ * @property {?string} currentBotToken - 当前机器人 Token
+ * @property {?string} currentBotName - 当前机器人名称: xxx_bot
+ * @property {?string} chatHistoryKey - history:chat_id:bot_id:$from_id
+ * @property {?string} chatLastMessageIdKey - last_message_id:$chatHistoryKey
+ * @property {?string} configStoreKey - user_config:chat_id:bot_id:$from_id
+ * @property {?string} groupAdminKey - group_admin:group_id
+ * @property {?string} usageKey - usage:bot_id
+ * @property {?string} chatType - 会话场景, private/group/supergroup 等, 来源 message.chat.type
+ * @property {?TelegramID} chatId - 会话 id, private 场景为发言人 id, group/supergroup 场景为群组 id
+ * @property {?TelegramID} speakerId - 发言人 id
+ * @property {?object} extraMessageContext - 额外消息上下文
  * */
 
 /**
  * 用于保存发起telegram请求的聊天上下文
  *
  * @typedef {object} CurrentChatContextType
- * @property {string | number | null} chat_id
- * @property {string | number | null} reply_to_message_id - 如果是群组,这个值为消息ID,否则为null
- * @property {string | null} parse_mode
- * @property {string | number | null} message_id - 编辑消息的ID
- * @property {object | null} reply_markup -  回复键盘
- * @property {boolean | null} allow_sending_without_reply
- * @property {boolean | null} disable_web_page_preview
+ * @property {?TelegramID} chat_id
+ * @property {?TelegramID} reply_to_message_id - 如果是群组,这个值为消息ID,否则为null
+ * @property {?string} parse_mode
+ * @property {?TelegramID} message_id - 编辑消息的ID
+ * @property {?object} reply_markup -  回复键盘
+ * @property {boolean} allow_sending_without_reply
+ * @property {boolean} disable_web_page_preview
  */
 
 /**
diff --git a/src/types/telegram.js b/src/types/telegram.js
index 5015aaa6..50563fbb 100644
--- a/src/types/telegram.js
+++ b/src/types/telegram.js
@@ -30,7 +30,8 @@
  * @property {TelegramUser} from - The user that sent the message.
  * @property {TelegramChat} chat - The chat where the message was sent.
  * @property {number} date - The date the message was sent.
- * @property {string} text - The text of the message.
+ * @property {?string} [text] - The text of the message.
+ * @property {?string} [caption] - The caption of the message.
  * @property {TelegramMessageEntity[]} [entities] - An array of message entities.
  * @property {TelegramMessage} [reply_to_message] - The message that this message is a reply to.
  * @property {boolean} is_topic_message - True, if the message is a topic message.
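
A note on the notation this patch standardises on: in JSDoc, {?string} means the value may be null, while square brackets around the name ([text]) mean the property or parameter is optional, so {?string} [text] reads "may be absent, and may be null when present". For example:

    /**
     * @param {?string} [caption] - optional, and may be null when present
     * @param {string} [suffix] - optional, but not null when present
     * @return {string}
     */
    function describe(caption, suffix) {
        return `${caption ?? '(no caption)'}${suffix ?? ''}`;
    }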

From 8e2362cba6927d11991b55ee737dbe723424705c Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Thu, 1 Aug 2024 14:45:05 +0800
Subject: [PATCH 04/24] doc: add Telegram file-related documentation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/telegram/message.js  | 11 ++++++-----
 src/telegram/telegram.js | 21 +++++++++++++++++++++
 src/types/agent.js       |  2 +-
 src/types/context.js     |  3 ---
 src/types/telegram.js    | 29 ++++++++++++++++++++++++++---
 5 files changed, 54 insertions(+), 12 deletions(-)

diff --git a/src/telegram/message.js b/src/telegram/message.js
index f584d0b7..4cdec371 100644
--- a/src/telegram/message.js
+++ b/src/telegram/message.js
@@ -1,6 +1,6 @@
 import {CONST, DATABASE, ENV} from '../config/env.js';
 import {Context} from '../config/context.js';
-import {getBot, sendMessageToTelegramWithContext} from './telegram.js';
+import {getBot, filesListToUrl, sendMessageToTelegramWithContext} from './telegram.js';
 import {handleCommandMessage} from './command.js';
 import {errorToString} from '../utils/utils.js';
 import {chatWithLLM} from '../agent/llm.js';
@@ -256,7 +256,8 @@ async function msgChatWithLLM(message, context) {
     if (ENV.EXTRA_MESSAGE_CONTEXT && context.SHARE_CONTEXT.extraMessageContext && context.SHARE_CONTEXT.extraMessageContext.text) {
         content = context.SHARE_CONTEXT.extraMessageContext.text + '\n' + text;
     }
-    return chatWithLLM({message: content}, context, null);
+    const params = { message: content}
+    return chatWithLLM(params, context, null);
 }
 
 
@@ -305,14 +306,14 @@ export async function handleMessage(request) {
         msgInitChatContext,
         // 检查环境是否准备好: DATABASE
         msgCheckEnvIsReady,
+        // 过滤非白名单用户
+        msgFilterWhiteList,
         // DEBUG: 保存最后一条消息
         msgSaveLastMessage,
-        // 过滤不支持的消息(抛出异常结束消息处理:当前只支持文本消息)
+        // 过滤不支持的消息(抛出异常结束消息处理)
         msgFilterUnsupportedMessage,
         // 处理群消息,判断是否需要响应此条消息
         msgHandleGroupMessage,
-        // 过滤非白名单用户
-        msgFilterWhiteList,
         // 忽略旧消息
         msgIgnoreOldMessage,
         // 处理命令消息
diff --git a/src/telegram/telegram.js b/src/telegram/telegram.js
index 99d952c1..5109eff5 100644
--- a/src/telegram/telegram.js
+++ b/src/telegram/telegram.js
@@ -334,3 +334,24 @@ export async function getBot(token) {
         return resp;
     }
 }
+
+/**
+ * 获取文件链接
+ * @param {string} fileId
+ * @param {string} token
+ * @returns {string}
+ */
+export function getFileLink(fileId, token) {
+    return `${ENV.TELEGRAM_API_DOMAIN}/file/bot${token}/${fileId}`;
+}
+
+/**
+ *
+ * @param {TelegramBaseFile[]} files
+ * @param {string} token
+ * @returns {string[]}
+ */
+export function filesListToUrl(files, token) {
+    return  Array.from((new Set(files.map((file) => file.file_id))))
+        .map((fileId) => getFileLink(fileId, token));
+}
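
A quick usage sketch for the two helpers just added (the file objects and token are invented, and filesListToUrl is assumed to be imported from src/telegram/telegram.js); duplicate file_ids collapse into a single URL because of the Set:

    const urls = filesListToUrl([
        {file_id: 'AAA', file_unique_id: 'u1', file_size: 100},
        {file_id: 'AAA', file_unique_id: 'u1', file_size: 100}, // duplicate entry
        {file_id: 'BBB', file_unique_id: 'u2', file_size: 200},
    ], '123456:bot-token');
    // => [`${ENV.TELEGRAM_API_DOMAIN}/file/bot123456:bot-token/AAA`, '.../BBB']
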
diff --git a/src/types/agent.js b/src/types/agent.js
index b576f986..c3258ba8 100644
--- a/src/types/agent.js
+++ b/src/types/agent.js
@@ -1,7 +1,7 @@
 /**
  * @typedef {object} LlmRequestParams
  * @property {?string} [message] - 输入文本
- * @property {string} [image] - 图片
+ * @property {string | string[]} [image] - 图片
  * @property {string} [audio] - 音频
  */
 
diff --git a/src/types/context.js b/src/types/context.js
index b9caa03c..e814835a 100644
--- a/src/types/context.js
+++ b/src/types/context.js
@@ -1,6 +1,3 @@
-/**
- * @typedef {(string|number)} TelegramID
- */
 
 /**
  * 用于保存用户配置
diff --git a/src/types/telegram.js b/src/types/telegram.js
index 50563fbb..7f9413ec 100644
--- a/src/types/telegram.js
+++ b/src/types/telegram.js
@@ -1,6 +1,27 @@
+/**
+ * @typedef {(string|number)} TelegramID
+ */
+
+
+/**
+ * @typedef {Object} TelegramBaseFile
+ * @property {string} file_id - Unique identifier for this file.
+ * @property {string} file_unique_id - Unique identifier for this file, which is supposed to be the same over time and for different bots.
+ * @property {number} file_size - Optional. File size, if known.
+ *
+ * @typedef {TelegramBaseFile} TelegramPhoto
+ * @property {number} width - Photo width.
+ * @property {number} height - Photo height.
+ *
+ * @typedef {TelegramBaseFile} TelegramVoice
+ * @property {number} duration - Duration of the audio in seconds.
+ * @property {string} mime_type - Optional. MIME type of the file as defined by sender.
+ */
+
+
 /**
  * @typedef {Object} TelegramUser
- * @property {number} id - The ID of the user.
+ * @property {TelegramID} id - The ID of the user.
  * @property {boolean} is_bot - True, if the user is a bot.
  * @property {string} first_name - The first name of the user.
  * @property {string} [last_name] - The last name of the user.
@@ -10,7 +31,7 @@
 
 /**
  * @typedef {Object} TelegramChat
- * @property {number} id - The ID of the chat.
+ * @property {TelegramID} id - The ID of the chat.
  * @property {string} type - The type of the chat.
  * @property {boolean} is_forum - True, if the chat is a forum.
  */
@@ -32,6 +53,8 @@
  * @property {number} date - The date the message was sent.
  * @property {?string} [text] - The text of the message.
  * @property {?string} [caption] - The caption of the message.
+ * @property {TelegramPhoto[]} [photo] - An array of photos.
+ * @property {TelegramVoice} [voice] - The voice message.
  * @property {TelegramMessageEntity[]} [entities] - An array of message entities.
  * @property {TelegramMessage} [reply_to_message] - The message that this message is a reply to.
  * @property {boolean} is_topic_message - True, if the message is a topic message.
@@ -40,7 +63,7 @@
 
 /**
  * @typedef {Object} TelegramWebhookRequest
- * @property {number} update_id - The update's unique identifier.
+ * @property {TelegramID} update_id - The update's unique identifier.
  * @property {TelegramMessage} message - The message
  * @property {TelegramMessage} edited_message - The edited message
  */
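
With the photo and caption fields now documented, a photo update that this series aims to handle looks roughly like the following (values shortened and invented; Telegram sends one photo entry per available resolution):

    /** @type {TelegramMessage} */
    const photoMessage = {
        message_id: 100,
        from: {id: 42, is_bot: false, first_name: 'Alice'},
        chat: {id: -100123456, type: 'supergroup', is_forum: false},
        date: 1722500000,
        caption: 'what is in this picture?',
        photo: [
            {file_id: 'small-id', file_unique_id: 'u1', file_size: 1300, width: 90, height: 90},
            {file_id: 'large-id', file_unique_id: 'u2', file_size: 81000, width: 800, height: 800},
        ],
    };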

From f986aa67970d423449beccd7952ede663131cb9c Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Thu, 1 Aug 2024 18:10:49 +0800
Subject: [PATCH 05/24] feat: openai supports image messages
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 dist/buildinfo.json      |   2 +-
 dist/index.js            | 196 +++++++++++++++++++++++++--------------
 dist/timestamp           |   2 +-
 src/agent/anthropic.js   |  16 +++-
 src/agent/azure.js       |  13 +--
 src/agent/cohere.js      |  28 +++---
 src/agent/gemini.js      |  38 +++++---
 src/agent/llm.js         |  10 +-
 src/agent/mistralai.js   |  25 ++++-
 src/agent/openai.js      |  40 ++++++--
 src/agent/workersai.js   |  16 +++-
 src/telegram/message.js  |  12 ++-
 src/telegram/telegram.js |  30 +++---
 src/types/agent.js       |   4 +-
 14 files changed, 290 insertions(+), 142 deletions(-)

diff --git a/dist/buildinfo.json b/dist/buildinfo.json
index f2f8c3ac..4b18c304 100644
--- a/dist/buildinfo.json
+++ b/dist/buildinfo.json
@@ -1 +1 @@
-{"sha": "d28beba", "timestamp": 1722481015}
+{"sha": "8e2362c", "timestamp": 1722507009}
diff --git a/dist/index.js b/dist/index.js
index 730ef707..83efbaa8 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -89,9 +89,9 @@ var Environment = class {
   // -- 版本数据 --
   //
   // 当前版本
-  BUILD_TIMESTAMP = 1722481015;
+  BUILD_TIMESTAMP = 1722507009;
   // 当前版本 commit id
-  BUILD_VERSION = "d28beba";
+  BUILD_VERSION = "8e2362c";
   // -- 基础配置 --
   /**
    * @type {I18n | null}
@@ -708,6 +708,22 @@ async function getBot(token) {
     return resp;
   }
 }
+async function getFileLink(fileId, token) {
+  const resp = await fetch(
+    `${ENV.TELEGRAM_API_DOMAIN}/bot${token}/getFile`,
+    {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json"
+      },
+      body: JSON.stringify({ file_id: fileId })
+    }
+  ).then((res) => res.json());
+  if (resp.ok && resp.result.file_path) {
+    return `https://api.telegram.org/file/bot${token}/${resp.result.file_path}`;
+  }
+  return "";
+}
 
 // src/agent/stream.js
 var Stream = class {
@@ -1040,23 +1056,39 @@ function openAIKeyFromContext(context) {
 function isOpenAIEnable(context) {
   return context.USER_CONFIG.OPENAI_API_KEY.length > 0;
 }
+function renderOpenAiMessage(item) {
+  const res = {
+    role: item.role,
+    content: item.content
+  };
+  if (item.images && item.images.length > 0) {
+    res.content = [];
+    if (item.content) {
+      res.content.push({ type: "text", text: item.content });
+    }
+    for (const image of item.images) {
+      res.content.push({ type: "image_url", image_url: { url: image } });
+    }
+  }
+  return res;
+}
 async function requestCompletionsFromOpenAI(params, context, onStream) {
-  const { message, prompt, history } = params;
+  const { message, images, prompt, history } = params;
   const url = `${context.USER_CONFIG.OPENAI_API_BASE}/chat/completions`;
-  const messages = [...history || [], { role: "user", content: message }];
+  const header = {
+    "Content-Type": "application/json",
+    "Authorization": `Bearer ${openAIKeyFromContext(context)}`
+  };
+  const messages = [...history || [], { role: "user", content: message, images }];
   if (prompt) {
     messages.unshift({ role: context.USER_CONFIG.SYSTEM_INIT_MESSAGE_ROLE, content: prompt });
   }
   const body = {
     model: context.USER_CONFIG.OPENAI_CHAT_MODEL,
     ...context.USER_CONFIG.OPENAI_API_EXTRA_PARAMS,
-    messages,
+    messages: messages.map(renderOpenAiMessage),
     stream: onStream != null
   };
-  const header = {
-    "Content-Type": "application/json",
-    "Authorization": `Bearer ${openAIKeyFromContext(context)}`
-  };
   return requestChatCompletions(url, header, body, context, onStream);
 }
 async function requestImageFromOpenAI(prompt, context) {
@@ -1100,6 +1132,12 @@ async function run(model, body, id, token) {
 function isWorkersAIEnable(context) {
   return !!(context.USER_CONFIG.CLOUDFLARE_ACCOUNT_ID && context.USER_CONFIG.CLOUDFLARE_TOKEN);
 }
+function renderWorkerAIMessage(item) {
+  return {
+    role: item.role,
+    content: item.content
+  };
+}
 async function requestCompletionsFromWorkersAI(params, context, onStream) {
   const { message, prompt, history } = params;
   const id = context.USER_CONFIG.CLOUDFLARE_ACCOUNT_ID;
@@ -1114,7 +1152,7 @@ async function requestCompletionsFromWorkersAI(params, context, onStream) {
     messages.unshift({ role: context.USER_CONFIG.SYSTEM_INIT_MESSAGE_ROLE, content: prompt });
   }
   const body = {
-    messages,
+    messages: messages.map(renderWorkerAIMessage),
     stream: onStream !== null
   };
   const options = {};
@@ -1140,6 +1178,21 @@ async function requestImageFromWorkersAI(prompt, context) {
 function isGeminiAIEnable(context) {
   return !!context.USER_CONFIG.GOOGLE_API_KEY;
 }
+var GEMINI_ROLE_MAP = {
+  "assistant": "model",
+  "system": "user",
+  "user": "user"
+};
+function renderGeminiMessage(item) {
+  return {
+    role: GEMINI_ROLE_MAP[item.role],
+    parts: [
+      {
+        "text": item.content || ""
+      }
+    ]
+  };
+}
 async function requestCompletionsFromGeminiAI(params, context, onStream) {
   const { message, prompt, history } = params;
   onStream = null;
@@ -1149,22 +1202,10 @@ async function requestCompletionsFromGeminiAI(params, context, onStream) {
     contentsTemp.unshift({ role: "assistant", content: prompt });
   }
   const contents = [];
-  const rolMap = {
-    "assistant": "model",
-    "system": "user",
-    "user": "user"
-  };
   for (const msg of contentsTemp) {
-    msg.role = rolMap[msg.role];
+    msg.role = GEMINI_ROLE_MAP[msg.role];
     if (contents.length === 0 || contents[contents.length - 1].role !== msg.role) {
-      contents.push({
-        "role": msg.role,
-        "parts": [
-          {
-            "text": msg.content
-          }
-        ]
-      });
+      contents.push(renderGeminiMessage(msg));
     } else {
       contents[contents.length - 1].parts[0].text += msg.content;
     }
@@ -1192,22 +1233,28 @@ async function requestCompletionsFromGeminiAI(params, context, onStream) {
 function isMistralAIEnable(context) {
   return !!context.USER_CONFIG.MISTRAL_API_KEY;
 }
+function renderMistralMessage(item) {
+  return {
+    role: item.role,
+    content: item.content
+  };
+}
 async function requestCompletionsFromMistralAI(params, context, onStream) {
   const { message, prompt, history } = params;
   const url = `${context.USER_CONFIG.MISTRAL_API_BASE}/chat/completions`;
+  const header = {
+    "Content-Type": "application/json",
+    "Authorization": `Bearer ${context.USER_CONFIG.MISTRAL_API_KEY}`
+  };
   const messages = [...history || [], { role: "user", content: message }];
   if (prompt) {
     messages.unshift({ role: context.USER_CONFIG.SYSTEM_INIT_MESSAGE_ROLE, content: prompt });
   }
   const body = {
     model: context.USER_CONFIG.MISTRAL_CHAT_MODEL,
-    messages,
+    messages: messages.map(renderMistralMessage),
     stream: onStream != null
   };
-  const header = {
-    "Content-Type": "application/json",
-    "Authorization": `Bearer ${context.USER_CONFIG.MISTRAL_API_KEY}`
-  };
   return requestChatCompletions(url, header, body, context, onStream);
 }
 
@@ -1215,6 +1262,16 @@ async function requestCompletionsFromMistralAI(params, context, onStream) {
 function isCohereAIEnable(context) {
   return !!context.USER_CONFIG.COHERE_API_KEY;
 }
+var COHERE_ROLE_MAP = {
+  "assistant": "CHATBOT",
+  "user": "USER"
+};
+function renderCohereMessage(item) {
+  return {
+    role: COHERE_ROLE_MAP[item.role],
+    content: item.content
+  };
+}
 async function requestCompletionsFromCohereAI(params, context, onStream) {
   const { message, prompt, history } = params;
   const url = `${context.USER_CONFIG.COHERE_API_BASE}/chat`;
@@ -1223,21 +1280,12 @@ async function requestCompletionsFromCohereAI(params, context, onStream) {
     "Content-Type": "application/json",
     "Accept": onStream !== null ? "text/event-stream" : "application/json"
   };
-  const roleMap = {
-    "assistant": "CHATBOT",
-    "user": "USER"
-  };
   const body = {
     message,
     model: context.USER_CONFIG.COHERE_CHAT_MODEL,
     stream: onStream != null,
     preamble: prompt,
-    chat_history: history.map((msg) => {
-      return {
-        role: roleMap[msg.role],
-        message: msg.content
-      };
-    })
+    chat_history: history.map(renderCohereMessage)
   };
   if (!body.preamble) {
     delete body.preamble;
@@ -1262,6 +1310,12 @@ async function requestCompletionsFromCohereAI(params, context, onStream) {
 function isAnthropicAIEnable(context) {
   return !!context.USER_CONFIG.ANTHROPIC_API_KEY;
 }
+function renderAnthropicMessage(item) {
+  return {
+    role: item.role,
+    content: item.content
+  };
+}
 async function requestCompletionsFromAnthropicAI(params, context, onStream) {
   const { message, prompt, history } = params;
   const url = `${context.USER_CONFIG.ANTHROPIC_API_BASE}/messages`;
@@ -1270,10 +1324,11 @@ async function requestCompletionsFromAnthropicAI(params, context, onStream) {
     "anthropic-version": "2023-06-01",
     "content-type": "application/json"
   };
+  const messages = [...history || [], { role: "user", content: message }].map(renderAnthropicMessage);
   const body = {
     system: prompt,
     model: context.USER_CONFIG.ANTHROPIC_CHAT_MODEL,
-    messages: [...history || [], { role: "user", content: message }],
+    messages,
     stream: onStream != null,
     max_tokens: ENV.MAX_TOKEN_LENGTH
   };
@@ -1309,19 +1364,19 @@ function isAzureImageEnable(context) {
 async function requestCompletionsFromAzureOpenAI(params, context, onStream) {
   const { message, prompt, history } = params;
   const url = context.USER_CONFIG.AZURE_COMPLETIONS_API;
+  const header = {
+    "Content-Type": "application/json",
+    "api-key": azureKeyFromContext(context)
+  };
   const messages = [...history || [], { role: "user", content: message }];
   if (prompt) {
     messages.unshift({ role: context.USER_CONFIG.SYSTEM_INIT_MESSAGE_ROLE, content: prompt });
   }
   const body = {
     ...context.USER_CONFIG.OPENAI_API_EXTRA_PARAMS,
-    messages,
+    messages: messages.map(renderOpenAiMessage),
     stream: onStream != null
   };
-  const header = {
-    "Content-Type": "application/json",
-    "api-key": azureKeyFromContext(context)
-  };
   return requestChatCompletions(url, header, body, context, onStream);
 }
 async function requestImageFromAzureOpenAI(prompt, context) {
@@ -1518,12 +1573,6 @@ async function loadHistory(key) {
   let history = [];
   try {
     history = JSON.parse(await DATABASE.get(key));
-    history = history.map((item) => {
-      return {
-        role: item.role,
-        content: item.content
-      };
-    });
   } catch (e) {
     console.error(e);
   }
@@ -1562,7 +1611,7 @@ async function loadHistory(key) {
 async function requestCompletionsFromLLM(params, context, llm, modifier, onStream) {
   const historyDisable = ENV.AUTO_TRIM_HISTORY && ENV.MAX_HISTORY_LENGTH <= 0;
   const historyKey = context.SHARE_CONTEXT.chatHistoryKey;
-  const { message } = params;
+  const { message, images } = params;
   let history = await loadHistory(historyKey);
   if (modifier) {
     const modifierData = modifier(history, message);
@@ -1576,7 +1625,7 @@ async function requestCompletionsFromLLM(params, context, llm, modifier, onStrea
   };
   const answer = await llm(llmParams, context, onStream);
   if (!historyDisable) {
-    history.push({ role: "user", content: message || "" });
+    history.push({ role: "user", content: message || "", images });
     history.push({ role: "assistant", content: answer });
     await DATABASE.put(historyKey, JSON.stringify(history)).catch(console.error);
   }
@@ -2185,10 +2234,13 @@ async function msgFilterWhiteList(message, context) {
   );
 }
 async function msgFilterUnsupportedMessage(message, context) {
-  if (!message.text) {
-    throw new Error("Not supported message type");
+  if (message.text) {
+    return null;
   }
-  return null;
+  if (message.caption) {
+    return null;
+  }
+  throw new Error("Not supported message type");
 }
 async function msgHandleGroupMessage(message, context) {
   if (!CONST.GROUP_TYPES.includes(context.SHARE_CONTEXT.chatType)) {
@@ -2213,8 +2265,9 @@ async function msgHandleGroupMessage(message, context) {
   if (!message.entities) {
     throw new Error("No entities");
   }
-  let { text } = message;
-  if (!text) {
+  const { text, caption } = message;
+  let originContent = text || caption || "";
+  if (!originContent) {
     throw new Error("Empty message");
   }
   let content = "";
@@ -2224,7 +2277,7 @@ async function msgHandleGroupMessage(message, context) {
     switch (entity.type) {
       case "bot_command":
         if (!mentioned) {
-          const mention = text.substring(
+          const mention = originContent.substring(
             entity.offset,
             entity.offset + entity.length
           );
@@ -2239,7 +2292,7 @@ async function msgHandleGroupMessage(message, context) {
       case "mention":
       case "text_mention":
         if (!mentioned) {
-          const mention = text.substring(
+          const mention = originContent.substring(
             entity.offset,
             entity.offset + entity.length
           );
@@ -2247,12 +2300,12 @@ async function msgHandleGroupMessage(message, context) {
             mentioned = true;
           }
         }
-        content += text.substring(offset, entity.offset);
+        content += originContent.substring(offset, entity.offset);
         offset = entity.offset + entity.length;
         break;
     }
   }
-  content += text.substring(offset, text.length);
+  content += originContent.substring(offset, originContent.length);
   message.text = content.trim();
   if (!mentioned) {
     throw new Error("No mentioned");
@@ -2266,11 +2319,18 @@ async function msgHandleCommand(message, context) {
   return await handleCommandMessage(message, context);
 }
 async function msgChatWithLLM(message, context) {
-  let { text } = message;
+  const { text, caption } = message;
+  let content = text || caption;
   if (ENV.EXTRA_MESSAGE_CONTEXT && context.SHARE_CONTEXT.extraMessageContext && context.SHARE_CONTEXT.extraMessageContext.text) {
-    text = context.SHARE_CONTEXT.extraMessageContext.text + "\n" + text;
+    content = context.SHARE_CONTEXT.extraMessageContext.text + "\n" + text;
+  }
+  const params = { message: content };
+  if (message.photo && message.photo.length > 0) {
+    const fileId = message.photo[message.photo.length - 1].file_id;
+    const url = await getFileLink(fileId, context.SHARE_CONTEXT.currentBotToken);
+    params.images = [url];
   }
-  return chatWithLLM({ message: text }, context, null);
+  return chatWithLLM(params, context, null);
 }
 async function loadMessage(request, context) {
   const raw = await request.json();
@@ -2292,14 +2352,14 @@ async function handleMessage(request) {
     msgInitChatContext,
     // 检查环境是否准备好: DATABASE
     msgCheckEnvIsReady,
+    // 过滤非白名单用户
+    msgFilterWhiteList,
     // DEBUG: 保存最后一条消息
     msgSaveLastMessage,
-    // 过滤不支持的消息(抛出异常结束消息处理:当前只支持文本消息)
+    // 过滤不支持的消息(抛出异常结束消息处理)
     msgFilterUnsupportedMessage,
     // 处理群消息,判断是否需要响应此条消息
     msgHandleGroupMessage,
-    // 过滤非白名单用户
-    msgFilterWhiteList,
     // 忽略旧消息
     msgIgnoreOldMessage,
     // 处理命令消息
diff --git a/dist/timestamp b/dist/timestamp
index b7ff02e0..6192f18f 100644
--- a/dist/timestamp
+++ b/dist/timestamp
@@ -1 +1 @@
-1722481015
+1722507009
diff --git a/src/agent/anthropic.js b/src/agent/anthropic.js
index d330b881..bba41ffa 100644
--- a/src/agent/anthropic.js
+++ b/src/agent/anthropic.js
@@ -13,6 +13,17 @@ export function isAnthropicAIEnable(context) {
     return !!(context.USER_CONFIG.ANTHROPIC_API_KEY);
 }
 
+/**
+ * @param {HistoryItem} item
+ * @return {Object}
+ */
+function renderAnthropicMessage(item) {
+    return {
+        role: item.role,
+        content: item.content,
+    };
+}
+
 
 /**
  * 发送消息到Anthropic AI
@@ -30,17 +41,18 @@ export async function requestCompletionsFromAnthropicAI(params, context, onStrea
         "anthropic-version": "2023-06-01",
         'content-type': 'application/json',
     };
+
+    const messages = ([...(history || []), {role: 'user', content: message}]).map(renderAnthropicMessage);
     const body = {
         system: prompt,
         model: context.USER_CONFIG.ANTHROPIC_CHAT_MODEL,
-        messages: [...(history || []), {role: 'user', content: message}],
+        messages: messages,
         stream: onStream != null,
         max_tokens: ENV.MAX_TOKEN_LENGTH,
     };
     if (!body.system) {
         delete body.system;
     }
-
     /**
      * @type {SseChatCompatibleOptions}
      */
diff --git a/src/agent/azure.js b/src/agent/azure.js
index 4ebd872d..1c69a734 100644
--- a/src/agent/azure.js
+++ b/src/agent/azure.js
@@ -1,6 +1,7 @@
 import "../types/context.js";
 import "../types/agent.js";
 import {requestChatCompletions} from "./request.js";
+import {renderOpenAiMessage} from "./openai.js";
 
 /**
  * @param {ContextType} context
@@ -39,22 +40,22 @@ export function isAzureImageEnable(context) {
 export async function requestCompletionsFromAzureOpenAI(params, context, onStream) {
     const { message, prompt, history } = params;
     const url = context.USER_CONFIG.AZURE_COMPLETIONS_API;
+    const header = {
+        'Content-Type': 'application/json',
+        'api-key': azureKeyFromContext(context),
+    };
 
     const messages = [...(history || []), {role: 'user', content: message}];
     if (prompt) {
         messages.unshift({role: context.USER_CONFIG.SYSTEM_INIT_MESSAGE_ROLE, content: prompt});
     }
+
     const body = {
         ...context.USER_CONFIG.OPENAI_API_EXTRA_PARAMS,
-        messages,
+        messages: messages.map(renderOpenAiMessage),
         stream: onStream != null,
     };
 
-    const header = {
-        'Content-Type': 'application/json',
-        'api-key': azureKeyFromContext(context),
-    };
-
     return requestChatCompletions(url, header, body, context, onStream);
 }
 
diff --git a/src/agent/cohere.js b/src/agent/cohere.js
index f0d56ead..c5e9d2ef 100644
--- a/src/agent/cohere.js
+++ b/src/agent/cohere.js
@@ -11,6 +11,22 @@ export function isCohereAIEnable(context) {
     return !!(context.USER_CONFIG.COHERE_API_KEY);
 }
 
+const COHERE_ROLE_MAP = {
+    'assistant': 'CHATBOT',
+    'user': 'USER',
+};
+
+/**
+ * @param {HistoryItem} item
+ * @return {Object}
+ */
+export function renderCohereMessage(item) {
+    return {
+        role: COHERE_ROLE_MAP[item.role],
+        content: item.content,
+    };
+}
+
 
 /**
  * 发送消息到Cohere AI
@@ -29,22 +45,12 @@ export async function requestCompletionsFromCohereAI(params, context, onStream)
         'Accept': onStream !== null ? 'text/event-stream' : 'application/json',
     };
 
-    const roleMap = {
-        'assistant': 'CHATBOT',
-        'user': 'USER',
-    };
-
     const body = {
         message,
         model: context.USER_CONFIG.COHERE_CHAT_MODEL,
         stream: onStream != null,
         preamble: prompt,
-        chat_history: history.map((msg) => {
-            return {
-                role: roleMap[msg.role],
-                message: msg.content,
-            };
-        }),
+        chat_history: history.map(renderCohereMessage),
     };
     if (!body.preamble) {
         delete body.preamble;
diff --git a/src/agent/gemini.js b/src/agent/gemini.js
index 7ad9cae2..f28bf68e 100644
--- a/src/agent/gemini.js
+++ b/src/agent/gemini.js
@@ -8,6 +8,28 @@ export function isGeminiAIEnable(context) {
     return !!(context.USER_CONFIG.GOOGLE_API_KEY);
 }
 
+
+const GEMINI_ROLE_MAP =  {
+    'assistant': 'model',
+    'system': 'user',
+    'user': 'user',
+};
+
+/**
+ * @param {HistoryItem} item
+ * @return {Object}
+ */
+export function renderGeminiMessage(item) {
+    return {
+        role: GEMINI_ROLE_MAP[item.role],
+        parts: [
+            {
+                'text': item.content || '',
+            },
+        ],
+    };
+}
+
 /**
  * 发送消息到Gemini
  *
@@ -28,24 +50,12 @@ export async function requestCompletionsFromGeminiAI(params, context, onStream)
         contentsTemp.unshift({role: 'assistant', content: prompt});
     }
     const contents = [];
-    const rolMap = {
-        'assistant': 'model',
-        'system': 'user',
-        'user': 'user',
-    };
     // role必须是 model,user 而且不能连续两个一样
     for (const msg of contentsTemp) {
-        msg.role = rolMap[msg.role];
+        msg.role = GEMINI_ROLE_MAP[msg.role];
         // 如果存在最后一个元素或role不一样则插入
         if (contents.length === 0 || contents[contents.length - 1].role !== msg.role) {
-            contents.push({
-                'role': msg.role,
-                'parts': [
-                    {
-                        'text': msg.content,
-                    },
-                ],
-            });
+            contents.push(renderGeminiMessage(msg));
         } else {
             // 否则合并
             contents[contents.length - 1].parts[0].text += msg.content;
diff --git a/src/agent/llm.js b/src/agent/llm.js
index 05aae6b8..a6e914dd 100644
--- a/src/agent/llm.js
+++ b/src/agent/llm.js
@@ -29,12 +29,6 @@ async function loadHistory(key) {
     let history = [];
     try {
         history = JSON.parse(await DATABASE.get(key));
-        history = history.map((item) => {
-            return {
-                role: item.role,
-                content: item.content,
-            };
-        });
     } catch (e) {
         console.error(e);
     }
@@ -99,7 +93,7 @@ async function loadHistory(key) {
 async function requestCompletionsFromLLM(params, context, llm, modifier, onStream) {
     const historyDisable = ENV.AUTO_TRIM_HISTORY && ENV.MAX_HISTORY_LENGTH <= 0;
     const historyKey = context.SHARE_CONTEXT.chatHistoryKey;
-    const { message } = params;
+    const { message, images } = params;
     let history = await loadHistory(historyKey);
     if (modifier) {
         const modifierData = modifier(history, message);
@@ -113,7 +107,7 @@ async function requestCompletionsFromLLM(params, context, llm, modifier, onStrea
     };
     const answer = await llm(llmParams, context, onStream);
     if (!historyDisable) {
-        history.push({role: 'user', content: message || ''});
+        history.push({role: 'user', content: message || '', images});
         history.push({role: 'assistant', content: answer});
         await DATABASE.put(historyKey, JSON.stringify(history)).catch(console.error);
     }
diff --git a/src/agent/mistralai.js b/src/agent/mistralai.js
index 8e7e7c40..b49d532f 100644
--- a/src/agent/mistralai.js
+++ b/src/agent/mistralai.js
@@ -9,6 +9,18 @@ export function isMistralAIEnable(context) {
     return !!(context.USER_CONFIG.MISTRAL_API_KEY);
 }
 
+/**
+ * @param {HistoryItem} item
+ * @return {Object}
+ */
+export function renderMistralMessage(item) {
+    return {
+        role: item.role,
+        content: item.content,
+    };
+}
+
+
 /**
  * 发送消息到Mistral AI
  *
@@ -20,18 +32,21 @@ export function isMistralAIEnable(context) {
 export async function requestCompletionsFromMistralAI(params, context, onStream) {
     const {message, prompt, history} = params;
     const url = `${context.USER_CONFIG.MISTRAL_API_BASE}/chat/completions`;
+    const header = {
+        'Content-Type': 'application/json',
+        'Authorization': `Bearer ${context.USER_CONFIG.MISTRAL_API_KEY}`,
+    };
+
     const messages = [...(history || []), {role: 'user', content: message}];
     if (prompt) {
         messages.unshift({role: context.USER_CONFIG.SYSTEM_INIT_MESSAGE_ROLE, content: prompt});
     }
+
     const body = {
         model: context.USER_CONFIG.MISTRAL_CHAT_MODEL,
-        messages,
+        messages: messages.map(renderMistralMessage),
         stream: onStream != null,
     };
-    const header = {
-        'Content-Type': 'application/json',
-        'Authorization': `Bearer ${context.USER_CONFIG.MISTRAL_API_KEY}`,
-    };
+
     return requestChatCompletions(url, header, body, context, onStream);
 }
diff --git a/src/agent/openai.js b/src/agent/openai.js
index f18983c9..07deb592 100644
--- a/src/agent/openai.js
+++ b/src/agent/openai.js
@@ -21,6 +21,28 @@ export function isOpenAIEnable(context) {
 }
 
 
+/**
+ * @param {HistoryItem} item
+ * @return {Object}
+ */
+export function renderOpenAiMessage(item) {
+    const res = {
+        role: item.role,
+        content: item.content,
+    };
+    if (item.images && item.images.length > 0) {
+        res.content = [];
+        if (item.content) {
+            res.content.push({type: 'text', text: item.content});
+        }
+        for (const image of item.images) {
+            res.content.push({type: 'image_url', image_url: {url: image}});
+        }
+    }
+    return res;
+}
+
+
 /**
  * 发送消息到ChatGPT
  *
@@ -30,23 +52,27 @@ export function isOpenAIEnable(context) {
  * @return {Promise}
  */
 export async function requestCompletionsFromOpenAI(params, context, onStream) {
-    const { message, prompt, history } = params;
+
+    const { message, images, prompt, history } = params;
     const url = `${context.USER_CONFIG.OPENAI_API_BASE}/chat/completions`;
-    const messages = [...(history || []), {role: 'user', content: message}];
+    const header = {
+        'Content-Type': 'application/json',
+        'Authorization': `Bearer ${openAIKeyFromContext(context)}`,
+    };
+
+    const messages = [...(history || []), {role: 'user', content: message, images}];
     if (prompt) {
         messages.unshift({role: context.USER_CONFIG.SYSTEM_INIT_MESSAGE_ROLE, content: prompt});
     }
+
     const body = {
         model: context.USER_CONFIG.OPENAI_CHAT_MODEL,
         ...context.USER_CONFIG.OPENAI_API_EXTRA_PARAMS,
-        messages,
+        messages: messages.map(renderOpenAiMessage),
         stream: onStream != null,
     };
 
-    const header = {
-        'Content-Type': 'application/json',
-        'Authorization': `Bearer ${openAIKeyFromContext(context)}`,
-    };
+
 
     return requestChatCompletions(url, header, body, context, onStream);
 }
diff --git a/src/agent/workersai.js b/src/agent/workersai.js
index 56cc2556..1c639792 100644
--- a/src/agent/workersai.js
+++ b/src/agent/workersai.js
@@ -29,6 +29,18 @@ export function isWorkersAIEnable(context) {
     return !!(context.USER_CONFIG.CLOUDFLARE_ACCOUNT_ID && context.USER_CONFIG.CLOUDFLARE_TOKEN);
 }
 
+/**
+ * @param {HistoryItem} item
+ * @return {Object}
+ */
+export function renderWorkerAIMessage(item) {
+    return {
+        role: item.role,
+        content: item.content,
+    };
+}
+
+
 /**
  * 发送消息到Workers AI
  *
@@ -47,12 +59,14 @@ export async function requestCompletionsFromWorkersAI(params, context, onStream)
     const header = {
         Authorization: `Bearer ${token}`
     };
+
     const messages = [...(history || []), {role: 'user', content: message}];
     if (prompt) {
         messages.unshift({role: context.USER_CONFIG.SYSTEM_INIT_MESSAGE_ROLE, content: prompt});
     }
+
     const body = {
-        messages: messages,
+        messages: messages.map(renderWorkerAIMessage),
         stream: onStream !== null,
     };
 
diff --git a/src/telegram/message.js b/src/telegram/message.js
index 4cdec371..0411885b 100644
--- a/src/telegram/message.js
+++ b/src/telegram/message.js
@@ -1,6 +1,6 @@
 import {CONST, DATABASE, ENV} from '../config/env.js';
 import {Context} from '../config/context.js';
-import {getBot, filesListToUrl, sendMessageToTelegramWithContext} from './telegram.js';
+import {getBot, getFileLink, sendMessageToTelegramWithContext} from './telegram.js';
 import {handleCommandMessage} from './command.js';
 import {errorToString} from '../utils/utils.js';
 import {chatWithLLM} from '../agent/llm.js';
@@ -256,7 +256,15 @@ async function msgChatWithLLM(message, context) {
     if (ENV.EXTRA_MESSAGE_CONTEXT && context.SHARE_CONTEXT.extraMessageContext && context.SHARE_CONTEXT.extraMessageContext.text) {
         content = context.SHARE_CONTEXT.extraMessageContext.text + '\n' + text;
     }
-    const params = { message: content}
+    /**
+     * @type {LlmRequestParams}
+     */
+    const params = { message: content };
+    if (message.photo && message.photo.length > 0) {
+        const fileId = message.photo[message.photo.length - 1].file_id;
+        const url = await getFileLink(fileId, context.SHARE_CONTEXT.currentBotToken);
+        params.images = [url];
+    }
     return chatWithLLM(params, context, null);
 }
 
diff --git a/src/telegram/telegram.js b/src/telegram/telegram.js
index 5109eff5..336a182b 100644
--- a/src/telegram/telegram.js
+++ b/src/telegram/telegram.js
@@ -339,19 +339,21 @@ export async function getBot(token) {
  * 获取文件链接
  * @param {string} fileId
  * @param {string} token
- * @returns {string}
+ * @returns {Promise<string>}
  */
-export function getFileLink(fileId, token) {
-    return `${ENV.TELEGRAM_API_DOMAIN}/file/bot${token}/${fileId}`;
-}
-
-/**
- *
- * @param {TelegramBaseFile[]} files
- * @param {string} token
- * @returns {string[]}
- */
-export function filesListToUrl(files, token) {
-    return  Array.from((new Set(files.map((file) => file.file_id))))
-        .map((fileId) => getFileLink(fileId, token));
+export async function getFileLink(fileId, token) {
+    const resp = await fetch(
+        `${ENV.TELEGRAM_API_DOMAIN}/bot${token}/getFile`,
+        {
+            method: 'POST',
+            headers: {
+                'Content-Type': 'application/json',
+            },
+            body: JSON.stringify({file_id: fileId}),
+        },
+    ).then((res) => res.json());
+    if (resp.ok && resp.result.file_path) {
+        return `https://api.telegram.org/file/bot${token}/${resp.result.file_path}`;
+    }
+    return ''
 }
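
The rewritten getFileLink resolves a file_id through the Bot API getFile call and builds the download URL from the returned file_path. A minimal usage sketch, assuming a hypothetical bot token and file_id:

    // Sketch: resolving a Telegram photo file_id to a downloadable URL.
    // BOT_TOKEN and fileId are placeholders for illustration only.
    import {getFileLink} from './src/telegram/telegram.js';

    const BOT_TOKEN = '123456:ABC-DEF';   // hypothetical token
    const fileId = 'AgACAgIAAxkBAAI';     // hypothetical photo file_id

    const url = await getFileLink(fileId, BOT_TOKEN);
    if (url === '') {
        console.error('getFile failed or no file_path in the response');
    } else {
        console.log(url); // e.g. https://api.telegram.org/file/bot<token>/photos/file_0.jpg
    }
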
diff --git a/src/types/agent.js b/src/types/agent.js
index c3258ba8..e3596fd7 100644
--- a/src/types/agent.js
+++ b/src/types/agent.js
@@ -1,8 +1,7 @@
 /**
  * @typedef {object} LlmRequestParams
  * @property {?string} [message] - 输入文本
- * @property {string | string[]} [image] - 图片
- * @property {string} [audio] - 音频
+ * @property {string[]} [images] - 图片
  */
 
 /**
@@ -32,4 +31,5 @@
  * @typedef {object} HistoryItem
  * @property {string} role
  * @property {string} content
+ * @property {string[]} [images] - 图片
  */

From 71b1b0a68284b3278be8477b37982d10531937cc Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Thu, 1 Aug 2024 22:52:17 +0800
Subject: [PATCH 06/24] feat: add image quality parameter `TELEGRAM_PHOTO_SIZE_OFFSET`
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/config/env.js       | 3 +++
 src/telegram/message.js | 9 ++++++++-
 src/utils/utils.js      | 9 +++++++++
 3 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/src/config/env.js b/src/config/env.js
index 75463088..e22bb2fb 100644
--- a/src/config/env.js
+++ b/src/config/env.js
@@ -134,6 +134,9 @@ class Environment {
     DEFAULT_PARSE_MODE = 'Markdown';
     // 最小stream模式消息间隔,小于等于0则不限制
     TELEGRAM_MIN_STREAM_INTERVAL = 0;
+    // 图片尺寸偏移 0为第一位,-1为最后一位, 越靠后的图片越大
+    TELEGRAM_PHOTO_SIZE_OFFSET = -2;
+
 
     // --  权限相关 --
     //
diff --git a/src/telegram/message.js b/src/telegram/message.js
index 0411885b..a40134f3 100644
--- a/src/telegram/message.js
+++ b/src/telegram/message.js
@@ -261,7 +261,14 @@ async function msgChatWithLLM(message, context) {
      */
     const params = { message: content };
     if (message.photo && message.photo.length > 0) {
-        const fileId = message.photo[message.photo.length - 1].file_id;
+        let sizeIndex = 0;
+        if (ENV.TELEGRAM_PHOTO_SIZE_OFFSET >= 0) {
+            sizeIndex = ENV.TELEGRAM_PHOTO_SIZE_OFFSET;
+        } else if (ENV.TELEGRAM_PHOTO_SIZE_OFFSET < 0) {
+            sizeIndex = message.photo.length + ENV.TELEGRAM_PHOTO_SIZE_OFFSET;
+        }
+        sizeIndex = Math.max(0, Math.min(sizeIndex, message.photo.length - 1));
+        const fileId = message.photo[sizeIndex].file_id;
         const url = await getFileLink(fileId, context.SHARE_CONTEXT.currentBotToken);
         params.images = [url];
     }
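
TELEGRAM_PHOTO_SIZE_OFFSET is resolved against the photo array Telegram delivers (smallest size first): non-negative values count from the smallest size, negative values from the largest, and the result is clamped to a valid index. A small sketch of the same arithmetic, assuming four available sizes:

    // Mirrors the index selection above; the photo sizes are illustrative.
    const photo = [{file_id: 's'}, {file_id: 'm'}, {file_id: 'l'}, {file_id: 'xl'}];

    function resolvePhotoIndex(offset, length) {
        const sizeIndex = offset >= 0 ? offset : length + offset;
        return Math.max(0, Math.min(sizeIndex, length - 1));
    }

    console.log(resolvePhotoIndex(-2, photo.length)); // 2 -> the second-largest size
    console.log(resolvePhotoIndex(0, photo.length));  // 0 -> the smallest size
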
diff --git a/src/utils/utils.js b/src/utils/utils.js
index 109cffa1..d670f207 100644
--- a/src/utils/utils.js
+++ b/src/utils/utils.js
@@ -96,3 +96,12 @@ export async function makeResponse200(resp) {
     }
 }
 
+/**
+ * @param url
+ * @returns {Promise<string>}
+ */
+export async function urlToBase64String(url) {
+    return fetch(url)
+      .then(resp  => resp.arrayBuffer())
+      .then(buffer => Buffer.from(buffer).toString('base64'));
+}
\ No newline at end of file
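
As added here, urlToBase64String depends on the global Buffer, so on Cloudflare Workers it only works once Node.js compatibility is enabled. A minimal usage sketch under that assumption (the URL is a placeholder):

    // Sketch, assuming nodejs_compat so that Buffer exists at runtime.
    import {urlToBase64String} from './src/utils/utils.js';

    const imageUrl = 'https://example.com/cat.jpg'; // illustrative URL
    const base64 = await urlToBase64String(imageUrl);
    console.log(base64.slice(0, 10)); // a JPEG typically starts with '/9j/4AAQSk'
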

From febfe324d2e03c2850fc8d6038f1d6607a0e5196 Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Fri, 2 Aug 2024 18:29:44 +0800
Subject: [PATCH 07/24] feat: add base64 image support (requires Node.js compatibility)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Makefile                 |  2 +-
 src/agent/anthropic.js   | 26 ++++++++++++----
 src/agent/azure.js       |  8 ++---
 src/agent/cohere.js      |  2 +-
 src/agent/gemini.js      |  2 +-
 src/agent/mistralai.js   |  2 +-
 src/agent/openai.js      | 24 +++++++++++----
 src/agent/workersai.js   |  2 +-
 src/config/env.js        |  5 +++-
 src/telegram/telegram.js |  2 +-
 src/utils/utils.js       | 64 ++++++++++++++++++++++++++++++++++++----
 wrangler-example.toml    |  1 +
 12 files changed, 111 insertions(+), 29 deletions(-)

diff --git a/Makefile b/Makefile
index 5b1baa63..99e1b22b 100644
--- a/Makefile
+++ b/Makefile
@@ -11,7 +11,7 @@ build: clean
 	TIMESTAMP=$$(date +%s) && \
 	echo "$$TIMESTAMP" > $(TIMESTAMP_FILE) && \
 	echo "{\"sha\": \"$$COMMIT_HASH\", \"timestamp\": $$TIMESTAMP}" > $(BUILD_INFO_JSON) && \
-	$(ESBUILD) $(ENTRY_FILE) --bundle --outfile=$(OUTPUT_FILE) --format=esm --define:process.env.BUILD_VERSION="'$$COMMIT_HASH'" --define:process.env.BUILD_TIMESTAMP="$$TIMESTAMP"
+	$(ESBUILD) $(ENTRY_FILE) --bundle --outfile=$(OUTPUT_FILE) --format=esm --platform=node --define:process.env.BUILD_VERSION="'$$COMMIT_HASH'" --define:process.env.BUILD_TIMESTAMP="$$TIMESTAMP"
 
 .PHONY: clean
 clean:
diff --git a/src/agent/anthropic.js b/src/agent/anthropic.js
index bba41ffa..58aad29b 100644
--- a/src/agent/anthropic.js
+++ b/src/agent/anthropic.js
@@ -3,6 +3,7 @@ import "../types/agent.js";
 import {anthropicSseJsonParser, Stream} from "./stream.js";
 import {ENV} from "../config/env.js";
 import {requestChatCompletions} from "./request.js";
+import {imageToBase64String} from "../utils/utils.js";
 
 
 /**
@@ -15,13 +16,25 @@ export function isAnthropicAIEnable(context) {
 
 /**
  * @param {HistoryItem} item
- * @return {Object}
+ * @return {Promise<Object>}
  */
-function renderAnthropicMessage(item) {
-    return {
+async function renderAnthropicMessage(item) {
+    const res = {
         role: item.role,
         content: item.content,
     };
+
+    if (item.images && item.images.length > 0) {
+        res.content = [];
+        if (item.content) {
+            res.content.push({type: 'text', text: item.content});
+        }
+        for (const image of item.images) {
+            const { data, format } = await imageToBase64String(image);
+            res.content.push({type: 'image', source: {type: 'base64', media_type: format, data: data}});
+        }
+    }
+    return res;
 }
 
 
@@ -34,7 +47,7 @@ function renderAnthropicMessage(item) {
  * @return {Promise}
  */
 export async function requestCompletionsFromAnthropicAI(params, context, onStream) {
-    const { message, prompt, history } = params;
+    const { message, images, prompt, history } = params;
     const url = `${context.USER_CONFIG.ANTHROPIC_API_BASE}/messages`;
     const header = {
         'x-api-key': context.USER_CONFIG.ANTHROPIC_API_KEY,
@@ -42,11 +55,12 @@ export async function requestCompletionsFromAnthropicAI(params, context, onStrea
         'content-type': 'application/json',
     };
 
-    const messages = ([...(history || []), {role: 'user', content: message}]).map(renderAnthropicMessage);
+    const messages = ([...(history || []), {role: 'user', content: message, images}]);
+
     const body = {
         system: prompt,
         model: context.USER_CONFIG.ANTHROPIC_CHAT_MODEL,
-        messages: messages,
+        messages: await Promise.all(messages.map(renderAnthropicMessage)),
         stream: onStream != null,
         max_tokens: ENV.MAX_TOKEN_LENGTH,
     };
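
For a user turn that carries an image, renderAnthropicMessage produces the content-block form of the Anthropic Messages API, with each image inlined as base64. A sketch of the rendered result for one text part plus one JPEG (payload truncated):

    // Rough shape of renderAnthropicMessage({role: 'user', content: 'Describe this', images: [url]}).
    const rendered = {
        role: 'user',
        content: [
            {type: 'text', text: 'Describe this'},
            {
                type: 'image',
                source: {type: 'base64', media_type: 'image/jpeg', data: '/9j/4AAQSkZJRg...'},
            },
        ],
    };
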
diff --git a/src/agent/azure.js b/src/agent/azure.js
index 1c69a734..1180cdb2 100644
--- a/src/agent/azure.js
+++ b/src/agent/azure.js
@@ -1,7 +1,7 @@
 import "../types/context.js";
 import "../types/agent.js";
 import {requestChatCompletions} from "./request.js";
-import {renderOpenAiMessage} from "./openai.js";
+import {renderOpenAIMessage} from "./openai.js";
 
 /**
  * @param {ContextType} context
@@ -38,21 +38,21 @@ export function isAzureImageEnable(context) {
  * @return {Promise}
  */
 export async function requestCompletionsFromAzureOpenAI(params, context, onStream) {
-    const { message, prompt, history } = params;
+    const { message, images, prompt, history } = params;
     const url = context.USER_CONFIG.AZURE_COMPLETIONS_API;
     const header = {
         'Content-Type': 'application/json',
         'api-key': azureKeyFromContext(context),
     };
 
-    const messages = [...(history || []), {role: 'user', content: message}];
+    const messages = [...(history || []), {role: 'user', content: message, images}];
     if (prompt) {
         messages.unshift({role: context.USER_CONFIG.SYSTEM_INIT_MESSAGE_ROLE, content: prompt});
     }
 
     const body = {
         ...context.USER_CONFIG.OPENAI_API_EXTRA_PARAMS,
-        messages: messages.map(renderOpenAiMessage),
+        messages: await Promise.all(messages.map(renderOpenAIMessage)),
         stream: onStream != null,
     };
 
diff --git a/src/agent/cohere.js b/src/agent/cohere.js
index c5e9d2ef..fc8075b5 100644
--- a/src/agent/cohere.js
+++ b/src/agent/cohere.js
@@ -20,7 +20,7 @@ const COHERE_ROLE_MAP = {
  * @param {HistoryItem} item
  * @return {Object}
  */
-export function renderCohereMessage(item) {
+function renderCohereMessage(item) {
     return {
         role: COHERE_ROLE_MAP[item.role],
         content: item.content,
diff --git a/src/agent/gemini.js b/src/agent/gemini.js
index f28bf68e..14e49448 100644
--- a/src/agent/gemini.js
+++ b/src/agent/gemini.js
@@ -19,7 +19,7 @@ const GEMINI_ROLE_MAP =  {
  * @param {HistoryItem} item
  * @return {Object}
  */
-export function renderGeminiMessage(item) {
+function renderGeminiMessage(item) {
     return {
         role: GEMINI_ROLE_MAP[item.role],
         parts: [
diff --git a/src/agent/mistralai.js b/src/agent/mistralai.js
index b49d532f..6fb780c6 100644
--- a/src/agent/mistralai.js
+++ b/src/agent/mistralai.js
@@ -13,7 +13,7 @@ export function isMistralAIEnable(context) {
  * @param {HistoryItem} item
  * @return {Object}
  */
-export function renderMistralMessage(item) {
+function renderMistralMessage(item) {
     return {
         role: item.role,
         content: item.content,
diff --git a/src/agent/openai.js b/src/agent/openai.js
index 07deb592..8c3fd6ad 100644
--- a/src/agent/openai.js
+++ b/src/agent/openai.js
@@ -1,5 +1,7 @@
 import "../types/context.js";
 import {requestChatCompletions} from "./request.js";
+import {ENV} from "../config/env.js";
+import {imageToBase64String, supportsNativeBase64} from "../utils/utils.js";
 
 
 /**
@@ -23,9 +25,9 @@ export function isOpenAIEnable(context) {
 
 /**
  * @param {HistoryItem} item
- * @return {Object}
+ * @return {Promise<Object>}
  */
-export function renderOpenAiMessage(item) {
+export async function renderOpenAIMessage(item) {
     const res = {
         role: item.role,
         content: item.content,
@@ -36,7 +38,19 @@ export function renderOpenAiMessage(item) {
             res.content.push({type: 'text', text: item.content});
         }
         for (const image of item.images) {
-            res.content.push({type: 'image_url', image_url: {url: image}});
+            switch (ENV.TELEGRAM_IMAGE_TRANSFER_MODE) {
+                case 'base64':
+                    if (supportsNativeBase64()) {
+                        const { data, format } = await imageToBase64String(image);
+                        res.content.push({type: 'image_url', url: `data:${format};base64,${data}`});
+                        break;
+                    }
+                    // fallthrough
+                case 'url':
+                default:
+                    res.content.push({type: 'image_url', image_url: {url: image}});
+                    break;
+            }
         }
     }
     return res;
@@ -68,12 +82,10 @@ export async function requestCompletionsFromOpenAI(params, context, onStream) {
     const body = {
         model: context.USER_CONFIG.OPENAI_CHAT_MODEL,
         ...context.USER_CONFIG.OPENAI_API_EXTRA_PARAMS,
-        messages: messages.map(renderOpenAiMessage),
+        messages: await Promise.all(messages.map(renderOpenAIMessage)),
         stream: onStream != null,
     };
 
-
-
     return requestChatCompletions(url, header, body, context, onStream);
 }
 
diff --git a/src/agent/workersai.js b/src/agent/workersai.js
index 1c639792..2f3518d7 100644
--- a/src/agent/workersai.js
+++ b/src/agent/workersai.js
@@ -33,7 +33,7 @@ export function isWorkersAIEnable(context) {
  * @param {HistoryItem} item
  * @return {Object}
  */
-export function renderWorkerAIMessage(item) {
+function renderWorkerAIMessage(item) {
     return {
         role: item.role,
         content: item.content,
diff --git a/src/config/env.js b/src/config/env.js
index e22bb2fb..bee29432 100644
--- a/src/config/env.js
+++ b/src/config/env.js
@@ -134,8 +134,11 @@ class Environment {
     DEFAULT_PARSE_MODE = 'Markdown';
     // 最小stream模式消息间隔,小于等于0则不限制
     TELEGRAM_MIN_STREAM_INTERVAL = 0;
-    // 图片尺寸偏移 0为第一位,-1为最后一位, 越靠后的图片越大
+    // 图片尺寸偏移 0为第一位,-1为最后一位, 越靠后的图片越大。PS: 图片过大可能导致token消耗过多,或者workers超时或内存不足
     TELEGRAM_PHOTO_SIZE_OFFSET = -2;
+    // 向LLM优先传递图片方式:url, base64
+    TELEGRAM_IMAGE_TRANSFER_MODE = 'url';
+
 
 
     // --  权限相关 --
diff --git a/src/telegram/telegram.js b/src/telegram/telegram.js
index 336a182b..bfac3412 100644
--- a/src/telegram/telegram.js
+++ b/src/telegram/telegram.js
@@ -355,5 +355,5 @@ export async function getFileLink(fileId, token) {
     if (resp.ok && resp.result.file_path) {
         return `https://api.telegram.org/file/bot${token}/${resp.result.file_path}`;
     }
-    return ''
+    return '';
 }
diff --git a/src/utils/utils.js b/src/utils/utils.js
index d670f207..da3deabd 100644
--- a/src/utils/utils.js
+++ b/src/utils/utils.js
@@ -97,11 +97,63 @@ export async function makeResponse200(resp) {
 }
 
 /**
- * @param url
+ * @returns {boolean}
+ */
+export function supportsNativeBase64() {
+    return typeof Buffer !== 'undefined';
+}
+
+/**
+ * @param {string} url
  * @returns {Promise<string>}
  */
-export async function urlToBase64String(url) {
-    return fetch(url)
-      .then(resp  => resp.arrayBuffer())
-      .then(buffer => Buffer.from(buffer).toString('base64'));
-}
\ No newline at end of file
+async function urlToBase64String(url) {
+   try {
+       const { Buffer } = await import('node:buffer');
+       return fetch(url)
+           .then(resp  => resp.arrayBuffer())
+           .then(buffer => Buffer.from(buffer).toString('base64'));
+   } catch {
+       // 非原生base64编码速度太慢不适合在workers中使用
+       // 在wrangler.toml中添加 nodejs 选项启用nodejs兼容
+       // compatibility_flags = [ "nodejs_compat" ]
+       throw new Error('Need to enable nodejs compatibility to support base64 encoding');
+   }
+}
+
+/**
+ * @param {string} base64String
+ * @returns {string}
+ */
+function getImageFormatFromBase64(base64String) {
+    const firstChar = base64String.charAt(0);
+    switch (firstChar) {
+        case '/':
+            return 'jpeg';
+        case 'i':
+            return 'png';
+        case 'R':
+            return 'gif';
+        case 'U':
+            return 'webp';
+        default:
+            throw new Error('Unsupported image format');
+    }
+}
+
+/**
+ * @typedef {object} ImageBase64
+ * @property {string} data
+ * @property {string} format
+ *
+ * @param url
+ * @returns {Promise<ImageBase64>}
+ */
+export async function imageToBase64String(url) {
+    const base64String = await urlToBase64String(url);
+    const format = getImageFormatFromBase64(base64String);
+    return {
+        data: base64String,
+        format: `image/${format}`
+    };
+}
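
imageToBase64String pairs the encoded bytes with a MIME type sniffed from the first base64 character. A short sketch of the expected result for a JPEG, assuming a reachable placeholder URL:

    // Sketch: the format field is a full MIME type such as 'image/jpeg'.
    import {imageToBase64String} from './src/utils/utils.js';

    const {data, format} = await imageToBase64String('https://example.com/cat.jpg');
    console.log(format);                                   // 'image/jpeg'
    console.log(`data:${format};base64,${data.slice(0, 12)}...`);
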
diff --git a/wrangler-example.toml b/wrangler-example.toml
index 7c9cb243..5d79de91 100644
--- a/wrangler-example.toml
+++ b/wrangler-example.toml
@@ -3,6 +3,7 @@ name = 'chatgpt-telegram-workers'
 compatibility_date = '2023-10-07'
 main = './dist/index.js'
 workers_dev = true
+compatibility_flags = [ "nodejs_compat" ]
 
 # 这里的 id 为必填项 请填写你的 kv namespace id
 # preview_id 只在调试时使用,不需要请删除

From 2bc1db96fd6dc9a5c807075eb61f12d1d7e50143 Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Fri, 2 Aug 2024 20:02:30 +0800
Subject: [PATCH 08/24] perf: make image base64 encoding work in non-Node.js environments
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 dist/buildinfo.json     |   2 +-
 dist/index.js           | 240 ++++++++++++++++++++++++++--------------
 dist/timestamp          |   2 +-
 src/agent/anthropic.js  |   5 +-
 src/agent/openai.js     |  10 +-
 src/telegram/message.js |  13 ++-
 src/utils/utils.js      |  14 ++-
 7 files changed, 183 insertions(+), 103 deletions(-)

diff --git a/dist/buildinfo.json b/dist/buildinfo.json
index 4b18c304..796cffd2 100644
--- a/dist/buildinfo.json
+++ b/dist/buildinfo.json
@@ -1 +1 @@
-{"sha": "8e2362c", "timestamp": 1722507009}
+{"sha": "febfe32", "timestamp": 1722600034}
diff --git a/dist/index.js b/dist/index.js
index 83efbaa8..53f770ce 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -89,9 +89,9 @@ var Environment = class {
   // -- 版本数据 --
   //
   // 当前版本
-  BUILD_TIMESTAMP = 1722507009;
+  BUILD_TIMESTAMP = 1722600034;
   // 当前版本 commit id
-  BUILD_VERSION = "8e2362c";
+  BUILD_VERSION = "febfe32";
   // -- 基础配置 --
   /**
    * @type {I18n | null}
@@ -113,6 +113,10 @@ var Environment = class {
   DEFAULT_PARSE_MODE = "Markdown";
   // 最小stream模式消息间隔,小于等于0则不限制
   TELEGRAM_MIN_STREAM_INTERVAL = 0;
+  // 图片尺寸偏移 0为第一位,-1为最后一位, 越靠后的图片越大。PS: 图片过大可能导致token消耗过多,或者workers超时或内存不足
+  TELEGRAM_PHOTO_SIZE_OFFSET = -2;
+  // 向LLM优先传递图片方式:url, base64
+  TELEGRAM_IMAGE_TRANSFER_MODE = "url";
   // --  权限相关 --
   //
   // 允许所有人使用
@@ -1048,6 +1052,114 @@ ERROR: ${e.message}`;
   }
 }
 
+// src/utils/utils.js
+function renderHTML(body) {
+  return `
+  
+  
+    ChatGPT-Telegram-Workers
+    
+    
+    
+    
+    
+  
+  
+    ${body}
+  
+
+  `;
+}
+function errorToString(e) {
+  return JSON.stringify({
+    message: e.message,
+    stack: e.stack
+  });
+}
+async function makeResponse200(resp) {
+  if (resp === null) {
+    return new Response("NOT HANDLED", { status: 200 });
+  }
+  if (resp.status === 200) {
+    return resp;
+  } else {
+    return new Response(resp.body, {
+      status: 200,
+      headers: {
+        "Original-Status": resp.status,
+        ...resp.headers
+      }
+    });
+  }
+}
+function supportsNativeBase64() {
+  return typeof Buffer !== "undefined";
+}
+async function urlToBase64String(url) {
+  try {
+    const { Buffer: Buffer2 } = await import("node:buffer");
+    return fetch(url).then((resp) => resp.arrayBuffer()).then((buffer) => Buffer2.from(buffer).toString("base64"));
+  } catch {
+    return fetch(url).then((resp) => resp.arrayBuffer()).then((buffer) => btoa(String.fromCharCode.apply(null, new Uint8Array(buffer))));
+  }
+}
+function getImageFormatFromBase64(base64String) {
+  const firstChar = base64String.charAt(0);
+  switch (firstChar) {
+    case "/":
+      return "jpeg";
+    case "i":
+      return "png";
+    case "R":
+      return "gif";
+    case "U":
+      return "webp";
+    default:
+      throw new Error("Unsupported image format");
+  }
+}
+async function imageToBase64String(url) {
+  const base64String = await urlToBase64String(url);
+  const format = getImageFormatFromBase64(base64String);
+  return {
+    data: base64String,
+    format: `image/${format}`
+  };
+}
+function renderImageBase64DataURI(params) {
+  return `data:image/${params.format};base64,${params.data}`;
+}
+
 // src/agent/openai.js
 function openAIKeyFromContext(context) {
   const length = context.USER_CONFIG.OPENAI_API_KEY.length;
@@ -1056,7 +1168,7 @@ function openAIKeyFromContext(context) {
 function isOpenAIEnable(context) {
   return context.USER_CONFIG.OPENAI_API_KEY.length > 0;
 }
-function renderOpenAiMessage(item) {
+async function renderOpenAIMessage(item) {
   const res = {
     role: item.role,
     content: item.content
@@ -1067,7 +1179,15 @@ function renderOpenAiMessage(item) {
       res.content.push({ type: "text", text: item.content });
     }
     for (const image of item.images) {
-      res.content.push({ type: "image_url", image_url: { url: image } });
+      switch (ENV.TELEGRAM_IMAGE_TRANSFER_MODE) {
+        case "base64":
+          res.content.push({ type: "image_url", url: renderImageBase64DataURI(await imageToBase64String(image)) });
+          break;
+        case "url":
+        default:
+          res.content.push({ type: "image_url", image_url: { url: image } });
+          break;
+      }
     }
   }
   return res;
@@ -1086,7 +1206,7 @@ async function requestCompletionsFromOpenAI(params, context, onStream) {
   const body = {
     model: context.USER_CONFIG.OPENAI_CHAT_MODEL,
     ...context.USER_CONFIG.OPENAI_API_EXTRA_PARAMS,
-    messages: messages.map(renderOpenAiMessage),
+    messages: await Promise.all(messages.map(renderOpenAIMessage)),
     stream: onStream != null
   };
   return requestChatCompletions(url, header, body, context, onStream);
@@ -1310,25 +1430,37 @@ async function requestCompletionsFromCohereAI(params, context, onStream) {
 function isAnthropicAIEnable(context) {
   return !!context.USER_CONFIG.ANTHROPIC_API_KEY;
 }
-function renderAnthropicMessage(item) {
-  return {
+async function renderAnthropicMessage(item) {
+  const res = {
     role: item.role,
     content: item.content
   };
+  if (item.images && item.images.length > 0) {
+    res.content = [];
+    if (item.content) {
+      res.content.push({ type: "text", text: item.content });
+    }
+    for (const image of item.images) {
+      res.content.push(await imageToBase64String(image).then(({ format, data }) => {
+        return { type: "image", source: { type: "base64", media_type: format, data } };
+      }));
+    }
+  }
+  return res;
 }
 async function requestCompletionsFromAnthropicAI(params, context, onStream) {
-  const { message, prompt, history } = params;
+  const { message, images, prompt, history } = params;
   const url = `${context.USER_CONFIG.ANTHROPIC_API_BASE}/messages`;
   const header = {
     "x-api-key": context.USER_CONFIG.ANTHROPIC_API_KEY,
     "anthropic-version": "2023-06-01",
     "content-type": "application/json"
   };
-  const messages = [...history || [], { role: "user", content: message }].map(renderAnthropicMessage);
+  const messages = [...history || [], { role: "user", content: message, images }];
   const body = {
     system: prompt,
     model: context.USER_CONFIG.ANTHROPIC_CHAT_MODEL,
-    messages,
+    messages: await Promise.all(messages.map(renderAnthropicMessage)),
     stream: onStream != null,
     max_tokens: ENV.MAX_TOKEN_LENGTH
   };
@@ -1362,19 +1494,19 @@ function isAzureImageEnable(context) {
   return !!(context.USER_CONFIG.AZURE_API_KEY && context.USER_CONFIG.AZURE_DALLE_API);
 }
 async function requestCompletionsFromAzureOpenAI(params, context, onStream) {
-  const { message, prompt, history } = params;
+  const { message, images, prompt, history } = params;
   const url = context.USER_CONFIG.AZURE_COMPLETIONS_API;
   const header = {
     "Content-Type": "application/json",
     "api-key": azureKeyFromContext(context)
   };
-  const messages = [...history || [], { role: "user", content: message }];
+  const messages = [...history || [], { role: "user", content: message, images }];
   if (prompt) {
     messages.unshift({ role: context.USER_CONFIG.SYSTEM_INIT_MESSAGE_ROLE, content: prompt });
   }
   const body = {
     ...context.USER_CONFIG.OPENAI_API_EXTRA_PARAMS,
-    messages: messages.map(renderOpenAiMessage),
+    messages: await Promise.all(messages.map(renderOpenAIMessage)),
     stream: onStream != null
   };
   return requestChatCompletions(url, header, body, context, onStream);
@@ -2097,77 +2229,6 @@ function commandsDocument() {
   });
 }
 
-// src/utils/utils.js
-function renderHTML(body) {
-  return `
-  
-  
-    ChatGPT-Telegram-Workers
-    
-    
-    
-    
-    
-  
-  
-    ${body}
-  
-
-  `;
-}
-function errorToString(e) {
-  return JSON.stringify({
-    message: e.message,
-    stack: e.stack
-  });
-}
-async function makeResponse200(resp) {
-  if (resp === null) {
-    return new Response("NOT HANDLED", { status: 200 });
-  }
-  if (resp.status === 200) {
-    return resp;
-  } else {
-    return new Response(resp.body, {
-      status: 200,
-      headers: {
-        "Original-Status": resp.status,
-        ...resp.headers
-      }
-    });
-  }
-}
-
 // src/telegram/message.js
 async function msgInitChatContext(message, context) {
   await context.initContext(message);
@@ -2326,7 +2387,16 @@ async function msgChatWithLLM(message, context) {
   }
   const params = { message: content };
   if (message.photo && message.photo.length > 0) {
-    const fileId = message.photo[message.photo.length - 1].file_id;
+    let sizeIndex = 0;
+    if (supportsNativeBase64()) {
+      if (ENV.TELEGRAM_PHOTO_SIZE_OFFSET >= 0) {
+        sizeIndex = ENV.TELEGRAM_PHOTO_SIZE_OFFSET;
+      } else if (ENV.TELEGRAM_PHOTO_SIZE_OFFSET < 0) {
+        sizeIndex = message.photo.length + ENV.TELEGRAM_PHOTO_SIZE_OFFSET;
+      }
+    }
+    sizeIndex = Math.max(0, Math.min(sizeIndex, message.photo.length - 1));
+    const fileId = message.photo[sizeIndex].file_id;
     const url = await getFileLink(fileId, context.SHARE_CONTEXT.currentBotToken);
     params.images = [url];
   }
diff --git a/dist/timestamp b/dist/timestamp
index 6192f18f..0c74c70f 100644
--- a/dist/timestamp
+++ b/dist/timestamp
@@ -1 +1 @@
-1722507009
+1722600034
diff --git a/src/agent/anthropic.js b/src/agent/anthropic.js
index 58aad29b..c3dd7d3b 100644
--- a/src/agent/anthropic.js
+++ b/src/agent/anthropic.js
@@ -30,8 +30,9 @@ async function renderAnthropicMessage(item) {
             res.content.push({type: 'text', text: item.content});
         }
         for (const image of item.images) {
-            const { data, format } = await imageToBase64String(image);
-            res.content.push({type: 'image', source: {type: 'base64', media_type: format, data: data}});
+            res.content.push( await imageToBase64String(image).then(({format, data}) => {
+                return {type: 'image', source: {type: 'base64', media_type: format, data: data}};
+            }));
         }
     }
     return res;
diff --git a/src/agent/openai.js b/src/agent/openai.js
index 8c3fd6ad..1cfe7a6e 100644
--- a/src/agent/openai.js
+++ b/src/agent/openai.js
@@ -1,7 +1,7 @@
 import "../types/context.js";
 import {requestChatCompletions} from "./request.js";
 import {ENV} from "../config/env.js";
-import {imageToBase64String, supportsNativeBase64} from "../utils/utils.js";
+import {imageToBase64String, renderImageBase64DataURI} from '../utils/utils.js';
 
 
 /**
@@ -40,12 +40,8 @@ export async function renderOpenAIMessage(item) {
         for (const image of item.images) {
             switch (ENV.TELEGRAM_IMAGE_TRANSFER_MODE) {
                 case 'base64':
-                    if (supportsNativeBase64()) {
-                        const { data, format } = await imageToBase64String(image);
-                        res.content.push({type: 'image_url', url: `data:${format};base64,${data}`});
-                        break;
-                    }
-                    // fallthrough
+                    res.content.push({type: 'image_url', url: renderImageBase64DataURI(await imageToBase64String(image))});
+                    break;
                 case 'url':
                 default:
                     res.content.push({type: 'image_url', image_url: {url: image}});
diff --git a/src/telegram/message.js b/src/telegram/message.js
index a40134f3..6d6246f4 100644
--- a/src/telegram/message.js
+++ b/src/telegram/message.js
@@ -2,7 +2,7 @@ import {CONST, DATABASE, ENV} from '../config/env.js';
 import {Context} from '../config/context.js';
 import {getBot, getFileLink, sendMessageToTelegramWithContext} from './telegram.js';
 import {handleCommandMessage} from './command.js';
-import {errorToString} from '../utils/utils.js';
+import {errorToString, supportsNativeBase64} from '../utils/utils.js';
 import {chatWithLLM} from '../agent/llm.js';
 
 import '../types/telegram.js';
@@ -262,10 +262,13 @@ async function msgChatWithLLM(message, context) {
     const params = { message: content };
     if (message.photo && message.photo.length > 0) {
         let sizeIndex = 0;
-        if (ENV.TELEGRAM_PHOTO_SIZE_OFFSET >= 0) {
-            sizeIndex = ENV.TELEGRAM_PHOTO_SIZE_OFFSET;
-        } else if (ENV.TELEGRAM_PHOTO_SIZE_OFFSET < 0) {
-            sizeIndex = message.photo.length + ENV.TELEGRAM_PHOTO_SIZE_OFFSET;
+        if (supportsNativeBase64()) {
+            // 仅在支持原生base64的环境下运行选择更高质量的图片防止workers中base64编码超时
+            if (ENV.TELEGRAM_PHOTO_SIZE_OFFSET >= 0) {
+                sizeIndex = ENV.TELEGRAM_PHOTO_SIZE_OFFSET;
+            } else if (ENV.TELEGRAM_PHOTO_SIZE_OFFSET < 0) {
+                sizeIndex = message.photo.length + ENV.TELEGRAM_PHOTO_SIZE_OFFSET;
+            }
         }
         sizeIndex = Math.max(0, Math.min(sizeIndex, message.photo.length - 1));
         const fileId = message.photo[sizeIndex].file_id;
diff --git a/src/utils/utils.js b/src/utils/utils.js
index da3deabd..57018b65 100644
--- a/src/utils/utils.js
+++ b/src/utils/utils.js
@@ -115,9 +115,11 @@ async function urlToBase64String(url) {
            .then(buffer => Buffer.from(buffer).toString('base64'));
    } catch {
        // 非原生base64编码速度太慢不适合在workers中使用
-       // 在wrangler.toml中添加 nodejs 选项启用nodejs兼容
+       // 在wrangler.toml中添加 Node.js 选项启用nodejs兼容
        // compatibility_flags = [ "nodejs_compat" ]
-       throw new Error('Need to enable nodejs compatibility to support base64 encoding');
+       return fetch(url)
+         .then(resp  => resp.arrayBuffer())
+         .then(buffer => btoa(String.fromCharCode.apply(null, new Uint8Array(buffer))));
    }
 }
 
@@ -157,3 +159,11 @@ export async function imageToBase64String(url) {
         format: `image/${format}`
     };
 }
+
+/**
+ * @param {ImageBase64} params
+ * @returns {string}
+ */
+export function renderImageBase64DataURI(params) {
+    return `data:image/${params.format};base64,${params.data}`;
+}
\ No newline at end of file

From 5d4795b193a4b80b3b79d166b31adaa27cbe3bc9 Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Fri, 2 Aug 2024 21:13:11 +0800
Subject: [PATCH 09/24] fix: renderImageBase64DataURI

---
 dist/buildinfo.json |  2 +-
 dist/index.js       | 10 +++++-----
 dist/timestamp      |  2 +-
 src/agent/openai.js |  4 ++--
 src/utils/utils.js  | 10 +++++-----
 5 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/dist/buildinfo.json b/dist/buildinfo.json
index 796cffd2..749853e0 100644
--- a/dist/buildinfo.json
+++ b/dist/buildinfo.json
@@ -1 +1 @@
-{"sha": "febfe32", "timestamp": 1722600034}
+{"sha": "2bc1db9", "timestamp": 1722604354}
diff --git a/dist/index.js b/dist/index.js
index 53f770ce..c97348b5 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -89,9 +89,9 @@ var Environment = class {
   // -- 版本数据 --
   //
   // 当前版本
-  BUILD_TIMESTAMP = 1722600034;
+  BUILD_TIMESTAMP = 1722604354;
   // 当前版本 commit id
-  BUILD_VERSION = "febfe32";
+  BUILD_VERSION = "2bc1db9";
   // -- 基础配置 --
   /**
    * @type {I18n | null}
@@ -1156,8 +1156,8 @@ async function imageToBase64String(url) {
     format: `image/${format}`
   };
 }
-function renderImageBase64DataURI(params) {
-  return `data:image/${params.format};base64,${params.data}`;
+function renderBase64DataURI(params) {
+  return `data:${params.format};base64,${params.data}`;
 }
 
 // src/agent/openai.js
@@ -1181,7 +1181,7 @@ async function renderOpenAIMessage(item) {
     for (const image of item.images) {
       switch (ENV.TELEGRAM_IMAGE_TRANSFER_MODE) {
         case "base64":
-          res.content.push({ type: "image_url", url: renderImageBase64DataURI(await imageToBase64String(image)) });
+          res.content.push({ type: "image_url", url: renderBase64DataURI(await imageToBase64String(image)) });
           break;
         case "url":
         default:
diff --git a/dist/timestamp b/dist/timestamp
index 0c74c70f..9a059ce9 100644
--- a/dist/timestamp
+++ b/dist/timestamp
@@ -1 +1 @@
-1722600034
+1722604354
diff --git a/src/agent/openai.js b/src/agent/openai.js
index 1cfe7a6e..b4cc22aa 100644
--- a/src/agent/openai.js
+++ b/src/agent/openai.js
@@ -1,7 +1,7 @@
 import "../types/context.js";
 import {requestChatCompletions} from "./request.js";
 import {ENV} from "../config/env.js";
-import {imageToBase64String, renderImageBase64DataURI} from '../utils/utils.js';
+import {imageToBase64String, renderBase64DataURI} from '../utils/utils.js';
 
 
 /**
@@ -40,7 +40,7 @@ export async function renderOpenAIMessage(item) {
         for (const image of item.images) {
             switch (ENV.TELEGRAM_IMAGE_TRANSFER_MODE) {
                 case 'base64':
-                    res.content.push({type: 'image_url', url: renderImageBase64DataURI(await imageToBase64String(image))});
+                    res.content.push({type: 'image_url', url: renderBase64DataURI(await imageToBase64String(image))});
                     break;
                 case 'url':
                 default:
diff --git a/src/utils/utils.js b/src/utils/utils.js
index 57018b65..c15b654c 100644
--- a/src/utils/utils.js
+++ b/src/utils/utils.js
@@ -144,12 +144,12 @@ function getImageFormatFromBase64(base64String) {
 }
 
 /**
- * @typedef {object} ImageBase64
+ * @typedef {object} DataBase64
  * @property {string} data
  * @property {string} format
  *
  * @param url
- * @returns {Promise<ImageBase64>}
+ * @returns {Promise<DataBase64>}
  */
 export async function imageToBase64String(url) {
     const base64String = await urlToBase64String(url);
@@ -161,9 +161,9 @@ export async function imageToBase64String(url) {
 }
 
 /**
- * @param {ImageBase64} params
+ * @param {DataBase64} params
  * @returns {string}
  */
-export function renderImageBase64DataURI(params) {
-    return `data:image/${params.format};base64,${params.data}`;
+export function renderBase64DataURI(params) {
+    return `data:${params.format};base64,${params.data}`;
 }
\ No newline at end of file
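
With this fix the format field, which already carries the full MIME type, is no longer prefixed with image/ a second time. A quick sketch of the corrected output (data truncated):

    // Sketch: building a data URI from a DataBase64 value.
    import {renderBase64DataURI} from './src/utils/utils.js';

    const params = {data: '/9j/4AAQSkZJRg...', format: 'image/jpeg'}; // illustrative
    console.log(renderBase64DataURI(params)); // data:image/jpeg;base64,/9j/4AAQSkZJRg...
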

From 072653a6e49b02dfc1945855d61c293e3349a41e Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Fri, 2 Aug 2024 21:25:38 +0800
Subject: [PATCH 10/24] perf: select the lowest-resolution photo by default
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/config/env.js       |  3 ++-
 src/telegram/message.js | 14 ++++++--------
 2 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/src/config/env.js b/src/config/env.js
index bee29432..d14fc89e 100644
--- a/src/config/env.js
+++ b/src/config/env.js
@@ -135,7 +135,8 @@ class Environment {
     // 最小stream模式消息间隔,小于等于0则不限制
     TELEGRAM_MIN_STREAM_INTERVAL = 0;
     // 图片尺寸偏移 0为第一位,-1为最后一位, 越靠后的图片越大。PS: 图片过大可能导致token消耗过多,或者workers超时或内存不足
-    TELEGRAM_PHOTO_SIZE_OFFSET = -2;
+    // 默认选择最低质量的图片
+    TELEGRAM_PHOTO_SIZE_OFFSET = 0;
     // 向LLM优先传递图片方式:url, base64
     TELEGRAM_IMAGE_TRANSFER_MODE = 'url';
 
diff --git a/src/telegram/message.js b/src/telegram/message.js
index 6d6246f4..a1e68895 100644
--- a/src/telegram/message.js
+++ b/src/telegram/message.js
@@ -2,7 +2,7 @@ import {CONST, DATABASE, ENV} from '../config/env.js';
 import {Context} from '../config/context.js';
 import {getBot, getFileLink, sendMessageToTelegramWithContext} from './telegram.js';
 import {handleCommandMessage} from './command.js';
-import {errorToString, supportsNativeBase64} from '../utils/utils.js';
+import {errorToString} from '../utils/utils.js';
 import {chatWithLLM} from '../agent/llm.js';
 
 import '../types/telegram.js';
@@ -262,13 +262,11 @@ async function msgChatWithLLM(message, context) {
     const params = { message: content };
     if (message.photo && message.photo.length > 0) {
         let sizeIndex = 0;
-        if (supportsNativeBase64()) {
-            // 仅在支持原生base64的环境下运行选择更高质量的图片防止workers中base64编码超时
-            if (ENV.TELEGRAM_PHOTO_SIZE_OFFSET >= 0) {
-                sizeIndex = ENV.TELEGRAM_PHOTO_SIZE_OFFSET;
-            } else if (ENV.TELEGRAM_PHOTO_SIZE_OFFSET < 0) {
-                sizeIndex = message.photo.length + ENV.TELEGRAM_PHOTO_SIZE_OFFSET;
-            }
+        // 仅在支持原生base64的环境下运行选择更高质量的图片防止workers中base64编码超时
+        if (ENV.TELEGRAM_PHOTO_SIZE_OFFSET >= 0) {
+            sizeIndex = ENV.TELEGRAM_PHOTO_SIZE_OFFSET;
+        } else if (ENV.TELEGRAM_PHOTO_SIZE_OFFSET < 0) {
+            sizeIndex = message.photo.length + ENV.TELEGRAM_PHOTO_SIZE_OFFSET;
         }
         sizeIndex = Math.max(0, Math.min(sizeIndex, message.photo.length - 1));
         const fileId = message.photo[sizeIndex].file_id;
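For clarity, the size-selection logic above restated as a standalone sketch; `pickPhotoSizeIndex` is an illustrative name, not a function in this repo.

```js
// Mirrors the logic in msgChatWithLLM: a non-negative offset counts from the
// smallest photo, a negative offset counts back from the largest, and the
// result is clamped into the valid range. With the new default offset 0 the
// lowest-resolution photo is sent to the LLM.
function pickPhotoSizeIndex(photos, offset) {
    let sizeIndex = 0;
    if (offset >= 0) {
        sizeIndex = offset;
    } else {
        sizeIndex = photos.length + offset;
    }
    return Math.max(0, Math.min(sizeIndex, photos.length - 1));
}
```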

From c133378fecfacc87d381f9c8564c8eeab33e3c80 Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Sat, 3 Aug 2024 00:45:03 +0800
Subject: [PATCH 11/24] doc: fix context jsdoc

---
 src/config/env.js    | 4 ++--
 src/types/context.js | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/config/env.js b/src/config/env.js
index d14fc89e..b45f3803 100644
--- a/src/config/env.js
+++ b/src/config/env.js
@@ -135,8 +135,8 @@ class Environment {
     // 最小stream模式消息间隔,小于等于0则不限制
     TELEGRAM_MIN_STREAM_INTERVAL = 0;
     // 图片尺寸偏移 0为第一位,-1为最后一位, 越靠后的图片越大。PS: 图片过大可能导致token消耗过多,或者workers超时或内存不足
-    // 默认选择最低质量的图片
-    TELEGRAM_PHOTO_SIZE_OFFSET = 0;
+    // 默认选择次低质量的图片
+    TELEGRAM_PHOTO_SIZE_OFFSET = 1;
     // 向LLM优先传递图片方式:url, base64
     TELEGRAM_IMAGE_TRANSFER_MODE = 'url';
 
diff --git a/src/types/context.js b/src/types/context.js
index e814835a..2c8b71f4 100644
--- a/src/types/context.js
+++ b/src/types/context.js
@@ -8,7 +8,7 @@
  *
  * @property {string} AI_PROVIDER
  * @property {string} AI_IMAGE_PROVIDER
- * @property {string} SYSTEM_INIT_MESSAGE
+ * @property {?string} SYSTEM_INIT_MESSAGE
  * @property {string} SYSTEM_INIT_MESSAGE_ROLE
  *
  * @property {string[]} OPENAI_API_KEY
@@ -75,8 +75,8 @@
  * @property {?string} parse_mode
  * @property {?TelegramID} message_id - 编辑消息的ID
  * @property {?object} reply_markup -  回复键盘
- * @property {boolean} allow_sending_without_reply
- * @property {boolean} disable_web_page_preview
+ * @property {?boolean} allow_sending_without_reply
+ * @property {?boolean} disable_web_page_preview
  */
 
 /**
@@ -85,5 +85,5 @@
  * @property {UserConfigType} USER_CONFIG - 用户配置
  * @property {CurrentChatContextType} CURRENT_CHAT_CONTEXT - 当前聊天上下文
  * @property {ShareContextType} SHARE_CONTEXT - 共享上下文
- * @property {function(TelegramMessage)} initContext
+ * @property {function(TelegramMessage): Promise} initContext
  */

From dd685bbeed508d50c41ebef659af241f655f07f3 Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Mon, 5 Aug 2024 10:06:29 +0800
Subject: [PATCH 12/24] =?UTF-8?q?doc:=20=E4=BC=98=E5=8C=96=E9=85=8D?=
 =?UTF-8?q?=E7=BD=AE=E6=96=87=E6=A1=A3?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 doc/cn/CONFIG.md | 30 +++++++++++++++---------------
 doc/en/CONFIG.md | 30 +++++++++++++++---------------
 2 files changed, 30 insertions(+), 30 deletions(-)

diff --git a/doc/cn/CONFIG.md b/doc/cn/CONFIG.md
index 0c7845eb..a61746f4 100644
--- a/doc/cn/CONFIG.md
+++ b/doc/cn/CONFIG.md
@@ -12,7 +12,7 @@
 
 为每个用户通用的配置,只能在Workers配置界面或者toml中配置填写,不支持通过Telegram发送消息来修改。
 
-数组为空字符串,表示没有设置值,如果需要设置值,设置为`'value1,value2'`,多个值用逗号分隔。
+> `array string`: 数组为空字符串,表示没有设置值,如果需要设置值,设置为`'value1,value2'`,多个值用逗号分隔。
 
 ### 基础配置
 
@@ -27,13 +27,13 @@
 | KEY                       | 名称             | 默认值                         | 描述                                      |
 |---------------------------|----------------|-----------------------------|-----------------------------------------|
 | TELEGRAM_API_DOMAIN       | Telegram API域名 | `https://api.telegram.org/` | Telegram API的域名                         |
-| TELEGRAM_AVAILABLE_TOKENS | 可用的Telegram令牌  | `''//()`                    | 允许访问的Telegram Token,设置时以逗号分隔            |
+| TELEGRAM_AVAILABLE_TOKENS | 可用的Telegram令牌  | `''`(array string)          | 允许访问的Telegram Token,设置时以逗号分隔            |
 | DEFAULT_PARSE_MODE        | 默认解析模式         | `Markdown`                  | 默认消息解析模式                                |
 | I_AM_A_GENEROUS_PERSON    | 允许所有人使用        | `false`                     | 是否允许所有人使用                               |
-| CHAT_WHITE_LIST           | 聊天白名单          | `''//(array string)`        | 允许使用的聊天ID白名单                            |
+| CHAT_WHITE_LIST           | 聊天白名单          | `''`(array string)          | 允许使用的聊天ID白名单                            |
 | LOCK_USER_CONFIG_KEYS     | 锁定的用户配置键       | 默认值为所有API的URL               | 防止被替换导致token泄露的配置键                      |
-| TELEGRAM_BOT_NAME         | Telegram机器人名称  | `''//(array string)`        | 允许访问的Telegram Token对应的Bot Name,设置时以逗号分隔 |
-| CHAT_GROUP_WHITE_LIST     | 群组白名单          | `''//(array string)`        | 允许使用的群组ID白名单                            |
+| TELEGRAM_BOT_NAME         | Telegram机器人名称  | `''`(array string)          | 允许访问的Telegram Token对应的Bot Name,设置时以逗号分隔 |
+| CHAT_GROUP_WHITE_LIST     | 群组白名单          | `''`(array string)          | 允许使用的群组ID白名单                            |
 | GROUP_CHAT_BOT_ENABLE     | 群组机器人开关        | `true`                      | 是否启用群组机器人                               |
 | GROUP_CHAT_BOT_SHARE_MODE | 群组机器人共享模式      | `false`                     | 开启后同个群组的人使用同一个聊天上下文                     |
 
@@ -53,15 +53,15 @@
 
 ### 特性开关
 
-| KEY                   | 名称       | 默认值                  | 描述              |
-|-----------------------|----------|----------------------|-----------------|
-| HIDE_COMMAND_BUTTONS  | 隐藏命令按钮   | `''//(array string)` | 修改后需要重新init     |
-| SHOW_REPLY_BUTTON     | 显示快捷回复按钮 | `false`              | 是否显示快捷回复按钮      |
-| EXTRA_MESSAGE_CONTEXT | 额外消息上下文  | `false`              | 引用的消息也会假如上下文    |
-| STREAM_MODE           | 流模式      | `true`               | 打字机模式           |
-| SAFE_MODE             | 安全模式     | `true`               | 开启后会保存最新一条消息的ID |
-| DEBUG_MODE            | 调试模式     | `false`              | 开启后会保存最新一条消息    |
-| DEV_MODE              | 开发模式     | `false`              | 开启后会展示更多调试信息    |
+| KEY                   | 名称       | 默认值                | 描述              |
+|-----------------------|----------|--------------------|-----------------|
+| HIDE_COMMAND_BUTTONS  | 隐藏命令按钮   | `''`(array string) | 修改后需要重新init     |
+| SHOW_REPLY_BUTTON     | 显示快捷回复按钮 | `false`            | 是否显示快捷回复按钮      |
+| EXTRA_MESSAGE_CONTEXT | 额外消息上下文  | `false`            | 引用的消息也会加入上下文    |
+| STREAM_MODE           | 流模式      | `true`             | 打字机模式           |
+| SAFE_MODE             | 安全模式     | `true`             | 开启后会保存最新一条消息的ID |
+| DEBUG_MODE            | 调试模式     | `false`            | 开启后会保存最新一条消息    |
+| DEV_MODE              | 开发模式     | `false`            | 开启后会展示更多调试信息    |
 
 ## 用户配置
 
@@ -80,7 +80,7 @@
 
 | KEY                     | 名称                      | 默认值                         |
 |-------------------------|-------------------------|-----------------------------|
-| OPENAI_API_KEY          | OpenAI API Key          | `''//(array string)`        |
+| OPENAI_API_KEY          | OpenAI API Key          | `''`(array string)          |
 | OPENAI_CHAT_MODEL       | OpenAI的模型名称             | `gpt-3.5-turbo`             |
 | OPENAI_API_BASE         | OpenAI API BASE         | `https://api.openai.com/v1` |
 | OPENAI_API_EXTRA_PARAMS | OpenAI API Extra Params | `{}`                        |
diff --git a/doc/en/CONFIG.md b/doc/en/CONFIG.md
index 95abe920..16313c54 100644
--- a/doc/en/CONFIG.md
+++ b/doc/en/CONFIG.md
@@ -12,7 +12,7 @@ It is recommended to fill in environment variables in the Workers configuration
 
 The configuration that is common to each user can only be configured and filled in through the Workers configuration interface or toml, and it is not supported to modify it by sending messages through Telegram.
 
-An empty string in the array indicates that no value has been set. If a value needs to be set, it should be set as `'value1,value2'`, with multiple values separated by commas.
+> `array string`:  An empty string in the array indicates that no value has been set. If a value needs to be set, it should be set as `'value1,value2'`, with multiple values separated by commas.
 
 ### Basic configuration
 
@@ -27,13 +27,13 @@ An empty string in the array indicates that no value has been set. If a value ne
 | KEY                       | Name                           | Default                                    | Description                                                                                                   |
 |---------------------------|--------------------------------|--------------------------------------------|---------------------------------------------------------------------------------------------------------------|
 | TELEGRAM_API_DOMAIN       | Telegram API Domain            | `https://api.telegram.org/`                | Telegram API domain                                                                                           |
-| TELEGRAM_AVAILABLE_TOKENS | Available Telegram tokens.     | `''//(array string)`                       | Telegram Tokens allowed to access, separated by commas when setting.                                          |
+| TELEGRAM_AVAILABLE_TOKENS | Available Telegram tokens.     | `''`(array string)                         | Telegram Tokens allowed to access, separated by commas when setting.                                          |
 | DEFAULT_PARSE_MODE        | Default parsing mode.          | `Markdown`                                 | Default message parsing mode.                                                                                 |
 | I_AM_A_GENEROUS_PERSON    | Allow everyone to use.         | `false`                                    | Is it allowed for everyone to use?                                                                            |
-| CHAT_WHITE_LIST           | Chat whitelist                 | `''//(array string)`                       | Allowed Chat ID Whitelist                                                                                     |
+| CHAT_WHITE_LIST           | Chat whitelist                 | `''`(array string)                         | Allowed Chat ID Whitelist                                                                                     |
 | LOCK_USER_CONFIG_KEYS     | Locked user configuration key. | The default value is the URL for all APIs. | Configuration key to prevent token leakage caused by replacement.                                             |
-| TELEGRAM_BOT_NAME         | Telegram bot name              | `''//(array string)`                       | The Bot Name corresponding to the Telegram Token that is allowed to access, separated by commas when setting. |
-| CHAT_GROUP_WHITE_LIST     | Group whitelist                | `''//(array string)`                       | Allowed group ID whitelist.                                                                                   |
+| TELEGRAM_BOT_NAME         | Telegram bot name              | `''`(array string)                         | The Bot Name corresponding to the Telegram Token that is allowed to access, separated by commas when setting. |
+| CHAT_GROUP_WHITE_LIST     | Group whitelist                | `''`(array string)                         | Allowed group ID whitelist.                                                                                   |
 | GROUP_CHAT_BOT_ENABLE     | Whether to enable group bots.  | `true`                                     | Whether to enable group robots.                                                                               |
 | GROUP_CHAT_BOT_SHARE_MODE | Group robot sharing mode       | `false`                                    | After opening, people in the same group use the same chat context.                                            |
 
@@ -53,15 +53,15 @@ An empty string in the array indicates that no value has been set. If a value ne
 
 ### Feature configuration
 
-| KEY                   | Name                    | Default              | Description                                                 |
-|-----------------------|-------------------------|----------------------|-------------------------------------------------------------|
-| HIDE_COMMAND_BUTTONS  | Hide command buttons    | `''//(array string)` | Need to re-initiate after modification                      |
-| SHOW_REPLY_BUTTON     | Show quick reply button | `false`              | Whether to display the quick reply button                   |
-| EXTRA_MESSAGE_CONTEXT | Extra message context   | `false`              | The referenced message will also be included in the context |
-| STREAM_MODE           | Stream mode             | `true`               | Typewriter mode                                             |
-| SAFE_MODE             | Safe mode               | `true`               | When enabled, the ID of the latest message will be saved    |
-| DEBUG_MODE            | Debug mode              | `false`              | When enabled, the latest message will be saved              |
-| DEV_MODE              | Development mode        | `false`              | When enabled, more debugging information will be displayed  |
+| KEY                   | Name                    | Default            | Description                                                 |
+|-----------------------|-------------------------|--------------------|-------------------------------------------------------------|
+| HIDE_COMMAND_BUTTONS  | Hide command buttons    | `''`(array string) | Need to re-initiate after modification                      |
+| SHOW_REPLY_BUTTON     | Show quick reply button | `false`            | Whether to display the quick reply button                   |
+| EXTRA_MESSAGE_CONTEXT | Extra message context   | `false`            | The referenced message will also be included in the context |
+| STREAM_MODE           | Stream mode             | `true`             | Typewriter mode                                             |
+| SAFE_MODE             | Safe mode               | `true`             | When enabled, the ID of the latest message will be saved    |
+| DEBUG_MODE            | Debug mode              | `false`            | When enabled, the latest message will be saved              |
+| DEV_MODE              | Development mode        | `false`            | When enabled, more debugging information will be displayed  |
 
 ## User configuration
 
@@ -80,7 +80,7 @@ Each user's custom configuration can only be modified by sending a message throu
 
 | KEY                     | Name                    | Default                     | 
 |-------------------------|-------------------------|-----------------------------|
-| OPENAI_API_KEY          | OpenAI API Key          | `''//(array string)`        |
+| OPENAI_API_KEY          | OpenAI API Key          | `''`(array string)          |
 | OPENAI_CHAT_MODEL       | OpenAI Model            | `gpt-3.5-turbo`             |
 | OPENAI_API_BASE         | OpenAI API BASE         | `https://api.openai.com/v1` |
 | OPENAI_API_EXTRA_PARAMS | OpenAI API Extra Params | `{}`                        |
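To make the `array string` convention concrete, here is a hedged sketch of how such a value can be split into an array; the actual parsing in `src/config/env.js` may differ in detail.

```js
// 'value1,value2' -> ['value1', 'value2']; an empty string means "not set".
// Illustrative helper only; the repo's real env parsing may differ.
function parseArrayString(value) {
    if (!value) {
        return [];
    }
    return value.split(',').map((item) => item.trim()).filter((item) => item !== '');
}

parseArrayString('');                   // []
parseArrayString('token1,token2');      // ['token1', 'token2']
```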

From ca98cb5baa7fea616cd66e4f8465766ca91e7682 Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Mon, 5 Aug 2024 10:15:25 +0800
Subject: [PATCH 13/24] doc: fix custom commands document

---
 doc/cn/CONFIG.md | 24 ++++++++++++------------
 doc/en/CONFIG.md | 24 ++++++++++++------------
 2 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/doc/cn/CONFIG.md b/doc/cn/CONFIG.md
index a61746f4..5cc588ed 100644
--- a/doc/cn/CONFIG.md
+++ b/doc/cn/CONFIG.md
@@ -193,20 +193,20 @@ CUSTOM_COMMAND_cn2en = '/setenvs {"SYSTEM_INIT_MESSAGE": "你是一个翻译下
 
 下面是一些自定义指令帮助信息例子
 
-| 指令描述                        | 值                              |
-|-----------------------------|--------------------------------|
-| COMMAND_DESCRIPTION_azure   | `切换AI提供商为Azure`                |
-| COMMAND_DESCRIPTION_workers | `切换AI提供商为Workers`              |
-| COMMAND_DESCRIPTION_gpt3    | `切换AI提供商为OpenAI GPT-3.5 Turbo` |
-| COMMAND_DESCRIPTION_gpt4    | `切换AI提供商为OpenAI GPT-4`         |
-| COMMAND_DESCRIPTION_cn2en   | `将对话内容翻译成英文`                   |
+| 指令描述                        | 描述                           | 值                                                                                                                 |
+|-----------------------------|------------------------------|-------------------------------------------------------------------------------------------------------------------|
+| COMMAND_DESCRIPTION_azure   | 切换AI提供商为Azure                | `/setenvs {"AI_PROVIDER": "azure"}`                                                                               |
+| COMMAND_DESCRIPTION_workers | 切换AI提供商为Workers              | `/setenvs {"AI_PROVIDER": "workers"}`                                                                             |
+| COMMAND_DESCRIPTION_gpt3    | 切换AI提供商为OpenAI GPT-3.5 Turbo | `/setenvs {"AI_PROVIDER": "openai", "OPENAI_CHAT_MODEL": "gpt-3.5-turbo"}`                                        |
+| COMMAND_DESCRIPTION_gpt4    | 切换AI提供商为OpenAI GPT-4         | `/setenvs {"AI_PROVIDER": "openai", "OPENAI_CHAT_MODEL": "gpt-4"}`                                                |
+| COMMAND_DESCRIPTION_cn2en   | 将对话内容翻译成英文                   | `/setenvs {"SYSTEM_INIT_MESSAGE": "You are a translator. Please translate everything I say below into English."}` |
 
 如果你是用toml进行配置,可以使用下面的方式:
 
 ```toml
-COMMAND_DESCRIPTION_azure = '切换AI提供商为Azure'
-COMMAND_DESCRIPTION_workers = '切换AI提供商为Workers'
-COMMAND_DESCRIPTION_gpt3 = '切换AI提供商为OpenAI GPT-3.5 Turbo'
-COMMAND_DESCRIPTION_gpt4 = '切换AI提供商为OpenAI GPT-4'
-COMMAND_DESCRIPTION_cn2en = '将对话内容翻译成英文'
+COMMAND_DESCRIPTION_azure = '/setenvs {"AI_PROVIDER": "azure"}'
+COMMAND_DESCRIPTION_workers = '/setenvs {"AI_PROVIDER": "workers"}'
+COMMAND_DESCRIPTION_gpt3 = '/setenvs {"AI_PROVIDER": "openai", "OPENAI_CHAT_MODEL": "gpt-3.5-turbo"}'
+COMMAND_DESCRIPTION_gpt4 = '/setenvs {"AI_PROVIDER": "openai", "OPENAI_CHAT_MODEL": "gpt-4"}'
+COMMAND_DESCRIPTION_cn2en = '/setenvs {"SYSTEM_INIT_MESSAGE": "You are a translator. Please translate everything I say below into English."}'
 ```
diff --git a/doc/en/CONFIG.md b/doc/en/CONFIG.md
index 16313c54..774df635 100644
--- a/doc/en/CONFIG.md
+++ b/doc/en/CONFIG.md
@@ -192,20 +192,20 @@ If you want to add help information for a custom command, you can use environmen
 
 The following are some examples of custom command help information.
 
-| Command Description         | Value                                              |
-|-----------------------------|----------------------------------------------------|
-| COMMAND_DESCRIPTION_azure   | `Switch AI provider to Azure.`                     |
-| COMMAND_DESCRIPTION_workers | `Switch AI provider to Workers`                    |
-| COMMAND_DESCRIPTION_gpt3    | `Switch AI provider to OpenAI GPT-3.5 Turbo`       |
-| COMMAND_DESCRIPTION_gpt4    | `Switch AI provider to OpenAI GPT-4`               |
-| COMMAND_DESCRIPTION_cn2en   | `Translate the conversation content into English.` |
+| Command                     | Description                                      | Value                                                                                                             |
+|-----------------------------|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|
+| COMMAND_DESCRIPTION_azure   | Switch AI provider to Azure.                     | `/setenvs {"AI_PROVIDER": "azure"}`                                                                               |
+| COMMAND_DESCRIPTION_workers | Switch AI provider to Workers                    | `/setenvs {"AI_PROVIDER": "workers"}`                                                                             |
+| COMMAND_DESCRIPTION_gpt3    | Switch AI provider to OpenAI GPT-3.5 Turbo.      | `/setenvs {"AI_PROVIDER": "openai", "OPENAI_CHAT_MODEL": "gpt-3.5-turbo"}`                                        |
+| COMMAND_DESCRIPTION_gpt4    | Switch AI provider to OpenAI GPT-4.              | `/setenvs {"AI_PROVIDER": "openai", "OPENAI_CHAT_MODEL": "gpt-4"}`                                                |
+| COMMAND_DESCRIPTION_cn2en   | Translate the conversation content into English. | `/setenvs {"SYSTEM_INIT_MESSAGE": "You are a translator. Please translate everything I say below into English."}` |
 
 If you are using TOML for configuration, you can use the following method:
 
 ```toml
-COMMAND_DESCRIPTION_azure = "Switch AI provider to Azure."
-COMMAND_DESCRIPTION_workers = "Switch AI provider to Workers"
-COMMAND_DESCRIPTION_gpt3 = "Switch AI provider to OpenAI GPT-3.5 Turbo"
-COMMAND_DESCRIPTION_gpt4 = "Switch AI provider to OpenAI GPT-4"
-COMMAND_DESCRIPTION_cn2en = "Translate the conversation content into English."
+COMMAND_DESCRIPTION_azure = '/setenvs {"AI_PROVIDER": "azure"}'
+COMMAND_DESCRIPTION_workers = '/setenvs {"AI_PROVIDER": "workers"}'
+COMMAND_DESCRIPTION_gpt3 = '/setenvs {"AI_PROVIDER": "openai", "OPENAI_CHAT_MODEL": "gpt-3.5-turbo"}'
+COMMAND_DESCRIPTION_gpt4 = '/setenvs {"AI_PROVIDER": "openai", "OPENAI_CHAT_MODEL": "gpt-4"}'
+COMMAND_DESCRIPTION_cn2en = '/setenvs {"SYSTEM_INIT_MESSAGE": "You are a translator. Please translate everything I say below into English."}'
 ```
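As a companion to the tables above, a hedged sketch of how a custom command and its help text pair up at runtime; `resolveCustomCommand` is illustrative only, the real lookup lives in `src/telegram/command.js` and may differ.

```js
// Illustrative only: expands '/azure' into the configured command text and
// fetches its help text. Keys follow the CUSTOM_COMMAND_* /
// COMMAND_DESCRIPTION_* convention documented above.
function resolveCustomCommand(env, text) {
    const name = text.startsWith('/') ? text.slice(1).split(/\s/)[0] : null;
    if (!name) {
        return null;
    }
    const command = env[`CUSTOM_COMMAND_${name}`];
    if (!command) {
        return null;
    }
    return {
        command,                                                // e.g. '/setenvs {"AI_PROVIDER": "azure"}'
        description: env[`COMMAND_DESCRIPTION_${name}`] || '',  // shown as command help text
    };
}
```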

From 725ccf5d1c3721476ccfa798afafa8d1364944f1 Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Mon, 5 Aug 2024 10:26:05 +0800
Subject: [PATCH 14/24] =?UTF-8?q?=20doc:=20=E6=B7=BB=E5=8A=A0`LOCK=5FUSER?=
 =?UTF-8?q?=5FCONFIG=5FKEYS`=E6=96=87=E6=A1=A3?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 doc/cn/CONFIG.md | 7 +++++++
 doc/en/CONFIG.md | 8 ++++++++
 2 files changed, 15 insertions(+)

diff --git a/doc/cn/CONFIG.md b/doc/cn/CONFIG.md
index 5cc588ed..3e4c16d8 100644
--- a/doc/cn/CONFIG.md
+++ b/doc/cn/CONFIG.md
@@ -43,6 +43,13 @@
 
 > IMPORTANT: 必须在botfather中设置`/setprivacy`为`Disable`,否则机器人无法响应`@机器人`的聊天消息。
 
+#### `LOCK_USER_CONFIG_KEYS` 锁定配置
+
+> IMPORTANT: 如果你遇到`Key XXX is locked`的错误,说明你的配置被锁定了,需要解锁才能修改。
+
+`LOCK_USER_CONFIG_KEYS`的默认值为所有API的BASE URL。为了防止用户替换API BASE URL导致token泄露,所以默认情况下会锁定所有API的BASE URL。如果你想解锁某个API的BASE URL,可以将其从`LOCK_USER_CONFIG_KEYS`中删除。
+`LOCK_USER_CONFIG_KEYS`是一个字符串数组,默认值为:`OPENAI_API_BASE,GOOGLE_COMPLETIONS_API,MISTRAL_API_BASE,COHERE_API_BASE,ANTHROPIC_API_BASE,AZURE_COMPLETIONS_API,AZURE_DALLE_API`
+
 ### 历史记录配置
 
 | KEY                | 名称       | 默认值    | 描述                 |
diff --git a/doc/en/CONFIG.md b/doc/en/CONFIG.md
index 774df635..88ff6c5a 100644
--- a/doc/en/CONFIG.md
+++ b/doc/en/CONFIG.md
@@ -43,6 +43,14 @@ The configuration that is common to each user can only be configured and filled
 
 > IMPORTANT: You must set `/setprivacy` to `Disable` in botfather, otherwise the bot will not respond to chat messages with `@bot`.
 
+#### `LOCK_USER_CONFIG_KEYS` Lock configuration
+
+> IMPORTANT: If you encounter the error "Key XXX is locked", it means that your configuration is locked and needs to be unlocked before modification.
+
+The default value of `LOCK_USER_CONFIG_KEYS` is the BASE URL of all APIs. In order to prevent users from replacing the API BASE URL and causing token leakage, the BASE URL of all APIs is locked by default. If you want to unlock the BASE URL of a certain API, you can remove it from `LOCK_USER_CONFIG_KEYS`.
+`LOCK_USER_CONFIG_KEYS` is a string array with a default value of `OPENAI_API_BASE,GOOGLE_COMPLETIONS_API,MISTRAL_API_BASE,COHERE_API_BASE,ANTHROPIC_API_BASE,AZURE_COMPLETIONS_API,AZURE_DALLE_API`
+
+
 ### History configuration
 
 | KEY                | Name                                  | Default | Description                                                   |
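A minimal sketch of the guard behind the `Key XXX is locked` error described above, assuming `LOCK_USER_CONFIG_KEYS` has already been parsed into an array; the actual check in this repo may be structured differently.

```js
// Rejects updates to configuration keys listed in LOCK_USER_CONFIG_KEYS.
// Illustrative only; the real /setenv handling may differ.
function assertKeyNotLocked(env, key) {
    const locked = env.LOCK_USER_CONFIG_KEYS || [];
    if (locked.includes(key)) {
        throw new Error(`Key ${key} is locked`);
    }
}

// assertKeyNotLocked({LOCK_USER_CONFIG_KEYS: ['OPENAI_API_BASE']}, 'OPENAI_API_BASE')
// -> throws "Key OPENAI_API_BASE is locked"
```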

From d83dea952d961a758bcfecf88799bceb1090c12f Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Mon, 5 Aug 2024 10:37:07 +0800
Subject: [PATCH 15/24] =?UTF-8?q?doc:=20=E4=BC=98=E5=8C=96=E9=85=8D?=
 =?UTF-8?q?=E7=BD=AE=E6=96=87=E6=A1=A3?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 doc/cn/CONFIG.md | 12 ++++++++----
 doc/en/CONFIG.md | 11 +++++++----
 2 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/doc/cn/CONFIG.md b/doc/cn/CONFIG.md
index 3e4c16d8..f36dffb5 100644
--- a/doc/cn/CONFIG.md
+++ b/doc/cn/CONFIG.md
@@ -43,12 +43,16 @@
 
 > IMPORTANT: 必须在botfather中设置`/setprivacy`为`Disable`,否则机器人无法响应`@机器人`的聊天消息。
 
-#### `LOCK_USER_CONFIG_KEYS` 锁定配置
+#### 锁定配置 `LOCK_USER_CONFIG_KEYS`
 
 > IMPORTANT: 如果你遇到`Key XXX is locked`的错误,说明你的配置被锁定了,需要解锁才能修改。
 
 `LOCK_USER_CONFIG_KEYS`的默认值为所有API的BASE URL。为了防止用户替换API BASE URL导致token泄露,所以默认情况下会锁定所有API的BASE URL。如果你想解锁某个API的BASE URL,可以将其从`LOCK_USER_CONFIG_KEYS`中删除。
-`LOCK_USER_CONFIG_KEYS`是一个字符串数组,默认值为:`OPENAI_API_BASE,GOOGLE_COMPLETIONS_API,MISTRAL_API_BASE,COHERE_API_BASE,ANTHROPIC_API_BASE,AZURE_COMPLETIONS_API,AZURE_DALLE_API`
+`LOCK_USER_CONFIG_KEYS`是一个字符串数组,默认值为:
+
+```
+OPENAI_API_BASE,GOOGLE_COMPLETIONS_API,MISTRAL_API_BASE,COHERE_API_BASE,ANTHROPIC_API_BASE,AZURE_COMPLETIONS_API,AZURE_DALLE_API
+```
 
 ### 历史记录配置
 
@@ -98,7 +102,7 @@
 
 ### Azure OpenAI
 
->  AZURE_COMPLETIONS_API `https://RESOURCE_NAME.openai.azure.com/openai/deployments/MODEL_NAME/chat/completions?api-version=VERSION_NAME`
+> AZURE_COMPLETIONS_API `https://RESOURCE_NAME.openai.azure.com/openai/deployments/MODEL_NAME/chat/completions?api-version=VERSION_NAME`
 
 > AZURE_DALLE_API `https://RESOURCE_NAME.openai.azure.com/openai/deployments/MODEL_NAME/images/generations?api-version=VERSION_NAME`
 
@@ -172,7 +176,7 @@ cloudflare workers 暂时不支持访问
 
 除了上述系统定义的指令,你也可以自定义快捷指令, 可以将某些较长的指令简化为一个单词的指令。
 
-自定义指令使用环境变量设置 CUSTOM_COMMAND_XXX,其中XXX为指令名,比如`CUSTOM_COMMAND_azure`,值为指令内容,比如`/setenvs {"AI_PROVIDER": "azure"}`。 这样就可以使用`/azure`来代替`/setenvs {"AI_PROVIDER": "azure"}`实现快速切换AI提供商。
+自定义指令使用环境变量设置 `CUSTOM_COMMAND_XXX`,其中XXX为指令名,比如`CUSTOM_COMMAND_azure`,值为指令内容,比如`/setenvs {"AI_PROVIDER": "azure"}`。 这样就可以使用`/azure`来代替`/setenvs {"AI_PROVIDER": "azure"}`实现快速切换AI提供商。
 
 下面是一些自定义指令例子
 
diff --git a/doc/en/CONFIG.md b/doc/en/CONFIG.md
index 88ff6c5a..ce508140 100644
--- a/doc/en/CONFIG.md
+++ b/doc/en/CONFIG.md
@@ -43,13 +43,16 @@ The configuration that is common to each user can only be configured and filled
 
 > IMPORTANT: You must set `/setprivacy` to `Disable` in botfather, otherwise the bot will not respond to chat messages with `@bot`.
 
-#### `LOCK_USER_CONFIG_KEYS` Lock configuration
+#### Lock configuration `LOCK_USER_CONFIG_KEYS`
 
 > IMPORTANT: If you encounter the error "Key XXX is locked", it means that your configuration is locked and needs to be unlocked before modification.
 
 The default value of `LOCK_USER_CONFIG_KEYS` is the BASE URL of all APIs. In order to prevent users from replacing the API BASE URL and causing token leakage, the BASE URL of all APIs is locked by default. If you want to unlock the BASE URL of a certain API, you can remove it from `LOCK_USER_CONFIG_KEYS`.
-`LOCK_USER_CONFIG_KEYS` is a string array with a default value of `OPENAI_API_BASE,GOOGLE_COMPLETIONS_API,MISTRAL_API_BASE,COHERE_API_BASE,ANTHROPIC_API_BASE,AZURE_COMPLETIONS_API,AZURE_DALLE_API`
+`LOCK_USER_CONFIG_KEYS` is a string array with a default value of:
 
+```
+OPENAI_API_BASE,GOOGLE_COMPLETIONS_API,MISTRAL_API_BASE,COHERE_API_BASE,ANTHROPIC_API_BASE,AZURE_COMPLETIONS_API,AZURE_DALLE_API
+```
 
 ### History configuration
 
@@ -99,7 +102,7 @@ Each user's custom configuration can only be modified by sending a message throu
 
 ### Azure OpenAI
 
->  AZURE_COMPLETIONS_API `https://RESOURCE_NAME.openai.azure.com/openai/deployments/MODEL_NAME/chat/completions?api-version=VERSION_NAME`
+> AZURE_COMPLETIONS_API `https://RESOURCE_NAME.openai.azure.com/openai/deployments/MODEL_NAME/chat/completions?api-version=VERSION_NAME`
 
 > AZURE_DALLE_API `https://RESOURCE_NAME.openai.azure.com/openai/deployments/MODEL_NAME/images/generations?api-version=VERSION_NAME`
 
@@ -172,7 +175,7 @@ Each user's custom configuration can only be modified by sending a message throu
 
 In addition to the commands defined by the system, you can also customize shortcut commands, which can simplify some longer commands into a single word command.
 
-Custom commands use environment variables to set CUSTOM_COMMAND_XXX, where XXX is the command name, such as `CUSTOM_COMMAND_azure`, and the value is the command content, such as `/setenvs {"AI_PROVIDER": "azure"}`. This allows you to use `/azure` instead of `/setenvs {"AI_PROVIDER": "azure"}` to quickly switch AI providers.
+Custom commands use environment variables to set `CUSTOM_COMMAND_XXX`, where XXX is the command name, such as `CUSTOM_COMMAND_azure`, and the value is the command content, such as `/setenvs {"AI_PROVIDER": "azure"}`. This allows you to use `/azure` instead of `/setenvs {"AI_PROVIDER": "azure"}` to quickly switch AI providers.
 
 Here are some examples of custom commands.
 

From 9609e7ab9dacfffa8663b460d8be1458efb13273 Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Mon, 5 Aug 2024 17:43:14 +0800
Subject: [PATCH 16/24] =?UTF-8?q?perf:=20=E4=B8=8D=E9=99=90=E5=88=B6token?=
 =?UTF-8?q?=E9=95=BF=E5=BA=A6?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Due to LLM updates and price cuts:
 - The default configuration no longer limits token length; only the number of conversation turns is limited
 - The default model is updated to the cheaper gpt-4o-mini
---
 doc/cn/CONFIG.md  | 12 ++++++------
 doc/en/CONFIG.md  |  4 ++--
 src/agent/llm.js  |  2 +-
 src/config/env.js |  4 ++--
 4 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/doc/cn/CONFIG.md b/doc/cn/CONFIG.md
index f36dffb5..d4e57509 100644
--- a/doc/cn/CONFIG.md
+++ b/doc/cn/CONFIG.md
@@ -56,11 +56,11 @@ OPENAI_API_BASE,GOOGLE_COMPLETIONS_API,MISTRAL_API_BASE,COHERE_API_BASE,ANTHROPI
 
 ### 历史记录配置
 
-| KEY                | 名称       | 默认值    | 描述                 |
-|--------------------|----------|--------|--------------------|
-| AUTO_TRIM_HISTORY  | 自动裁剪历史记录 | `true` | 为避免4096字符限制,自动裁剪消息 |
-| MAX_HISTORY_LENGTH | 最大历史记录长度 | `20`   | 保留的最大历史记录条数        |
-| MAX_TOKEN_LENGTH   | 最大令牌长度   | `2048` | 历史记录的最大令牌长度        |
+| KEY                | 名称       | 默认值     | 描述                 |
+|--------------------|----------|---------|--------------------|
+| AUTO_TRIM_HISTORY  | 自动裁剪历史记录 | `true`  | 为避免4096字符限制,自动裁剪消息 |
+| MAX_HISTORY_LENGTH | 最大历史记录长度 | `20`    | 保留的最大历史记录条数        |
+| MAX_TOKEN_LENGTH   | 最大令牌长度   | `20480` | 历史记录的最大令牌长度        |
 
 ### 特性开关
 
@@ -92,7 +92,7 @@ OPENAI_API_BASE,GOOGLE_COMPLETIONS_API,MISTRAL_API_BASE,COHERE_API_BASE,ANTHROPI
 | KEY                     | 名称                      | 默认值                         |
 |-------------------------|-------------------------|-----------------------------|
 | OPENAI_API_KEY          | OpenAI API Key          | `''`(array string)          |
-| OPENAI_CHAT_MODEL       | OpenAI的模型名称             | `gpt-3.5-turbo`             |
+| OPENAI_CHAT_MODEL       | OpenAI的模型名称             | `gpt-4o-mini`               |
 | OPENAI_API_BASE         | OpenAI API BASE         | `https://api.openai.com/v1` |
 | OPENAI_API_EXTRA_PARAMS | OpenAI API Extra Params | `{}`                        |
 | DALL_E_MODEL            | DALL-E的模型名称             | `dall-e-2`                  |
diff --git a/doc/en/CONFIG.md b/doc/en/CONFIG.md
index ce508140..ed9629cb 100644
--- a/doc/en/CONFIG.md
+++ b/doc/en/CONFIG.md
@@ -60,7 +60,7 @@ OPENAI_API_BASE,GOOGLE_COMPLETIONS_API,MISTRAL_API_BASE,COHERE_API_BASE,ANTHROPI
 |--------------------|---------------------------------------|---------|---------------------------------------------------------------|
 | AUTO_TRIM_HISTORY  | Automatic trimming of message history | `true`  | Automatically trim messages to avoid the 4096 character limit |
 | MAX_HISTORY_LENGTH | Maximum length of message history     | `20`    | Maximum number of message history entries to keep             |
-| MAX_TOKEN_LENGTH   | Maximum token length                  | `2048`  | Maximum token length for message history                      |
+| MAX_TOKEN_LENGTH   | Maximum token length                  | `20480` | Maximum token length for message history                      |
 
 ### Feature configuration
 
@@ -92,7 +92,7 @@ Each user's custom configuration can only be modified by sending a message throu
 | KEY                     | Name                    | Default                     | 
 |-------------------------|-------------------------|-----------------------------|
 | OPENAI_API_KEY          | OpenAI API Key          | `''`(array string)          |
-| OPENAI_CHAT_MODEL       | OpenAI Model            | `gpt-3.5-turbo`             |
+| OPENAI_CHAT_MODEL       | OpenAI Model            | `gpt-4o-mini`               |
 | OPENAI_API_BASE         | OpenAI API BASE         | `https://api.openai.com/v1` |
 | OPENAI_API_EXTRA_PARAMS | OpenAI API Extra Params | `{}`                        |
 | DALL_E_MODEL            | DALL-E model name.      | `dall-e-2`                  |
diff --git a/src/agent/llm.js b/src/agent/llm.js
index a6e914dd..3723545f 100644
--- a/src/agent/llm.js
+++ b/src/agent/llm.js
@@ -44,7 +44,7 @@ async function loadHistory(key) {
             list = list.splice(list.length - maxLength);
         }
         // 处理token长度问题, 小于0不裁剪
-        if (maxToken >= 0) {
+        if (maxToken > 0) {
             let tokenLength = initLength;
             for (let i = list.length - 1; i >= 0; i--) {
                 const historyItem = list[i];
diff --git a/src/config/env.js b/src/config/env.js
index b45f3803..592671be 100644
--- a/src/config/env.js
+++ b/src/config/env.js
@@ -25,7 +25,7 @@ export class UserConfig {
     // OpenAI API Key
     OPENAI_API_KEY = [];
     // OpenAI的模型名称
-    OPENAI_CHAT_MODEL = 'gpt-3.5-turbo';
+    OPENAI_CHAT_MODEL = 'gpt-4o-mini';
     // OpenAI API BASE ``
     OPENAI_API_BASE = 'https://api.openai.com/v1';
     // OpenAI API Extra Params
@@ -178,7 +178,7 @@ class Environment {
     // 最大历史记录长度
     MAX_HISTORY_LENGTH = 20;
     // 最大消息长度
-    MAX_TOKEN_LENGTH = 2048;
+    MAX_TOKEN_LENGTH = -1;
 
 
     // -- 特性开关 --
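A hedged reconstruction of what the `maxToken > 0` change means for history trimming: zero or negative values now skip token-based trimming entirely, while positive values still drop the oldest entries once the budget is exceeded. Only the guard is taken from the hunk above; the loop body and the helper names are assumptions.

```js
// Illustrative reconstruction of the trimming behaviour; only the maxToken
// guard is verbatim from the patch, the rest is an assumption.
function trimHistoryByTokens(list, maxToken, countTokens, initLength = 0) {
    if (maxToken <= 0) {
        return list; // new behaviour: 0 or negative disables token-based trimming
    }
    let tokenLength = initLength;
    let keepFrom = 0;
    for (let i = list.length - 1; i >= 0; i--) {
        tokenLength += countTokens(list[i].content || '');
        if (tokenLength > maxToken) {
            keepFrom = i + 1; // everything older than this is dropped
            break;
        }
    }
    return list.slice(keepFrom);
}
```

With `MAX_TOKEN_LENGTH` now defaulting to `-1`, only `MAX_HISTORY_LENGTH` (20 turns) bounds the history by default.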

From dca43f0b0f180ad538cad5ec5df33635bf91b77f Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Tue, 6 Aug 2024 21:24:36 +0800
Subject: [PATCH 17/24] =?UTF-8?q?perf:=20=E4=BD=BF=E7=94=A8js=E8=84=9A?=
 =?UTF-8?q?=E6=9C=AC=E4=BB=A3=E6=9B=BFmakefile?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../ISSUE_TEMPLATE/bug.md                     |  0
 Makefile                                      | 22 --------
 dist/buildinfo.json                           |  2 +-
 dist/index.js                                 | 26 ++++-----
 dist/timestamp                                |  2 +-
 esbuild.config.js                             | 53 +++++++++++++++++++
 package.json                                  |  4 +-
 7 files changed, 68 insertions(+), 41 deletions(-)
 rename ".github/ISSUE_TEMPLATE/bug\345\217\215\351\246\210.md" => .github/ISSUE_TEMPLATE/bug.md (100%)
 delete mode 100644 Makefile
 create mode 100644 esbuild.config.js

diff --git "a/.github/ISSUE_TEMPLATE/bug\345\217\215\351\246\210.md" b/.github/ISSUE_TEMPLATE/bug.md
similarity index 100%
rename from ".github/ISSUE_TEMPLATE/bug\345\217\215\351\246\210.md"
rename to .github/ISSUE_TEMPLATE/bug.md
diff --git a/Makefile b/Makefile
deleted file mode 100644
index 99e1b22b..00000000
--- a/Makefile
+++ /dev/null
@@ -1,22 +0,0 @@
-TIMESTAMP_FILE := ./dist/timestamp # 兼容旧版更新逻辑
-BUILD_INFO_JSON := ./dist/buildinfo.json
-OUTPUT_FILE := ./dist/index.js
-ENTRY_FILE := main.js
-ESLINT := ./node_modules/.bin/eslint
-ESBUILD := ./node_modules/.bin/esbuild
-
-.PHONY: build
-build: clean
-	COMMIT_HASH=$$(git rev-parse --short HEAD) && \
-	TIMESTAMP=$$(date +%s) && \
-	echo "$$TIMESTAMP" > $(TIMESTAMP_FILE) && \
-	echo "{\"sha\": \"$$COMMIT_HASH\", \"timestamp\": $$TIMESTAMP}" > $(BUILD_INFO_JSON) && \
-	$(ESBUILD) $(ENTRY_FILE) --bundle --outfile=$(OUTPUT_FILE) --format=esm --platform=node --define:process.env.BUILD_VERSION="'$$COMMIT_HASH'" --define:process.env.BUILD_TIMESTAMP="$$TIMESTAMP"
-
-.PHONY: clean
-clean:
-	rm -f $(TIMESTAMP_FILE) $(BUILD_INFO_JSON) $(OUTPUT_FILE)
-
-.PHONY: lint
-lint:
-	$(ESLINT) --fix main.js src adapter
diff --git a/dist/buildinfo.json b/dist/buildinfo.json
index 749853e0..3890b49e 100644
--- a/dist/buildinfo.json
+++ b/dist/buildinfo.json
@@ -1 +1 @@
-{"sha": "2bc1db9", "timestamp": 1722604354}
+{"sha":"9609e7a","timestamp":1722950584}
\ No newline at end of file
diff --git a/dist/index.js b/dist/index.js
index c97348b5..f93f3a21 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -17,7 +17,7 @@ var UserConfig = class {
   // OpenAI API Key
   OPENAI_API_KEY = [];
   // OpenAI的模型名称
-  OPENAI_CHAT_MODEL = "gpt-3.5-turbo";
+  OPENAI_CHAT_MODEL = "gpt-4o-mini";
   // OpenAI API BASE ``
   OPENAI_API_BASE = "https://api.openai.com/v1";
   // OpenAI API Extra Params
@@ -89,9 +89,9 @@ var Environment = class {
   // -- 版本数据 --
   //
   // 当前版本
-  BUILD_TIMESTAMP = 1722604354;
+  BUILD_TIMESTAMP = 1722950584;
   // 当前版本 commit id
-  BUILD_VERSION = "2bc1db9";
+  BUILD_VERSION = "9609e7a";
   // -- 基础配置 --
   /**
    * @type {I18n | null}
@@ -114,7 +114,8 @@ var Environment = class {
   // 最小stream模式消息间隔,小于等于0则不限制
   TELEGRAM_MIN_STREAM_INTERVAL = 0;
   // 图片尺寸偏移 0为第一位,-1为最后一位, 越靠后的图片越大。PS: 图片过大可能导致token消耗过多,或者workers超时或内存不足
-  TELEGRAM_PHOTO_SIZE_OFFSET = -2;
+  // 默认选择次低质量的图片
+  TELEGRAM_PHOTO_SIZE_OFFSET = 1;
   // 向LLM优先传递图片方式:url, base64
   TELEGRAM_IMAGE_TRANSFER_MODE = "url";
   // --  权限相关 --
@@ -151,7 +152,7 @@ var Environment = class {
   // 最大历史记录长度
   MAX_HISTORY_LENGTH = 20;
   // 最大消息长度
-  MAX_TOKEN_LENGTH = 2048;
+  MAX_TOKEN_LENGTH = -1;
   // -- 特性开关 --
   //
   // 隐藏部分命令按钮
@@ -1122,9 +1123,6 @@ async function makeResponse200(resp) {
     });
   }
 }
-function supportsNativeBase64() {
-  return typeof Buffer !== "undefined";
-}
 async function urlToBase64String(url) {
   try {
     const { Buffer: Buffer2 } = await import("node:buffer");
@@ -1716,7 +1714,7 @@ async function loadHistory(key) {
     if (maxLength >= 0 && list.length > maxLength) {
       list = list.splice(list.length - maxLength);
     }
-    if (maxToken >= 0) {
+    if (maxToken > 0) {
       let tokenLength = initLength;
       for (let i = list.length - 1; i >= 0; i--) {
         const historyItem = list[i];
@@ -2388,12 +2386,10 @@ async function msgChatWithLLM(message, context) {
   const params = { message: content };
   if (message.photo && message.photo.length > 0) {
     let sizeIndex = 0;
-    if (supportsNativeBase64()) {
-      if (ENV.TELEGRAM_PHOTO_SIZE_OFFSET >= 0) {
-        sizeIndex = ENV.TELEGRAM_PHOTO_SIZE_OFFSET;
-      } else if (ENV.TELEGRAM_PHOTO_SIZE_OFFSET < 0) {
-        sizeIndex = message.photo.length + ENV.TELEGRAM_PHOTO_SIZE_OFFSET;
-      }
+    if (ENV.TELEGRAM_PHOTO_SIZE_OFFSET >= 0) {
+      sizeIndex = ENV.TELEGRAM_PHOTO_SIZE_OFFSET;
+    } else if (ENV.TELEGRAM_PHOTO_SIZE_OFFSET < 0) {
+      sizeIndex = message.photo.length + ENV.TELEGRAM_PHOTO_SIZE_OFFSET;
     }
     sizeIndex = Math.max(0, Math.min(sizeIndex, message.photo.length - 1));
     const fileId = message.photo[sizeIndex].file_id;
diff --git a/dist/timestamp b/dist/timestamp
index 9a059ce9..243660ed 100644
--- a/dist/timestamp
+++ b/dist/timestamp
@@ -1 +1 @@
-1722604354
+1722950584
\ No newline at end of file
diff --git a/esbuild.config.js b/esbuild.config.js
new file mode 100644
index 00000000..e8632f87
--- /dev/null
+++ b/esbuild.config.js
@@ -0,0 +1,53 @@
+import esbuild from 'esbuild';
+import fs from 'fs/promises';
+import { execSync } from 'child_process';
+
+const TIMESTAMP_FILE = './dist/timestamp';
+const BUILD_INFO_JSON = './dist/buildinfo.json';
+const OUTPUT_FILE = './dist/index.js';
+const ENTRY_FILE = 'main.js';
+
+async function clean() {
+  for (const file of [TIMESTAMP_FILE, BUILD_INFO_JSON, OUTPUT_FILE]) {
+    try {
+      await fs.unlink(file);
+    } catch (error) {
+      if (error.code !== 'ENOENT') {
+        throw error;
+      }
+    }
+  }
+}
+
+async function build() {
+  await clean();
+
+  const COMMIT_HASH = execSync('git rev-parse --short HEAD').toString().trim();
+  const TIMESTAMP = Math.floor(Date.now() / 1000);
+
+  await fs.writeFile(TIMESTAMP_FILE, TIMESTAMP.toString());
+  await fs.writeFile(BUILD_INFO_JSON, JSON.stringify({
+    sha: COMMIT_HASH,
+    timestamp: TIMESTAMP
+  }));
+
+  try {
+    await esbuild.build({
+      entryPoints: [ENTRY_FILE],
+      bundle: true,
+      outfile: OUTPUT_FILE,
+      format: 'esm',
+      platform: 'node',
+      define: {
+        'process.env.BUILD_VERSION': `'${COMMIT_HASH}'`,
+        'process.env.BUILD_TIMESTAMP': TIMESTAMP.toString()
+      }
+    });
+    console.log('Build successfully!');
+  } catch (error) {
+    console.error('Build failed:', error);
+    process.exit(1);
+  }
+}
+
+build();
\ No newline at end of file
diff --git a/package.json b/package.json
index d721281a..6668e0e7 100644
--- a/package.json
+++ b/package.json
@@ -5,8 +5,8 @@
   "main": "main.js",
   "type": "module",
   "scripts": {
-    "lint": "make lint",
-    "build": "make build",
+    "lint": "eslint --fix main.js src adapter",
+    "build": "node esbuild.config.js",
     "debug": "wrangler dev --local",
     "wrangler": "wrangler",
     "deploy:dist": "wrangler deploy",

From 49b5a26a063093921a9946c8cc2b8adf40cbfac6 Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Wed, 7 Aug 2024 17:19:43 +0800
Subject: [PATCH 18/24] =?UTF-8?q?doc:=20eslint=E6=B7=BB=E5=8A=A0jsdoc?=
 =?UTF-8?q?=E6=8F=92=E4=BB=B6?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 adapter/render/index.js  |   6 +++
 eslint.config.js         |  11 +++-
 package.json             |   1 +
 src/agent/agents.js      |  11 ++--
 src/agent/anthropic.js   |   9 ++--
 src/agent/azure.js       |  14 +++---
 src/agent/cohere.js      |   9 ++--
 src/agent/gemini.js      |   9 ++--
 src/agent/llm.js         |  18 ++++---
 src/agent/mistralai.js   |   9 ++--
 src/agent/openai.js      |  14 +++---
 src/agent/request.js     |  32 +++++-------
 src/agent/stream.js      |  26 ++++++++++
 src/agent/workersai.js   |  16 +++---
 src/config/context.js    |   6 +--
 src/config/env.js        |  10 ++++
 src/i18n/index.js        |   2 +-
 src/router.js            |  17 +++----
 src/telegram/command.js  |  41 ++++++---------
 src/telegram/message.js  |  33 ++++--------
 src/telegram/telegram.js |  34 ++++++-------
 src/types/agent.js       |   9 ++--
 src/types/context.js     |  19 +------
 src/types/i18n.js        |   9 ++--
 src/types/telegram.js    |  14 +++---
 src/utils/md2tgmd.js     |   4 +-
 src/utils/utils.js       |  11 ++--
 yarn.lock                | 106 ++++++++++++++++++++++++++++++++++++++-
 28 files changed, 295 insertions(+), 205 deletions(-)

diff --git a/adapter/render/index.js b/adapter/render/index.js
index 7cce7245..5680ad6a 100644
--- a/adapter/render/index.js
+++ b/adapter/render/index.js
@@ -19,6 +19,12 @@ try {
 
 const bodyMethods = new Set(['POST', 'PUT', 'PATCH', 'DELETE']);
 
+/**
+ *
+ * @param {string} baseURL
+ * @param {Request} req
+ * @returns {Request}
+ */
 function requestBuilder(baseURL, req) {
   const reqHost = req.headers['x-forwarded-host'] || req.headers['host'];
   const reqScheme = req.headers['x-forwarded-proto'] || req.headers['x-scheme'];
diff --git a/eslint.config.js b/eslint.config.js
index 64128c98..1ed0bd3f 100644
--- a/eslint.config.js
+++ b/eslint.config.js
@@ -1,13 +1,22 @@
 import globals from "globals";
 import pluginJs from "@eslint/js";
+import jsdoc from "eslint-plugin-jsdoc";
 
 
 export default [
   {languageOptions: { globals: {...globals.browser, ...globals.node} }},
   pluginJs.configs.recommended,
+  jsdoc.configs['flat/recommended'],
   {
     rules: {
       semi : ["error", "always"],
+      'jsdoc/no-undefined-types': 'off',
+      'jsdoc/require-returns-description': 'off',
+      'jsdoc/require-property-description': 'off',
+      'jsdoc/require-param-description': 'off',
+    },
+    plugins: {
+      jsdoc
     }
   }
-];
+];
\ No newline at end of file
diff --git a/package.json b/package.json
index 6668e0e7..dd923bb7 100644
--- a/package.json
+++ b/package.json
@@ -16,6 +16,7 @@
     "@eslint/js": "^9.1.1",
     "esbuild": "^0.17.11",
     "eslint": "^9.1.1",
+    "eslint-plugin-jsdoc": "^49.0.0",
     "globals": "^15.1.0",
     "wrangler": "^3.0.0"
   },
diff --git a/src/agent/agents.js b/src/agent/agents.js
index 0bc07d12..aca0a11e 100644
--- a/src/agent/agents.js
+++ b/src/agent/agents.js
@@ -114,9 +114,8 @@ export function chatModelKey(agentName) {
 
 /**
  * 加载聊天AI
- *
  * @param {ContextType} context
- * @return {?ChatAgent}
+ * @returns {?ChatAgent}
  */
 export function loadChatLLM(context) {
     for (const llm of chatLlmAgents) {
@@ -136,14 +135,15 @@ export function loadChatLLM(context) {
 
 /**
  *
- * @typedef {function} ImageAgentRequest
+ * @typedef {Function} ImageAgentRequest
  * @param {string} prompt
  * @param {ContextType} context
  */
+
 /**
  * @typedef {object} ImageAgent
  * @property {string} name
- * @property {function} enable
+ * @property {Function} enable
  * @property {ImageAgentRequest} request
  */
 /**
@@ -170,9 +170,8 @@ export const imageGenAgents = [
 
 /**
  * 加载图片AI
- *
  * @param {ContextType} context
- * @return {?ImageAgent}
+ * @returns {?ImageAgent}
  */
 export function loadImageGen(context) {
     for (const imgGen of imageGenAgents) {
diff --git a/src/agent/anthropic.js b/src/agent/anthropic.js
index c3dd7d3b..3a6238de 100644
--- a/src/agent/anthropic.js
+++ b/src/agent/anthropic.js
@@ -8,7 +8,7 @@ import {imageToBase64String} from "../utils/utils.js";
 
 /**
  * @param {ContextType} context
- * @return {boolean}
+ * @returns {boolean}
  */
 export function isAnthropicAIEnable(context) {
     return !!(context.USER_CONFIG.ANTHROPIC_API_KEY);
@@ -16,7 +16,7 @@ export function isAnthropicAIEnable(context) {
 
 /**
  * @param {HistoryItem} item
- * @return {Promise}
+ * @returns {Promise}
  */
 async function renderAnthropicMessage(item) {
     const res = {
@@ -41,11 +41,10 @@ async function renderAnthropicMessage(item) {
 
 /**
  * 发送消息到Anthropic AI
- *
  * @param {LlmParams} params
  * @param {ContextType} context
- * @param {function} onStream
- * @return {Promise}
+ * @param {Function} onStream
+ * @returns {Promise}
  */
 export async function requestCompletionsFromAnthropicAI(params, context, onStream) {
     const { message, images, prompt, history } = params;
diff --git a/src/agent/azure.js b/src/agent/azure.js
index 1180cdb2..9ce137ad 100644
--- a/src/agent/azure.js
+++ b/src/agent/azure.js
@@ -5,7 +5,7 @@ import {renderOpenAIMessage} from "./openai.js";
 
 /**
  * @param {ContextType} context
- * @return {string|null}
+ * @returns {string|null}
  */
 function azureKeyFromContext(context) {
     return context.USER_CONFIG.AZURE_API_KEY;
@@ -14,7 +14,7 @@ function azureKeyFromContext(context) {
 
 /**
  * @param {ContextType} context
- * @return {boolean}
+ * @returns {boolean}
  */
 export function isAzureEnable(context) {
     return !!(context.USER_CONFIG.AZURE_API_KEY && context.USER_CONFIG.AZURE_COMPLETIONS_API);
@@ -22,7 +22,7 @@ export function isAzureEnable(context) {
 
 /**
  * @param {ContextType} context
- * @return {boolean}
+ * @returns {boolean}
  */
 export function isAzureImageEnable(context) {
     return !!(context.USER_CONFIG.AZURE_API_KEY && context.USER_CONFIG.AZURE_DALLE_API);
@@ -31,11 +31,10 @@ export function isAzureImageEnable(context) {
 
 /**
  * 发送消息到Azure ChatGPT
- *
  * @param {LlmParams} params
  * @param {ContextType} context
- * @param {function} onStream
- * @return {Promise}
+ * @param {Function} onStream
+ * @returns {Promise}
  */
 export async function requestCompletionsFromAzureOpenAI(params, context, onStream) {
     const { message, images, prompt, history } = params;
@@ -61,10 +60,9 @@ export async function requestCompletionsFromAzureOpenAI(params, context, onStrea
 
 /**
  * 请求AzureOpenai生成图片
- *
  * @param {string} prompt
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 export async function requestImageFromAzureOpenAI(prompt, context) {
     const url = context.USER_CONFIG.AZURE_DALLE_API;
diff --git a/src/agent/cohere.js b/src/agent/cohere.js
index fc8075b5..3c86a475 100644
--- a/src/agent/cohere.js
+++ b/src/agent/cohere.js
@@ -5,7 +5,7 @@ import {requestChatCompletions} from "./request.js";
 
 /**
  * @param {ContextType} context
- * @return {boolean}
+ * @returns {boolean}
  */
 export function isCohereAIEnable(context) {
     return !!(context.USER_CONFIG.COHERE_API_KEY);
@@ -18,7 +18,7 @@ const COHERE_ROLE_MAP = {
 
 /**
  * @param {HistoryItem} item
- * @return {Object}
+ * @returns {object}
  */
 function renderCohereMessage(item) {
     return {
@@ -30,11 +30,10 @@ function renderCohereMessage(item) {
 
 /**
  * 发送消息到Cohere AI
- *
  * @param {LlmParams} params
  * @param {ContextType} context
- * @param {function} onStream
- * @return {Promise}
+ * @param {Function} onStream
+ * @returns {Promise}
  */
 export async function requestCompletionsFromCohereAI(params, context, onStream) {
     const { message, prompt, history } = params;
diff --git a/src/agent/gemini.js b/src/agent/gemini.js
index 14e49448..a06433b1 100644
--- a/src/agent/gemini.js
+++ b/src/agent/gemini.js
@@ -2,7 +2,7 @@ import "../types/context.js";
 
 /**
  * @param {ContextType} context
- * @return {boolean}
+ * @returns {boolean}
  */
 export function isGeminiAIEnable(context) {
     return !!(context.USER_CONFIG.GOOGLE_API_KEY);
@@ -17,7 +17,7 @@ const GEMINI_ROLE_MAP =  {
 
 /**
  * @param {HistoryItem} item
- * @return {Object}
+ * @returns {object}
  */
 function renderGeminiMessage(item) {
     return {
@@ -32,11 +32,10 @@ function renderGeminiMessage(item) {
 
 /**
  * 发送消息到Gemini
- *
  * @param {LlmParams} params
  * @param {ContextType} context
- * @param {function} onStream
- * @return {Promise}
+ * @param {Function} onStream
+ * @returns {Promise}
  */
 export async function requestCompletionsFromGeminiAI(params, context, onStream) {
     const { message, prompt, history } = params;
diff --git a/src/agent/llm.js b/src/agent/llm.js
index 3723545f..dc61c84b 100644
--- a/src/agent/llm.js
+++ b/src/agent/llm.js
@@ -8,7 +8,7 @@ import {loadChatLLM} from "./agents.js";
 import "../types/agent.js";
 
 /**
- * @return {(function(string): number)}
+ * @returns {(function(string): number)}
  */
 function tokensCounter() {
     return (text) => {
@@ -19,9 +19,8 @@ function tokensCounter() {
 
 /**
  * 加载历史TG消息
- *
  * @param {string} key
- * @return {Promise}
+ * @returns {Promise}
  */
 async function loadHistory(key) {
 
@@ -77,18 +76,22 @@ async function loadHistory(key) {
  * @typedef {object} LlmModifierResult
  * @property {HistoryItem[]} history
  * @property {string} message
- *
  * @typedef {function(HistoryItem[], string): LlmModifierResult} LlmModifier
  */
 
+/**
+ * @callback StreamResultHandler
+ * @param {string} text
+ */
+
 /**
  *
  * @param {LlmRequestParams} params
  * @param {ContextType} context
  * @param {ChatAgentRequest} llm
  * @param {LlmModifier} modifier
- * @param {function(string)} onStream
- * @return {Promise}
+ * @param {StreamResultHandler} onStream
+ * @returns {Promise}
  */
 async function requestCompletionsFromLLM(params, context, llm, modifier, onStream) {
     const historyDisable = ENV.AUTO_TRIM_HISTORY && ENV.MAX_HISTORY_LENGTH <= 0;
@@ -116,11 +119,10 @@ async function requestCompletionsFromLLM(params, context, llm, modifier, onStrea
 
 /**
  * 与LLM聊天
- *
  * @param {LlmRequestParams} params
  * @param {ContextType} context
  * @param {LlmModifier} modifier
- * @return {Promise}
+ * @returns {Promise}
  */
 export async function chatWithLLM(params, context, modifier) {
     try {
diff --git a/src/agent/mistralai.js b/src/agent/mistralai.js
index 6fb780c6..7a197a65 100644
--- a/src/agent/mistralai.js
+++ b/src/agent/mistralai.js
@@ -3,7 +3,7 @@ import {requestChatCompletions} from "./request.js";
 
 /**
  * @param {ContextType} context
- * @return {boolean}
+ * @returns {boolean}
  */
 export function isMistralAIEnable(context) {
     return !!(context.USER_CONFIG.MISTRAL_API_KEY);
@@ -11,7 +11,7 @@ export function isMistralAIEnable(context) {
 
 /**
  * @param {HistoryItem} item
- * @return {Object}
+ * @returns {object}
  */
 function renderMistralMessage(item) {
     return {
@@ -23,11 +23,10 @@ function renderMistralMessage(item) {
 
 /**
  * 发送消息到Mistral AI
- *
  * @param {LlmParams} params
  * @param {ContextType} context
- * @param {function} onStream
- * @return {Promise}
+ * @param {Function} onStream
+ * @returns {Promise}
  */
 export async function requestCompletionsFromMistralAI(params, context, onStream) {
     const {message, prompt, history} = params;
diff --git a/src/agent/openai.js b/src/agent/openai.js
index b4cc22aa..cdda2caa 100644
--- a/src/agent/openai.js
+++ b/src/agent/openai.js
@@ -6,7 +6,7 @@ import {imageToBase64String, renderBase64DataURI} from '../utils/utils.js';
 
 /**
  * @param {ContextType} context
- * @return {string|null}
+ * @returns {string|null}
  */
 function openAIKeyFromContext(context) {
     const length = context.USER_CONFIG.OPENAI_API_KEY.length;
@@ -16,7 +16,7 @@ function openAIKeyFromContext(context) {
 
 /**
  * @param {ContextType} context
- * @return {boolean}
+ * @returns {boolean}
  */
 export function isOpenAIEnable(context) {
     return context.USER_CONFIG.OPENAI_API_KEY.length > 0;
@@ -25,7 +25,7 @@ export function isOpenAIEnable(context) {
 
 /**
  * @param {HistoryItem} item
- * @return {Promise}
+ * @returns {Promise}
  */
 export async function renderOpenAIMessage(item) {
     const res = {
@@ -55,11 +55,10 @@ export async function renderOpenAIMessage(item) {
 
 /**
  * 发送消息到ChatGPT
- *
  * @param {LlmParams} params
  * @param {ContextType} context
- * @param {function} onStream
- * @return {Promise}
+ * @param {Function} onStream
+ * @returns {Promise}
  */
 export async function requestCompletionsFromOpenAI(params, context, onStream) {
 
@@ -88,10 +87,9 @@ export async function requestCompletionsFromOpenAI(params, context, onStream) {
 
 /**
  * 请求Openai生成图片
- *
  * @param {string} prompt
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 export async function requestImageFromOpenAI(prompt, context) {
     const url = `${context.USER_CONFIG.OPENAI_API_BASE}/images/generations`;
diff --git a/src/agent/request.js b/src/agent/request.js
index 7fe26c29..e518a169 100644
--- a/src/agent/request.js
+++ b/src/agent/request.js
@@ -5,23 +5,19 @@ import {Stream} from "./stream.js";
 
 /**
  *
- * @typedef {function} StreamBuilder
+ * @typedef {Function} StreamBuilder
  * @param {Response} resp
  * @param {AbortController} controller
- * @return {Stream}
- *
- * @typedef {function} SSEContentExtractor
+ * @returns {Stream}
+ * @typedef {Function} SSEContentExtractor
  * @param {object} data
- * @return {string|null}
- *
- * @typedef {function} FullContentExtractor
+ * @returns {string|null}
+ * @typedef {Function} FullContentExtractor
  * @param {object} data
- * @return {string|null}
- *
+ * @returns {string|null}
  * @typedef {object} ErrorExtractor
  * @param {object} data
- * @return {string|null}
- *
+ * @returns {string|null}
  * @typedef {object} SseChatCompatibleOptions
  * @property {StreamBuilder} streamBuilder
  * @property {SSEContentExtractor} contentExtractor
@@ -31,9 +27,8 @@ import {Stream} from "./stream.js";
 
 /**
  * 修复OpenAI兼容的选项
- *
  * @param {SseChatCompatibleOptions | null} options
- * @return {SseChatCompatibleOptions}
+ * @returns {SseChatCompatibleOptions}
  */
 function fixOpenAICompatibleOptions(options) {
     options = options || {};
@@ -54,7 +49,7 @@ function fixOpenAICompatibleOptions(options) {
 
 /**
  * @param {Response} resp
- * @return {boolean}
+ * @returns {boolean}
  */
 export function isJsonResponse(resp) {
     return resp.headers.get('content-type').indexOf('json') !== -1;
@@ -62,7 +57,7 @@ export function isJsonResponse(resp) {
 
 /**
  * @param {Response} resp
- * @return {boolean}
+ * @returns {boolean}
  */
 export function isEventStreamResponse(resp) {
     const types = ['application/stream+json', 'text/event-stream'];
@@ -77,15 +72,14 @@ export function isEventStreamResponse(resp) {
 
 /**
  * 发送请求到支持sse的聊天接口
- *
  * @param {string} url
  * @param {object} header
  * @param {object} body
  * @param {ContextType} context
- * @param {function} onStream
- * @param {function} onResult
+ * @param {Function} onStream
+ * @param {Function} onResult
  * @param {SseChatCompatibleOptions | null} options
- * @return {Promise}
+ * @returns {Promise}
  */
 export async function requestChatCompletions(url, header, body, context, onStream, onResult = null, options = null) {
     const controller = new AbortController();
diff --git a/src/agent/stream.js b/src/agent/stream.js
index a3212d46..a6517f1b 100644
--- a/src/agent/stream.js
+++ b/src/agent/stream.js
@@ -112,7 +112,23 @@ export class SSEDecoder {
     }
 }
 
+/**
+ * @typedef {object} SSEMessage
+ * @property {string} [event]
+ * @property {string} [data]
+ */
 
+/**
+ * @typedef {object} SSEParserResult
+ * @property {boolean} [finish]
+ * @property {any} [data]
+ */
+
+/**
+ *
+ * @param {SSEMessage} sse
+ * @returns {SSEParserResult}
+ */
 export function openaiSseJsonParser(sse) {
     // example:
     //      data: {}
@@ -130,6 +146,11 @@ export function openaiSseJsonParser(sse) {
     return {};
 }
 
+/**
+ *
+ * @param {SSEMessage} sse
+ * @returns {SSEParserResult}
+ */
 export function cohereSseJsonParser(sse) {
     // example:
     //      event: text-generation
@@ -154,6 +175,11 @@ export function cohereSseJsonParser(sse) {
     }
 }
 
+/**
+ *
+ * @param {SSEMessage} sse
+ * @returns {SSEParserResult}
+ */
 export function anthropicSseJsonParser(sse) {
     // example:
     //      event: content_block_delta
diff --git a/src/agent/workersai.js b/src/agent/workersai.js
index 2f3518d7..cffa19bc 100644
--- a/src/agent/workersai.js
+++ b/src/agent/workersai.js
@@ -3,12 +3,11 @@ import {requestChatCompletions} from "./request.js";
 
 /**
  * Run the specified AI model with the provided body data.
- *
  * @param {string} model - The AI model to run.
- * @param {Object} body - The data to provide to the AI model.
+ * @param {object} body - The data to provide to the AI model.
  * @param {string} id
  * @param {string} token
- * @return {Promise} The response from the AI model.
+ * @returns {Promise} The response from the AI model.
  */
 async function run(model, body, id, token) {
     return await fetch(
@@ -23,7 +22,7 @@ async function run(model, body, id, token) {
 
 /**
  * @param {ContextType} context
- * @return {boolean}
+ * @returns {boolean}
  */
 export function isWorkersAIEnable(context) {
     return !!(context.USER_CONFIG.CLOUDFLARE_ACCOUNT_ID && context.USER_CONFIG.CLOUDFLARE_TOKEN);
@@ -31,7 +30,7 @@ export function isWorkersAIEnable(context) {
 
 /**
  * @param {HistoryItem} item
- * @return {Object}
+ * @returns {object}
  */
 function renderWorkerAIMessage(item) {
     return {
@@ -43,11 +42,10 @@ function renderWorkerAIMessage(item) {
 
 /**
  * 发送消息到Workers AI
- *
  * @param {LlmParams} params
  * @param {ContextType} context
- * @param {function} onStream
- * @return {Promise}
+ * @param {Function} onStream
+ * @returns {Promise}
  */
 export async function requestCompletionsFromWorkersAI(params, context, onStream) {
 
@@ -89,7 +87,7 @@ export async function requestCompletionsFromWorkersAI(params, context, onStream)
 /**
  * @param {string} prompt
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 export async function requestImageFromWorkersAI(prompt, context) {
     const id = context.USER_CONFIG.CLOUDFLARE_ACCOUNT_ID;
diff --git a/src/config/context.js b/src/config/context.js
index 2b128464..86031cbf 100644
--- a/src/config/context.js
+++ b/src/config/context.js
@@ -3,7 +3,7 @@ import '../types/telegram.js';
 
 /**
  * @param {UserConfigType} userConfig
- * @return {object}
+ * @returns {object}
  */
 export function trimUserConfig(userConfig) {
     const config = {
@@ -81,9 +81,9 @@ export class Context {
     }
 
     //
+
     /**
      * 初始化用户配置
-     *
      * @inner
      * @param {string | null} storeKey
      */
@@ -186,7 +186,7 @@ export class Context {
 
     /**
      * @param {TelegramMessage} message
-     * @return {Promise}
+     * @returns {Promise}
      */
     async initContext(message) {
         // 按顺序初始化上下文
diff --git a/src/config/env.js b/src/config/env.js
index 592671be..3b870809 100644
--- a/src/config/env.js
+++ b/src/config/env.js
@@ -240,6 +240,11 @@ export const ENV_KEY_MAPPER = {
     WORKERS_AI_MODEL: 'WORKERS_CHAT_MODEL',
 };
 
+/**
+ *
+ * @param {string} raw
+ * @returns {string[]}
+ */
 function parseArray(raw) {
     if (raw.startsWith('[') && raw.endsWith(']')) {
         try {
@@ -251,6 +256,11 @@ function parseArray(raw) {
     return raw.split(',');
 }
 
+/**
+ *
+ * @param {object} target
+ * @param {object} source
+ */
 export function mergeEnvironment(target, source) {
     const sourceKeys = new Set(Object.keys(source));
     for (const key of Object.keys(target)) {
diff --git a/src/i18n/index.js b/src/i18n/index.js
index d670f982..a6b8e89a 100644
--- a/src/i18n/index.js
+++ b/src/i18n/index.js
@@ -7,7 +7,7 @@ import '../types/i18n.js';
 /**
  * @type {I18nGenerator}
  * @param {string} lang
- * @return {I18n}
+ * @returns {I18n}
  */
 export default function i18n(lang) {
     switch (lang.toLowerCase()) {
diff --git a/src/router.js b/src/router.js
index 3f3e8a83..85d9fe13 100644
--- a/src/router.js
+++ b/src/router.js
@@ -17,7 +17,7 @@ const footer = `
 
 /**
  * @param {string} key
- * @return {string}
+ * @returns {string}
  */
 function buildKeyNotFoundHTML(key) {
     return `

Please set the ${key} environment variable in Cloudflare Workers.

`;
@@ -26,7 +26,7 @@ function buildKeyNotFoundHTML(key) {
 /**
  *
  * @param {Request} request
- * @return {Promise}
+ * @returns {Promise}
  */
 async function bindWebHookAction(request) {
     const result = [];
@@ -65,7 +65,7 @@ async function bindWebHookAction(request) {
 /**
  * 处理Telegram回调
  * @param {Request} request
- * @return {Promise}
+ * @returns {Promise}
  */
 async function telegramWebhook(request) {
     try {
@@ -79,10 +79,9 @@ async function telegramWebhook(request) {
 
 /**
  *
- * 用API_GUARD处理Telegram回调
- *
+ *用API_GUARD处理Telegram回调
  * @param {Request} request
- * @return {Promise}
+ * @returns {Promise}
  */
 async function telegramSafeHook(request) {
     try {
@@ -101,7 +100,7 @@ async function telegramSafeHook(request) {
 }
 
 /**
- * @return {Promise}
+ * @returns {Promise}
  */
 async function defaultIndexAction() {
     const HTML = renderHTML(`
@@ -125,7 +124,7 @@ async function defaultIndexAction() {
 }
 
 /**
- * @return {Promise}
+ * @returns {Promise}
  */
 async function loadBotInfo() {
     const result = [];
@@ -154,7 +153,7 @@ async function loadBotInfo() {
 
 /**
  * @param {Request} request
- * @return {Promise}
+ * @returns {Promise}
  */
 export async function handleRequest(request) {
     const {pathname} = new URL(request.url);
diff --git a/src/telegram/command.js b/src/telegram/command.js
index 49772d47..5da19384 100644
--- a/src/telegram/command.js
+++ b/src/telegram/command.js
@@ -117,12 +117,11 @@ const commandHandlers = {
 
 /**
  * /img 命令
- *
  * @param {TelegramMessage} message
  * @param {string} command
  * @param {string} subcommand
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function commandGenerateImg(message, command, subcommand, context) {
     if (subcommand === '') {
@@ -143,12 +142,11 @@ async function commandGenerateImg(message, command, subcommand, context) {
 
 /**
  * /help 获取帮助信息
- *
  * @param {TelegramMessage} message
  * @param {string} command
  * @param {string} subcommand
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function commandGetHelp(message, command, subcommand, context) {
     let helpMsg = ENV.I18N.command.help.summary + '\n';
@@ -164,12 +162,11 @@ async function commandGetHelp(message, command, subcommand, context) {
 
 /**
  * /new /start 新的会话
- *
  * @param {TelegramMessage} message
  * @param {string} command
  * @param {string} subcommand
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function commandCreateNewChatContext(message, command, subcommand, context) {
     try {
@@ -191,12 +188,11 @@ async function commandCreateNewChatContext(message, command, subcommand, context
 
 /**
  * /setenv 用户配置修改
- *
  * @param {TelegramMessage} message
  * @param {string} command
  * @param {string} subcommand
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function commandUpdateUserConfig(message, command, subcommand, context) {
     const kv = subcommand.indexOf('=');
@@ -231,12 +227,11 @@ async function commandUpdateUserConfig(message, command, subcommand, context) {
 
 /**
  * /setenvs 批量用户配置修改
- *
  * @param {TelegramMessage} message
  * @param {string} command
  * @param {string} subcommand
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function commandUpdateUserConfigs(message, command, subcommand, context) {
     try {
@@ -270,12 +265,11 @@ async function commandUpdateUserConfigs(message, command, subcommand, context) {
 
 /**
  * /delenv 用户配置修改
- *
  * @param {TelegramMessage} message
  * @param {string} command
  * @param {string} subcommand
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function commandDeleteUserConfig(message, command, subcommand, context) {
     if (ENV.LOCK_USER_CONFIG_KEYS.includes(subcommand)) {
@@ -298,12 +292,11 @@ async function commandDeleteUserConfig(message, command, subcommand, context) {
 
 /**
  * /clearenv 清空用户配置
- *
  * @param {TelegramMessage} message
  * @param {string} command
  * @param {string} subcommand
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function commandClearUserConfig(message, command, subcommand, context) {
     try {
@@ -320,12 +313,11 @@ async function commandClearUserConfig(message, command, subcommand, context) {
 
 /**
  * /version 获得更新信息
- *
  * @param {TelegramMessage} message
  * @param {string} command
  * @param {string} subcommand
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function commandFetchUpdate(message, command, subcommand, context) {
 
@@ -353,12 +345,11 @@ async function commandFetchUpdate(message, command, subcommand, context) {
 
 /**
  * /system 获得系统信息
- *
  * @param {TelegramMessage} message
  * @param {string} command
  * @param {string} subcommand
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function commandSystem(message, command, subcommand, context) {
     let chatAgent = loadChatLLM(context)?.name;
@@ -400,12 +391,11 @@ async function commandSystem(message, command, subcommand, context) {
 
 /**
  * /redo 重新生成上一条消息
- *
  * @param {TelegramMessage} message
  * @param {string} command
  * @param {string} subcommand
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function commandRegenerate(message, command, subcommand, context) {
     const mf = (history, text) => {
@@ -435,12 +425,11 @@ async function commandRegenerate(message, command, subcommand, context) {
 
 /**
  * /echo 回显消息
- *
  * @param {TelegramMessage} message
  * @param {string} command
  * @param {string} subcommand
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function commandEcho(message, command, subcommand, context) {
     let msg = '
';
@@ -452,10 +441,9 @@ async function commandEcho(message, command, subcommand, context) {
 
 /**
  * 处理命令消息
- *
  * @param {TelegramMessage} message
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 export async function handleCommandMessage(message, context) {
     if (ENV.DEV_MODE) {
@@ -503,9 +491,8 @@ export async function handleCommandMessage(message, context) {
 
 /**
  * 绑定命令到Telegram
- *
  * @param {string} token
- * @return {Promise<{result: {}, ok: boolean}>}
+ * @returns {Promise<{result: {}, ok: boolean}>}
  */
 export async function bindCommandForTelegram(token) {
     const scopeCommandMap = {
@@ -553,7 +540,7 @@ export async function bindCommandForTelegram(token) {
 
 /**
  * 获取所有命令的描述
- * @return {{description: *, command: *}[]}
+ * @returns {{description: *, command: *}[]}
  */
 export function commandsDocument() {
     return Object.keys(commandHandlers).map((key) => {
diff --git a/src/telegram/message.js b/src/telegram/message.js
index a1e68895..12f84aad 100644
--- a/src/telegram/message.js
+++ b/src/telegram/message.js
@@ -10,10 +10,9 @@ import '../types/telegram.js';
 
 /**
  * 初始化聊天上下文
- *
  * @param {TelegramMessage} message
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function msgInitChatContext(message, context) {
     await context.initContext(message);
@@ -23,10 +22,9 @@ async function msgInitChatContext(message, context) {
 
 /**
  * 保存最后一条消息
- *
  * @param {TelegramMessage} message
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function msgSaveLastMessage(message, context) {
     if (ENV.DEBUG_MODE) {
@@ -38,10 +36,9 @@ async function msgSaveLastMessage(message, context) {
 
 /**
  * 忽略旧的消息
- *
  * @param {TelegramMessage} message
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function msgIgnoreOldMessage(message, context) {
     if (ENV.SAFE_MODE) {
@@ -67,10 +64,9 @@ async function msgIgnoreOldMessage(message, context) {
 
 /**
  * 检查环境变量是否设置
- *
  * @param {TelegramMessage} message
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function msgCheckEnvIsReady(message, context) {
     if (!DATABASE) {
@@ -81,10 +77,9 @@ async function msgCheckEnvIsReady(message, context) {
 
 /**
  * 过滤非白名单用户
- *
  * @param {TelegramMessage} message
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function msgFilterWhiteList(message, context) {
     if (ENV.I_AM_A_GENEROUS_PERSON) {
@@ -123,10 +118,9 @@ async function msgFilterWhiteList(message, context) {
 
 /**
  * 过滤不支持的消息
- *
  * @param {TelegramMessage} message
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 // eslint-disable-next-line no-unused-vars
 async function msgFilterUnsupportedMessage(message, context) {
@@ -141,10 +135,9 @@ async function msgFilterUnsupportedMessage(message, context) {
 
 /**
  * 处理群消息
- *
  * @param {TelegramMessage} message
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function msgHandleGroupMessage(message, context) {
     // 非群组消息不作处理
@@ -230,10 +223,9 @@ async function msgHandleGroupMessage(message, context) {
 
 /**
  * 响应命令消息
- *
  * @param {TelegramMessage} message
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function msgHandleCommand(message, context) {
     if (!message.text) {
@@ -245,10 +237,9 @@ async function msgHandleCommand(message, context) {
 
 /**
  * 与llm聊天
- *
  * @param {TelegramMessage} message
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function msgChatWithLLM(message, context) {
     const { text, caption } = message;
@@ -279,10 +270,9 @@ async function msgChatWithLLM(message, context) {
 
 /**
  * 加载真实TG消息
- *
  * @param {Request} request
  * @param {ContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 // eslint-disable-next-line no-unused-vars
 async function loadMessage(request, context) {
@@ -302,9 +292,8 @@ async function loadMessage(request, context) {
 
 /**
  * 处理消息
- *
  * @param {Request} request
- * @return {Promise}
+ * @returns {Promise}
  */
 export async function handleMessage(request) {
     const context = new Context();
diff --git a/src/telegram/telegram.js b/src/telegram/telegram.js
index bfac3412..fe7b53f1 100644
--- a/src/telegram/telegram.js
+++ b/src/telegram/telegram.js
@@ -6,7 +6,7 @@ import "../types/context.js";
  * @param {string} message
  * @param {string} token
  * @param {object} context
- * @return {Promise}
+ * @returns {Promise}
  */
 async function sendMessage(message, token, context) {
     const body = {
@@ -38,7 +38,7 @@ async function sendMessage(message, token, context) {
  * @param {string} message
  * @param {string} token
  * @param {CurrentChatContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 export async function sendMessageToTelegram(message, token, context) {
     const chatContext = context;
@@ -76,7 +76,7 @@ export async function sendMessageToTelegram(message, token, context) {
 
 /**
  * @param {ContextType} context
- * @return {function(string): Promise}
+ * @returns {function(string): Promise}
  */
 export function sendMessageToTelegramWithContext(context) {
     return async (message) => {
@@ -86,7 +86,7 @@ export function sendMessageToTelegramWithContext(context) {
 
 /**
  * @param {ContextType} context
- * @return {function(string): Promise}
+ * @returns {function(string): Promise}
  */
 export function deleteMessageFromTelegramWithContext(context) {
     return async (messageId) => {
@@ -109,11 +109,10 @@ export function deleteMessageFromTelegramWithContext(context) {
 
 /**
  * 发送图片消息到Telegram
- *
  * @param {string | Blob} photo
  * @param {string} token
  * @param {CurrentChatContextType} context
- * @return {Promise}
+ * @returns {Promise}
  */
 export async function sendPhotoToTelegram(photo, token, context) {
     const url = `${ENV.TELEGRAM_API_DOMAIN}/bot${token}/sendPhoto`;
@@ -150,7 +149,7 @@ export async function sendPhotoToTelegram(photo, token, context) {
 
 /**
  * @param {ContextType} context
- * @return {function(string): Promise}
+ * @returns {function(string): Promise}
  */
 export function sendPhotoToTelegramWithContext(context) {
     return (url) => {
@@ -161,12 +160,10 @@ export function sendPhotoToTelegramWithContext(context) {
 
 /**
  * 发送聊天动作到TG
- *
  * @param {string} action
  * @param {string} token
  * @param {string | number} chatId
- *
- * @return {Promise}
+ * @returns {Promise}
  */
 export async function sendChatActionToTelegram(action, token, chatId) {
     return await fetch(
@@ -186,7 +183,7 @@ export async function sendChatActionToTelegram(action, token, chatId) {
 
 /**
  * @param {ContextType} context
- * @return {function(string): Promise}
+ * @returns {function(string): Promise}
  */
 export function sendChatActionToTelegramWithContext(context) {
     return (action) => {
@@ -197,7 +194,7 @@ export function sendChatActionToTelegramWithContext(context) {
 /**
  * @param {string} token
  * @param {string} url
- * @return {Promise}
+ * @returns {Promise}
  */
 export async function bindTelegramWebHook(token, url) {
     return await fetch(
@@ -216,12 +213,11 @@ export async function bindTelegramWebHook(token, url) {
 
 /**
  * 判断是否为群组管理员
- *
  * @param {string | number} id
  * @param {string} groupAdminKey
  * @param {string | number} chatId
  * @param {string} token
- * @return {Promise}
+ * @returns {Promise}
  */
 export async function getChatRole(id, groupAdminKey, chatId, token) {
     let groupAdmin;
@@ -255,9 +251,8 @@ export async function getChatRole(id, groupAdminKey, chatId, token) {
 
 /**
  * 判断是否为群组管理员
- *
  * @param {ContextType} context
- * @return {function(*): Promise}
+ * @returns {function(*): Promise}
  */
 export function getChatRoleWithContext(context) {
     return (id) => {
@@ -267,10 +262,9 @@ export function getChatRoleWithContext(context) {
 
 /**
  * 获取群组管理员信息
- *
  * @param {string | number} chatId
  * @param {string} token
- * @return {Promise}
+ * @returns {Promise}
  */
 export async function getChatAdminister(chatId, token) {
     try {
@@ -296,6 +290,7 @@ export async function getChatAdminister(chatId, token) {
 }
 
 // 获取机器人信息
+
 /**
  * @typedef {object} BotInfo
  * @property {boolean} ok
@@ -305,10 +300,11 @@ export async function getChatAdminister(chatId, token) {
  * @property {boolean} info.can_join_groups
  * @property {boolean} info.can_read_all_group_messages
  */
+
 /**
  *
  * @param {string} token
- * @return {Promise}
+ * @returns {Promise}
  */
 export async function getBot(token) {
     const resp = await fetch(
diff --git a/src/types/agent.js b/src/types/agent.js
index e3596fd7..08e7ef08 100644
--- a/src/types/agent.js
+++ b/src/types/agent.js
@@ -12,18 +12,17 @@
 
 /**
  *
- * @typedef {function} ChatAgentRequest
+ * @typedef {Function} ChatAgentRequest
  * @param {LlmParams} params
  * @param {ContextType} context
- * @param {function} onStream
- * @return {Promise}
- *
+ * @param {Function} onStream
+ * @returns {Promise}
  */
 
 /**
  * @typedef {object} ChatAgent
  * @property {string} name
- * @property {function} enable
+ * @property {Function} enable
  * @property {ChatAgentRequest} request
  */
 
diff --git a/src/types/context.js b/src/types/context.js
index 2c8b71f4..0409780d 100644
--- a/src/types/context.js
+++ b/src/types/context.js
@@ -1,56 +1,43 @@
 
 /**
  * 用于保存用户配置
- *
- * @typedef {Object} UserConfigType
- *
+ * @typedef {object} UserConfigType
  * @property {string[]} DEFINE_KEYS
- *
  * @property {string} AI_PROVIDER
  * @property {string} AI_IMAGE_PROVIDER
  * @property {?string} SYSTEM_INIT_MESSAGE
  * @property {string} SYSTEM_INIT_MESSAGE_ROLE
- *
  * @property {string[]} OPENAI_API_KEY
  * @property {string} OPENAI_CHAT_MODEL
  * @property {string} OPENAI_API_BASE
  * @property {object} OPENAI_API_EXTRA_PARAMS
- *
  * @property {string} DALL_E_MODEL
  * @property {string} DALL_E_IMAGE_SIZE
  * @property {string} DALL_E_IMAGE_QUALITY
  * @property {string} DALL_E_IMAGE_STYLE
- *
  * @property {?string} AZURE_API_KEY
  * @property {?string} AZURE_COMPLETIONS_API
  * @property {?string} AZURE_DALLE_API
- *
  * @property {?string} CLOUDFLARE_ACCOUNT_ID
  * @property {?string} CLOUDFLARE_TOKEN
  * @property {string} WORKERS_CHAT_MODEL
  * @property {string} WORKERS_IMAGE_MODEL
- *
  * @property {?string} GOOGLE_API_KEY
  * @property {string} GOOGLE_COMPLETIONS_API
  * @property {string} GOOGLE_COMPLETIONS_MODEL
- *
  * @property {?string} MISTRAL_API_KEY
  * @property {string} MISTRAL_API_BASE
  * @property {string} MISTRAL_CHAT_MODEL
- *
  * @property {?string} COHERE_API_KEY
  * @property {string} COHERE_API_BASE
  * @property {string} COHERE_CHAT_MODEL
- *
  * @property {?string} ANTHROPIC_API_KEY
  * @property {string} ANTHROPIC_API_BASE
  * @property {string} ANTHROPIC_CHAT_MODEL
- *
  */
 
 /**
  * 用于保存全局使用的临时变量
- *
  * @typedef {object} ShareContextType
  * @property {?string} currentBotId - 当前机器人 ID
  * @property {?string} currentBotToken - 当前机器人 Token
@@ -64,11 +51,10 @@
  * @property {?TelegramID} chatId - 会话 id, private 场景为发言人 id, group/supergroup 场景为群组 id
  * @property {?TelegramID} speakerId - 发言人 id
  * @property {?object} extraMessageContext - 额外消息上下文
- * */
+ */
 
 /**
  * 用于保存发起telegram请求的聊天上下文
- *
  * @typedef {object} CurrentChatContextType
  * @property {?TelegramID} chat_id
  * @property {?TelegramID} reply_to_message_id - 如果是群组,这个值为消息ID,否则为null
@@ -81,7 +67,6 @@
 
 /**
  * @typedef {object} ContextType
- *
  * @property {UserConfigType} USER_CONFIG - 用户配置
  * @property {CurrentChatContextType} CURRENT_CHAT_CONTEXT - 当前聊天上下文
  * @property {ShareContextType} SHARE_CONTEXT - 共享上下文
diff --git a/src/types/i18n.js b/src/types/i18n.js
index 91ee4468..e2852b82 100644
--- a/src/types/i18n.js
+++ b/src/types/i18n.js
@@ -1,8 +1,8 @@
 /**
- * @typedef {Object} I18n
- *
+ * @typedef {object} I18n
+ * @property {object} env
  * @property {string} env.system_init_message
- *
+ * @property {object} command
  * @property {string} command.help.summary
  * @property {string} command.help.help
  * @property {string} command.help.new
@@ -15,12 +15,11 @@
  * @property {string} command.help.system
  * @property {string} command.help.redo
  * @property {string} command.help.echo
- *
  * @property {string} command.new.new_chat_start
  */
 
 /**
  * @callback I18nGenerator
  * @param {string} lang
- * @return {I18n}
+ * @returns {I18n}
  */
diff --git a/src/types/telegram.js b/src/types/telegram.js
index 7f9413ec..f334d964 100644
--- a/src/types/telegram.js
+++ b/src/types/telegram.js
@@ -4,15 +4,13 @@
 
 
 /**
- * @typedef {Object} TelegramBaseFile
+ * @typedef {object} TelegramBaseFile
  * @property {string} file_id - Unique identifier for this file.
  * @property {string} file_unique_id - Unique identifier for this file, which is supposed to be the same over time and for different bots.
  * @property {number} file_size - Optional. File size, if known.
- *
  * @typedef {TelegramBaseFile} TelegramPhoto
  * @property {number} width - Photo width.
  * @property {number} height - Photo height.
- *
  * @typedef {TelegramBaseFile} TelegramVoice
  * @property {number} duration - Duration of the audio in seconds.
  * @property {string} mime_type - Optional. MIME type of the file as defined by sender.
@@ -20,7 +18,7 @@
 
 
 /**
- * @typedef {Object} TelegramUser
+ * @typedef {object} TelegramUser
  * @property {TelegramID} id - The ID of the user.
  * @property {boolean} is_bot - True, if the user is a bot.
  * @property {string} first_name - The first name of the user.
@@ -30,14 +28,14 @@
  */
 
 /**
- * @typedef {Object} TelegramChat
+ * @typedef {object} TelegramChat
  * @property {TelegramID} id - The ID of the chat.
  * @property {string} type - The type of the chat.
  * @property {boolean} is_forum - True, if the chat is a forum.
  */
 
 /**
- * @typedef {Object} TelegramMessageEntity
+ * @typedef {object} TelegramMessageEntity
  * @property {string} type - Type of the entity.
  * @property {number} offset - Offset in UTF-16 code units to the start of the entity.
  * @property {number} length - Length of the entity in UTF-16 code units.
@@ -46,7 +44,7 @@
  */
 
 /**
- * @typedef {Object} TelegramMessage
+ * @typedef {object} TelegramMessage
  * @property {number} message_id - The message's unique identifier.
  * @property {TelegramUser} from - The user that sent the message.
  * @property {TelegramChat} chat - The chat where the message was sent.
@@ -62,7 +60,7 @@
  */
 
 /**
- * @typedef {Object} TelegramWebhookRequest
+ * @typedef {object} TelegramWebhookRequest
  * @property {TelegramID} update_id - The update's unique identifier.
  * @property {TelegramMessage} message - The message
  * @property {TelegramMessage} edited_message - The edited message
diff --git a/src/utils/md2tgmd.js b/src/utils/md2tgmd.js
index 11983b93..d45a1607 100644
--- a/src/utils/md2tgmd.js
+++ b/src/utils/md2tgmd.js
@@ -4,7 +4,7 @@ const escapeChars = /([\_\*\[\]\(\)\\\~\`\>\#\+\-\=\|\{\}\.\!])/g;
 /**
  * 分割代码块文本 适配嵌套代码块
  * @param {string} text
- * @return {string} text
+ * @returns {string} text
  */
 export function escape(text) {
     const lines = text.split('\n');
@@ -45,7 +45,7 @@ export function escape(text) {
  * 处理转义
  * @param {string} text
  * @param {string} type
- * @return {string} text
+ * @returns {string} text
  */
 function handleEscape(text, type = 'text') {
     if (!text.trim()) {
diff --git a/src/utils/utils.js b/src/utils/utils.js
index c15b654c..9ca38567 100644
--- a/src/utils/utils.js
+++ b/src/utils/utils.js
@@ -1,6 +1,6 @@
 /**
  * @param {number} length
- * @return {string}
+ * @returns {string}
  */
 export function randomString(length) {
     const chars = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ';
@@ -11,7 +11,7 @@ export function randomString(length) {
 
 /**
  * @param {string} body
- * @return {string}
+ * @returns {string}
  */
 export function renderHTML(body) {
     return `
@@ -63,7 +63,7 @@ export function renderHTML(body) {
 
 /**
  * @param {Error} e
- * @return {string}
+ * @returns {string}
  */
 export function errorToString(e) {
     return JSON.stringify({
@@ -76,7 +76,7 @@ export function errorToString(e) {
 /**
  *
  * @param {Response} resp
- * @return {Response}
+ * @returns {Response}
  */
 export async function makeResponse200(resp) {
     if (resp === null) {
@@ -147,8 +147,7 @@ function getImageFormatFromBase64(base64String) {
  * @typedef {object} DataBase64
  * @property {string} data
  * @property {string} format
- *
- * @param url
+ * @param {string} url
  * @returns {Promise}
  */
 export async function imageToBase64String(url) {
diff --git a/yarn.lock b/yarn.lock
index e5bd4751..afecd589 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -41,6 +41,15 @@
   dependencies:
     "@jridgewell/trace-mapping" "0.3.9"
 
+"@es-joy/jsdoccomment@~0.46.0":
+  version "0.46.0"
+  resolved "https://registry.yarnpkg.com/@es-joy/jsdoccomment/-/jsdoccomment-0.46.0.tgz#47a2ee4bfc0081f252e058272dfab680aaed464d"
+  integrity sha512-C3Axuq1xd/9VqFZpW4YAzOx5O9q/LP46uIQy/iNDpHG3fmPa6TBtvfglMCs3RBiBxAIi0Go97r8+jvTt55XMyQ==
+  dependencies:
+    comment-parser "1.4.1"
+    esquery "^1.6.0"
+    jsdoc-type-pratt-parser "~4.0.0"
+
 "@esbuild-plugins/node-globals-polyfill@^0.2.3":
   version "0.2.3"
   resolved "https://registry.yarnpkg.com/@esbuild-plugins/node-globals-polyfill/-/node-globals-polyfill-0.2.3.tgz#0e4497a2b53c9e9485e149bc92ddb228438d6bcf"
@@ -264,6 +273,11 @@
     "@nodelib/fs.scandir" "2.1.5"
     fastq "^1.6.0"
 
+"@pkgr/core@^0.1.0":
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/@pkgr/core/-/core-0.1.1.tgz#1ec17e2edbec25c8306d424ecfbf13c7de1aaa31"
+  integrity sha512-cq8o4cWH0ibXh9VGi5P20Tu9XF/0fFXl9EUinr9QfTM7a7p0oTA4iJRCQWppXR1Pg8dSM0UCItCkPwsk9qWWYA==
+
 "@types/node-forge@^1.3.0":
   version "1.3.11"
   resolved "https://registry.yarnpkg.com/@types/node-forge/-/node-forge-1.3.11.tgz#0972ea538ddb0f4d9c2fa0ec5db5724773a604da"
@@ -325,6 +339,11 @@ anymatch@~3.1.2:
     normalize-path "^3.0.0"
     picomatch "^2.0.4"
 
+are-docs-informative@^0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/are-docs-informative/-/are-docs-informative-0.0.2.tgz#387f0e93f5d45280373d387a59d34c96db321963"
+  integrity sha512-ixiS0nLNNG5jNQzgZJNoUpBKdo9yTYZMGJ+QgT2jmjR7G7+QHRCc4v6LQ3NgE7EBJq+o0ams3waJwkrlBom8Ig==
+
 argparse@^2.0.1:
   version "2.0.1"
   resolved "https://registry.yarnpkg.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38"
@@ -415,6 +434,11 @@ color-name@~1.1.4:
   resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2"
   integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
 
+comment-parser@1.4.1:
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/comment-parser/-/comment-parser-1.4.1.tgz#bdafead37961ac079be11eb7ec65c4d021eaf9cc"
+  integrity sha512-buhp5kePrmda3vhc5B9t7pUQXAb2Tnd0qgpkIhPhkHXxJpiPJ11H0ZEU0oBpJ2QztSbzG/ZxMj/CHsYJqRHmyg==
+
 concat-map@0.0.1:
   version "0.0.1"
   resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
@@ -456,6 +480,13 @@ debug@^4.3.1, debug@^4.3.2:
   dependencies:
     ms "2.1.2"
 
+debug@^4.3.5:
+  version "4.3.6"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.6.tgz#2ab2c38fbaffebf8aa95fdfe6d88438c7a13c52b"
+  integrity sha512-O/09Bd4Z1fBrU4VzkhFqVgpPzaGbw6Sm9FEkBT1A/YBXQFGuuSxa1dN2nxgxS34JmKXqYx8CZAwEVoJFImUXIg==
+  dependencies:
+    ms "2.1.2"
+
 deep-is@^0.1.3:
   version "0.1.4"
   resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.4.tgz#a6f2dce612fadd2ef1f519b73551f17e85199831"
@@ -466,6 +497,11 @@ defu@^6.1.4:
   resolved "https://registry.yarnpkg.com/defu/-/defu-6.1.4.tgz#4e0c9cf9ff68fe5f3d7f2765cc1a012dfdcb0479"
   integrity sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg==
 
+es-module-lexer@^1.5.3:
+  version "1.5.4"
+  resolved "https://registry.yarnpkg.com/es-module-lexer/-/es-module-lexer-1.5.4.tgz#a8efec3a3da991e60efa6b633a7cad6ab8d26b78"
+  integrity sha512-MVNK56NiMrOwitFB7cqDwq0CQutbw+0BvLshJSse0MUNU+y1FC3bUS/AQg7oUng+/wKrrki7JfmwtVHkVfPLlw==
+
 esbuild@0.17.19, esbuild@^0.17.11:
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.17.19.tgz#087a727e98299f0462a3d0bcdd9cd7ff100bd955"
@@ -499,6 +535,23 @@ escape-string-regexp@^4.0.0:
   resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34"
   integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==
 
+eslint-plugin-jsdoc@^49.0.0:
+  version "49.0.0"
+  resolved "https://registry.yarnpkg.com/eslint-plugin-jsdoc/-/eslint-plugin-jsdoc-49.0.0.tgz#8520227f30cdd4ce20e321979acf7fa68107a503"
+  integrity sha512-MlLQpFmann7YkTpjJfsWiSSG877RrijLfg5A1lwfl2FAFTM343Kd+HRt/cnLG+rjypSwaWVqgEgtBTIovnS6qA==
+  dependencies:
+    "@es-joy/jsdoccomment" "~0.46.0"
+    are-docs-informative "^0.0.2"
+    comment-parser "1.4.1"
+    debug "^4.3.5"
+    escape-string-regexp "^4.0.0"
+    espree "^10.1.0"
+    esquery "^1.6.0"
+    parse-imports "^2.1.1"
+    semver "^7.6.3"
+    spdx-expression-parse "^4.0.0"
+    synckit "^0.9.1"
+
 eslint-scope@^8.0.2:
   version "8.0.2"
   resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-8.0.2.tgz#5cbb33d4384c9136083a71190d548158fe128f94"
@@ -566,7 +619,7 @@ espree@^10.0.1, espree@^10.1.0:
     acorn-jsx "^5.3.2"
     eslint-visitor-keys "^4.0.0"
 
-esquery@^1.5.0:
+esquery@^1.5.0, esquery@^1.6.0:
   version "1.6.0"
   resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.6.0.tgz#91419234f804d852a82dceec3e16cdc22cf9dae7"
   integrity sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==
@@ -782,6 +835,11 @@ js-yaml@^4.1.0:
   dependencies:
     argparse "^2.0.1"
 
+jsdoc-type-pratt-parser@~4.0.0:
+  version "4.0.0"
+  resolved "https://registry.yarnpkg.com/jsdoc-type-pratt-parser/-/jsdoc-type-pratt-parser-4.0.0.tgz#136f0571a99c184d84ec84662c45c29ceff71114"
+  integrity sha512-YtOli5Cmzy3q4dP26GraSOeAhqecewG04hoO8DY56CH4KJ9Fvv5qKWUCCo3HZob7esJQHCv6/+bnTy72xZZaVQ==
+
 json-buffer@3.0.1:
   version "3.0.1"
   resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.1.tgz#9338802a30d3b6605fbe0613e094008ca8c05a13"
@@ -929,6 +987,14 @@ parent-module@^1.0.0:
   dependencies:
     callsites "^3.0.0"
 
+parse-imports@^2.1.1:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/parse-imports/-/parse-imports-2.1.1.tgz#ce52141df24990065d72a446a364bffd595577f4"
+  integrity sha512-TDT4HqzUiTMO1wJRwg/t/hYk8Wdp3iF/ToMIlAoVQfL1Xs/sTxq1dKWSMjMbQmIarfWKymOyly40+zmPHXMqCA==
+  dependencies:
+    es-module-lexer "^1.5.3"
+    slashes "^3.0.12"
+
 path-exists@^4.0.0:
   version "4.0.0"
   resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3"
@@ -1048,6 +1114,11 @@ selfsigned@^2.0.1:
     "@types/node-forge" "^1.3.0"
     node-forge "^1"
 
+semver@^7.6.3:
+  version "7.6.3"
+  resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.3.tgz#980f7b5550bc175fb4dc09403085627f9eb33143"
+  integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==
+
 shebang-command@^2.0.0:
   version "2.0.0"
   resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea"
@@ -1060,6 +1131,11 @@ shebang-regex@^3.0.0:
   resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172"
   integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==
 
+slashes@^3.0.12:
+  version "3.0.12"
+  resolved "https://registry.yarnpkg.com/slashes/-/slashes-3.0.12.tgz#3d664c877ad542dc1509eaf2c50f38d483a6435a"
+  integrity sha512-Q9VME8WyGkc7pJf6QEkj3wE+2CnvZMI+XJhwdTPR8Z/kWQRXi7boAWLDibRPyHRTUTPx5FaU7MsyrjI3yLB4HA==
+
 source-map@^0.6.1:
   version "0.6.1"
   resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263"
@@ -1070,6 +1146,24 @@ sourcemap-codec@^1.4.8:
   resolved "https://registry.yarnpkg.com/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz#ea804bd94857402e6992d05a38ef1ae35a9ab4c4"
   integrity sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==
 
+spdx-exceptions@^2.1.0:
+  version "2.5.0"
+  resolved "https://registry.yarnpkg.com/spdx-exceptions/-/spdx-exceptions-2.5.0.tgz#5d607d27fc806f66d7b64a766650fa890f04ed66"
+  integrity sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w==
+
+spdx-expression-parse@^4.0.0:
+  version "4.0.0"
+  resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-4.0.0.tgz#a23af9f3132115465dac215c099303e4ceac5794"
+  integrity sha512-Clya5JIij/7C6bRR22+tnGXbc4VKlibKSVj2iHvVeX5iMW7s1SIQlqu699JkODJJIhh/pUu8L0/VLh8xflD+LQ==
+  dependencies:
+    spdx-exceptions "^2.1.0"
+    spdx-license-ids "^3.0.0"
+
+spdx-license-ids@^3.0.0:
+  version "3.0.18"
+  resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.18.tgz#22aa922dcf2f2885a6494a261f2d8b75345d0326"
+  integrity sha512-xxRs31BqRYHwiMzudOrpSiHtZ8i/GeionCBDSilhYRj+9gIcI8wCZTlXZKu9vZIVqViP3dcp9qE5G6AlIaD+TQ==
+
 stacktracey@^2.1.8:
   version "2.1.8"
   resolved "https://registry.yarnpkg.com/stacktracey/-/stacktracey-2.1.8.tgz#bf9916020738ce3700d1323b32bd2c91ea71199d"
@@ -1107,6 +1201,14 @@ supports-preserve-symlinks-flag@^1.0.0:
   resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09"
   integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==
 
+synckit@^0.9.1:
+  version "0.9.1"
+  resolved "https://registry.yarnpkg.com/synckit/-/synckit-0.9.1.tgz#febbfbb6649979450131f64735aa3f6c14575c88"
+  integrity sha512-7gr8p9TQP6RAHusBOSLs46F4564ZrjV8xFmw5zCmgmhGUcw2hxsShhJ6CEiHQMgPDwAQ1fWHPM0ypc4RMAig4A==
+  dependencies:
+    "@pkgr/core" "^0.1.0"
+    tslib "^2.6.2"
+
 text-table@^0.2.0:
   version "0.2.0"
   resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4"
@@ -1119,7 +1221,7 @@ to-regex-range@^5.0.1:
   dependencies:
     is-number "^7.0.0"
 
-tslib@^2.2.0:
+tslib@^2.2.0, tslib@^2.6.2:
   version "2.6.3"
   resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.3.tgz#0438f810ad7a9edcde7a241c3d80db693c8cbfe0"
   integrity sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==
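
The new devDependency entries above (eslint-plugin-jsdoc plus its helpers comment-parser, jsdoc-type-pratt-parser, synckit, and so on) are what drive the JSDoc normalization in this patch: @return becomes @returns, {Object} becomes {object}, {function} becomes {Function}. The project's ESLint config file is not part of this patch series, so the flat-config wiring below is only a sketch of how such a setup typically looks; the file name and the extra rule relaxations are assumptions, not code from this repository.

// eslint.config.js — illustrative sketch only; not taken from this repository.
import jsdoc from 'eslint-plugin-jsdoc';

export default [
    // The plugin's recommended preset includes jsdoc/check-tag-names and
    // jsdoc/check-types, which flag `@return` in favour of `@returns` and
    // non-canonical type names such as `{Object}` in favour of `{object}`.
    jsdoc.configs['flat/recommended'],
    {
        rules: {
            // Assumed relaxations so bare `@param {ContextType} context` tags,
            // as used throughout src/, pass without a prose description.
            'jsdoc/require-param-description': 'off',
            'jsdoc/require-returns-description': 'off',
        },
    },
];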

From 1fb3fbccea65880385ddb7f622dfa15b12adfbdd Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Wed, 7 Aug 2024 18:28:43 +0800
Subject: [PATCH 19/24] =?UTF-8?q?doc:=20=E4=BC=98=E5=8C=96jsdoc?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/agent/llm.js  | 3 +--
 src/types/i18n.js | 4 +---
 2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/src/agent/llm.js b/src/agent/llm.js
index dc61c84b..f3e575b7 100644
--- a/src/agent/llm.js
+++ b/src/agent/llm.js
@@ -80,8 +80,7 @@ async function loadHistory(key) {
  */
 
 /**
- * @callback StreamResultHandler
- * @param {string} text
+ * @typedef {function (string): Promise} StreamResultHandler
  */
 
 /**
diff --git a/src/types/i18n.js b/src/types/i18n.js
index e2852b82..14a6c832 100644
--- a/src/types/i18n.js
+++ b/src/types/i18n.js
@@ -19,7 +19,5 @@
  */
 
 /**
- * @callback I18nGenerator
- * @param {string} lang
- * @returns {I18n}
+ * @typedef {function (string): I18n} I18nGenerator
  */
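
The change above collapses a three-line @callback block into a single function-valued @typedef. For JSDoc-aware tooling the two spellings describe the same callable type; the short illustration below restates both forms side by side. The *Callback name and the trailing usage line are made up for the example and are not part of the repository.

/**
 * Compact form kept by this patch: the whole signature lives in the type expression.
 * @typedef {function (string): I18n} I18nGenerator
 */

/**
 * Equivalent verbose form that the patch removes.
 * @callback I18nGeneratorCallback
 * @param {string} lang
 * @returns {I18n}
 */

/** @type {I18nGenerator} */
const pickLocale = lang => i18n(lang); // illustrative usage, assuming i18n() is in scope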

From 6ee183882aaa58a243378b235e62b4e7b04144b7 Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Thu, 8 Aug 2024 14:12:34 +0800
Subject: [PATCH 20/24] =?UTF-8?q?chore:=20=E4=BC=98=E5=8C=96local/docker?=
 =?UTF-8?q?=20adapter=E7=A4=BA=E4=BE=8B,=20=E5=8D=87=E7=BA=A7=E4=BE=9D?=
 =?UTF-8?q?=E8=B5=96?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 adapter/docker/index.js     |   2 +-
 adapter/docker/package.json |   4 +-
 adapter/docker/yarn.lock    | 138 -----------------
 adapter/local/index.js      |   2 +-
 adapter/local/package.json  |   2 +-
 adapter/local/yarn.lock     |  17 ++-
 dist/buildinfo.json         |   2 +-
 dist/index.js               |  13 +-
 dist/timestamp              |   2 +-
 package.json                |  11 +-
 src/agent/agents.js         |   4 +-
 src/agent/anthropic.js      |   4 +-
 src/agent/azure.js          |   2 +-
 src/agent/cohere.js         |   2 +-
 src/agent/gemini.js         |   4 +-
 src/agent/llm.js            |   2 +-
 src/agent/openai.js         |   2 +-
 src/agent/request.js        |   2 +-
 src/config/env.js           |   1 -
 src/telegram/message.js     |   8 +-
 src/types/context.js        |   1 -
 src/utils/utils.js          |  28 ++--
 yarn.lock                   | 286 +++++++++++++++++++++++++++---------
 23 files changed, 281 insertions(+), 258 deletions(-)

diff --git a/adapter/docker/index.js b/adapter/docker/index.js
index 96471440..987660a5 100644
--- a/adapter/docker/index.js
+++ b/adapter/docker/index.js
@@ -1,5 +1,5 @@
 import adapter from 'cloudflare-worker-adapter';
-import {default as worker} from '../../main.js';
+import {default as worker} from 'chatgpt-telegram-workers';
 import fs from 'fs';
 
 
diff --git a/adapter/docker/package.json b/adapter/docker/package.json
index 25a933a5..5320cfe9 100644
--- a/adapter/docker/package.json
+++ b/adapter/docker/package.json
@@ -13,7 +13,5 @@
     "chatgpt-telegram-workers": "github:TBXark/ChatGPT-Telegram-Workers#dev",
     "cloudflare-worker-adapter": "github:TBXark/cloudflare-worker-adapter"
   },
-  "devDependencies": {
-    "esbuild": "^0.17.12"
-  }
+  "devDependencies": {}
 }
diff --git a/adapter/docker/yarn.lock b/adapter/docker/yarn.lock
index a9ed2006..8cc05648 100644
--- a/adapter/docker/yarn.lock
+++ b/adapter/docker/yarn.lock
@@ -2,116 +2,6 @@
 # yarn lockfile v1
 
 
-"@esbuild/android-arm64@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.17.19.tgz#bafb75234a5d3d1b690e7c2956a599345e84a2fd"
-  integrity sha512-KBMWvEZooR7+kzY0BtbTQn0OAYY7CsiydT63pVEaPtVYF0hXbUaOyZog37DKxK7NF3XacBJOpYT4adIJh+avxA==
-
-"@esbuild/android-arm@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.17.19.tgz#5898f7832c2298bc7d0ab53701c57beb74d78b4d"
-  integrity sha512-rIKddzqhmav7MSmoFCmDIb6e2W57geRsM94gV2l38fzhXMwq7hZoClug9USI2pFRGL06f4IOPHHpFNOkWieR8A==
-
-"@esbuild/android-x64@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.17.19.tgz#658368ef92067866d95fb268719f98f363d13ae1"
-  integrity sha512-uUTTc4xGNDT7YSArp/zbtmbhO0uEEK9/ETW29Wk1thYUJBz3IVnvgEiEwEa9IeLyvnpKrWK64Utw2bgUmDveww==
-
-"@esbuild/darwin-arm64@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.17.19.tgz#584c34c5991b95d4d48d333300b1a4e2ff7be276"
-  integrity sha512-80wEoCfF/hFKM6WE1FyBHc9SfUblloAWx6FJkFWTWiCoht9Mc0ARGEM47e67W9rI09YoUxJL68WHfDRYEAvOhg==
-
-"@esbuild/darwin-x64@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.17.19.tgz#7751d236dfe6ce136cce343dce69f52d76b7f6cb"
-  integrity sha512-IJM4JJsLhRYr9xdtLytPLSH9k/oxR3boaUIYiHkAawtwNOXKE8KoU8tMvryogdcT8AU+Bflmh81Xn6Q0vTZbQw==
-
-"@esbuild/freebsd-arm64@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.17.19.tgz#cacd171665dd1d500f45c167d50c6b7e539d5fd2"
-  integrity sha512-pBwbc7DufluUeGdjSU5Si+P3SoMF5DQ/F/UmTSb8HXO80ZEAJmrykPyzo1IfNbAoaqw48YRpv8shwd1NoI0jcQ==
-
-"@esbuild/freebsd-x64@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.17.19.tgz#0769456eee2a08b8d925d7c00b79e861cb3162e4"
-  integrity sha512-4lu+n8Wk0XlajEhbEffdy2xy53dpR06SlzvhGByyg36qJw6Kpfk7cp45DR/62aPH9mtJRmIyrXAS5UWBrJT6TQ==
-
-"@esbuild/linux-arm64@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.17.19.tgz#38e162ecb723862c6be1c27d6389f48960b68edb"
-  integrity sha512-ct1Tg3WGwd3P+oZYqic+YZF4snNl2bsnMKRkb3ozHmnM0dGWuxcPTTntAF6bOP0Sp4x0PjSF+4uHQ1xvxfRKqg==
-
-"@esbuild/linux-arm@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.17.19.tgz#1a2cd399c50040184a805174a6d89097d9d1559a"
-  integrity sha512-cdmT3KxjlOQ/gZ2cjfrQOtmhG4HJs6hhvm3mWSRDPtZ/lP5oe8FWceS10JaSJC13GBd4eH/haHnqf7hhGNLerA==
-
-"@esbuild/linux-ia32@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.17.19.tgz#e28c25266b036ce1cabca3c30155222841dc035a"
-  integrity sha512-w4IRhSy1VbsNxHRQpeGCHEmibqdTUx61Vc38APcsRbuVgK0OPEnQ0YD39Brymn96mOx48Y2laBQGqgZ0j9w6SQ==
-
-"@esbuild/linux-loong64@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.17.19.tgz#0f887b8bb3f90658d1a0117283e55dbd4c9dcf72"
-  integrity sha512-2iAngUbBPMq439a+z//gE+9WBldoMp1s5GWsUSgqHLzLJ9WoZLZhpwWuym0u0u/4XmZ3gpHmzV84PonE+9IIdQ==
-
-"@esbuild/linux-mips64el@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.17.19.tgz#f5d2a0b8047ea9a5d9f592a178ea054053a70289"
-  integrity sha512-LKJltc4LVdMKHsrFe4MGNPp0hqDFA1Wpt3jE1gEyM3nKUvOiO//9PheZZHfYRfYl6AwdTH4aTcXSqBerX0ml4A==
-
-"@esbuild/linux-ppc64@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.17.19.tgz#876590e3acbd9fa7f57a2c7d86f83717dbbac8c7"
-  integrity sha512-/c/DGybs95WXNS8y3Ti/ytqETiW7EU44MEKuCAcpPto3YjQbyK3IQVKfF6nbghD7EcLUGl0NbiL5Rt5DMhn5tg==
-
-"@esbuild/linux-riscv64@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.17.19.tgz#7f49373df463cd9f41dc34f9b2262d771688bf09"
-  integrity sha512-FC3nUAWhvFoutlhAkgHf8f5HwFWUL6bYdvLc/TTuxKlvLi3+pPzdZiFKSWz/PF30TB1K19SuCxDTI5KcqASJqA==
-
-"@esbuild/linux-s390x@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.17.19.tgz#e2afd1afcaf63afe2c7d9ceacd28ec57c77f8829"
-  integrity sha512-IbFsFbxMWLuKEbH+7sTkKzL6NJmG2vRyy6K7JJo55w+8xDk7RElYn6xvXtDW8HCfoKBFK69f3pgBJSUSQPr+4Q==
-
-"@esbuild/linux-x64@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/linux-x64/-/linux-x64-0.17.19.tgz#8a0e9738b1635f0c53389e515ae83826dec22aa4"
-  integrity sha512-68ngA9lg2H6zkZcyp22tsVt38mlhWde8l3eJLWkyLrp4HwMUr3c1s/M2t7+kHIhvMjglIBrFpncX1SzMckomGw==
-
-"@esbuild/netbsd-x64@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.17.19.tgz#c29fb2453c6b7ddef9a35e2c18b37bda1ae5c462"
-  integrity sha512-CwFq42rXCR8TYIjIfpXCbRX0rp1jo6cPIUPSaWwzbVI4aOfX96OXY8M6KNmtPcg7QjYeDmN+DD0Wp3LaBOLf4Q==
-
-"@esbuild/openbsd-x64@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.17.19.tgz#95e75a391403cb10297280d524d66ce04c920691"
-  integrity sha512-cnq5brJYrSZ2CF6c35eCmviIN3k3RczmHz8eYaVlNasVqsNY+JKohZU5MKmaOI+KkllCdzOKKdPs762VCPC20g==
-
-"@esbuild/sunos-x64@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.17.19.tgz#722eaf057b83c2575937d3ffe5aeb16540da7273"
-  integrity sha512-vCRT7yP3zX+bKWFeP/zdS6SqdWB8OIpaRq/mbXQxTGHnIxspRtigpkUcDMlSCOejlHowLqII7K2JKevwyRP2rg==
-
-"@esbuild/win32-arm64@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.17.19.tgz#9aa9dc074399288bdcdd283443e9aeb6b9552b6f"
-  integrity sha512-yYx+8jwowUstVdorcMdNlzklLYhPxjniHWFKgRqH7IFlUEa0Umu3KuYplf1HUZZ422e3NU9F4LGb+4O0Kdcaag==
-
-"@esbuild/win32-ia32@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.17.19.tgz#95ad43c62ad62485e210f6299c7b2571e48d2b03"
-  integrity sha512-eggDKanJszUtCdlVs0RB+h35wNlb5v4TWEkq4vZcmVt5u/HiDZrTXe2bWFQUez3RgNHwx/x4sk5++4NSSicKkw==
-
-"@esbuild/win32-x64@0.17.19":
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.17.19.tgz#8cfaf2ff603e9aabb910e9c0558c26cf32744061"
-  integrity sha512-lAhycmKnVOuRYNtRtatQR1LPQf2oYCkRGkSFnseDAKPl8lu5SOsK/e1sXe5a0Pc5kHIHe6P2I/ilntNv2xf3cA==
-
 "@gar/promisify@^1.0.1":
   version "1.1.3"
   resolved "https://registry.yarnpkg.com/@gar/promisify/-/promisify-1.1.3.tgz#555193ab2e3bb3b6adc3d551c9c030d9e860daf6"
@@ -370,34 +260,6 @@ err-code@^2.0.2:
   resolved "https://registry.yarnpkg.com/err-code/-/err-code-2.0.3.tgz#23c2f3b756ffdfc608d30e27c9a941024807e7f9"
   integrity sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==
 
-esbuild@^0.17.12:
-  version "0.17.19"
-  resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.17.19.tgz#087a727e98299f0462a3d0bcdd9cd7ff100bd955"
-  integrity sha512-XQ0jAPFkK/u3LcVRcvVHQcTIqD6E2H1fvZMA5dQPSOWb3suUbWbfbRf94pjc0bNzRYLfIrDRQXr7X+LHIm5oHw==
-  optionalDependencies:
-    "@esbuild/android-arm" "0.17.19"
-    "@esbuild/android-arm64" "0.17.19"
-    "@esbuild/android-x64" "0.17.19"
-    "@esbuild/darwin-arm64" "0.17.19"
-    "@esbuild/darwin-x64" "0.17.19"
-    "@esbuild/freebsd-arm64" "0.17.19"
-    "@esbuild/freebsd-x64" "0.17.19"
-    "@esbuild/linux-arm" "0.17.19"
-    "@esbuild/linux-arm64" "0.17.19"
-    "@esbuild/linux-ia32" "0.17.19"
-    "@esbuild/linux-loong64" "0.17.19"
-    "@esbuild/linux-mips64el" "0.17.19"
-    "@esbuild/linux-ppc64" "0.17.19"
-    "@esbuild/linux-riscv64" "0.17.19"
-    "@esbuild/linux-s390x" "0.17.19"
-    "@esbuild/linux-x64" "0.17.19"
-    "@esbuild/netbsd-x64" "0.17.19"
-    "@esbuild/openbsd-x64" "0.17.19"
-    "@esbuild/sunos-x64" "0.17.19"
-    "@esbuild/win32-arm64" "0.17.19"
-    "@esbuild/win32-ia32" "0.17.19"
-    "@esbuild/win32-x64" "0.17.19"
-
 expand-template@^2.0.3:
   version "2.0.3"
   resolved "https://registry.yarnpkg.com/expand-template/-/expand-template-2.0.3.tgz#6e14b3fcee0f3a6340ecb57d2e8918692052a47c"
diff --git a/adapter/local/index.js b/adapter/local/index.js
index b7f9390f..c00fe4d9 100644
--- a/adapter/local/index.js
+++ b/adapter/local/index.js
@@ -1,7 +1,7 @@
 import adapter, {bindGlobal} from 'cloudflare-worker-adapter';
 import {MemoryCache} from 'cloudflare-worker-adapter/cache/memory.js';
 import fs from 'fs';
-import HttpsProxyAgent from 'https-proxy-agent';
+import { HttpsProxyAgent } from 'https-proxy-agent';
 import fetch from 'node-fetch';
 import {default as worker} from '../../main.js';
 import {ENV} from '../../src/config/env.js';
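
The import change above tracks the https-proxy-agent upgrade in the accompanying package.json hunk: version 7 exposes HttpsProxyAgent as a named export rather than a default export. Below is a minimal sketch of how such an agent is usually handed to node-fetch; the proxy URL source shown here is a generic environment variable, not configuration read by this adapter.

import { HttpsProxyAgent } from 'https-proxy-agent'; // v7+: named export
import fetch from 'node-fetch';

// Generic example: build an agent only when a proxy is configured.
const proxy = process.env.HTTPS_PROXY; // placeholder source for the proxy URL
const agent = proxy ? new HttpsProxyAgent(proxy) : undefined;

// node-fetch tunnels the request through the proxy whenever an agent is passed.
const resp = await fetch('https://api.telegram.org', { agent });
console.log(resp.status);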
diff --git a/adapter/local/package.json b/adapter/local/package.json
index a506af8c..c82a8408 100644
--- a/adapter/local/package.json
+++ b/adapter/local/package.json
@@ -12,7 +12,7 @@
   "license": "ISC",
   "dependencies": {
     "cloudflare-worker-adapter": "github:TBXark/cloudflare-worker-adapter",
-    "https-proxy-agent": "^5.0.1",
+    "https-proxy-agent": "^7.0.5",
     "node-fetch": "^3.3.2"
   }
 }
diff --git a/adapter/local/yarn.lock b/adapter/local/yarn.lock
index e0f77439..a473f8f2 100644
--- a/adapter/local/yarn.lock
+++ b/adapter/local/yarn.lock
@@ -45,6 +45,13 @@ agent-base@6, agent-base@^6.0.2:
   dependencies:
     debug "4"
 
+agent-base@^7.0.2:
+  version "7.1.1"
+  resolved "https://registry.yarnpkg.com/agent-base/-/agent-base-7.1.1.tgz#bdbded7dfb096b751a2a087eeeb9664725b2e317"
+  integrity sha512-H0TSyFNDMomMNJQBn8wFV5YC/2eJ+VXECwOadZJT554xP6cODZHPX3H9QMQECxvrgiSOP1pHjy1sMWQVYJOUOA==
+  dependencies:
+    debug "^4.3.4"
+
 agentkeepalive@^4.1.3:
   version "4.5.0"
   resolved "https://registry.yarnpkg.com/agentkeepalive/-/agentkeepalive-4.5.0.tgz#2673ad1389b3c418c5a20c5d7364f93ca04be923"
@@ -353,7 +360,7 @@ http-proxy-agent@^4.0.1:
     agent-base "6"
     debug "4"
 
-https-proxy-agent@^5.0.0, https-proxy-agent@^5.0.1:
+https-proxy-agent@^5.0.0:
   version "5.0.1"
   resolved "https://registry.yarnpkg.com/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz#c59ef224a04fe8b754f3db0063a25ea30d0005d6"
   integrity sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==
@@ -361,6 +368,14 @@ https-proxy-agent@^5.0.0, https-proxy-agent@^5.0.1:
     agent-base "6"
     debug "4"
 
+https-proxy-agent@^7.0.5:
+  version "7.0.5"
+  resolved "https://registry.yarnpkg.com/https-proxy-agent/-/https-proxy-agent-7.0.5.tgz#9e8b5013873299e11fab6fd548405da2d6c602b2"
+  integrity sha512-1e4Wqeblerz+tMKPIq2EMGiiWW1dIjZOksyHWSUm1rmuvw/how9hBHZ38lAGj5ID4Ik6EdkOw7NmWPy6LAwalw==
+  dependencies:
+    agent-base "^7.0.2"
+    debug "4"
+
 humanize-ms@^1.2.1:
   version "1.2.1"
   resolved "https://registry.yarnpkg.com/humanize-ms/-/humanize-ms-1.2.1.tgz#c46e3159a293f6b896da29316d8b6fe8bb79bbed"
diff --git a/dist/buildinfo.json b/dist/buildinfo.json
index 3890b49e..27a722fc 100644
--- a/dist/buildinfo.json
+++ b/dist/buildinfo.json
@@ -1 +1 @@
-{"sha":"9609e7a","timestamp":1722950584}
\ No newline at end of file
+{"sha":"1fb3fbc","timestamp":1723096985}
\ No newline at end of file
diff --git a/dist/index.js b/dist/index.js
index f93f3a21..c9ca540d 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -89,9 +89,9 @@ var Environment = class {
   // -- 版本数据 --
   //
   // 当前版本
-  BUILD_TIMESTAMP = 1722950584;
+  BUILD_TIMESTAMP = 1723096985;
   // 当前版本 commit id
-  BUILD_VERSION = "9609e7a";
+  BUILD_VERSION = "1fb3fbc";
   // -- 基础配置 --
   /**
    * @type {I18n | null}
@@ -351,7 +351,6 @@ var Context = class {
   //
   /**
    * 初始化用户配置
-   *
    * @inner
    * @param {string | null} storeKey
    */
@@ -426,7 +425,7 @@ var Context = class {
   }
   /**
    * @param {TelegramMessage} message
-   * @return {Promise}
+   * @returns {Promise}
    */
   async initContext(message) {
     const chatId = message?.chat?.id;
@@ -883,7 +882,7 @@ function anthropicSseJsonParser(sse) {
       return {};
   }
 }
-var LineDecoder = class {
+var LineDecoder = class _LineDecoder {
   constructor() {
     this.buffer = [];
     this.trailingCR = false;
@@ -901,8 +900,8 @@ var LineDecoder = class {
     if (!text) {
       return [];
     }
-    const trailingNewline = LineDecoder.NEWLINE_CHARS.has(text[text.length - 1] || "");
-    let lines = text.split(LineDecoder.NEWLINE_REGEXP);
+    const trailingNewline = _LineDecoder.NEWLINE_CHARS.has(text[text.length - 1] || "");
+    let lines = text.split(_LineDecoder.NEWLINE_REGEXP);
     if (lines.length === 1 && !trailingNewline) {
       this.buffer.push(lines[0]);
       return [];
diff --git a/dist/timestamp b/dist/timestamp
index 243660ed..00f7c7e9 100644
--- a/dist/timestamp
+++ b/dist/timestamp
@@ -1 +1 @@
-1722950584
\ No newline at end of file
+1723096985
\ No newline at end of file
diff --git a/package.json b/package.json
index dd923bb7..86141f65 100644
--- a/package.json
+++ b/package.json
@@ -13,12 +13,11 @@
     "deploy:build": "npm run build && wrangler deploy"
   },
   "devDependencies": {
-    "@eslint/js": "^9.1.1",
-    "esbuild": "^0.17.11",
-    "eslint": "^9.1.1",
-    "eslint-plugin-jsdoc": "^49.0.0",
-    "globals": "^15.1.0",
-    "wrangler": "^3.0.0"
+    "@eslint/js": "^9.8.0",
+    "esbuild": "^0.23.0",
+    "eslint": "^9.8.0",
+    "eslint-plugin-jsdoc": "^50.0.0",
+    "wrangler": "^3.69.1"
   },
   "exports": {
     "import": "./main.js"
diff --git a/src/agent/agents.js b/src/agent/agents.js
index aca0a11e..d217fb0a 100644
--- a/src/agent/agents.js
+++ b/src/agent/agents.js
@@ -66,7 +66,7 @@ export function currentChatModel(agentName, context) {
             try {
                 const url = new URL(context.USER_CONFIG.AZURE_COMPLETIONS_API);
                 return url.pathname.split("/")[3];
-            } catch  {
+            } catch {
                 return context.USER_CONFIG.AZURE_COMPLETIONS_API;
             }
         case "openai":
@@ -199,7 +199,7 @@ export function currentImageModel(agentName, context) {
             try {
                 const url = new URL(context.USER_CONFIG.AZURE_DALLE_API);
                 return url.pathname.split("/")[3];
-            } catch  {
+            } catch {
                 return context.USER_CONFIG.AZURE_DALLE_API;
             }
         case "openai":
diff --git a/src/agent/anthropic.js b/src/agent/anthropic.js
index 3a6238de..adee2761 100644
--- a/src/agent/anthropic.js
+++ b/src/agent/anthropic.js
@@ -30,7 +30,7 @@ async function renderAnthropicMessage(item) {
             res.content.push({type: 'text', text: item.content});
         }
         for (const image of item.images) {
-            res.content.push( await imageToBase64String(image).then(({format, data}) => {
+            res.content.push(await imageToBase64String(image).then(({format, data}) => {
                 return {type: 'image', source: {type: 'base64', media_type: format, data: data}};
             }));
         }
@@ -47,7 +47,7 @@ async function renderAnthropicMessage(item) {
  * @returns {Promise}
  */
 export async function requestCompletionsFromAnthropicAI(params, context, onStream) {
-    const { message, images, prompt, history } = params;
+    const {message, images, prompt, history} = params;
     const url = `${context.USER_CONFIG.ANTHROPIC_API_BASE}/messages`;
     const header = {
         'x-api-key': context.USER_CONFIG.ANTHROPIC_API_KEY,
diff --git a/src/agent/azure.js b/src/agent/azure.js
index 9ce137ad..48727c24 100644
--- a/src/agent/azure.js
+++ b/src/agent/azure.js
@@ -37,7 +37,7 @@ export function isAzureImageEnable(context) {
  * @returns {Promise}
  */
 export async function requestCompletionsFromAzureOpenAI(params, context, onStream) {
-    const { message, images, prompt, history } = params;
+    const {message, images, prompt, history} = params;
     const url = context.USER_CONFIG.AZURE_COMPLETIONS_API;
     const header = {
         'Content-Type': 'application/json',
diff --git a/src/agent/cohere.js b/src/agent/cohere.js
index 3c86a475..287e9a91 100644
--- a/src/agent/cohere.js
+++ b/src/agent/cohere.js
@@ -36,7 +36,7 @@ function renderCohereMessage(item) {
  * @returns {Promise}
  */
 export async function requestCompletionsFromCohereAI(params, context, onStream) {
-    const { message, prompt, history } = params;
+    const {message, prompt, history} = params;
     const url = `${context.USER_CONFIG.COHERE_API_BASE}/chat`;
     const header = {
         'Authorization': `Bearer ${context.USER_CONFIG.COHERE_API_KEY}`,
diff --git a/src/agent/gemini.js b/src/agent/gemini.js
index a06433b1..a330e86a 100644
--- a/src/agent/gemini.js
+++ b/src/agent/gemini.js
@@ -9,7 +9,7 @@ export function isGeminiAIEnable(context) {
 }
 
 
-const GEMINI_ROLE_MAP =  {
+const GEMINI_ROLE_MAP = {
     'assistant': 'model',
     'system': 'user',
     'user': 'user',
@@ -38,7 +38,7 @@ function renderGeminiMessage(item) {
  * @returns {Promise}
  */
 export async function requestCompletionsFromGeminiAI(params, context, onStream) {
-    const { message, prompt, history } = params;
+    const {message, prompt, history} = params;
     onStream = null; // 暂时不支持stream模式
     const url = `${context.USER_CONFIG.GOOGLE_COMPLETIONS_API}${context.USER_CONFIG.GOOGLE_COMPLETIONS_MODEL}:${
         onStream ? 'streamGenerateContent' : 'generateContent'
diff --git a/src/agent/llm.js b/src/agent/llm.js
index f3e575b7..8b29f3ad 100644
--- a/src/agent/llm.js
+++ b/src/agent/llm.js
@@ -95,7 +95,7 @@ async function loadHistory(key) {
 async function requestCompletionsFromLLM(params, context, llm, modifier, onStream) {
     const historyDisable = ENV.AUTO_TRIM_HISTORY && ENV.MAX_HISTORY_LENGTH <= 0;
     const historyKey = context.SHARE_CONTEXT.chatHistoryKey;
-    const { message, images } = params;
+    const {message, images} = params;
     let history = await loadHistory(historyKey);
     if (modifier) {
         const modifierData = modifier(history, message);
diff --git a/src/agent/openai.js b/src/agent/openai.js
index cdda2caa..ff4a0db3 100644
--- a/src/agent/openai.js
+++ b/src/agent/openai.js
@@ -62,7 +62,7 @@ export async function renderOpenAIMessage(item) {
  */
 export async function requestCompletionsFromOpenAI(params, context, onStream) {
 
-    const { message, images, prompt, history } = params;
+    const {message, images, prompt, history} = params;
     const url = `${context.USER_CONFIG.OPENAI_API_BASE}/chat/completions`;
     const header = {
         'Content-Type': 'application/json',
diff --git a/src/agent/request.js b/src/agent/request.js
index e518a169..732ebe1b 100644
--- a/src/agent/request.js
+++ b/src/agent/request.js
@@ -118,7 +118,7 @@ export async function requestChatCompletions(url, header, body, context, onStrea
                 lengthDelta += c.length;
                 contentFull = contentFull + c;
                 if (lengthDelta > updateStep) {
-                    if (ENV.TELEGRAM_MIN_STREAM_INTERVAL > 0 ){
+                    if (ENV.TELEGRAM_MIN_STREAM_INTERVAL > 0) {
                         const delta = Date.now() - lastUpdateTime;
                         if (delta < ENV.TELEGRAM_MIN_STREAM_INTERVAL) {
                             continue;
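
Note: the hunk above only normalizes whitespace, but it sits on the minimum-interval throttle guarded by ENV.TELEGRAM_MIN_STREAM_INTERVAL. A minimal standalone sketch of that logic, assuming an async onStream callback and a chunk iterator as in the patch; the lengthDelta/updateStep batching is omitted, and streamWithThrottle/minIntervalMs are illustrative names, not part of the source:

    // Sketch of the stream-update throttle: edits are skipped while the
    // elapsed time since the last edit is below the configured minimum.
    async function streamWithThrottle(chunks, onStream, minIntervalMs = 0) {
        let lastUpdateTime = Date.now();
        let contentFull = '';
        for (const c of chunks) {
            contentFull += c;
            if (minIntervalMs > 0) {
                const delta = Date.now() - lastUpdateTime;
                if (delta < minIntervalMs) {
                    continue; // too soon: keep buffering, send no edit yet
                }
                lastUpdateTime = Date.now();
            }
            await onStream(`${contentFull}\n...`);
        }
        return contentFull; // the caller sends the final, unthrottled message
    }
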
diff --git a/src/config/env.js b/src/config/env.js
index 3b870809..c262f581 100644
--- a/src/config/env.js
+++ b/src/config/env.js
@@ -141,7 +141,6 @@ class Environment {
     TELEGRAM_IMAGE_TRANSFER_MODE = 'url';
 
 
-
     // --  权限相关 --
     //
     // 允许所有人使用
diff --git a/src/telegram/message.js b/src/telegram/message.js
index 12f84aad..c52195e3 100644
--- a/src/telegram/message.js
+++ b/src/telegram/message.js
@@ -163,10 +163,10 @@ async function msgHandleGroupMessage(message, context) {
         throw new Error('Not set bot name');
     }
     if (!message.entities) {
-       throw new Error('No entities');
+        throw new Error('No entities');
     }
 
-    const { text, caption } = message;
+    const {text, caption} = message;
     let originContent = text || caption || '';
     if (!originContent) {
         throw new Error('Empty message');
@@ -242,7 +242,7 @@ async function msgHandleCommand(message, context) {
  * @returns {Promise}
  */
 async function msgChatWithLLM(message, context) {
-    const { text, caption } = message;
+    const {text, caption} = message;
     let content = text || caption;
     if (ENV.EXTRA_MESSAGE_CONTEXT && context.SHARE_CONTEXT.extraMessageContext && context.SHARE_CONTEXT.extraMessageContext.text) {
         content = context.SHARE_CONTEXT.extraMessageContext.text + '\n' + text;
@@ -250,7 +250,7 @@ async function msgChatWithLLM(message, context) {
     /**
      * @type {LlmRequestParams}
      */
-    const params = { message: content };
+    const params = {message: content};
     if (message.photo && message.photo.length > 0) {
         let sizeIndex = 0;
         // 仅在支持原生base64的环境下运行选择更高质量的图片防止workers中base64编码超时
diff --git a/src/types/context.js b/src/types/context.js
index 0409780d..311c3a0e 100644
--- a/src/types/context.js
+++ b/src/types/context.js
@@ -1,4 +1,3 @@
-
 /**
  * 用于保存用户配置
  * @typedef {object} UserConfigType
diff --git a/src/utils/utils.js b/src/utils/utils.js
index 9ca38567..57fcfe30 100644
--- a/src/utils/utils.js
+++ b/src/utils/utils.js
@@ -108,19 +108,19 @@ export function supportsNativeBase64() {
  * @returns {Promise}
  */
 async function urlToBase64String(url) {
-   try {
-       const { Buffer } = await import('node:buffer');
-       return fetch(url)
-           .then(resp  => resp.arrayBuffer())
-           .then(buffer => Buffer.from(buffer).toString('base64'));
-   } catch {
-       // 非原生base64编码速度太慢不适合在workers中使用
-       // 在wrangler.toml中添加 Node.js 选项启用nodejs兼容
-       // compatibility_flags = [ "nodejs_compat" ]
-       return fetch(url)
-         .then(resp  => resp.arrayBuffer())
-         .then(buffer => btoa(String.fromCharCode.apply(null, new Uint8Array(buffer))));
-   }
+    try {
+        const {Buffer} = await import('node:buffer');
+        return fetch(url)
+            .then(resp => resp.arrayBuffer())
+            .then(buffer => Buffer.from(buffer).toString('base64'));
+    } catch {
+        // 非原生base64编码速度太慢不适合在workers中使用
+        // 在wrangler.toml中添加 Node.js 选项启用nodejs兼容
+        // compatibility_flags = [ "nodejs_compat" ]
+        return fetch(url)
+            .then(resp => resp.arrayBuffer())
+            .then(buffer => btoa(String.fromCharCode.apply(null, new Uint8Array(buffer))));
+    }
 }
 
 /**
@@ -165,4 +165,4 @@ export async function imageToBase64String(url) {
  */
 export function renderBase64DataURI(params) {
     return `data:${params.format};base64,${params.data}`;
-}
\ No newline at end of file
+}
diff --git a/yarn.lock b/yarn.lock
index afecd589..315759a0 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -9,30 +9,35 @@
   dependencies:
     mime "^3.0.0"
 
-"@cloudflare/workerd-darwin-64@1.20240718.0":
-  version "1.20240718.0"
-  resolved "https://registry.yarnpkg.com/@cloudflare/workerd-darwin-64/-/workerd-darwin-64-1.20240718.0.tgz#46f438fb86ccd4772c29db52fe1d076bc9e6ffb4"
-  integrity sha512-BsPZcSCgoGnufog2GIgdPuiKicYTNyO/Dp++HbpLRH+yQdX3x4aWx83M+a0suTl1xv76dO4g9aw7SIB6OSgIyQ==
-
-"@cloudflare/workerd-darwin-arm64@1.20240718.0":
-  version "1.20240718.0"
-  resolved "https://registry.yarnpkg.com/@cloudflare/workerd-darwin-arm64/-/workerd-darwin-arm64-1.20240718.0.tgz#70e1dca5de4869ef3a9b9e296e934848bca6c74f"
-  integrity sha512-nlr4gaOO5gcJerILJQph3+2rnas/nx/lYsuaot1ntHu4LAPBoQo1q/Pucj2cSIav4UiMzTbDmoDwPlls4Kteog==
-
-"@cloudflare/workerd-linux-64@1.20240718.0":
-  version "1.20240718.0"
-  resolved "https://registry.yarnpkg.com/@cloudflare/workerd-linux-64/-/workerd-linux-64-1.20240718.0.tgz#802c04a1a5729f3881c675be3d158ee06c6b1a36"
-  integrity sha512-LJ/k3y47pBcjax0ee4K+6ZRrSsqWlfU4lbU8Dn6u5tSC9yzwI4YFNXDrKWInB0vd7RT3w4Yqq1S6ZEbfRrqVUg==
-
-"@cloudflare/workerd-linux-arm64@1.20240718.0":
-  version "1.20240718.0"
-  resolved "https://registry.yarnpkg.com/@cloudflare/workerd-linux-arm64/-/workerd-linux-arm64-1.20240718.0.tgz#cebff9115d48f8d0c2649fdf86ef46b726d1841f"
-  integrity sha512-zBEZvy88EcAMGRGfuVtS00Yl7lJdUM9sH7i651OoL+q0Plv9kphlCC0REQPwzxrEYT1qibSYtWcD9IxQGgx2/g==
-
-"@cloudflare/workerd-windows-64@1.20240718.0":
-  version "1.20240718.0"
-  resolved "https://registry.yarnpkg.com/@cloudflare/workerd-windows-64/-/workerd-windows-64-1.20240718.0.tgz#940893e62df7f5a8ec895572b834c95c1e256fbd"
-  integrity sha512-YpCRvvT47XanFum7C3SedOZKK6BfVhqmwdAAVAQFyc4gsCdegZo0JkUkdloC/jwuWlbCACOG2HTADHOqyeolzQ==
+"@cloudflare/workerd-darwin-64@1.20240725.0":
+  version "1.20240725.0"
+  resolved "https://registry.yarnpkg.com/@cloudflare/workerd-darwin-64/-/workerd-darwin-64-1.20240725.0.tgz#bb80bd26dde34cbda112a93ebc1086f8f125cf9d"
+  integrity sha512-KpE7eycdZ9ON+tKBuTyqZh8SdFWHGrh2Ru9LcbpeFwb7O9gDQv9ceSdoV/T598qlT0a0yVKM62R6xa5ec0UOWA==
+
+"@cloudflare/workerd-darwin-arm64@1.20240725.0":
+  version "1.20240725.0"
+  resolved "https://registry.yarnpkg.com/@cloudflare/workerd-darwin-arm64/-/workerd-darwin-arm64-1.20240725.0.tgz#308775be5a5448fdf4e2e773c2a4853f320a7dd4"
+  integrity sha512-/UQlI04FdXLpPlDzzsWGz8TuKrMZKLowTo+8PkxgEiWIaBhE4DIDM5bwj3jM4Bs8yOLiL2ovQNpld5CnAKqA8g==
+
+"@cloudflare/workerd-linux-64@1.20240725.0":
+  version "1.20240725.0"
+  resolved "https://registry.yarnpkg.com/@cloudflare/workerd-linux-64/-/workerd-linux-64-1.20240725.0.tgz#f22de097e7ef26d42d68077bb95a2343548c5b29"
+  integrity sha512-Z5t12qYLvHz0b3ZRBBm2HQ93RiHrAnjFfdhtjMcgJypAGkiWpOCEn2xar/WqDhMfqnk0sa8aYiYAbMAlP1WN6w==
+
+"@cloudflare/workerd-linux-arm64@1.20240725.0":
+  version "1.20240725.0"
+  resolved "https://registry.yarnpkg.com/@cloudflare/workerd-linux-arm64/-/workerd-linux-arm64-1.20240725.0.tgz#0fa0f476033ba10dc706f2d716ba7f9f75f184d0"
+  integrity sha512-j9gYXLOwOyNehLMzP7KxQ+Y6/nxcL9i6LTDJC6RChoaxLRbm0Y/9Otu+hfyzeNeRpt31ip6vqXZ1QQk6ygzI8A==
+
+"@cloudflare/workerd-windows-64@1.20240725.0":
+  version "1.20240725.0"
+  resolved "https://registry.yarnpkg.com/@cloudflare/workerd-windows-64/-/workerd-windows-64-1.20240725.0.tgz#f77ec905b28032c06a1789830aecda5ac5429635"
+  integrity sha512-fkrJLWNN6rrPjZ0eKJx328NVMo4BsainKxAfqaPMEd6uRwjOM8uN8V4sSLsXXP8GQMAx6hAG2hU86givS4GItg==
+
+"@cloudflare/workers-shared@0.1.0":
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/@cloudflare/workers-shared/-/workers-shared-0.1.0.tgz#1af7d2ffbb61ca27f37e4cbc6c4713adc7223900"
+  integrity sha512-SyD4iw6jM4anZaG+ujgVETV4fulF2KHBOW31eavbVN7TNpk2l4aJgwY1YSPK00IKSWsoQuH2TigR446KuT5lqQ==
 
 "@cspotcode/source-map-support@0.8.1":
   version "0.8.1"
@@ -63,116 +68,236 @@
     escape-string-regexp "^4.0.0"
     rollup-plugin-node-polyfills "^0.2.1"
 
+"@esbuild/aix-ppc64@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/aix-ppc64/-/aix-ppc64-0.23.0.tgz#145b74d5e4a5223489cabdc238d8dad902df5259"
+  integrity sha512-3sG8Zwa5fMcA9bgqB8AfWPQ+HFke6uD3h1s3RIwUNK8EG7a4buxvuFTs3j1IMs2NXAk9F30C/FF4vxRgQCcmoQ==
+
 "@esbuild/android-arm64@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.17.19.tgz#bafb75234a5d3d1b690e7c2956a599345e84a2fd"
   integrity sha512-KBMWvEZooR7+kzY0BtbTQn0OAYY7CsiydT63pVEaPtVYF0hXbUaOyZog37DKxK7NF3XacBJOpYT4adIJh+avxA==
 
+"@esbuild/android-arm64@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.23.0.tgz#453bbe079fc8d364d4c5545069e8260228559832"
+  integrity sha512-EuHFUYkAVfU4qBdyivULuu03FhJO4IJN9PGuABGrFy4vUuzk91P2d+npxHcFdpUnfYKy0PuV+n6bKIpHOB3prQ==
+
 "@esbuild/android-arm@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.17.19.tgz#5898f7832c2298bc7d0ab53701c57beb74d78b4d"
   integrity sha512-rIKddzqhmav7MSmoFCmDIb6e2W57geRsM94gV2l38fzhXMwq7hZoClug9USI2pFRGL06f4IOPHHpFNOkWieR8A==
 
+"@esbuild/android-arm@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.23.0.tgz#26c806853aa4a4f7e683e519cd9d68e201ebcf99"
+  integrity sha512-+KuOHTKKyIKgEEqKbGTK8W7mPp+hKinbMBeEnNzjJGyFcWsfrXjSTNluJHCY1RqhxFurdD8uNXQDei7qDlR6+g==
+
 "@esbuild/android-x64@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.17.19.tgz#658368ef92067866d95fb268719f98f363d13ae1"
   integrity sha512-uUTTc4xGNDT7YSArp/zbtmbhO0uEEK9/ETW29Wk1thYUJBz3IVnvgEiEwEa9IeLyvnpKrWK64Utw2bgUmDveww==
 
+"@esbuild/android-x64@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.23.0.tgz#1e51af9a6ac1f7143769f7ee58df5b274ed202e6"
+  integrity sha512-WRrmKidLoKDl56LsbBMhzTTBxrsVwTKdNbKDalbEZr0tcsBgCLbEtoNthOW6PX942YiYq8HzEnb4yWQMLQuipQ==
+
 "@esbuild/darwin-arm64@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.17.19.tgz#584c34c5991b95d4d48d333300b1a4e2ff7be276"
   integrity sha512-80wEoCfF/hFKM6WE1FyBHc9SfUblloAWx6FJkFWTWiCoht9Mc0ARGEM47e67W9rI09YoUxJL68WHfDRYEAvOhg==
 
+"@esbuild/darwin-arm64@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.23.0.tgz#d996187a606c9534173ebd78c58098a44dd7ef9e"
+  integrity sha512-YLntie/IdS31H54Ogdn+v50NuoWF5BDkEUFpiOChVa9UnKpftgwzZRrI4J132ETIi+D8n6xh9IviFV3eXdxfow==
+
 "@esbuild/darwin-x64@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.17.19.tgz#7751d236dfe6ce136cce343dce69f52d76b7f6cb"
   integrity sha512-IJM4JJsLhRYr9xdtLytPLSH9k/oxR3boaUIYiHkAawtwNOXKE8KoU8tMvryogdcT8AU+Bflmh81Xn6Q0vTZbQw==
 
+"@esbuild/darwin-x64@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.23.0.tgz#30c8f28a7ef4e32fe46501434ebe6b0912e9e86c"
+  integrity sha512-IMQ6eme4AfznElesHUPDZ+teuGwoRmVuuixu7sv92ZkdQcPbsNHzutd+rAfaBKo8YK3IrBEi9SLLKWJdEvJniQ==
+
 "@esbuild/freebsd-arm64@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.17.19.tgz#cacd171665dd1d500f45c167d50c6b7e539d5fd2"
   integrity sha512-pBwbc7DufluUeGdjSU5Si+P3SoMF5DQ/F/UmTSb8HXO80ZEAJmrykPyzo1IfNbAoaqw48YRpv8shwd1NoI0jcQ==
 
+"@esbuild/freebsd-arm64@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.23.0.tgz#30f4fcec8167c08a6e8af9fc14b66152232e7fb4"
+  integrity sha512-0muYWCng5vqaxobq6LB3YNtevDFSAZGlgtLoAc81PjUfiFz36n4KMpwhtAd4he8ToSI3TGyuhyx5xmiWNYZFyw==
+
 "@esbuild/freebsd-x64@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.17.19.tgz#0769456eee2a08b8d925d7c00b79e861cb3162e4"
   integrity sha512-4lu+n8Wk0XlajEhbEffdy2xy53dpR06SlzvhGByyg36qJw6Kpfk7cp45DR/62aPH9mtJRmIyrXAS5UWBrJT6TQ==
 
+"@esbuild/freebsd-x64@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.23.0.tgz#1003a6668fe1f5d4439e6813e5b09a92981bc79d"
+  integrity sha512-XKDVu8IsD0/q3foBzsXGt/KjD/yTKBCIwOHE1XwiXmrRwrX6Hbnd5Eqn/WvDekddK21tfszBSrE/WMaZh+1buQ==
+
 "@esbuild/linux-arm64@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.17.19.tgz#38e162ecb723862c6be1c27d6389f48960b68edb"
   integrity sha512-ct1Tg3WGwd3P+oZYqic+YZF4snNl2bsnMKRkb3ozHmnM0dGWuxcPTTntAF6bOP0Sp4x0PjSF+4uHQ1xvxfRKqg==
 
+"@esbuild/linux-arm64@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.23.0.tgz#3b9a56abfb1410bb6c9138790f062587df3e6e3a"
+  integrity sha512-j1t5iG8jE7BhonbsEg5d9qOYcVZv/Rv6tghaXM/Ug9xahM0nX/H2gfu6X6z11QRTMT6+aywOMA8TDkhPo8aCGw==
+
 "@esbuild/linux-arm@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.17.19.tgz#1a2cd399c50040184a805174a6d89097d9d1559a"
   integrity sha512-cdmT3KxjlOQ/gZ2cjfrQOtmhG4HJs6hhvm3mWSRDPtZ/lP5oe8FWceS10JaSJC13GBd4eH/haHnqf7hhGNLerA==
 
+"@esbuild/linux-arm@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.23.0.tgz#237a8548e3da2c48cd79ae339a588f03d1889aad"
+  integrity sha512-SEELSTEtOFu5LPykzA395Mc+54RMg1EUgXP+iw2SJ72+ooMwVsgfuwXo5Fn0wXNgWZsTVHwY2cg4Vi/bOD88qw==
+
 "@esbuild/linux-ia32@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.17.19.tgz#e28c25266b036ce1cabca3c30155222841dc035a"
   integrity sha512-w4IRhSy1VbsNxHRQpeGCHEmibqdTUx61Vc38APcsRbuVgK0OPEnQ0YD39Brymn96mOx48Y2laBQGqgZ0j9w6SQ==
 
+"@esbuild/linux-ia32@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.23.0.tgz#4269cd19cb2de5de03a7ccfc8855dde3d284a238"
+  integrity sha512-P7O5Tkh2NbgIm2R6x1zGJJsnacDzTFcRWZyTTMgFdVit6E98LTxO+v8LCCLWRvPrjdzXHx9FEOA8oAZPyApWUA==
+
 "@esbuild/linux-loong64@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.17.19.tgz#0f887b8bb3f90658d1a0117283e55dbd4c9dcf72"
   integrity sha512-2iAngUbBPMq439a+z//gE+9WBldoMp1s5GWsUSgqHLzLJ9WoZLZhpwWuym0u0u/4XmZ3gpHmzV84PonE+9IIdQ==
 
+"@esbuild/linux-loong64@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.23.0.tgz#82b568f5658a52580827cc891cb69d2cb4f86280"
+  integrity sha512-InQwepswq6urikQiIC/kkx412fqUZudBO4SYKu0N+tGhXRWUqAx+Q+341tFV6QdBifpjYgUndV1hhMq3WeJi7A==
+
 "@esbuild/linux-mips64el@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.17.19.tgz#f5d2a0b8047ea9a5d9f592a178ea054053a70289"
   integrity sha512-LKJltc4LVdMKHsrFe4MGNPp0hqDFA1Wpt3jE1gEyM3nKUvOiO//9PheZZHfYRfYl6AwdTH4aTcXSqBerX0ml4A==
 
+"@esbuild/linux-mips64el@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.23.0.tgz#9a57386c926262ae9861c929a6023ed9d43f73e5"
+  integrity sha512-J9rflLtqdYrxHv2FqXE2i1ELgNjT+JFURt/uDMoPQLcjWQA5wDKgQA4t/dTqGa88ZVECKaD0TctwsUfHbVoi4w==
+
 "@esbuild/linux-ppc64@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.17.19.tgz#876590e3acbd9fa7f57a2c7d86f83717dbbac8c7"
   integrity sha512-/c/DGybs95WXNS8y3Ti/ytqETiW7EU44MEKuCAcpPto3YjQbyK3IQVKfF6nbghD7EcLUGl0NbiL5Rt5DMhn5tg==
 
+"@esbuild/linux-ppc64@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.23.0.tgz#f3a79fd636ba0c82285d227eb20ed8e31b4444f6"
+  integrity sha512-cShCXtEOVc5GxU0fM+dsFD10qZ5UpcQ8AM22bYj0u/yaAykWnqXJDpd77ublcX6vdDsWLuweeuSNZk4yUxZwtw==
+
 "@esbuild/linux-riscv64@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.17.19.tgz#7f49373df463cd9f41dc34f9b2262d771688bf09"
   integrity sha512-FC3nUAWhvFoutlhAkgHf8f5HwFWUL6bYdvLc/TTuxKlvLi3+pPzdZiFKSWz/PF30TB1K19SuCxDTI5KcqASJqA==
 
+"@esbuild/linux-riscv64@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.23.0.tgz#f9d2ef8356ce6ce140f76029680558126b74c780"
+  integrity sha512-HEtaN7Y5UB4tZPeQmgz/UhzoEyYftbMXrBCUjINGjh3uil+rB/QzzpMshz3cNUxqXN7Vr93zzVtpIDL99t9aRw==
+
 "@esbuild/linux-s390x@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.17.19.tgz#e2afd1afcaf63afe2c7d9ceacd28ec57c77f8829"
   integrity sha512-IbFsFbxMWLuKEbH+7sTkKzL6NJmG2vRyy6K7JJo55w+8xDk7RElYn6xvXtDW8HCfoKBFK69f3pgBJSUSQPr+4Q==
 
+"@esbuild/linux-s390x@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.23.0.tgz#45390f12e802201f38a0229e216a6aed4351dfe8"
+  integrity sha512-WDi3+NVAuyjg/Wxi+o5KPqRbZY0QhI9TjrEEm+8dmpY9Xir8+HE/HNx2JoLckhKbFopW0RdO2D72w8trZOV+Wg==
+
 "@esbuild/linux-x64@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/linux-x64/-/linux-x64-0.17.19.tgz#8a0e9738b1635f0c53389e515ae83826dec22aa4"
   integrity sha512-68ngA9lg2H6zkZcyp22tsVt38mlhWde8l3eJLWkyLrp4HwMUr3c1s/M2t7+kHIhvMjglIBrFpncX1SzMckomGw==
 
+"@esbuild/linux-x64@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/linux-x64/-/linux-x64-0.23.0.tgz#c8409761996e3f6db29abcf9b05bee8d7d80e910"
+  integrity sha512-a3pMQhUEJkITgAw6e0bWA+F+vFtCciMjW/LPtoj99MhVt+Mfb6bbL9hu2wmTZgNd994qTAEw+U/r6k3qHWWaOQ==
+
 "@esbuild/netbsd-x64@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.17.19.tgz#c29fb2453c6b7ddef9a35e2c18b37bda1ae5c462"
   integrity sha512-CwFq42rXCR8TYIjIfpXCbRX0rp1jo6cPIUPSaWwzbVI4aOfX96OXY8M6KNmtPcg7QjYeDmN+DD0Wp3LaBOLf4Q==
 
+"@esbuild/netbsd-x64@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.23.0.tgz#ba70db0114380d5f6cfb9003f1d378ce989cd65c"
+  integrity sha512-cRK+YDem7lFTs2Q5nEv/HHc4LnrfBCbH5+JHu6wm2eP+d8OZNoSMYgPZJq78vqQ9g+9+nMuIsAO7skzphRXHyw==
+
+"@esbuild/openbsd-arm64@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/openbsd-arm64/-/openbsd-arm64-0.23.0.tgz#72fc55f0b189f7a882e3cf23f332370d69dfd5db"
+  integrity sha512-suXjq53gERueVWu0OKxzWqk7NxiUWSUlrxoZK7usiF50C6ipColGR5qie2496iKGYNLhDZkPxBI3erbnYkU0rQ==
+
 "@esbuild/openbsd-x64@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.17.19.tgz#95e75a391403cb10297280d524d66ce04c920691"
   integrity sha512-cnq5brJYrSZ2CF6c35eCmviIN3k3RczmHz8eYaVlNasVqsNY+JKohZU5MKmaOI+KkllCdzOKKdPs762VCPC20g==
 
+"@esbuild/openbsd-x64@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.23.0.tgz#b6ae7a0911c18fe30da3db1d6d17a497a550e5d8"
+  integrity sha512-6p3nHpby0DM/v15IFKMjAaayFhqnXV52aEmv1whZHX56pdkK+MEaLoQWj+H42ssFarP1PcomVhbsR4pkz09qBg==
+
 "@esbuild/sunos-x64@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.17.19.tgz#722eaf057b83c2575937d3ffe5aeb16540da7273"
   integrity sha512-vCRT7yP3zX+bKWFeP/zdS6SqdWB8OIpaRq/mbXQxTGHnIxspRtigpkUcDMlSCOejlHowLqII7K2JKevwyRP2rg==
 
+"@esbuild/sunos-x64@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.23.0.tgz#58f0d5e55b9b21a086bfafaa29f62a3eb3470ad8"
+  integrity sha512-BFelBGfrBwk6LVrmFzCq1u1dZbG4zy/Kp93w2+y83Q5UGYF1d8sCzeLI9NXjKyujjBBniQa8R8PzLFAUrSM9OA==
+
 "@esbuild/win32-arm64@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.17.19.tgz#9aa9dc074399288bdcdd283443e9aeb6b9552b6f"
   integrity sha512-yYx+8jwowUstVdorcMdNlzklLYhPxjniHWFKgRqH7IFlUEa0Umu3KuYplf1HUZZ422e3NU9F4LGb+4O0Kdcaag==
 
+"@esbuild/win32-arm64@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.23.0.tgz#b858b2432edfad62e945d5c7c9e5ddd0f528ca6d"
+  integrity sha512-lY6AC8p4Cnb7xYHuIxQ6iYPe6MfO2CC43XXKo9nBXDb35krYt7KGhQnOkRGar5psxYkircpCqfbNDB4uJbS2jQ==
+
 "@esbuild/win32-ia32@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.17.19.tgz#95ad43c62ad62485e210f6299c7b2571e48d2b03"
   integrity sha512-eggDKanJszUtCdlVs0RB+h35wNlb5v4TWEkq4vZcmVt5u/HiDZrTXe2bWFQUez3RgNHwx/x4sk5++4NSSicKkw==
 
+"@esbuild/win32-ia32@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.23.0.tgz#167ef6ca22a476c6c0c014a58b4f43ae4b80dec7"
+  integrity sha512-7L1bHlOTcO4ByvI7OXVI5pNN6HSu6pUQq9yodga8izeuB1KcT2UkHaH6118QJwopExPn0rMHIseCTx1CRo/uNA==
+
 "@esbuild/win32-x64@0.17.19":
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.17.19.tgz#8cfaf2ff603e9aabb910e9c0558c26cf32744061"
   integrity sha512-lAhycmKnVOuRYNtRtatQR1LPQf2oYCkRGkSFnseDAKPl8lu5SOsK/e1sXe5a0Pc5kHIHe6P2I/ilntNv2xf3cA==
 
+"@esbuild/win32-x64@0.23.0":
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.23.0.tgz#db44a6a08520b5f25bbe409f34a59f2d4bcc7ced"
+  integrity sha512-Arm+WgUFLUATuoxCJcahGuk6Yj9Pzxd6l11Zb/2aAuv5kWWvvfhLFo2fni4uSK5vzlUdCGZ/BdV5tH8klj8p8g==
+
 "@eslint-community/eslint-utils@^4.2.0":
   version "4.4.0"
   resolved "https://registry.yarnpkg.com/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz#a23514e8fb9af1269d5f7788aa556798d61c6b59"
@@ -185,10 +310,10 @@
   resolved "https://registry.yarnpkg.com/@eslint-community/regexpp/-/regexpp-4.11.0.tgz#b0ffd0312b4a3fd2d6f77237e7248a5ad3a680ae"
   integrity sha512-G/M/tIiMrTAxEWRfLfQJMmGNX28IxBg4PBz8XqQhqUHLFI6TL2htpIB1iQCj144V5ee/JaKyT9/WZ0MGZWfA7A==
 
-"@eslint/config-array@^0.17.0":
-  version "0.17.0"
-  resolved "https://registry.yarnpkg.com/@eslint/config-array/-/config-array-0.17.0.tgz#ff305e1ee618a00e6e5d0485454c8d92d94a860d"
-  integrity sha512-A68TBu6/1mHHuc5YJL0U0VVeGNiklLAL6rRmhTCP2B5XjWLMnrX+HkO+IAXyHvks5cyyY1jjK5ITPQ1HGS2EVA==
+"@eslint/config-array@^0.17.1":
+  version "0.17.1"
+  resolved "https://registry.yarnpkg.com/@eslint/config-array/-/config-array-0.17.1.tgz#d9b8b8b6b946f47388f32bedfd3adf29ca8f8910"
+  integrity sha512-BlYOpej8AQ8Ev9xVqroV7a02JK3SkBAaN9GfMMH9W6Ch8FlQlkjGw4Ir7+FgYwfirivAf4t+GtzuAxqfukmISA==
   dependencies:
     "@eslint/object-schema" "^2.1.4"
     debug "^4.3.1"
@@ -209,10 +334,10 @@
     minimatch "^3.1.2"
     strip-json-comments "^3.1.1"
 
-"@eslint/js@9.7.0", "@eslint/js@^9.1.1":
-  version "9.7.0"
-  resolved "https://registry.yarnpkg.com/@eslint/js/-/js-9.7.0.tgz#b712d802582f02b11cfdf83a85040a296afec3f0"
-  integrity sha512-ChuWDQenef8OSFnvuxv0TCVxEwmu3+hPNKvM9B34qpM0rDRbjL8t5QkQeHHeAfsKQjuH9wS82WeCi1J/owatng==
+"@eslint/js@9.8.0", "@eslint/js@^9.8.0":
+  version "9.8.0"
+  resolved "https://registry.yarnpkg.com/@eslint/js/-/js-9.8.0.tgz#ae9bc14bb839713c5056f5018bcefa955556d3a4"
+  integrity sha512-MfluB7EUfxXtv3i/++oh89uzAr4PDI4nn201hsp+qaXqsjAWzinlZEHEfPgAX4doIlKvPG/i0A9dpKxOLII8yA==
 
 "@eslint/object-schema@^2.1.4":
   version "2.1.4"
@@ -502,7 +627,7 @@ es-module-lexer@^1.5.3:
   resolved "https://registry.yarnpkg.com/es-module-lexer/-/es-module-lexer-1.5.4.tgz#a8efec3a3da991e60efa6b633a7cad6ab8d26b78"
   integrity sha512-MVNK56NiMrOwitFB7cqDwq0CQutbw+0BvLshJSse0MUNU+y1FC3bUS/AQg7oUng+/wKrrki7JfmwtVHkVfPLlw==
 
-esbuild@0.17.19, esbuild@^0.17.11:
+esbuild@0.17.19:
   version "0.17.19"
   resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.17.19.tgz#087a727e98299f0462a3d0bcdd9cd7ff100bd955"
   integrity sha512-XQ0jAPFkK/u3LcVRcvVHQcTIqD6E2H1fvZMA5dQPSOWb3suUbWbfbRf94pjc0bNzRYLfIrDRQXr7X+LHIm5oHw==
@@ -530,15 +655,45 @@ esbuild@0.17.19, esbuild@^0.17.11:
     "@esbuild/win32-ia32" "0.17.19"
     "@esbuild/win32-x64" "0.17.19"
 
+esbuild@^0.23.0:
+  version "0.23.0"
+  resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.23.0.tgz#de06002d48424d9fdb7eb52dbe8e95927f852599"
+  integrity sha512-1lvV17H2bMYda/WaFb2jLPeHU3zml2k4/yagNMG8Q/YtfMjCwEUZa2eXXMgZTVSL5q1n4H7sQ0X6CdJDqqeCFA==
+  optionalDependencies:
+    "@esbuild/aix-ppc64" "0.23.0"
+    "@esbuild/android-arm" "0.23.0"
+    "@esbuild/android-arm64" "0.23.0"
+    "@esbuild/android-x64" "0.23.0"
+    "@esbuild/darwin-arm64" "0.23.0"
+    "@esbuild/darwin-x64" "0.23.0"
+    "@esbuild/freebsd-arm64" "0.23.0"
+    "@esbuild/freebsd-x64" "0.23.0"
+    "@esbuild/linux-arm" "0.23.0"
+    "@esbuild/linux-arm64" "0.23.0"
+    "@esbuild/linux-ia32" "0.23.0"
+    "@esbuild/linux-loong64" "0.23.0"
+    "@esbuild/linux-mips64el" "0.23.0"
+    "@esbuild/linux-ppc64" "0.23.0"
+    "@esbuild/linux-riscv64" "0.23.0"
+    "@esbuild/linux-s390x" "0.23.0"
+    "@esbuild/linux-x64" "0.23.0"
+    "@esbuild/netbsd-x64" "0.23.0"
+    "@esbuild/openbsd-arm64" "0.23.0"
+    "@esbuild/openbsd-x64" "0.23.0"
+    "@esbuild/sunos-x64" "0.23.0"
+    "@esbuild/win32-arm64" "0.23.0"
+    "@esbuild/win32-ia32" "0.23.0"
+    "@esbuild/win32-x64" "0.23.0"
+
 escape-string-regexp@^4.0.0:
   version "4.0.0"
   resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34"
   integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==
 
-eslint-plugin-jsdoc@^49.0.0:
-  version "49.0.0"
-  resolved "https://registry.yarnpkg.com/eslint-plugin-jsdoc/-/eslint-plugin-jsdoc-49.0.0.tgz#8520227f30cdd4ce20e321979acf7fa68107a503"
-  integrity sha512-MlLQpFmann7YkTpjJfsWiSSG877RrijLfg5A1lwfl2FAFTM343Kd+HRt/cnLG+rjypSwaWVqgEgtBTIovnS6qA==
+eslint-plugin-jsdoc@^50.0.0:
+  version "50.0.0"
+  resolved "https://registry.yarnpkg.com/eslint-plugin-jsdoc/-/eslint-plugin-jsdoc-50.0.0.tgz#0d064e14e1a8a3624c0474359fc51325b38b0fc9"
+  integrity sha512-czyJ5F7/qY2LIhUD5Bl6q1CCZ8mjvfEA9HQN5nvIp/Pb8VLIlUNd+DMZdA2OKN74QQMS3pobC06hFqAOJyOv5Q==
   dependencies:
     "@es-joy/jsdoccomment" "~0.46.0"
     are-docs-informative "^0.0.2"
@@ -570,16 +725,16 @@ eslint-visitor-keys@^4.0.0:
   resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-4.0.0.tgz#e3adc021aa038a2a8e0b2f8b0ce8f66b9483b1fb"
   integrity sha512-OtIRv/2GyiF6o/d8K7MYKKbXrOUBIK6SfkIRM4Z0dY3w+LiQ0vy3F57m0Z71bjbyeiWFiHJ8brqnmE6H6/jEuw==
 
-eslint@^9.1.1:
-  version "9.7.0"
-  resolved "https://registry.yarnpkg.com/eslint/-/eslint-9.7.0.tgz#bedb48e1cdc2362a0caaa106a4c6ed943e8b09e4"
-  integrity sha512-FzJ9D/0nGiCGBf8UXO/IGLTgLVzIxze1zpfA8Ton2mjLovXdAPlYDv+MQDcqj3TmrhAGYfOpz9RfR+ent0AgAw==
+eslint@^9.8.0:
+  version "9.8.0"
+  resolved "https://registry.yarnpkg.com/eslint/-/eslint-9.8.0.tgz#a4f4a090c8ea2d10864d89a6603e02ce9f649f0f"
+  integrity sha512-K8qnZ/QJzT2dLKdZJVX6W4XOwBzutMYmt0lqUS+JdXgd+HTYFlonFgkJ8s44d/zMPPCnOOk0kMWCApCPhiOy9A==
   dependencies:
     "@eslint-community/eslint-utils" "^4.2.0"
     "@eslint-community/regexpp" "^4.11.0"
-    "@eslint/config-array" "^0.17.0"
+    "@eslint/config-array" "^0.17.1"
     "@eslint/eslintrc" "^3.1.0"
-    "@eslint/js" "9.7.0"
+    "@eslint/js" "9.8.0"
     "@humanwhocodes/module-importer" "^1.0.1"
     "@humanwhocodes/retry" "^0.3.0"
     "@nodelib/fs.walk" "^1.2.8"
@@ -752,11 +907,6 @@ globals@^14.0.0:
   resolved "https://registry.yarnpkg.com/globals/-/globals-14.0.0.tgz#898d7413c29babcf6bafe56fcadded858ada724e"
   integrity sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==
 
-globals@^15.1.0:
-  version "15.8.0"
-  resolved "https://registry.yarnpkg.com/globals/-/globals-15.8.0.tgz#e64bb47b619dd8cbf32b3c1a0a61714e33cbbb41"
-  integrity sha512-VZAJ4cewHTExBWDHR6yptdIBlx9YSSZuwojj9Nt5mBRXQzrKakDsVKQ1J63sklLvzAJm0X5+RpO4i3Y2hcOnFw==
-
 has-flag@^4.0.0:
   version "4.0.0"
   resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b"
@@ -894,10 +1044,10 @@ mime@^3.0.0:
   resolved "https://registry.yarnpkg.com/mime/-/mime-3.0.0.tgz#b374550dca3a0c18443b0c950a6a58f1931cf7a7"
   integrity sha512-jSCU7/VB1loIWBZe14aEYHU/+1UMEHoaO7qxCOVJOw9GgH72VAWppxNcjU+x9a2k3GSIBXNKxXQFqRvvZ7vr3A==
 
-miniflare@3.20240718.0:
-  version "3.20240718.0"
-  resolved "https://registry.yarnpkg.com/miniflare/-/miniflare-3.20240718.0.tgz#41561c6620b2b15803f5b3d2e903ed3af40f3b0b"
-  integrity sha512-TKgSeyqPBeT8TBLxbDJOKPWlq/wydoJRHjAyDdgxbw59N6wbP8JucK6AU1vXCfu21eKhrEin77ssXOpbfekzPA==
+miniflare@3.20240725.0:
+  version "3.20240725.0"
+  resolved "https://registry.yarnpkg.com/miniflare/-/miniflare-3.20240725.0.tgz#8d4683a4d0f8de260514a0df8d83da558eebec5c"
+  integrity sha512-n9NTLI8J9Xt0Cls6dRpqoIPkVFnxD9gMnU/qDkDX9diKfN16HyxpAdA5mto/hKuRpjW19TxnTMcxBo90vZXemw==
   dependencies:
     "@cspotcode/source-map-support" "0.8.1"
     acorn "^8.8.0"
@@ -907,7 +1057,7 @@ miniflare@3.20240718.0:
     glob-to-regexp "^0.4.1"
     stoppable "^1.1.0"
     undici "^5.28.4"
-    workerd "1.20240718.0"
+    workerd "1.20240725.0"
     ws "^8.17.1"
     youch "^3.2.2"
     zod "^3.22.3"
@@ -1281,30 +1431,31 @@ word-wrap@^1.2.5:
   resolved "https://registry.yarnpkg.com/word-wrap/-/word-wrap-1.2.5.tgz#d2c45c6dd4fbce621a66f136cbe328afd0410b34"
   integrity sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==
 
-workerd@1.20240718.0:
-  version "1.20240718.0"
-  resolved "https://registry.yarnpkg.com/workerd/-/workerd-1.20240718.0.tgz#7a397d0a159f7362dc3f7b19472190a858d96f7c"
-  integrity sha512-w7lOLRy0XecQTg/ujTLWBiJJuoQvzB3CdQ6/8Wgex3QxFhV9Pbnh3UbwIuUfMw3OCCPQc4o7y+1P+mISAgp6yg==
+workerd@1.20240725.0:
+  version "1.20240725.0"
+  resolved "https://registry.yarnpkg.com/workerd/-/workerd-1.20240725.0.tgz#df08269651c817548b0e503ae71535cd535ee1a3"
+  integrity sha512-VZwgejRcHsQ9FEPtc7v25ebINLAR+stL3q1hC1xByE+quskdoWpTXHkZwZ3IdSgvm9vPVbCbJw9p5mGnDByW2A==
   optionalDependencies:
-    "@cloudflare/workerd-darwin-64" "1.20240718.0"
-    "@cloudflare/workerd-darwin-arm64" "1.20240718.0"
-    "@cloudflare/workerd-linux-64" "1.20240718.0"
-    "@cloudflare/workerd-linux-arm64" "1.20240718.0"
-    "@cloudflare/workerd-windows-64" "1.20240718.0"
+    "@cloudflare/workerd-darwin-64" "1.20240725.0"
+    "@cloudflare/workerd-darwin-arm64" "1.20240725.0"
+    "@cloudflare/workerd-linux-64" "1.20240725.0"
+    "@cloudflare/workerd-linux-arm64" "1.20240725.0"
+    "@cloudflare/workerd-windows-64" "1.20240725.0"
 
-wrangler@^3.0.0:
-  version "3.65.1"
-  resolved "https://registry.yarnpkg.com/wrangler/-/wrangler-3.65.1.tgz#493bd92b504f9f056cd57bbe2d430797600c914b"
-  integrity sha512-Z5NyrbpGMQCpim/6VnI1im0/Weh5+CU1sdep1JbfFxHjn/Jt9K+MeUq+kCns5ubkkdRx2EYsusB/JKyX2JdJ4w==
+wrangler@^3.69.1:
+  version "3.69.1"
+  resolved "https://registry.yarnpkg.com/wrangler/-/wrangler-3.69.1.tgz#4a89b3c5e407a839cd6c26de861bfdbc9d1b9cf9"
+  integrity sha512-lqgPsaxIP564OJE6f7RIS/iLy+WaY0EN89p2g83nkrPN6PjuC6vB3eC7jgeVZO1ntWjD0X+mEU5ggbERHr899w==
   dependencies:
     "@cloudflare/kv-asset-handler" "0.3.4"
+    "@cloudflare/workers-shared" "0.1.0"
     "@esbuild-plugins/node-globals-polyfill" "^0.2.3"
     "@esbuild-plugins/node-modules-polyfill" "^0.2.2"
     blake3-wasm "^2.1.5"
     chokidar "^3.5.3"
     date-fns "^3.6.0"
     esbuild "0.17.19"
-    miniflare "3.20240718.0"
+    miniflare "3.20240725.0"
     nanoid "^3.3.3"
     path-to-regexp "^6.2.0"
     resolve "^1.22.8"
@@ -1312,6 +1463,7 @@ wrangler@^3.0.0:
     selfsigned "^2.0.1"
     source-map "^0.6.1"
     unenv "npm:unenv-nightly@1.10.0-1717606461.a117952"
+    workerd "1.20240725.0"
     xxhash-wasm "^1.0.1"
   optionalDependencies:
     fsevents "~2.3.2"

From 8f81453b973f5ebac257c7c2ce77b8253e048536 Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Thu, 8 Aug 2024 15:20:04 +0800
Subject: [PATCH 21/24] fix: remove the max_tokens parameter from Anthropic
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/agent/anthropic.js | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/agent/anthropic.js b/src/agent/anthropic.js
index adee2761..207d23bd 100644
--- a/src/agent/anthropic.js
+++ b/src/agent/anthropic.js
@@ -62,7 +62,6 @@ export async function requestCompletionsFromAnthropicAI(params, context, onStrea
         model: context.USER_CONFIG.ANTHROPIC_CHAT_MODEL,
         messages: await Promise.all(messages.map(renderAnthropicMessage)),
         stream: onStream != null,
-        max_tokens: ENV.MAX_TOKEN_LENGTH,
     };
     if (!body.system) {
         delete body.system;

From d7c985dcb11d2d9b66762e24408d22a68461ac95 Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Thu, 8 Aug 2024 15:25:18 +0800
Subject: [PATCH 22/24] feat: add Telegraph image hosting support
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 dist/buildinfo.json     |   2 +-
 dist/index.js           | 209 ++++++++++++++++++++++++++--------------
 dist/timestamp          |   2 +-
 src/agent/anthropic.js  |   3 +-
 src/agent/openai.js     |   3 +-
 src/config/env.js       |   2 +
 src/telegram/message.js |   6 +-
 src/utils/cache.js      |  36 +++++++
 src/utils/image.js      | 103 ++++++++++++++++++++
 src/utils/utils.js      |  70 --------------
 10 files changed, 290 insertions(+), 146 deletions(-)
 create mode 100644 src/utils/cache.js
 create mode 100644 src/utils/image.js

diff --git a/dist/buildinfo.json b/dist/buildinfo.json
index 27a722fc..9b48cd81 100644
--- a/dist/buildinfo.json
+++ b/dist/buildinfo.json
@@ -1 +1 @@
-{"sha":"1fb3fbc","timestamp":1723096985}
\ No newline at end of file
+{"sha":"8f81453","timestamp":1723101877}
\ No newline at end of file
diff --git a/dist/index.js b/dist/index.js
index c9ca540d..ff27d0bb 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -89,9 +89,9 @@ var Environment = class {
   // -- 版本数据 --
   //
   // 当前版本
-  BUILD_TIMESTAMP = 1723096985;
+  BUILD_TIMESTAMP = 1723101877;
   // 当前版本 commit id
-  BUILD_VERSION = "1fb3fbc";
+  BUILD_VERSION = "8f81453";
   // -- 基础配置 --
   /**
    * @type {I18n | null}
@@ -161,6 +161,8 @@ var Environment = class {
   SHOW_REPLY_BUTTON = false;
   // 而外引用消息开关
   EXTRA_MESSAGE_CONTEXT = false;
+  // 开启Telegraph图床
+  TELEGRAPH_ENABLE = false;
   // -- 模式开关 --
   //
   // 使用流模式
@@ -1052,82 +1054,74 @@ ERROR: ${e.message}`;
   }
 }
 
-// src/utils/utils.js
-function renderHTML(body) {
-  return `
-  
-  
-    ChatGPT-Telegram-Workers
-    
-    
-    
-    
-    
-  
-  
-    ${body}
-  
-
-  `;
-}
-function errorToString(e) {
-  return JSON.stringify({
-    message: e.message,
-    stack: e.stack
+    }
+  }
+};
+
+// src/utils/image.js
+var IMAGE_CACHE = new Cache();
+async function fetchImage(url) {
+  if (IMAGE_CACHE[url]) {
+    return IMAGE_CACHE.get(url);
+  }
+  return fetch(url).then((resp) => resp.arrayBuffer()).then((blob) => {
+    IMAGE_CACHE.set(url, blob);
+    return blob;
   });
 }
-async function makeResponse200(resp) {
-  if (resp === null) {
-    return new Response("NOT HANDLED", { status: 200 });
-  }
-  if (resp.status === 200) {
-    return resp;
-  } else {
-    return new Response(resp.body, {
-      status: 200,
-      headers: {
-        "Original-Status": resp.status,
-        ...resp.headers
-      }
-    });
+async function uploadImageToTelegraph(url) {
+  if (url.startsWith("https://telegra.ph")) {
+    return url;
   }
+  const raw = await fetch(url).then((resp2) => resp2.arrayBuffer());
+  const formData = new FormData();
+  formData.append("file", new Blob([raw]), "blob");
+  const resp = await fetch("https://telegra.ph/upload", {
+    method: "POST",
+    body: formData
+  });
+  let [{ src }] = await resp.json();
+  src = `https://telegra.ph${src}`;
+  IMAGE_CACHE.set(url, raw);
+  return src;
 }
 async function urlToBase64String(url) {
   try {
     const { Buffer: Buffer2 } = await import("node:buffer");
-    return fetch(url).then((resp) => resp.arrayBuffer()).then((buffer) => Buffer2.from(buffer).toString("base64"));
+    return fetchImage(url).then((buffer) => Buffer2.from(buffer).toString("base64"));
   } catch {
-    return fetch(url).then((resp) => resp.arrayBuffer()).then((buffer) => btoa(String.fromCharCode.apply(null, new Uint8Array(buffer))));
+    return fetchImage(url).then((buffer) => btoa(String.fromCharCode.apply(null, new Uint8Array(buffer))));
   }
 }
 function getImageFormatFromBase64(base64String) {
@@ -1458,8 +1452,7 @@ async function requestCompletionsFromAnthropicAI(params, context, onStream) {
     system: prompt,
     model: context.USER_CONFIG.ANTHROPIC_CHAT_MODEL,
     messages: await Promise.all(messages.map(renderAnthropicMessage)),
-    stream: onStream != null,
-    max_tokens: ENV.MAX_TOKEN_LENGTH
+    stream: onStream != null
   };
   if (!body.system) {
     delete body.system;
@@ -2226,6 +2219,77 @@ function commandsDocument() {
   });
 }
 
+// src/utils/utils.js
+function renderHTML(body) {
+  return `
+  
+  
+    ChatGPT-Telegram-Workers
+    
+    
+    
+    
+    
+  
+  
+    ${body}
+  
+
+  `;
+}
+function errorToString(e) {
+  return JSON.stringify({
+    message: e.message,
+    stack: e.stack
+  });
+}
+async function makeResponse200(resp) {
+  if (resp === null) {
+    return new Response("NOT HANDLED", { status: 200 });
+  }
+  if (resp.status === 200) {
+    return resp;
+  } else {
+    return new Response(resp.body, {
+      status: 200,
+      headers: {
+        "Original-Status": resp.status,
+        ...resp.headers
+      }
+    });
+  }
+}
+
 // src/telegram/message.js
 async function msgInitChatContext(message, context) {
   await context.initContext(message);
@@ -2392,7 +2456,10 @@ async function msgChatWithLLM(message, context) {
     }
     sizeIndex = Math.max(0, Math.min(sizeIndex, message.photo.length - 1));
     const fileId = message.photo[sizeIndex].file_id;
-    const url = await getFileLink(fileId, context.SHARE_CONTEXT.currentBotToken);
+    let url = await getFileLink(fileId, context.SHARE_CONTEXT.currentBotToken);
+    if (ENV.TELEGRAPH_ENABLE) {
+      url = await uploadImageToTelegraph(url);
+    }
     params.images = [url];
   }
   return chatWithLLM(params, context, null);
diff --git a/dist/timestamp b/dist/timestamp
index 00f7c7e9..87e96339 100644
--- a/dist/timestamp
+++ b/dist/timestamp
@@ -1 +1 @@
-1723096985
\ No newline at end of file
+1723101877
\ No newline at end of file
diff --git a/src/agent/anthropic.js b/src/agent/anthropic.js
index 207d23bd..0c898c3d 100644
--- a/src/agent/anthropic.js
+++ b/src/agent/anthropic.js
@@ -3,7 +3,8 @@ import "../types/agent.js";
 import {anthropicSseJsonParser, Stream} from "./stream.js";
 import {ENV} from "../config/env.js";
 import {requestChatCompletions} from "./request.js";
-import {imageToBase64String} from "../utils/utils.js";
+
+import {imageToBase64String} from "../utils/image.js";
 
 
 /**
diff --git a/src/agent/openai.js b/src/agent/openai.js
index ff4a0db3..f5692b57 100644
--- a/src/agent/openai.js
+++ b/src/agent/openai.js
@@ -1,7 +1,8 @@
 import "../types/context.js";
 import {requestChatCompletions} from "./request.js";
 import {ENV} from "../config/env.js";
-import {imageToBase64String, renderBase64DataURI} from '../utils/utils.js';
+
+import {imageToBase64String, renderBase64DataURI} from "../utils/image.js";
 
 
 /**
diff --git a/src/config/env.js b/src/config/env.js
index c262f581..cee0926d 100644
--- a/src/config/env.js
+++ b/src/config/env.js
@@ -188,6 +188,8 @@ class Environment {
     SHOW_REPLY_BUTTON = false;
     // 而外引用消息开关
     EXTRA_MESSAGE_CONTEXT = false;
+    // 开启Telegraph图床
+    TELEGRAPH_ENABLE = false;
 
     // -- 模式开关 --
     //
diff --git a/src/telegram/message.js b/src/telegram/message.js
index c52195e3..64463e20 100644
--- a/src/telegram/message.js
+++ b/src/telegram/message.js
@@ -6,6 +6,7 @@ import {errorToString} from '../utils/utils.js';
 import {chatWithLLM} from '../agent/llm.js';
 
 import '../types/telegram.js';
+import {uploadImageToTelegraph} from "../utils/image.js";
 
 
 /**
@@ -261,7 +262,10 @@ async function msgChatWithLLM(message, context) {
         }
         sizeIndex = Math.max(0, Math.min(sizeIndex, message.photo.length - 1));
         const fileId = message.photo[sizeIndex].file_id;
-        const url = await getFileLink(fileId, context.SHARE_CONTEXT.currentBotToken);
+        let url = await getFileLink(fileId, context.SHARE_CONTEXT.currentBotToken);
+        if (ENV.TELEGRAPH_ENABLE) {
+            url = await uploadImageToTelegraph(url);
+        }
         params.images = [url];
     }
     return chatWithLLM(params, context, null);
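
The behavioural change in this handler boils down to one conditional: with TELEGRAPH_ENABLE on, the raw Telegram file link is swapped for a re-hosted telegra.ph URL before it lands in params.images. A sketch only, reusing getFileLink, ENV and uploadImageToTelegraph from the diff above; resolveImageUrl is a hypothetical wrapper, not part of the patch:

    // Hypothetical helper summarizing the gating added above.
    async function resolveImageUrl(fileId, context) {
        let url = await getFileLink(fileId, context.SHARE_CONTEXT.currentBotToken);
        if (ENV.TELEGRAPH_ENABLE) {
            url = await uploadImageToTelegraph(url);   // stable public link
        }
        return url;                                    // becomes params.images = [url]
    }
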
diff --git a/src/utils/cache.js b/src/utils/cache.js
new file mode 100644
index 00000000..af16fa42
--- /dev/null
+++ b/src/utils/cache.js
@@ -0,0 +1,36 @@
+export class Cache {
+    constructor() {
+        this.maxItems = 10;
+        this.maxAge = 1000 * 60 * 60;
+        this.cache = {};
+    }
+
+    set(key, value) {
+        this.trim();
+        this.cache[key] = {
+            value,
+            time: Date.now(),
+        };
+    }
+
+    get(key) {
+        this.trim();
+        return this.cache[key].value;
+    }
+
+    trim() {
+        let keys = Object.keys(this.cache);
+        for (const key of keys) {
+            if (Date.now() - this.cache[key].time > this.maxAge) {
+                delete this.cache[key];
+            }
+        }
+        keys = Object.keys(this.cache);
+        if (keys.length > this.maxItems) {
+            keys.sort((a, b) => this.cache[a].time - this.cache[b].time);
+            for (let i = 0; i < keys.length - this.maxItems; i++) {
+                delete this.cache[keys[i]];
+            }
+        }
+    }
+}
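
For reference, a minimal usage sketch of the Cache class added above, using the same relative import as the new src/utils/image.js; the defaults of 10 items and one hour come from the constructor shown in the diff:

    import {Cache} from './cache.js';

    const cache = new Cache();                 // at most 10 entries, kept for up to 1 hour
    cache.set('https://example.com/a.png', new ArrayBuffer(8));

    // trim() runs lazily on every set() and get(): expired entries are dropped
    // first, then the oldest entries beyond maxItems.
    const hit = cache.get('https://example.com/a.png');
    console.log(hit instanceof ArrayBuffer);   // true
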
diff --git a/src/utils/image.js b/src/utils/image.js
new file mode 100644
index 00000000..7f90464c
--- /dev/null
+++ b/src/utils/image.js
@@ -0,0 +1,103 @@
+import {Cache} from "./cache.js";
+
+const IMAGE_CACHE = new Cache();
+
+/**
+ * @param {string} url
+ * @returns {Promise}
+ */
+async function fetchImage(url) {
+    if (IMAGE_CACHE[url]) {
+        return IMAGE_CACHE.get(url);
+    }
+    return fetch(url)
+        .then(resp => resp.arrayBuffer())
+        .then(blob => {
+            IMAGE_CACHE.set(url, blob);
+            return blob;
+        });
+}
+
+/**
+ * @param {string} url
+ * @returns {Promise}
+ */
+export async function uploadImageToTelegraph(url) {
+    if (url.startsWith('https://telegra.ph')) {
+        return url;
+    }
+    const raw = await fetch(url).then(resp => resp.arrayBuffer());
+    const formData = new FormData();
+    formData.append('file', new Blob([raw]), 'blob');
+
+    const resp = await fetch('https://telegra.ph/upload', {
+        method: 'POST',
+        body: formData,
+    });
+    let [{src}] = await resp.json();
+    src = `https://telegra.ph${src}`;
+    IMAGE_CACHE.set(url, raw);
+    return src;
+}
+
+/**
+ * @param {string} url
+ * @returns {Promise}
+ */
+async function urlToBase64String(url) {
+    try {
+        const {Buffer} = await import('node:buffer');
+        return fetchImage(url)
+            .then(buffer => Buffer.from(buffer).toString('base64'));
+    } catch {
+        // 非原生base64编码速度太慢不适合在workers中使用
+        // 在wrangler.toml中添加 Node.js 选项启用nodejs兼容
+        // compatibility_flags = [ "nodejs_compat" ]
+        return fetchImage(url)
+            .then(buffer => btoa(String.fromCharCode.apply(null, new Uint8Array(buffer))));
+    }
+}
+
+/**
+ * @param {string} base64String
+ * @returns {string}
+ */
+function getImageFormatFromBase64(base64String) {
+    const firstChar = base64String.charAt(0);
+    switch (firstChar) {
+        case '/':
+            return 'jpeg';
+        case 'i':
+            return 'png';
+        case 'R':
+            return 'gif';
+        case 'U':
+            return 'webp';
+        default:
+            throw new Error('Unsupported image format');
+    }
+}
+
+/**
+ * @typedef {object} DataBase64
+ * @property {string} data
+ * @property {string} format
+ * @param {string} url
+ * @returns {Promise}
+ */
+export async function imageToBase64String(url) {
+    const base64String = await urlToBase64String(url);
+    const format = getImageFormatFromBase64(base64String);
+    return {
+        data: base64String,
+        format: `image/${format}`
+    };
+}
+
+/**
+ * @param {DataBase64} params
+ * @returns {string}
+ */
+export function renderBase64DataURI(params) {
+    return `data:${params.format};base64,${params.data}`;
+}
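
A short usage sketch of the exported helpers above, mirroring how the rest of this patch wires them up; the import path assumes a caller inside src/, the Telegram file URL is a hypothetical placeholder, and the upload needs network access:

    import {uploadImageToTelegraph, imageToBase64String, renderBase64DataURI} from '../utils/image.js';

    async function demo() {
        // Hypothetical Telegram CDN link, e.g. as returned by getFileLink().
        const fileUrl = 'https://api.telegram.org/file/bot<token>/photos/file_0.jpg';

        // Re-host the image so downstream code gets a stable public URL.
        const publicUrl = await uploadImageToTelegraph(fileUrl);

        // Or inline it as a data URI for providers that expect base64 payloads.
        const {data, format} = await imageToBase64String(fileUrl);
        return {publicUrl, dataURI: renderBase64DataURI({data, format})};
    }
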
diff --git a/src/utils/utils.js b/src/utils/utils.js
index 57fcfe30..509447c6 100644
--- a/src/utils/utils.js
+++ b/src/utils/utils.js
@@ -96,73 +96,3 @@ export async function makeResponse200(resp) {
     }
 }
 
-/**
- * @returns {boolean}
- */
-export function supportsNativeBase64() {
-    return typeof Buffer !== 'undefined';
-}
-
-/**
- * @param {string} url
- * @returns {Promise}
- */
-async function urlToBase64String(url) {
-    try {
-        const {Buffer} = await import('node:buffer');
-        return fetch(url)
-            .then(resp => resp.arrayBuffer())
-            .then(buffer => Buffer.from(buffer).toString('base64'));
-    } catch {
-        // 非原生base64编码速度太慢不适合在workers中使用
-        // 在wrangler.toml中添加 Node.js 选项启用nodejs兼容
-        // compatibility_flags = [ "nodejs_compat" ]
-        return fetch(url)
-            .then(resp => resp.arrayBuffer())
-            .then(buffer => btoa(String.fromCharCode.apply(null, new Uint8Array(buffer))));
-    }
-}
-
-/**
- * @param {string} base64String
- * @returns {string}
- */
-function getImageFormatFromBase64(base64String) {
-    const firstChar = base64String.charAt(0);
-    switch (firstChar) {
-        case '/':
-            return 'jpeg';
-        case 'i':
-            return 'png';
-        case 'R':
-            return 'gif';
-        case 'U':
-            return 'webp';
-        default:
-            throw new Error('Unsupported image format');
-    }
-}
-
-/**
- * @typedef {object} DataBase64
- * @property {string} data
- * @property {string} format
- * @param {string} url
- * @returns {Promise}
- */
-export async function imageToBase64String(url) {
-    const base64String = await urlToBase64String(url);
-    const format = getImageFormatFromBase64(base64String);
-    return {
-        data: base64String,
-        format: `image/${format}`
-    };
-}
-
-/**
- * @param {DataBase64} params
- * @returns {string}
- */
-export function renderBase64DataURI(params) {
-    return `data:${params.format};base64,${params.data}`;
-}

From 018745432d2d60d41dc32fcbad1c324f11f45399 Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Thu, 8 Aug 2024 15:32:24 +0800
Subject: [PATCH 23/24] fix: cache read logic
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/utils/cache.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/utils/cache.js b/src/utils/cache.js
index af16fa42..82f4fc07 100644
--- a/src/utils/cache.js
+++ b/src/utils/cache.js
@@ -15,7 +15,7 @@ export class Cache {
 
     get(key) {
         this.trim();
-        return this.cache[key].value;
+        return this.cache[key]?.value;
     }
 
     trim() {
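
For context, the one-character change above replaces a property access that throws on a cache miss with optional chaining that simply yields undefined, so callers can fall back to fetching. A minimal sketch of the difference:

    const store = {};              // stand-in for this.cache with no entry for 'k'

    // Before the fix: TypeError, because store['k'] is undefined.
    // const v = store['k'].value;

    // After the fix: undefined on a miss, which callers can treat as "not cached".
    const v = store['k']?.value;
    console.log(v);                // undefined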

From fb1e7083fdaf311bfa93682fca93d16c30852370 Mon Sep 17 00:00:00 2001
From: tbxark 
Date: Thu, 8 Aug 2024 15:48:02 +0800
Subject: [PATCH 24/24] build: 1.8.0 pre-release

---
 dist/buildinfo.json     |  2 +-
 dist/index.js           |  6 +++---
 dist/timestamp          |  2 +-
 src/telegram/message.js |  1 -
 src/utils/utils.js      | 11 -----------
 5 files changed, 5 insertions(+), 17 deletions(-)

diff --git a/dist/buildinfo.json b/dist/buildinfo.json
index 9b48cd81..664b87b2 100644
--- a/dist/buildinfo.json
+++ b/dist/buildinfo.json
@@ -1 +1 @@
-{"sha":"8f81453","timestamp":1723101877}
\ No newline at end of file
+{"sha":"0187454","timestamp":1723103175}
\ No newline at end of file
diff --git a/dist/index.js b/dist/index.js
index ff27d0bb..158c1923 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -89,9 +89,9 @@ var Environment = class {
   // -- 版本数据 --
   //
   // 当前版本
-  BUILD_TIMESTAMP = 1723101877;
+  BUILD_TIMESTAMP = 1723103175;
   // 当前版本 commit id
-  BUILD_VERSION = "8f81453";
+  BUILD_VERSION = "0187454";
   // -- 基础配置 --
   /**
    * @type {I18n | null}
@@ -1070,7 +1070,7 @@ var Cache = class {
   }
   get(key) {
     this.trim();
-    return this.cache[key].value;
+    return this.cache[key]?.value;
   }
   trim() {
     let keys = Object.keys(this.cache);
diff --git a/dist/timestamp b/dist/timestamp
index 87e96339..b4412ca7 100644
--- a/dist/timestamp
+++ b/dist/timestamp
@@ -1 +1 @@
-1723101877
\ No newline at end of file
+1723103175
\ No newline at end of file
diff --git a/src/telegram/message.js b/src/telegram/message.js
index 64463e20..f6893866 100644
--- a/src/telegram/message.js
+++ b/src/telegram/message.js
@@ -254,7 +254,6 @@ async function msgChatWithLLM(message, context) {
     const params = {message: content};
     if (message.photo && message.photo.length > 0) {
         let sizeIndex = 0;
-        // 仅在支持原生base64的环境下运行选择更高质量的图片防止workers中base64编码超时
         if (ENV.TELEGRAM_PHOTO_SIZE_OFFSET >= 0) {
             sizeIndex = ENV.TELEGRAM_PHOTO_SIZE_OFFSET;
         } else if (ENV.TELEGRAM_PHOTO_SIZE_OFFSET < 0) {
diff --git a/src/utils/utils.js b/src/utils/utils.js
index 509447c6..86179863 100644
--- a/src/utils/utils.js
+++ b/src/utils/utils.js
@@ -1,14 +1,3 @@
-/**
- * @param {number} length
- * @returns {string}
- */
-export function randomString(length) {
-    const chars = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ';
-    let result = '';
-    for (let i = length; i > 0; --i) result += chars[Math.floor(Math.random() * chars.length)];
-    return result;
-}
-
 /**
  * @param {string} body
  * @returns {string}