diff --git a/handlers/__init__.py b/handlers/__init__.py index 79ca3ca..db934e1 100644 --- a/handlers/__init__.py +++ b/handlers/__init__.py @@ -7,7 +7,6 @@ import traceback from functools import update_wrapper from pathlib import Path from typing import Any, Callable, TypeVar -from expiringdict import ExpiringDict import requests from telebot import TeleBot @@ -24,8 +23,6 @@ T = TypeVar("T", bound=Callable) BOT_MESSAGE_LENGTH = 4000 -REPLY_MESSAGE_CACHE = ExpiringDict(max_len=1000, max_age_seconds=300) - def bot_reply_first(message: Message, who: str, bot: TeleBot) -> Message: """Create the first reply message which make user feel the bot is working.""" @@ -35,24 +32,25 @@ def bot_reply_first(message: Message, who: str, bot: TeleBot) -> Message: def bot_reply_markdown( - reply_id: Message, who: str, text: str, bot: TeleBot, split_text: bool = True + reply_id: Message, + who: str, + text: str, + bot: TeleBot, + split_text: bool = True, + disable_web_page_preview: bool = False, ) -> bool: """ reply the Markdown by take care of the message length. 
it will fallback to plain text in case of any failure """ try: - cache_key = f"{reply_id.chat.id}_{reply_id.message_id}" - if cache_key in REPLY_MESSAGE_CACHE and REPLY_MESSAGE_CACHE[cache_key] == text: - print(f"Skipping duplicate message for {cache_key}") - return True - REPLY_MESSAGE_CACHE[cache_key] = text if len(text.encode("utf-8")) <= BOT_MESSAGE_LENGTH or not split_text: bot.edit_message_text( f"*{who}*:\n{telegramify_markdown.convert(text)}", chat_id=reply_id.chat.id, message_id=reply_id.message_id, parse_mode="MarkdownV2", + disable_web_page_preview=disable_web_page_preview, ) return True @@ -63,6 +61,7 @@ def bot_reply_markdown( chat_id=reply_id.chat.id, message_id=reply_id.message_id, parse_mode="MarkdownV2", + disable_web_page_preview=disable_web_page_preview, ) for i in range(1, len(msgs)): bot.reply_to( @@ -79,6 +78,7 @@ def bot_reply_markdown( f"*{who}*:\n{text}", chat_id=reply_id.chat.id, message_id=reply_id.message_id, + disable_web_page_preview=disable_web_page_preview, ) return False @@ -286,7 +286,7 @@ class TelegraphAPI: data = { "access_token": self.access_token, "title": title, - "content": json.dumps(content, ensure_ascii=False), + "content": json.dumps(content), "return_content": return_content, "author_name": author_name if author_name else self.author_name, "author_url": author_url if author_url else self.author_url, diff --git a/handlers/useful.py b/handlers/useful.py index 2fe1975..1895414 100644 --- a/handlers/useful.py +++ b/handlers/useful.py @@ -31,17 +31,18 @@ Language = "zh-cn" # "en" or "zh-cn". 
SUMMARY = "gemini" # "cohere" or "gemini" or None General_clean = True # Will Delete LLM message Extra_clean = True # Will Delete command message too - +Link_Clean = False # True will disable Instant View / Web Preview #### LLMs #### GEMINI_USE = True CHATGPT_USE = True -COHERE_USE = False # Slow, but web search +CLADUE_USE = True QWEN_USE = True -CLADUE_USE = False # Untested + +COHERE_USE = False # Slow, but web search LLAMA_USE = False # prompted for Language COHERE_USE_BACKGROUND = True # Only display in telegra.ph -LLAMA_USE_BACKGROUND = True +LLAMA_USE_BACKGROUND = True # But telegra.ph's **instant view** may not be up to date #### LLMs init #### #### OpenAI init #### @@ -542,25 +543,33 @@ def final_answer(latest_message: Message, bot: TeleBot, full_answer: str, answer #### Summary #### if SUMMARY == None: pass - elif COHERE_USE and COHERE_API_KEY and SUMMARY == "cohere": - summary_cohere(bot, full_answer, ph_s, reply_id) - elif GEMINI_USE and GOOGLE_GEMINI_KEY and SUMMARY == "gemini": - summary_gemini(bot, full_answer, ph_s, reply_id) else: - pass + s = llm_summary(bot, full_answer, ph_s, reply_id) + bot_reply_markdown(reply_id, who, s, bot, disable_web_page_preview=True) #### Background LLM #### - # Run background llm, no show to telegram, just update the page, Good for slow llm + # Run background LLM; not shown in Telegram, just updates the ph page. Good for slow LLMs if LLAMA_USE_BACKGROUND and LLAMA_API_KEY: llama_b_m = background_llama(latest_message.text) - print(llama_b_m) full_answer = llm_background(ph_s, full_answer, llama_b_m) + if COHERE_USE_BACKGROUND and COHERE_API_KEY: cohere_b_m = background_cohere(latest_message.text) - print(cohere_b_m) full_answer = llm_background(ph_s, full_answer, cohere_b_m) +def llm_summary(bot, full_answer, ph_s, reply_id) -> str: + """llm summary return the summary of the full answer.""" + if SUMMARY == "gemini": + s = summary_gemini(bot, full_answer, ph_s, reply_id) + elif SUMMARY == "cohere": + s = summary_cohere(bot, 
full_answer, ph_s, reply_id) + else: + print(f"\n---\nSummary Fail\n---\n") + s = f"**[Full Answer]({ph_s})**\n~~Summary Answer Wrong~~\n" + return s + + def background_cohere(m: str) -> str: """we run cohere get the full answer in background""" who = "Command R Plus" @@ -621,7 +630,7 @@ def background_llama(m: str) -> str: return llm_answer(who, s) -def summary_cohere(bot: TeleBot, full_answer: str, ph_s: str, reply_id: int) -> None: +def summary_cohere(bot: TeleBot, full_answer: str, ph_s: str, reply_id: int) -> str: """Receive the full text, and the final_answer's chat_id, update with a summary.""" who = "Answer it" @@ -674,6 +683,7 @@ Start with "Summary:" or "总结:" bot_reply_markdown(reply_id, who, s, bot) except: pass + return s except Exception as e: if Language == "zh-cn": @@ -689,9 +699,9 @@ def summary_gemini(bot: TeleBot, full_answer: str, ph_s: str, reply_id: int) -> # inherit if Language == "zh-cn": - s = f"**[全文]({ph_s})** | " + s = f"**[🔗全文]({ph_s})** | " elif Language == "en": - s = f"**[Full Answer]({ph_s})** | " + s = f"**[🔗Full Answer]({ph_s})** | " try: r = convo_summary.send_message(full_answer, stream=True) @@ -703,6 +713,7 @@ def summary_gemini(bot: TeleBot, full_answer: str, ph_s: str, reply_id: int) -> bot_reply_markdown(reply_id, who, s, bot, split_text=False) bot_reply_markdown(reply_id, who, s, bot) convo_summary.history.clear() + return s except Exception as e: if Language == "zh-cn": bot_reply_markdown(reply_id, who, f"[全文]({ph_s})", bot)