feat: Web Preview (Instant View) switch for cleaner look

Alter-xyz 2024-06-29 03:10:08 -04:00
parent bde2fd061b
commit 4755a4cedd
2 changed files with 36 additions and 25 deletions

View File

@@ -7,7 +7,6 @@ import traceback
 from functools import update_wrapper
 from pathlib import Path
 from typing import Any, Callable, TypeVar
-from expiringdict import ExpiringDict
 import requests
 from telebot import TeleBot
@@ -24,8 +23,6 @@ T = TypeVar("T", bound=Callable)
 BOT_MESSAGE_LENGTH = 4000
 
-REPLY_MESSAGE_CACHE = ExpiringDict(max_len=1000, max_age_seconds=300)
 
 def bot_reply_first(message: Message, who: str, bot: TeleBot) -> Message:
     """Create the first reply message which make user feel the bot is working."""
@@ -35,24 +32,25 @@ def bot_reply_first(message: Message, who: str, bot: TeleBot) -> Message:
 def bot_reply_markdown(
-    reply_id: Message, who: str, text: str, bot: TeleBot, split_text: bool = True
+    reply_id: Message,
+    who: str,
+    text: str,
+    bot: TeleBot,
+    split_text: bool = True,
+    disable_web_page_preview: bool = False,
 ) -> bool:
     """
     reply the Markdown by take care of the message length.
     it will fallback to plain text in case of any failure
     """
     try:
-        cache_key = f"{reply_id.chat.id}_{reply_id.message_id}"
-        if cache_key in REPLY_MESSAGE_CACHE and REPLY_MESSAGE_CACHE[cache_key] == text:
-            print(f"Skipping duplicate message for {cache_key}")
-            return True
-        REPLY_MESSAGE_CACHE[cache_key] = text
         if len(text.encode("utf-8")) <= BOT_MESSAGE_LENGTH or not split_text:
             bot.edit_message_text(
                 f"*{who}*:\n{telegramify_markdown.convert(text)}",
                 chat_id=reply_id.chat.id,
                 message_id=reply_id.message_id,
                 parse_mode="MarkdownV2",
+                disable_web_page_preview=disable_web_page_preview,
             )
             return True
@@ -63,6 +61,7 @@ def bot_reply_markdown(
             chat_id=reply_id.chat.id,
             message_id=reply_id.message_id,
             parse_mode="MarkdownV2",
+            disable_web_page_preview=disable_web_page_preview,
         )
         for i in range(1, len(msgs)):
             bot.reply_to(
@@ -79,6 +78,7 @@ def bot_reply_markdown(
                 f"*{who}*:\n{text}",
                 chat_id=reply_id.chat.id,
                 message_id=reply_id.message_id,
+                disable_web_page_preview=disable_web_page_preview,
             )
         return False
@@ -286,7 +286,7 @@ class TelegraphAPI:
         data = {
             "access_token": self.access_token,
             "title": title,
-            "content": json.dumps(content, ensure_ascii=False),
+            "content": json.dumps(content),
             "return_content": return_content,
             "author_name": author_name if author_name else self.author_name,
             "author_url": author_url if author_url else self.author_url,

View File

@@ -31,17 +31,18 @@ Language = "zh-cn"  # "en" or "zh-cn".
 SUMMARY = "gemini"  # "cohere" or "gemini" or None
 General_clean = True  # Will Delete LLM message
 Extra_clean = True  # Will Delete command message too
+Link_Clean = False  # True will disable Instant View / Web Preview
 
 #### LLMs ####
 GEMINI_USE = True
 CHATGPT_USE = True
-COHERE_USE = False  # Slow, but web search
+CLADUE_USE = True
 QWEN_USE = True
-CLADUE_USE = False  # Untested
+COHERE_USE = False  # Slow, but web search
 LLAMA_USE = False  # prompted for Language
 
 COHERE_USE_BACKGROUND = True  # Only display in telegra.ph
-LLAMA_USE_BACKGROUND = True
+LLAMA_USE_BACKGROUND = True  # But telegra.ph's **instant view** may not up to date
 
 #### LLMs init ####
 #### OpenAI init ####
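
This hunk only adds the `Link_Clean` constant; the diff does not show where it is read. A plausible wiring, stated purely as an assumption, is to forward it into the new `disable_web_page_preview` keyword at the Markdown reply call sites:

```python
# Assumed wiring for Link_Clean -- illustration only, not shown in this commit.
from handlers import bot_reply_markdown  # assumed module path

Link_Clean = False  # True will disable Instant View / Web Preview


def reply_markdown_with_policy(reply_id, who, text, bot) -> bool:
    # Hypothetical helper: every Markdown reply honors the global toggle.
    return bot_reply_markdown(
        reply_id, who, text, bot, disable_web_page_preview=Link_Clean
    )
```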
@@ -542,25 +543,33 @@ def final_answer(latest_message: Message, bot: TeleBot, full_answer: str, answer
     #### Summary ####
     if SUMMARY == None:
         pass
-    elif COHERE_USE and COHERE_API_KEY and SUMMARY == "cohere":
-        summary_cohere(bot, full_answer, ph_s, reply_id)
-    elif GEMINI_USE and GOOGLE_GEMINI_KEY and SUMMARY == "gemini":
-        summary_gemini(bot, full_answer, ph_s, reply_id)
     else:
-        pass
+        s = llm_summary(bot, full_answer, ph_s, reply_id)
+        bot_reply_markdown(reply_id, who, s, bot, disable_web_page_preview=True)
 
     #### Background LLM ####
-    # Run background llm, no show to telegram, just update the page, Good for slow llm
+    # Run background llm, no show to telegram, just update the ph page, Good for slow llm
     if LLAMA_USE_BACKGROUND and LLAMA_API_KEY:
         llama_b_m = background_llama(latest_message.text)
-        print(llama_b_m)
         full_answer = llm_background(ph_s, full_answer, llama_b_m)
     if COHERE_USE_BACKGROUND and COHERE_API_KEY:
         cohere_b_m = background_cohere(latest_message.text)
-        print(cohere_b_m)
         full_answer = llm_background(ph_s, full_answer, cohere_b_m)
 
 
+def llm_summary(bot, full_answer, ph_s, reply_id) -> str:
+    """llm summary return the summary of the full answer."""
+    if SUMMARY == "gemini":
+        s = summary_gemini(bot, full_answer, ph_s, reply_id)
+    elif SUMMARY == "cohere":
+        s = summary_cohere(bot, full_answer, ph_s, reply_id)
+    else:
+        print(f"\n---\nSummary Fail\n---\n")
+        s = f"**[Full Answer]({ph_s})**\n~~Summary Answer Wrong~~\n"
+    return s
+
+
 def background_cohere(m: str) -> str:
     """we run cohere get the full answer in background"""
     who = "Command R Plus"
@@ -621,7 +630,7 @@ def background_llama(m: str) -> str:
     return llm_answer(who, s)
 
 
-def summary_cohere(bot: TeleBot, full_answer: str, ph_s: str, reply_id: int) -> None:
+def summary_cohere(bot: TeleBot, full_answer: str, ph_s: str, reply_id: int) -> str:
     """Receive the full text, and the final_answer's chat_id, update with a summary."""
     who = "Answer it"
@@ -674,6 +683,7 @@ Start with "Summary:" or "总结:"
             bot_reply_markdown(reply_id, who, s, bot)
         except:
             pass
+        return s
 
     except Exception as e:
         if Language == "zh-cn":
@@ -689,9 +699,9 @@ def summary_gemini(bot: TeleBot, full_answer: str, ph_s: str, reply_id: int) ->
     # inherit
     if Language == "zh-cn":
-        s = f"**[全文]({ph_s})** | "
+        s = f"**[🔗全文]({ph_s})** | "
     elif Language == "en":
-        s = f"**[Full Answer]({ph_s})** | "
+        s = f"**[🔗Full Answer]({ph_s})** | "
 
     try:
         r = convo_summary.send_message(full_answer, stream=True)
@@ -703,6 +713,7 @@ def summary_gemini(bot: TeleBot, full_answer: str, ph_s: str, reply_id: int) ->
             bot_reply_markdown(reply_id, who, s, bot, split_text=False)
         bot_reply_markdown(reply_id, who, s, bot)
         convo_summary.history.clear()
+        return s
     except Exception as e:
         if Language == "zh-cn":
             bot_reply_markdown(reply_id, who, f"[全文]({ph_s})", bot)