diff --git a/handlers/__init__.py b/handlers/__init__.py
index a3ac5ff..ed6801a 100644
--- a/handlers/__init__.py
+++ b/handlers/__init__.py
@@ -26,7 +26,7 @@ DEFAULT_LOAD_PRIORITY = 10
 
 BOT_MESSAGE_LENGTH = 4000
 
-REPLY_MESSAGE_CACHE = ExpiringDict(max_len=1000, max_age_seconds=300)
+REPLY_MESSAGE_CACHE = ExpiringDict(max_len=1000, max_age_seconds=600)
 
 
 def bot_reply_first(message: Message, who: str, bot: TeleBot) -> Message:
diff --git a/handlers/chatgpt.py b/handlers/chatgpt.py
index 08526d3..faf9b41 100644
--- a/handlers/chatgpt.py
+++ b/handlers/chatgpt.py
@@ -24,8 +24,8 @@ client = OpenAI(api_key=CHATGPT_API_KEY, base_url=CHATGPT_BASE_URL, timeout=20)
 
 
 # Global history cache
-chatgpt_player_dict = ExpiringDict(max_len=1000, max_age_seconds=300)
-chatgpt_pro_player_dict = ExpiringDict(max_len=1000, max_age_seconds=300)
+chatgpt_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600)
+chatgpt_pro_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600)
 
 
 def chatgpt_handler(message: Message, bot: TeleBot) -> None:
@@ -81,7 +81,7 @@ def chatgpt_handler(message: Message, bot: TeleBot) -> None:
 
     except Exception as e:
         print(e)
-        bot_reply_markdown(reply_id, who, "answer wrong", bot)
+        bot.reply_to(message, "answer wrong maybe up to the max token")
         # pop my user
         player_message.pop()
         return
@@ -138,7 +138,7 @@ def chatgpt_pro_handler(message: Message, bot: TeleBot) -> None:
             s += chunk.choices[0].delta.content
             if time.time() - start > 1.2:
                 start = time.time()
-                bot_reply_markdown(reply_id, who, s, bot, split_text=True)
+                bot_reply_markdown(reply_id, who, s, bot, split_text=False)
         # maybe not complete
         try:
             bot_reply_markdown(reply_id, who, s, bot, split_text=True)
@@ -154,7 +154,7 @@ def chatgpt_pro_handler(message: Message, bot: TeleBot) -> None:
 
     except Exception as e:
         print(e)
-        bot_reply_markdown(reply_id, who, "answer wrong", bot)
+        # bot.reply_to(message, "answer wrong maybe up to the max token")
         player_message.clear()
         return
 
@@ -199,11 +199,15 @@ def chatgpt_photo_handler(message: Message, bot: TeleBot) -> None:
             if time.time() - start > 2.0:
                 start = time.time()
                 bot_reply_markdown(reply_id, who, s, bot, split_text=False)
+        # maybe not complete
+        try:
+            bot_reply_markdown(reply_id, who, s, bot)
+        except:
+            pass
 
-        bot_reply_markdown(reply_id, who, s, bot)
     except Exception as e:
         print(e)
-        bot_reply_markdown(reply_id, who, "answer wrong", bot)
+        bot.reply_to(message, "answer wrong maybe up to the max token")
 
 
 if CHATGPT_API_KEY:
diff --git a/handlers/cohere.py b/handlers/cohere.py
index 20ee644..ce1c698 100644
--- a/handlers/cohere.py
+++ b/handlers/cohere.py
@@ -25,7 +25,7 @@ TELEGRA_PH_TOKEN = environ.get("TELEGRA_PH_TOKEN")
 ph = TelegraphAPI(TELEGRA_PH_TOKEN)
 
 # Global history cache
-cohere_player_dict = ExpiringDict(max_len=1000, max_age_seconds=300)
+cohere_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600)
 
 
 def clean_text(text):
@@ -159,7 +159,7 @@ def cohere_handler(message: Message, bot: TeleBot) -> None:
 
     except Exception as e:
         print(e)
-        bot_reply_markdown(reply_id, who, "Answer wrong", bot)
+        bot.reply_to(message, "answer wrong maybe up to the max token")
         player_message.clear()
         return
 
diff --git a/handlers/dify.py b/handlers/dify.py
index a1cd013..9bbf642 100644
--- a/handlers/dify.py
+++ b/handlers/dify.py
@@ -22,9 +22,9 @@ if DIFY_API_KEY:
     client = ChatClient(api_key=DIFY_API_KEY)
 
 # Global history cache
-dify_player_dict = ExpiringDict(max_len=1000, max_age_seconds=300)
+dify_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600)
 dify_player_c = ExpiringDict(
-    max_len=1000, max_age_seconds=300
+    max_len=1000, max_age_seconds=600
 )  # History cache is supported by dify cloud conversation_id.
 
 
@@ -96,7 +96,7 @@ def dify_handler(message: Message, bot: TeleBot) -> None:
 
     except Exception as e:
         print(e)
-        bot_reply_markdown(reply_id, who, "answer wrong", bot)
+        bot.reply_to(message, "answer wrong maybe up to the max token")
         # pop my user
         player_message.pop()
         return
diff --git a/handlers/gemini.py b/handlers/gemini.py
index 26a5530..cebe373 100644
--- a/handlers/gemini.py
+++ b/handlers/gemini.py
@@ -34,9 +34,9 @@ safety_settings = [
 ]
 
 # Global history cache
-gemini_player_dict = ExpiringDict(max_len=1000, max_age_seconds=300)
-gemini_pro_player_dict = ExpiringDict(max_len=1000, max_age_seconds=300)
-gemini_file_player_dict = ExpiringDict(max_len=100, max_age_seconds=300)
+gemini_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600)
+gemini_pro_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600)
+gemini_file_player_dict = ExpiringDict(max_len=100, max_age_seconds=600)
 
 
 def make_new_gemini_convo(is_pro=False) -> ChatSession:
@@ -116,7 +116,7 @@ def gemini_handler(message: Message, bot: TeleBot) -> None:
             gemini_reply_text = re.sub(r"\\n", "\n", gemini_reply_text)
         else:
             print("No meaningful text was extracted from the exception.")
-            bot_reply_markdown(reply_id, who, "answer wrong", bot)
+            bot.reply_to(message, "answer wrong maybe up to the max token")
             return
 
     # By default markdown
@@ -167,7 +167,7 @@ def gemini_pro_handler(message: Message, bot: TeleBot) -> None:
         return
     except Exception as e:
         print(e)
-        bot_reply_markdown(reply_id, who, "answer wrong", bot)
+        bot.reply_to(message, "answer wrong maybe up to the max token")
         try:
             player.history.clear()
         except:
@@ -204,10 +204,14 @@ def gemini_photo_handler(message: Message, bot: TeleBot) -> None:
                 start = time.time()
                 bot_reply_markdown(reply_id, who, s, bot, split_text=False)
 
-        bot_reply_markdown(reply_id, who, s, bot)
+        # maybe not complete
+        try:
+            bot_reply_markdown(reply_id, who, s, bot)
+        except:
+            pass
     except Exception as e:
         print(e)
-        bot_reply_markdown(reply_id, who, "answer wrong", bot)
+        bot.reply_to(message, "answer wrong maybe up to the max token")
 
 
 def gemini_audio_handler(message: Message, bot: TeleBot) -> None:
@@ -245,7 +249,7 @@ def gemini_audio_handler(message: Message, bot: TeleBot) -> None:
         return
     except Exception as e:
         print(e)
-        bot_reply_markdown(reply_id, who, "answer wrong", bot)
+        bot.reply_to(message, "answer wrong maybe up to the max token")
         try:
             player.history.clear()
         except:
diff --git a/handlers/llama.py b/handlers/llama.py
index 5dd880d..c03d15a 100644
--- a/handlers/llama.py
+++ b/handlers/llama.py
@@ -22,8 +22,8 @@ if LLAMA_API_KEY:
     client = Groq(api_key=LLAMA_API_KEY)
 
 # Global history cache
-llama_player_dict = ExpiringDict(max_len=1000, max_age_seconds=300)
-llama_pro_player_dict = ExpiringDict(max_len=1000, max_age_seconds=300)
+llama_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600)
+llama_pro_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600)
 
 
 def llama_handler(message: Message, bot: TeleBot) -> None:
@@ -79,7 +79,7 @@ def llama_handler(message: Message, bot: TeleBot) -> None:
 
     except Exception as e:
         print(e)
-        bot_reply_markdown(reply_id, who, "answer wrong", bot)
+        bot.reply_to(message, "answer wrong maybe up to the max token")
         # pop my user
         player_message.pop()
         return
@@ -153,7 +153,7 @@ def llama_pro_handler(message: Message, bot: TeleBot) -> None:
 
     except Exception as e:
         print(e)
-        bot_reply_markdown(reply_id, who, "answer wrong", bot)
+        bot.reply_to(message, "answer wrong maybe up to the max token")
         player_message.clear()
         return
 
diff --git a/handlers/qwen.py b/handlers/qwen.py
index 2cbc94a..7c0d42b 100644
--- a/handlers/qwen.py
+++ b/handlers/qwen.py
@@ -22,8 +22,8 @@ if QWEN_API_KEY:
    client = Together(api_key=QWEN_API_KEY)
 
 # Global history cache
-qwen_player_dict = ExpiringDict(max_len=1000, max_age_seconds=300)
-qwen_pro_player_dict = ExpiringDict(max_len=1000, max_age_seconds=300)
+qwen_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600)
+qwen_pro_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600)
 
 
 def qwen_handler(message: Message, bot: TeleBot) -> None:
@@ -79,7 +79,7 @@ def qwen_handler(message: Message, bot: TeleBot) -> None:
 
     except Exception as e:
         print(e)
-        bot_reply_markdown(reply_id, who, "answer wrong", bot)
+        bot.reply_to(message, "answer wrong maybe up to the max token")
         # pop my user
         player_message.pop()
         return
@@ -152,7 +152,7 @@ def qwen_pro_handler(message: Message, bot: TeleBot) -> None:
 
     except Exception as e:
         print(e)
-        bot_reply_markdown(reply_id, who, "answer wrong", bot)
+        bot.reply_to(message, "answer wrong maybe up to the max token")
         player_message.clear()
         return
 
diff --git a/handlers/useful.py b/handlers/useful.py
index 5b73637..7cbeb37 100644
--- a/handlers/useful.py
+++ b/handlers/useful.py
@@ -46,8 +46,8 @@ Hint = (
 )
 #### LLMs ####
 GEMINI_USE = True
-CHATGPT_USE = False
-CLADUE_USE = False
+CHATGPT_USE = True
+CLADUE_USE = True
 QWEN_USE = True
 COHERE_USE = False  # Slow, but web search
 LLAMA_USE = False  # prompted for Language
@@ -467,7 +467,6 @@ def chatgpt_answer(latest_message: Message, bot: TeleBot, m):
 
     except Exception as e:
         print(f"\n------\n{who} function inner Error:\n{e}\n------\n")
-        bot_reply_markdown(reply_id, who, "answer wrong", bot)
         return f"\n---\n{who}:\nAnswer wrong", reply_id.message_id
 
     return llm_answer(who, s), reply_id.message_id
@@ -506,7 +505,6 @@ def claude_answer(latest_message: Message, bot: TeleBot, m):
 
     except Exception as e:
         print(f"\n------\n{who} function inner Error:\n{e}\n------\n")
-        bot_reply_markdown(reply_id, who, "answer wrong", bot)
         return f"\n---\n{who}:\nAnswer wrong", reply_id.message_id
 
     answer = f"\n---\n{who}:\n{s}"
@@ -581,7 +579,6 @@ def cohere_answer(latest_message: Message, bot: TeleBot, m):
                 pass
     except Exception as e:
         print(f"\n------\n{who} function inner Error:\n{e}\n------\n")
-        bot_reply_markdown(reply_id, who, "Answer wrong", bot)
         return f"\n---\n{who}:\nAnswer wrong", reply_id.message_id
 
     return llm_answer(who, content), reply_id.message_id
@@ -619,7 +616,6 @@ def qwen_answer(latest_message: Message, bot: TeleBot, m):
 
     except Exception as e:
         print(f"\n------\n{who} function inner Error:\n{e}\n------\n")
-        bot_reply_markdown(reply_id, who, "answer wrong", bot)
         return f"\n---\n{who}:\nAnswer wrong", reply_id.message_id
 
     return llm_answer(who, s), reply_id.message_id
@@ -662,7 +658,6 @@ def llama_answer(latest_message: Message, bot: TeleBot, m):
 
     except Exception as e:
         print(f"\n------\n{who} function inner Error:\n{e}\n------\n")
-        bot_reply_markdown(reply_id, who, "answer wrong", bot)
         return f"\n---\n{who}:\nAnswer wrong", reply_id.message_id
 
     return llm_answer(who, s), reply_id.message_id
diff --git a/handlers/yi.py b/handlers/yi.py
index f5a844b..2aa1e1b 100644
--- a/handlers/yi.py
+++ b/handlers/yi.py
@@ -22,8 +22,8 @@ client = OpenAI(
 )
 
 # Global history cache
-yi_player_dict = ExpiringDict(max_len=1000, max_age_seconds=300)
-yi_pro_player_dict = ExpiringDict(max_len=1000, max_age_seconds=300)
+yi_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600)
+yi_pro_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600)
 
 
 def yi_handler(message: Message, bot: TeleBot) -> None:
@@ -82,7 +82,7 @@ def yi_handler(message: Message, bot: TeleBot) -> None:
 
     except Exception as e:
         print(e)
-        bot_reply_markdown(reply_id, who, "answer wrong", bot)
+        bot.reply_to(message, "answer wrong maybe up to the max token")
         # pop my user
         player_message.pop()
         return
@@ -156,7 +156,7 @@ def yi_pro_handler(message: Message, bot: TeleBot) -> None:
 
     except Exception as e:
         print(e)
-        bot_reply_markdown(reply_id, who, "answer wrong", bot)
+        bot.reply_to(message, "answer wrong maybe up to the max token")
         player_message.clear()
         return
 
@@ -206,7 +206,7 @@ def yi_photo_handler(message: Message, bot: TeleBot) -> None:
         bot_reply_markdown(reply_id, who, text, bot)
     except Exception as e:
         print(e)
-        bot_reply_markdown(reply_id, who, "answer wrong", bot)
+        bot.reply_to(message, "answer wrong maybe up to the max token")
 
 
 if YI_API_KEY and YI_BASE_URL: