diff --git a/handlers/kling.py b/handlers/kling.py index 43e84be..c86b09a 100644 --- a/handlers/kling.py +++ b/handlers/kling.py @@ -133,13 +133,3 @@ if KLING_COOKIE: bot.register_message_handler( kling_pro_handler, commands=["kling_pro"], pass_bot=True ) - bot.register_message_handler( - kling_pro_handler, regexp="^kling_pro:", pass_bot=True - ) - bot.register_message_handler( - kling_photo_handler, - content_types=["photo"], - func=lambda m: m.caption - and m.caption.startswith(("kling:", "/kling", "kling:", "/kling")), - pass_bot=True, - ) diff --git a/handlers/llama.py b/handlers/llama.py index c03d15a..d8c0cac 100644 --- a/handlers/llama.py +++ b/handlers/llama.py @@ -15,8 +15,8 @@ markdown_symbol.head_level_1 = "📌" # If you want, Customizing the head level markdown_symbol.link = "🔗" # If you want, Customizing the link symbol LLAMA_API_KEY = environ.get("GROQ_API_KEY") -LLAMA_MODEL = "llama3-8b-8192" -LLAMA_PRO_MODEL = "llama3-70b-8192" +LLAMA_MODEL = "llama-3.1-8b-instant" +LLAMA_PRO_MODEL = "llama-3.1-70b-versatile" if LLAMA_API_KEY: client = Groq(api_key=LLAMA_API_KEY) @@ -61,9 +61,7 @@ def llama_handler(message: Message, bot: TeleBot) -> None: llama_reply_text = "" try: - r = client.chat.completions.create( - messages=player_message, max_tokens=8192, model=LLAMA_MODEL - ) + r = client.chat.completions.create(messages=player_message, model=LLAMA_MODEL) content = r.choices[0].message.content.encode("utf8").decode() if not content: llama_reply_text = f"{who} did not answer." 
@@ -123,7 +121,6 @@ def llama_pro_handler(message: Message, bot: TeleBot) -> None: try: r = client.chat.completions.create( messages=player_message, - max_tokens=8192, model=LLAMA_PRO_MODEL, stream=True, ) diff --git a/handlers/useful.py b/handlers/useful.py index d3009ac..fb72b7a 100644 --- a/handlers/useful.py +++ b/handlers/useful.py @@ -50,8 +50,8 @@ Hint = ( #### LLMs #### GEMINI_USE = True -CHATGPT_USE = False -CLADUE_USE = False +CHATGPT_USE = True +CLADUE_USE = True QWEN_USE = False COHERE_USE = False # Slow, but web search LLAMA_USE = False # prompted for Language