Mirror of https://github.com/cdryzun/tg_bot_collections.git (synced 2025-11-04 08:46:44 +08:00)

	fix: use 405b for llama
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
@@ -133,13 +133,3 @@ if KLING_COOKIE:
         bot.register_message_handler(
             kling_pro_handler, commands=["kling_pro"], pass_bot=True
         )
-        bot.register_message_handler(
-            kling_pro_handler, regexp="^kling_pro:", pass_bot=True
-        )
-        bot.register_message_handler(
-            kling_photo_handler,
-            content_types=["photo"],
-            func=lambda m: m.caption
-            and m.caption.startswith(("kling:", "/kling", "kling:", "/kling")),
-            pass_bot=True,
-        )
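
For reference, the registrations removed above follow pyTelegramBotAPI's external-handler pattern: pass_bot=True tells the library to hand the TeleBot instance to the callback as a second argument, and photo messages are matched by caption prefix because slash commands do not apply to photos. A minimal, self-contained sketch of that pattern (the bot token and handler bodies are placeholders, not taken from the repository):

# Minimal sketch of the registration pattern shown in the removed lines above.
# BOT_TOKEN and the handler bodies are placeholders, not from the repository.
from os import environ

from telebot import TeleBot
from telebot.types import Message

bot = TeleBot(environ.get("BOT_TOKEN", ""))


def kling_pro_handler(message: Message, bot: TeleBot) -> None:
    # pass_bot=True injects the TeleBot instance as the second argument.
    bot.reply_to(message, "kling_pro received")


def kling_photo_handler(message: Message, bot: TeleBot) -> None:
    bot.reply_to(message, "photo with a kling caption received")


# Command form: /kling_pro <prompt>
bot.register_message_handler(
    kling_pro_handler, commands=["kling_pro"], pass_bot=True
)
# Photos carry no command, so match on the caption prefix instead.
bot.register_message_handler(
    kling_photo_handler,
    content_types=["photo"],
    func=lambda m: m.caption and m.caption.startswith(("kling:", "/kling")),
    pass_bot=True,
)
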
@@ -15,8 +15,8 @@ markdown_symbol.head_level_1 = "📌"  # If you want, Customizing the head level
 markdown_symbol.link = "🔗"  # If you want, Customizing the link symbol
 
 LLAMA_API_KEY = environ.get("GROQ_API_KEY")
-LLAMA_MODEL = "llama3-8b-8192"
-LLAMA_PRO_MODEL = "llama3-70b-8192"
+LLAMA_MODEL = "llama-3.1-70b-versatile"
+LLAMA_PRO_MODEL = "llama-3.1-70b-versatile"
 
 if LLAMA_API_KEY:
     client = Groq(api_key=LLAMA_API_KEY)
@@ -61,9 +61,7 @@ def llama_handler(message: Message, bot: TeleBot) -> None:
 
     llama_reply_text = ""
     try:
-        r = client.chat.completions.create(
-            messages=player_message, max_tokens=8192, model=LLAMA_MODEL
-        )
+        r = client.chat.completions.create(messages=player_message, model=LLAMA_MODEL)
         content = r.choices[0].message.content.encode("utf8").decode()
         if not content:
             llama_reply_text = f"{who} did not answer."
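
With max_tokens dropped here, the non-streaming call relies on whatever completion-length default the Groq service applies. A minimal sketch of the resulting call shape against the Groq Python SDK (the prompt and fallback text are illustrative, not from the repository):

# Minimal sketch of the post-change non-streaming call (Groq Python SDK).
# The prompt and fallback text are illustrative, not from the repository.
from os import environ

from groq import Groq

LLAMA_MODEL = "llama-3.1-70b-versatile"

client = Groq(api_key=environ.get("GROQ_API_KEY"))
player_message = [{"role": "user", "content": "Say hi in one sentence."}]

try:
    # max_tokens is omitted, so the service-side default applies.
    r = client.chat.completions.create(messages=player_message, model=LLAMA_MODEL)
    content = r.choices[0].message.content or ""
except Exception as e:
    content = f"Groq request failed: {e}"

print(content)
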
@@ -123,7 +121,6 @@ def llama_pro_handler(message: Message, bot: TeleBot) -> None:
     try:
         r = client.chat.completions.create(
             messages=player_message,
-            max_tokens=8192,
             model=LLAMA_PRO_MODEL,
             stream=True,
         )
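
The pro handler keeps stream=True, so the completion arrives as incremental chunks rather than a single response object. A minimal sketch of consuming such a stream with the Groq SDK (the prompt and final print are illustrative; the actual handler accumulates the text into a Telegram message instead):

# Minimal sketch of consuming the streaming response (Groq Python SDK).
# The prompt and print are illustrative; the real handler edits a Telegram message.
from os import environ

from groq import Groq

LLAMA_PRO_MODEL = "llama-3.1-70b-versatile"

client = Groq(api_key=environ.get("GROQ_API_KEY"))

stream = client.chat.completions.create(
    messages=[{"role": "user", "content": "Explain streaming in one paragraph."}],
    model=LLAMA_PRO_MODEL,
    stream=True,
)

reply = ""
for chunk in stream:
    # Each chunk carries an incremental delta; content may be None on some chunks.
    delta = chunk.choices[0].delta.content
    if delta:
        reply += delta

print(reply)
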
@@ -50,8 +50,8 @@ Hint = (
 #### LLMs ####
 GEMINI_USE = True
 
-CHATGPT_USE = False
-CLADUE_USE = False
+CHATGPT_USE = True
+CLADUE_USE = True
 QWEN_USE = False
 COHERE_USE = False  # Slow, but web search
 LLAMA_USE = False  # prompted for Language
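
These booleans act as feature flags in the example config. A hypothetical sketch of how such flags can gate which handlers get registered (names and wiring are placeholders; this diff does not show the repository's actual mechanism):

# Hypothetical sketch: feature flags gating handler registration.
# Names and wiring are placeholders; not the repository's actual mechanism.
from telebot import TeleBot
from telebot.types import Message

CHATGPT_USE = True
LLAMA_USE = False


def chatgpt_stub_handler(message: Message, bot: TeleBot) -> None:
    bot.reply_to(message, "chatgpt handler stub")


def llama_stub_handler(message: Message, bot: TeleBot) -> None:
    bot.reply_to(message, "llama handler stub")


def register_enabled_handlers(bot: TeleBot) -> None:
    # Register only the handlers whose flag is switched on.
    if CHATGPT_USE:
        bot.register_message_handler(chatgpt_stub_handler, commands=["gpt"], pass_bot=True)
    if LLAMA_USE:
        bot.register_message_handler(llama_stub_handler, commands=["llama"], pass_bot=True)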