Mirror of https://github.com/cdryzun/tg_bot_collections.git, synced 2025-04-29 00:27:09 +08:00
refactor
- refactor: modularize the handler
- chore: move the switches to the top under "Customization"
- chore: prompts for the tasks
- feat: support more LLMs as optional
- feat: summarization for the final answer
- feat: asynchronous threads for faster responses
This commit is contained in:
parent f2b73750a8
commit d1d84aa0e3
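The headline change is a fan-out/fan-in pattern: each enabled LLM is submitted to a ThreadPoolExecutor, and the futures are collected afterwards so the answers are produced in parallel instead of sequentially. A minimal, self-contained sketch of that pattern (the ask_* functions are hypothetical stand-ins for the per-model handlers such as gemini_answer and chatgpt_answer):

from concurrent.futures import ThreadPoolExecutor

def ask_fast(question: str) -> str:
    # hypothetical stand-in for one provider's answer function
    return f"fast: {question}"

def ask_slow(question: str) -> str:
    # hypothetical stand-in for a slower provider
    return f"slow: {question}"

def answer_all(question: str) -> str:
    # Fan out: submit every provider before waiting on any result.
    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [executor.submit(fn, question) for fn in (ask_fast, ask_slow)]
        # Fan in: join in submission order, so the combined answer is deterministic.
        return "\n---\n".join(f.result() for f in futures)

print(answer_all("What is a thread pool?"))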
@@ -8,80 +8,130 @@ import time
import datetime
from concurrent.futures import ThreadPoolExecutor

from . import *

from telegramify_markdown.customize import markdown_symbol

# If you want, Customizing the head level 1 symbol
markdown_symbol.head_level_1 = "📌"
markdown_symbol.link = "🔗"  # If you want, Customizing the link symbol

chat_message_dict = ExpiringDict(max_len=100, max_age_seconds=120)
chat_user_dict = ExpiringDict(max_len=100, max_age_seconds=20)

#### Customization ####
Language = "zh-cn"  # "en" or "zh-cn".
SUMMARY = "gemini"  # "cohere" or "gemini" or None
Extra_clean = True  # Will Delete command message
GEMINI_USE = True
CHATGPT_USE = True
COHERE_USE = True
QWEN_USE = True
CLADUE_USE = True
LLAMA_USE = True

#### Telegra.ph init ####
# Will auto generate a token if not provided, restart will lose all TODO
TELEGRA_PH_TOKEN = environ.get("TELEGRA_PH_TOKEN")
# Edit "Store_Token = False" in "__init__.py" to True to store it
ph = TelegraphAPI(TELEGRA_PH_TOKEN)

#### LLMs init ####
#### OpenAI init ####
CHATGPT_API_KEY = environ.get("OPENAI_API_KEY")
CHATGPT_BASE_URL = environ.get("OPENAI_API_BASE") or "https://api.openai.com/v1"
if CHATGPT_USE and CHATGPT_API_KEY:
    from openai import OpenAI

    CHATGPT_PRO_MODEL = "gpt-4o-2024-05-13"
    client = OpenAI(api_key=CHATGPT_API_KEY, base_url=CHATGPT_BASE_URL, timeout=300)


#### Gemini init ####
GOOGLE_GEMINI_KEY = environ.get("GOOGLE_GEMINI_KEY")
if GEMINI_USE and GOOGLE_GEMINI_KEY:
    import google.generativeai as genai
    from google.generativeai import ChatSession
    from google.generativeai.types.generation_types import StopCandidateException

    genai.configure(api_key=GOOGLE_GEMINI_KEY)

    generation_config = {
        "temperature": 0.7,
        "top_p": 1,
        "top_k": 1,
        "max_output_tokens": 8192,
    }

    safety_settings = [
        {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
        {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
    ]

    model = genai.GenerativeModel(
        model_name="gemini-1.5-flash-latest",
        generation_config=generation_config,
        safety_settings=safety_settings,
    )
    model_flash = genai.GenerativeModel(
        model_name="gemini-1.5-flash-latest",
        generation_config=generation_config,
        safety_settings=safety_settings,
        system_instruction=f"""
The user asked a question, and multiple AI have given answers to the same question.
Your task is to summarize the responses from them in a concise and clear manner.
The summary should:
In one to two short sentences, as less as possible, and should not exceed 150 characters.
Your must use language of {Language} to respond.
Start with "Summary:" or "总结:"
""",
    )
    convo = model.start_chat()
    convo_summary = model_flash.start_chat()


#### Cohere init ####
COHERE_API_KEY = environ.get("COHERE_API_KEY")

if COHERE_USE and COHERE_API_KEY:
    import cohere

    COHERE_MODEL = "command-r-plus"
    co = cohere.Client(api_key=COHERE_API_KEY)


#### Qwen init ####
QWEN_API_KEY = environ.get("TOGETHER_API_KEY")

if QWEN_USE and QWEN_API_KEY:
    from together import Together

    QWEN_MODEL = "Qwen/Qwen2-72B-Instruct"
    qwen_client = Together(api_key=QWEN_API_KEY)

#### Claude init ####
ANTHROPIC_API_KEY = environ.get("ANTHROPIC_API_KEY")
# use openai for claude
if CLADUE_USE and ANTHROPIC_API_KEY:
    ANTHROPIC_BASE_URL = environ.get("ANTHROPIC_BASE_URL")
    ANTHROPIC_MODEL = "claude-3-5-sonnet-20240620"
    claude_client = OpenAI(
        api_key=ANTHROPIC_API_KEY, base_url=ANTHROPIC_BASE_URL, timeout=20
    )

#### llama init ####
LLAMA_API_KEY = environ.get("GROQ_API_KEY")
if LLAMA_USE and LLAMA_API_KEY:
    from groq import Groq

    llama_client = Groq(api_key=LLAMA_API_KEY)
    LLAMA_MODEL = "llama3-8b-8192"


#### init end ####


def md_handler(message: Message, bot: TeleBot):
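Each provider above is initialized only when its "Customization" switch is on and its API key is present in the environment. A quick way to see which providers would activate in a given environment (a hedged sketch; the switch defaults and environment-variable names mirror the init section above):

from os import environ

# Assumed mapping of provider -> (switch default, env var), matching the init section above.
providers = {
    "ChatGPT": (True, "OPENAI_API_KEY"),
    "Gemini": (True, "GOOGLE_GEMINI_KEY"),
    "Cohere": (True, "COHERE_API_KEY"),
    "Qwen (Together)": (True, "TOGETHER_API_KEY"),
    "Claude": (True, "ANTHROPIC_API_KEY"),
    "Llama (Groq)": (True, "GROQ_API_KEY"),
}

for name, (use, env_var) in providers.items():
    enabled = use and bool(environ.get(env_var))
    print(f"{name}: {'enabled' if enabled else 'skipped'}")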
@@ -134,35 +184,7 @@ def latest_handle_messages(message: Message, bot: TeleBot):
    print(chat_message_dict[chat_id].text)


def answer_it_handler(message: Message, bot: TeleBot) -> None:
    """answer_it: /answer_it"""
    # answer the last message in the chat group
    who = "answer_it"
@@ -172,85 +194,226 @@ def answer_it_handler(message: Message, bot: TeleBot):
    latest_message = chat_message_dict.get(chat_id)
    m = latest_message.text.strip()
    m = enrich_text_with_urls(m)
    full_answer = f"Question:\n{m}\n---\n"

    #### Answers Thread ####
    executor = ThreadPoolExecutor(max_workers=5)
    if GEMINI_USE and GOOGLE_GEMINI_KEY:
        gemini_future = executor.submit(gemini_answer, latest_message, bot, m)
    if CHATGPT_USE and CHATGPT_API_KEY:
        chatgpt_future = executor.submit(chatgpt_answer, latest_message, bot, m)
    if COHERE_USE and COHERE_API_KEY:
        cohere_future = executor.submit(cohere_answer, latest_message, bot, m)
    if QWEN_USE and QWEN_API_KEY:
        qwen_future = executor.submit(qwen_answer, latest_message, bot, m)
    if CLADUE_USE and ANTHROPIC_API_KEY:
        claude_future = executor.submit(claude_answer, latest_message, bot, m)
    if LLAMA_USE and LLAMA_API_KEY:
        llama_future = executor.submit(llama_answer, latest_message, bot, m)

    #### Answers List ####
    full_chat_id_list = []
    if GEMINI_USE and GOOGLE_GEMINI_KEY:
        answer_gemini, gemini_chat_id = gemini_future.result()
        full_chat_id_list.append(gemini_chat_id)
        full_answer += answer_gemini
    if CHATGPT_USE and CHATGPT_API_KEY:
        anaswer_chatgpt, chatgpt_chat_id = chatgpt_future.result()
        full_chat_id_list.append(chatgpt_chat_id)
        full_answer += anaswer_chatgpt
    if COHERE_USE and COHERE_API_KEY:
        answer_cohere, cohere_chat_id = cohere_future.result()
        full_chat_id_list.append(cohere_chat_id)
        full_answer += answer_cohere
    if QWEN_USE and QWEN_API_KEY:
        answer_qwen, qwen_chat_id = qwen_future.result()
        full_chat_id_list.append(qwen_chat_id)
        full_answer += answer_qwen
    if CLADUE_USE and ANTHROPIC_API_KEY:
        answer_claude, claude_chat_id = claude_future.result()
        full_chat_id_list.append(claude_chat_id)
        full_answer += answer_claude
    if LLAMA_USE and LLAMA_API_KEY:
        answer_llama, llama_chat_id = llama_future.result()
        full_chat_id_list.append(llama_chat_id)
        full_answer += answer_llama

    print(full_chat_id_list)

    ##### Telegraph #####
    final_answer(latest_message, bot, full_answer, full_chat_id_list)
    if Extra_clean:
        bot.delete_message(chat_id, message.message_id)


# def thread_answers(latest_message: Message, bot: TeleBot, m: str):
#     #### answers function init ####
#     USE = {
#         "gemini_answer": GEMINI_USE and GOOGLE_GEMINI_KEY,
#         "chatgpt_answer": CHATGPT_USE and CHATGPT_API_KEY,
#         "cohere_answer": COHERE_USE and COHERE_API_KEY,
#         "qwen_answer": QWEN_USE and QWEN_API_KEY,
#         # More LLMs
#     }
#
#     results = []
#     full_chat_id_list = []
#
#     with ThreadPoolExecutor(max_workers=5) as executor:
#         futures = {
#             executor.submit(func, latest_message, bot, m): func
#             for func, use in USE.items()
#             if use
#         }
#
#         for future in as_completed(futures):
#             try:
#                 answer, message_id = future.result()
#                 # Store the answer and message_id
#                 results.append((message_id, answer))
#                 full_chat_id_list.append(message_id)
#             except Exception as e:
#                 print(f"\n------\nthread_answers Error:\n{e}\n------\n")
#                 continue
#
#     # rank the results by message_id
#     sorted_results = sorted(results)
#     full_chat_id_list.sort()
#
#     # final answer
#     full_answer = f"Question:\n{m}\n---\n"
#     for _, answer in sorted_results:
#         full_answer += answer
#
#     return full_answer, full_chat_id_list


def gemini_answer(latest_message: Message, bot: TeleBot, m):
    """gemini answer"""
    who = "Gemini Pro"
    # show something, make it more responsible
    reply_id = bot_reply_first(latest_message, who, bot)

    try:
        r = convo.send_message(m, stream=True)
        s = ""
        start = time.time()
        for e in r:
            s += e.text
            if time.time() - start > 1.7:
                start = time.time()
                bot_reply_markdown(reply_id, who, s, bot, split_text=False)
        bot_reply_markdown(reply_id, who, s, bot)
        convo.history.clear()
    except Exception as e:
        print(f"\n------\n{who} function inner Error:\n{e}\n------\n")
        convo.history.clear()
        bot_reply_markdown(reply_id, who, "Error", bot)
        return f"\n---\n{who}:\nAnswer wrong", reply_id.message_id

    answer = f"\n---\n{who}:\n{s}"
    return answer, reply_id.message_id


def chatgpt_answer(latest_message: Message, bot: TeleBot, m):
    """chatgpt answer"""
    who = "ChatGPT Pro"
    reply_id = bot_reply_first(latest_message, who, bot)

    player_message = [{"role": "user", "content": m}]

    try:
        r = client.chat.completions.create(
            messages=player_message,
            max_tokens=4096,
            model=CHATGPT_PRO_MODEL,
            stream=True,
        )
        s = ""
        start = time.time()
        for chunk in r:
            if chunk.choices[0].delta.content is None:
                break
            s += chunk.choices[0].delta.content
            if time.time() - start > 1.5:
                start = time.time()
                bot_reply_markdown(reply_id, who, s, bot, split_text=False)
        # maybe not complete
        try:
            bot_reply_markdown(reply_id, who, s, bot)
        except:
            pass

    except Exception as e:
        print(f"\n------\n{who} function inner Error:\n{e}\n------\n")
        bot_reply_markdown(reply_id, who, "answer wrong", bot)
        return f"\n---\n{who}:\nAnswer wrong", reply_id.message_id

    answer = f"\n---\n{who}:\n{s}"
    return answer, reply_id.message_id


def claude_answer(latest_message: Message, bot: TeleBot, m):
    """claude answer"""
    who = "Claude Pro"
    reply_id = bot_reply_first(latest_message, who, bot)

    try:
        r = claude_client.chat.completions.create(
            messages=[{"role": "user", "content": m}],
            max_tokens=4096,
            model=ANTHROPIC_MODEL,
            stream=True,
        )
        s = ""
        start = time.time()
        for chunk in r:
            if chunk.choices[0].delta.content is None:
                break
            s += chunk.choices[0].delta.content
            if time.time() - start > 1.5:
                start = time.time()
                bot_reply_markdown(reply_id, who, s, bot, split_text=False)
        # maybe not complete
        try:
            bot_reply_markdown(reply_id, who, s, bot)
        except:
            pass

    except Exception as e:
        print(f"\n------\n{who} function inner Error:\n{e}\n------\n")
        bot_reply_markdown(reply_id, who, "answer wrong", bot)
        return f"\n---\n{who}:\nAnswer wrong", reply_id.message_id

    answer = f"\n---\n{who}:\n{s}"
    return answer, reply_id.message_id


def cohere_answer(latest_message: Message, bot: TeleBot, m):
    """cohere answer"""
    who = "Command R Plus"
    reply_id = bot_reply_first(latest_message, who, bot)

    try:
        current_time = datetime.datetime.now(datetime.timezone.utc)
        preamble = (
            f"You are Command R Plus, a large language model trained to have polite, helpful, inclusive conversations with people. People are looking for information that may need you to search online. Make an accurate and fast response. If there are no search results, then provide responses based on your general knowledge(It's fine if it's not accurate, it might still inspire the user."
            f"The current UTC time is {current_time.strftime('%Y-%m-%d %H:%M:%S')}, "
            f"UTC-4 (e.g. New York) is {current_time.astimezone(datetime.timezone(datetime.timedelta(hours=-4))).strftime('%Y-%m-%d %H:%M:%S')}, "
            f"UTC-7 (e.g. Los Angeles) is {current_time.astimezone(datetime.timezone(datetime.timedelta(hours=-7))).strftime('%Y-%m-%d %H:%M:%S')}, "
            f"and UTC+8 (e.g. Beijing) is {current_time.astimezone(datetime.timezone(datetime.timedelta(hours=8))).strftime('%Y-%m-%d %H:%M:%S')}."
        )

        stream = co.chat_stream(
            model=COHERE_MODEL,
            message=m,
            temperature=0.8,
            chat_history=[],  # One time, so no need for chat history
            prompt_truncation="AUTO",
            connectors=[{"id": "web-search"}],
            citation_quality="accurate",
            preamble=preamble,
        )

        s = ""
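Every per-model function above follows the same throttled-streaming idea: accumulate the streamed chunks and only edit the Telegram message when enough time has passed (1.5-1.7 s in these handlers, 0.4-0.8 s for the lighter calls), so partial answers stay visible without hammering the edit-message API. A small self-contained sketch of that throttle, with print standing in for bot_reply_markdown (an assumption about that helper's role):

import time

def stream_with_throttle(chunks, min_interval: float = 1.5) -> str:
    # Accumulate streamed text; flush a partial update at most once per min_interval seconds.
    s = ""
    start = time.time()
    for chunk in chunks:
        s += chunk
        if time.time() - start > min_interval:
            start = time.time()
            print(f"partial update ({len(s)} chars)")  # stand-in for an edit-message call
    print(f"final update: {s}")  # one last flush with the complete text
    return s

stream_with_throttle(["Hello, ", "streamed ", "world!"])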
@@ -266,13 +429,13 @@ def cohere_answer(latest_message: Message, bot: TeleBot, full, m):
                for doc in event.documents:
                    source += f"\n{doc['title']}\n{doc['url']}\n"
            elif event.event_type == "text-generation":
                s += event.text.encode("utf-8").decode("utf-8", "ignore")
                if time.time() - start > 0.8:
                    start = time.time()
                    bot_reply_markdown(
                        reply_id,
                        who,
                        f"\nStill thinking{len(s)}...\n{s}",
                        bot,
                        split_text=True,
                    )
@@ -280,34 +443,212 @@ def cohere_answer(latest_message: Message, bot: TeleBot, full, m):
                break
        content = (
            s
            + "\n---\n---\n"
            + source
            + f"\nLast Update{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')} at UTC+8\n"
        )
        # maybe not complete
        try:
            bot_reply_markdown(reply_id, who, s, bot, split_text=True)
        except:
            pass
    except Exception as e:
        print(f"\n------\n{who} function inner Error:\n{e}\n------\n")
        bot_reply_markdown(reply_id, who, "Answer wrong", bot)
        return f"\n---\n{who}:\nAnswer wrong", reply_id.message_id

    answer = f"\n---\n{who}:\n{content}"
    return answer, reply_id.message_id


def qwen_answer(latest_message: Message, bot: TeleBot, m):
    """qwen answer"""
    who = "qwen Pro"
    reply_id = bot_reply_first(latest_message, who, bot)
    try:
        r = qwen_client.chat.completions.create(
            messages=[{"role": "user", "content": m}],
            max_tokens=8192,
            model=QWEN_MODEL,
            stream=True,
        )
        s = ""
        start = time.time()
        for chunk in r:
            if chunk.choices[0].delta.content is None:
                break
            s += chunk.choices[0].delta.content
            if time.time() - start > 1.5:
                start = time.time()
                bot_reply_markdown(reply_id, who, s, bot, split_text=False)
        # maybe not complete
        try:
            bot_reply_markdown(reply_id, who, s, bot)
        except:
            pass

    except Exception as e:
        print(f"\n------\n{who} function inner Error:\n{e}\n------\n")
        bot_reply_markdown(reply_id, who, "answer wrong", bot)
        return f"\n---\n{who}:\nAnswer wrong", reply_id.message_id

    answer = f"\n---\n{who}:\n{s}"
    return answer, reply_id.message_id


def llama_answer(latest_message: Message, bot: TeleBot, m):
    """llama answer"""
    who = "llama"
    reply_id = bot_reply_first(latest_message, who, bot)
    try:
        r = llama_client.chat.completions.create(
            messages=[
                {
                    "role": "user",
                    "content": f"{m}\nMotes: You must use language of {Language} to respond.",
                }
            ],
            max_tokens=8192,
            model=LLAMA_MODEL,
            stream=True,
        )
        s = ""
        start = time.time()
        for chunk in r:
            if chunk.choices[0].delta.content is None:
                break
            s += chunk.choices[0].delta.content
            if time.time() - start > 1.5:
                start = time.time()
                bot_reply_markdown(reply_id, who, s, bot, split_text=False)
        # maybe not complete
        try:
            bot_reply_markdown(reply_id, who, s, bot)
        except:
            pass

    except Exception as e:
        print(f"\n------\n{who} function inner Error:\n{e}\n------\n")
        bot_reply_markdown(reply_id, who, "answer wrong", bot)
        return f"\n---\n{who}:\nAnswer wrong", reply_id.message_id

    answer = f"\n---\n{who}:\n{s}"
    return answer, reply_id.message_id


# TODO: Perplexity looks good. `pplx_answer`


def final_answer(latest_message: Message, bot: TeleBot, full_answer: str, answers_list):
    """final answer"""
    who = "Answer it"
    reply_id = bot_reply_first(latest_message, who, bot)
    ph_s = ph.create_page_md(title="Answer it", markdown_text=full_answer)
    bot_reply_markdown(reply_id, who, f"**[Full Answer]({ph_s})**", bot)
    # delete the chat message, only leave a telegra.ph link
    for i in answers_list:
        bot.delete_message(latest_message.chat.id, i)

    #### Summary ####
    if SUMMARY == None:
        pass
    elif COHERE_USE and COHERE_API_KEY and SUMMARY == "cohere":
        summary_cohere(bot, full_answer, ph_s, reply_id)
    elif GEMINI_USE and GOOGLE_GEMINI_KEY and SUMMARY == "gemini":
        summary_gemini(bot, full_answer, ph_s, reply_id)
    else:
        pass


def summary_cohere(bot: TeleBot, full_answer: str, ph_s: str, reply_id: int) -> None:
    """Receive the full text, and the final_answer's chat_id, update with a summary."""
    who = "Answer it"

    # inherit
    if Language == "zh-cn":
        s = f"**[全文]({ph_s})** | "
    elif Language == "en":
        s = f"**[Full Answer]({ph_s})** | "

    # filter
    length = len(full_answer)  # max 128,000 tokens...
    if length > 50000:
        full_answer = full_answer[:50000]

    try:
        preamble = """
You are Command R Plus, a large language model trained to have polite, helpful, inclusive conversations with people. The user asked a question, and multiple AI have given answers to the same question, but they have different styles, and rarely they have opposite opinions or other issues, but that is normal. Your task is to summarize the responses from them in a concise and clear manner. The summary should:

Be written in bullet points.
Contain between two to ten sentences.
Highlight key points and main conclusions.
Note any significant differences in responses.
Provide a brief indication if users should refer to the full responses for more details.
For the first LLM's content, if it is mostly in any language other than English, respond in that language for all your output.
Start with "Summary:" or "总结:"
"""
        stream = co.chat_stream(
            model=COHERE_MODEL,
            message=full_answer,
            temperature=0.4,
            chat_history=[],
            prompt_truncation="OFF",
            connectors=[],
            preamble=preamble,
        )

        start = time.time()
        for event in stream:
            if event.event_type == "stream-start":
                bot_reply_markdown(reply_id, who, f"{s}Summarizing...", bot)
            elif event.event_type == "text-generation":
                s += event.text.encode("utf-8").decode("utf-8", "ignore")
                if time.time() - start > 0.4:
                    start = time.time()
                    bot_reply_markdown(reply_id, who, s, bot)
            elif event.event_type == "stream-end":
                break

        try:
            bot_reply_markdown(reply_id, who, s, bot)
        except:
            pass

    except Exception as e:
        if Language == "zh-cn":
            bot_reply_markdown(reply_id, who, f"[全文]({ph_s})", bot)
        elif Language == "en":
            bot_reply_markdown(reply_id, who, f"[Full Answer]({ph_s})", bot)
        print(f"\n------\nsummary_cohere function inner Error:\n{e}\n------\n")


def summary_gemini(bot: TeleBot, full_answer: str, ph_s: str, reply_id: int) -> None:
    """Receive the full text, and the final_answer's chat_id, update with a summary."""
    who = "Answer it"

    # inherit
    if Language == "zh-cn":
        s = f"**[全文]({ph_s})** | "
    elif Language == "en":
        s = f"**[Full Answer]({ph_s})** | "

    try:
        r = convo_summary.send_message(full_answer, stream=True)
        start = time.time()
        for e in r:
            s += e.text
            if time.time() - start > 0.4:
                start = time.time()
                bot_reply_markdown(reply_id, who, s, bot, split_text=False)
        bot_reply_markdown(reply_id, who, s, bot)
        convo_summary.history.clear()
    except Exception as e:
        if Language == "zh-cn":
            bot_reply_markdown(reply_id, who, f"[全文]({ph_s})", bot)
        elif Language == "en":
            bot_reply_markdown(reply_id, who, f"[Full Answer]({ph_s})", bot)
        print(f"\n------\nsummary_gemini function inner Error:\n{e}\n------\n")
        bot_reply_markdown(reply_id, who, f"{s}Error", bot)


if GOOGLE_GEMINI_KEY and CHATGPT_API_KEY: