Mirror of https://github.com/cdryzun/tg_bot_collections.git (synced 2025-04-29 00:27:09 +08:00)
feat: answer it!
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
parent 785b75e994
commit b9ed871d58
@@ -100,7 +100,10 @@ def wrap_handler(handler: T, bot: TeleBot) -> T:
     def wrapper(message: Message, *args: Any, **kwargs: Any) -> None:
         try:
             m = ""
-            if message.text is not None:
+            if message.text == "/answer_it":
+                # for answer_it no args
+                return handler(message, *args, **kwargs)
+            elif message.text is not None:
                 m = message.text = extract_prompt(message.text, bot.get_me().username)
             elif message.caption is not None:
                 m = message.caption = extract_prompt(
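Why the early return matters: /answer_it carries no prompt of its own, so sending it through extract_prompt would leave nothing useful behind, and returning early hands the untouched message straight to the command's handler. A minimal standalone sketch of the same pattern, using simplified stand-ins (FakeMessage and a hypothetical extract_prompt) rather than the real telebot types:

```python
from dataclasses import dataclass
from typing import Any, Callable, Optional


@dataclass
class FakeMessage:
    text: Optional[str] = None
    caption: Optional[str] = None


def extract_prompt(text: str, bot_name: str) -> str:
    # hypothetical helper: drop the bot mention and the leading command word
    return text.replace(f"@{bot_name}", "").split(" ", 1)[-1].strip()


def wrap_handler(handler: Callable[[FakeMessage], Any], bot_name: str) -> Callable[[FakeMessage], Any]:
    def wrapper(message: FakeMessage) -> Any:
        m = ""
        if message.text == "/answer_it":
            # /answer_it has no arguments, so skip prompt extraction entirely
            return handler(message)
        elif message.text is not None:
            m = message.text = extract_prompt(message.text, bot_name)
        if not m:
            return None  # nothing left to answer after stripping the command
        return handler(message)

    return wrapper


wrapped = wrap_handler(lambda msg: print("handled:", msg.text), "demo_bot")
wrapped(FakeMessage(text="/answer_it"))            # handled: /answer_it
wrapped(FakeMessage(text="/gpt @demo_bot hello"))  # handled: hello
```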
@@ -138,6 +141,9 @@ def load_handlers(bot: TeleBot, disable_commands: list[str]) -> None:
     all_commands: list[BotCommand] = []
     for handler in bot.message_handlers:
         help_text = getattr(handler["function"], "__doc__", "")
+        # tricky: ignore the latest_handle_messages catch-all (its docstring is "ignore")
+        if help_text and help_text == "ignore":
+            continue
         # Add pre-processing and error handling to all callbacks
         handler["function"] = wrap_handler(handler["function"], bot)
         for command in handler["filters"].get("commands", []):
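The "ignore" docstring works as a sentinel: load_handlers still wraps every callback, but it skips the catch-all recorder when building the command list, so that handler never shows up in Telegram's command menu. A small sketch of that filtering step, assuming handler entries shaped like telebot's internal message_handlers dicts:

```python
from typing import Any


def latest_handle_messages(message: Any) -> None:
    """ignore"""  # sentinel docstring: keep this handler out of the command menu


def md_handler(message: Any) -> None:
    """pretty md: /md <address>"""


def collect_commands(handlers: list[dict]) -> list[tuple[str, str]]:
    commands = []
    for handler in handlers:
        help_text = getattr(handler["function"], "__doc__", "")
        if help_text and help_text == "ignore":
            continue  # the catch-all recorder has no command to advertise
        for command in handler["filters"].get("commands", []):
            commands.append((command, help_text))
    return commands


handlers = [
    {"function": latest_handle_messages, "filters": {}},
    {"function": md_handler, "filters": {"commands": ["md"]}},
]
print(collect_commands(handlers))  # [('md', 'pretty md: /md <address>')]
```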
@@ -136,15 +136,14 @@ def chatgpt_pro_handler(message: Message, bot: TeleBot) -> None:
             if chunk.choices[0].delta.content is None:
                 break
             s += chunk.choices[0].delta.content
-            if time.time() - start > 1.7:
+            if time.time() - start > 1.2:
                 start = time.time()
                 bot_reply_markdown(reply_id, who, s, bot, split_text=False)
 
-        if not bot_reply_markdown(reply_id, who, s, bot):
-            bot_reply_markdown(reply_id, who, s, bot, split_text=True)
-            # maybe not complete
-            # maybe the same message
-            player_message.clear()
-            return
+        try:
+            bot_reply_markdown(reply_id, who, s, bot, split_text=True)
+        except:
+            pass
+
         player_message.append(
             {
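As shown in the hunk, the streaming edit interval drops from 1.7 s to 1.2 s and the final flush moves into a try/except. The throttle matters because every intermediate update is a Telegram message edit, and editing on every streamed chunk would quickly run into rate limits. A toy sketch of the loop, with fake_stream and send_edit standing in for the OpenAI stream and bot_reply_markdown:

```python
import time


def fake_stream():
    # stand-in for the OpenAI streaming response
    for word in "streaming the answer a few tokens at a time".split():
        time.sleep(0.3)
        yield word + " "


def send_edit(text: str) -> None:
    # stand-in for bot_reply_markdown: one Telegram message edit per call
    print(f"[edit] {text}")


s = ""
start = time.time()
for chunk in fake_stream():
    s += chunk
    if time.time() - start > 1.2:
        start = time.time()
        send_edit(s)  # periodic partial edit, not one edit per chunk

try:
    send_edit(s)  # one final edit with the complete text
except Exception:
    pass  # mirrors the diff: a failed final edit should not crash the handler
```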
@@ -247,6 +247,8 @@ def gemini_audio_handler(message: Message, bot: TeleBot) -> None:
        return


if GOOGLE_GEMINI_KEY:

    def register(bot: TeleBot) -> None:
        bot.register_message_handler(gemini_handler, commands=["gemini"], pass_bot=True)
        bot.register_message_handler(gemini_handler, regexp="^gemini:", pass_bot=True)
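Registration of the Gemini handlers stays behind the GOOGLE_GEMINI_KEY check, so a deployment without the key simply never exposes the command or the "gemini:" prefix trigger instead of failing at call time. A compact sketch of that key-guarded registration, with a FakeBot stand-in for TeleBot:

```python
from os import environ


class FakeBot:
    # stand-in for TeleBot that just records what got registered
    def __init__(self):
        self.registered = []

    def register_message_handler(self, fn, **filters):
        self.registered.append((fn.__name__, filters))


def gemini_handler(message, bot):
    """gemini: /gemini <prompt>"""


GOOGLE_GEMINI_KEY = environ.get("GOOGLE_GEMINI_KEY")

bot = FakeBot()
if GOOGLE_GEMINI_KEY:
    # mirrors the guarded register() above: the command and the "gemini:"
    # prefix both route to the same handler
    bot.register_message_handler(gemini_handler, commands=["gemini"], pass_bot=True)
    bot.register_message_handler(gemini_handler, regexp="^gemini:", pass_bot=True)

print(bot.registered)  # empty unless GOOGLE_GEMINI_KEY is set in the environment
```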
@@ -2,10 +2,59 @@
from telebot import TeleBot
from telebot.types import Message
from expiringdict import ExpiringDict
from os import environ
import time

from openai import OpenAI
import google.generativeai as genai
from google.generativeai import ChatSession
from google.generativeai.types.generation_types import StopCandidateException
from telebot import TeleBot
from telebot.types import Message

from . import *

from telegramify_markdown.customize import markdown_symbol

chat_message_dict = ExpiringDict(max_len=100, max_age_seconds=300)

markdown_symbol.head_level_1 = "📌"  # customize the level-1 heading symbol if you want
markdown_symbol.link = "🔗"  # customize the link symbol if you want

GOOGLE_GEMINI_KEY = environ.get("GOOGLE_GEMINI_KEY")

genai.configure(api_key=GOOGLE_GEMINI_KEY)

generation_config = {
    "temperature": 0.7,
    "top_p": 1,
    "top_k": 1,
    "max_output_tokens": 8192,
}

safety_settings = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
]

model = genai.GenerativeModel(
    model_name="models/gemini-1.5-pro-latest",
    generation_config=generation_config,
    safety_settings=safety_settings,
)
convo = model.start_chat()

#### ChatGPT init ####
CHATGPT_API_KEY = environ.get("OPENAI_API_KEY")
CHATGPT_BASE_URL = environ.get("OPENAI_API_BASE") or "https://api.openai.com/v1"
CHATGPT_PRO_MODEL = "gpt-4o-2024-05-13"


client = OpenAI(api_key=CHATGPT_API_KEY, base_url=CHATGPT_BASE_URL, timeout=20)


def md_handler(message: Message, bot: TeleBot):
    """pretty md: /md <address>"""
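chat_message_dict is the piece that makes /answer_it possible: it is an ExpiringDict keyed by chat id, so each group chat keeps only its most recent message, at most 100 chats are tracked, and an entry silently disappears after 300 seconds. A small demo of that expiry behavior, using the real expiringdict package with the age shortened to 2 seconds so the effect is visible:

```python
import time

from expiringdict import ExpiringDict

# same shape as the module-level cache above, but with a short TTL for the demo
chat_message_dict = ExpiringDict(max_len=100, max_age_seconds=2)

chat_message_dict[12345] = "most recent message in chat 12345"
print(chat_message_dict.get(12345))  # the stored message

time.sleep(3)
print(chat_message_dict.get(12345))  # None: the entry expired after 2 seconds
```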
@@ -14,6 +63,83 @@ def md_handler(message: Message, bot: TeleBot):
    bot_reply_markdown(reply_id, who, message.text.strip(), bot)


def latest_handle_messages(message: Message, bot: TeleBot):
    """ignore"""
    chat_id = message.chat.id
    chat_message_dict[chat_id] = message


def answer_it_handler(message: Message, bot: TeleBot):
    """answer_it: /answer_it"""
    # answer the last message in the chat group
    who = "answer_it"
    # get the last message in the chat

    chat_id = message.chat.id
    latest_message = chat_message_dict.get(chat_id)
    m = latest_message.text.strip()
    m = enrich_text_with_urls(m)
    ##### Gemini #####
    who = "Gemini Pro"
    # show something right away, make it feel more responsive
    reply_id = bot_reply_first(latest_message, who, bot)

    try:
        r = convo.send_message(m, stream=True)
        s = ""
        start = time.time()
        for e in r:
            s += e.text
            if time.time() - start > 1.7:
                start = time.time()
                bot_reply_markdown(reply_id, who, s, bot, split_text=False)

        convo.history.clear()
    except Exception as e:
        print(e)
        convo.history.clear()
        bot_reply_markdown(reply_id, who, "Error", bot)

    ##### ChatGPT #####
    who = "ChatGPT Pro"
    reply_id = bot_reply_first(latest_message, who, bot)

    player_message = [{"role": "user", "content": m}]

    try:
        r = client.chat.completions.create(
            messages=player_message,
            max_tokens=4096,
            model=CHATGPT_PRO_MODEL,
            stream=True,
        )
        s = ""
        start = time.time()
        for chunk in r:
            if chunk.choices[0].delta.content is None:
                break
            s += chunk.choices[0].delta.content
            if time.time() - start > 1.2:
                start = time.time()
                bot_reply_markdown(reply_id, who, s, bot, split_text=True)
        # maybe not complete
        try:
            bot_reply_markdown(reply_id, who, s, bot, split_text=True)
        except:
            pass

    except Exception as e:
        print(e)
        bot_reply_markdown(reply_id, who, "answer wrong", bot)
        return
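One detail worth noting in answer_it_handler: convo is a single module-level Gemini chat session, and its history is cleared on both the success and the error path, which keeps /answer_it stateless and stops one group's question from leaking into the next call. A sketch of that clear-on-every-path pattern with a stand-in chat session (the real one is google.generativeai's ChatSession):

```python
class FakeChatSession:
    # stand-in for the shared module-level `convo` object
    def __init__(self):
        self.history = []

    def send_message(self, text: str) -> str:
        self.history.append(("user", text))
        reply = f"echo: {text}"
        self.history.append(("model", reply))
        return reply


convo = FakeChatSession()


def answer_once(prompt: str) -> str:
    try:
        return convo.send_message(prompt)
    finally:
        convo.history.clear()  # keep the shared session stateless between calls


print(answer_once("first question"), convo.history)   # echo: first question []
print(answer_once("second question"), convo.history)  # echo: second question []
```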
if GOOGLE_GEMINI_KEY and CHATGPT_API_KEY:

    def register(bot: TeleBot) -> None:
        bot.register_message_handler(md_handler, commands=["md"], pass_bot=True)
        bot.register_message_handler(
            answer_it_handler, commands=["answer_it"], pass_bot=True
        )
        bot.register_message_handler(md_handler, regexp="^md:", pass_bot=True)
        bot.register_message_handler(latest_handle_messages, pass_bot=True)
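Putting the new file together: latest_handle_messages is registered last and without filters, so ordinary non-command messages fall through to it and the newest one per chat gets recorded, and /answer_it later replays that message to both models. An end-to-end sketch of the flow with plain Python objects; the explicit None check is an extra guard not present in the diff (chat_message_dict.get() returns None once an entry has expired or was never set):

```python
from expiringdict import ExpiringDict

chat_message_dict = ExpiringDict(max_len=100, max_age_seconds=300)


class FakeMessage:
    # stand-in for telebot's Message with just the fields the flow needs
    def __init__(self, chat_id: int, text: str):
        self.chat_id = chat_id
        self.text = text


def latest_handle_messages(message: FakeMessage) -> None:
    """ignore"""
    chat_message_dict[message.chat_id] = message


def answer_it_handler(message: FakeMessage) -> None:
    latest_message = chat_message_dict.get(message.chat_id)
    if latest_message is None:
        print("nothing recent to answer")  # hypothetical guard, see note above
        return
    prompt = latest_message.text.strip()
    print(f"would ask Gemini and ChatGPT: {prompt!r}")


latest_handle_messages(FakeMessage(1, "How do B-trees stay balanced?"))
answer_it_handler(FakeMessage(1, "/answer_it"))
```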