Mirror of https://github.com/cdryzun/tg_bot_collections.git (synced 2025-04-29 00:27:09 +08:00)

Merge pull request #27 from F4ria/reuse-bot-reply-markdown

Reuse bot_reply_markdown when streaming output

Commit ec3b42d620
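
In short: the shared helper bot_reply_markdown gains a split_text flag and now returns a bool, and the Claude, Gemini and Yi handlers reuse it for their throttled streaming updates, final replies and error paths instead of hand-rolling bot.reply_to / bot.edit_message_text calls. A usage sketch follows the helper hunks below.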
@@ -28,20 +28,22 @@ def bot_reply_first(message: Message, who: str, bot: TeleBot) -> Message:
     )


-def bot_reply_markdown(reply_id: Message, who: str, text: str, bot: TeleBot) -> None:
+def bot_reply_markdown(
+    reply_id: Message, who: str, text: str, bot: TeleBot, split_text: bool = True
+) -> bool:
     """
     reply the Markdown by take care of the message length.
     it will fallback to plain text in case of any failure
     """
     try:
-        if len(text.encode("utf-8")) <= BOT_MESSAGE_LENGTH:
+        if len(text.encode("utf-8")) <= BOT_MESSAGE_LENGTH or not split_text:
             bot.edit_message_text(
                 f"*{who}*:\n{telegramify_markdown.convert(text)}",
                 chat_id=reply_id.chat.id,
                 message_id=reply_id.message_id,
                 parse_mode="MarkdownV2",
             )
-            return
+            return True

         # Need a split of message
         msgs = smart_split(text, BOT_MESSAGE_LENGTH)
@@ -57,6 +59,8 @@ def bot_reply_markdown(reply_id: Message, who: str, text: str, bot: TeleBot) ->
                 f"*{who}* \[{i+1}/{len(msgs)}\]:\n{telegramify_markdown.convert(msgs[i])}",
                 parse_mode="MarkdownV2",
             )
+
+        return True
     except Exception as e:
         print(traceback.format_exc())
         # print(f"wrong markdown format: {text}")
@@ -65,6 +69,7 @@ def bot_reply_markdown(reply_id: Message, who: str, text: str, bot: TeleBot) ->
             chat_id=reply_id.chat.id,
             message_id=reply_id.message_id,
         )
+        return False


 def extract_prompt(message: str, bot_name: str) -> str:
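
For orientation, a minimal sketch of the calling pattern the handlers below switch to; it is not part of the diff. The 1.7-second throttle and the two call shapes come from the hunks that follow, while stream, chunk and stream_reply are illustrative placeholders:

    def stream_reply(stream, reply_id, who, bot):
        # Sketch only: accumulate the model output and keep editing one placeholder message.
        s = ""
        start = time.time()
        for chunk in stream:  # the real handlers read e.delta.text (Claude) or e.text (Gemini)
            s += chunk
            if time.time() - start > 1.7:  # throttle Telegram edits while streaming
                start = time.time()
                bot_reply_markdown(reply_id, who, s, bot, split_text=False)  # partial text: never split
        # Final render: default split_text=True may split a long answer into several messages.
        # Returns True on MarkdownV2 success, False after the plain-text fallback.
        return bot_reply_markdown(reply_id, who, s, bot)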
@@ -46,8 +46,9 @@ def claude_handler(message: Message, bot: TeleBot) -> None:
         player_message.clear()
         return

+    who = "Claude"
     # show something, make it more responsible
-    reply_id = bot_reply_first(message, "Claude", bot)
+    reply_id = bot_reply_first(message, who, bot)

     player_message.append({"role": "user", "content": m})
     # keep the last 5, every has two ask and answer.
@@ -64,7 +65,7 @@ def claude_handler(message: Message, bot: TeleBot) -> None:
             max_tokens=4096, messages=player_message, model=ANTHROPIC_MODEL
         )
         if not r.content:
-            claude_reply_text = "Claude did not answer."
+            claude_reply_text = f"{who} did not answer."
             player_message.pop()
         else:
             claude_reply_text = r.content[0].text
@@ -76,16 +77,12 @@ def claude_handler(message: Message, bot: TeleBot) -> None:
         )

     except APITimeoutError:
-        bot.reply_to(
-            message,
-            "claude answer:\n" + "claude answer timeout",
-            parse_mode="MarkdownV2",
-        )
+        bot_reply_markdown(reply_id, who, "answer timeout", bot)
         # pop my user
         player_message.clear()
         return

-    bot_reply_markdown(reply_id, "Claude", claude_reply_text, bot)
+    bot_reply_markdown(reply_id, who, claude_reply_text, bot)


 def claude_pro_handler(message: Message, bot: TeleBot) -> None:
@@ -107,6 +104,10 @@ def claude_pro_handler(message: Message, bot: TeleBot) -> None:
         player_message.clear()
         return

+    who = "Claude Pro"
+    # show something, make it more responsible
+    reply_id = bot_reply_first(message, who, bot)
+
     player_message.append({"role": "user", "content": m})
     # keep the last 5, every has two ask and answer.
     if len(player_message) > 10:
@@ -125,45 +126,17 @@ def claude_pro_handler(message: Message, bot: TeleBot) -> None:
         )
         s = ""
         start = time.time()
-        is_send = True
-        reply_id = None
         for e in r:
             if e.type == "content_block_delta":
                 s += e.delta.text
                 if time.time() - start > 1.7:
                     start = time.time()
-                    if is_send:
-                        reply_id = bot.reply_to(
-                            message,
-                            convert(s),
-                            parse_mode="MarkdownV2",
-                        )
-                        is_send = False
-                    else:
-                        try:
-                            # maybe the same message
-                            if not reply_id:
-                                continue
-                            bot.edit_message_text(
-                                message_id=reply_id.message_id,
-                                chat_id=reply_id.chat.id,
-                                text=convert(s),
-                                parse_mode="MarkdownV2",
-                            )
-                        except Exception as e:
-                            print(str(e))
+                    bot_reply_markdown(reply_id, who, s, bot, split_text=False)
+
         try:
-            if not bot_reply_markdown(reply_id, who, s, bot):
-                # maybe not complete
-                # maybe the same message
-                bot.edit_message_text(
-                    message_id=reply_id.message_id,
-                    chat_id=reply_id.chat.id,
-                    text=convert(s),
-                    parse_mode="MarkdownV2",
-                )
+            bot_reply_markdown(reply_id, who, s, bot)
         except Exception as e:
             player_message.clear()
             print(str(e))
             return

         player_message.append(
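
Note on the hunk above: during streaming the handler passes split_text=False so the partial, still-growing answer keeps overwriting the single placeholder message created by bot_reply_first; only the final call after the stream ends uses the default split_text=True and may split an over-length answer across several messages, as the photo-handler hunks further down show.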
@@ -174,11 +147,7 @@ def claude_pro_handler(message: Message, bot: TeleBot) -> None:
         )

     except APITimeoutError:
-        bot.reply_to(
-            message,
-            "claude answer:\n" + "claude answer timeout",
-            parse_mode="MarkdownV2",
-        )
+        bot_reply_markdown(reply_id, who, "answer timeout", bot)
         # pop my user
         player_message.clear()
         return
@@ -186,11 +155,10 @@ def claude_pro_handler(message: Message, bot: TeleBot) -> None:

 def claude_photo_handler(message: Message, bot: TeleBot) -> None:
     s = message.caption
-    reply_message = bot.reply_to(
-        message,
-        "Generating claude vision answer please wait.",
-    )
     prompt = s.strip()
+    who = "Claude Vision"
+    # show something, make it more responsible
+    reply_id = bot_reply_first(message, who, bot)
     # get the high quaility picture.
     max_size_photo = max(message.photo, key=lambda p: p.file_size)
     file_path = bot.get_file(max_size_photo.file_id).file_path
@@ -223,17 +191,21 @@ def claude_photo_handler(message: Message, bot: TeleBot) -> None:
                 },
             ],
             model=ANTHROPIC_MODEL,
+            stream=True,
         )
-        bot.reply_to(message, "Claude vision answer:\n" + r.content[0].text)
+        s = ""
+        start = time.time()
+        for e in r:
+            if e.type == "content_block_delta":
+                s += e.delta.text
+                if time.time() - start > 1.7:
+                    start = time.time()
+                    bot_reply_markdown(reply_id, who, s, bot, split_text=False)
+
+        bot_reply_markdown(reply_id, who, s, bot)
     except Exception as e:
         print(e)
-        bot.reply_to(
-            message,
-            "Claude vision answer:\n" + "claude vision answer wrong",
-            parse_mode="MarkdownV2",
-        )
-    finally:
-        bot.delete_message(reply_message.chat.id, reply_message.message_id)
+        bot_reply_markdown(reply_id, who, "answer wrong", bot)


 def register(bot: TeleBot) -> None:
@@ -69,8 +69,9 @@ def gemini_handler(message: Message, bot: TeleBot) -> None:
         player.history.clear()
         return

+    who = "Gemini"
     # show something, make it more responsible
-    reply_id = bot_reply_first(message, "Gemini", bot)
+    reply_id = bot_reply_first(message, who, bot)

     # keep the last 5, every has two ask and answer.
     if len(player.history) > 10:
@@ -89,14 +90,11 @@ def gemini_handler(message: Message, bot: TeleBot) -> None:
             gemini_reply_text = re.sub(r"\\n", "\n", gemini_reply_text)
         else:
             print("No meaningful text was extracted from the exception.")
-            bot.reply_to(
-                message,
-                "Google gemini encountered an error while generating an answer. Please check the log.",
-            )
+            bot_reply_markdown(reply_id, who, "answer wrong", bot)
             return

     # By default markdown
-    bot_reply_markdown(reply_id, "Gemini", gemini_reply_text, bot)
+    bot_reply_markdown(reply_id, who, gemini_reply_text, bot)


 def gemini_pro_handler(message: Message, bot: TeleBot) -> None:
@@ -117,8 +115,9 @@ def gemini_pro_handler(message: Message, bot: TeleBot) -> None:
         player.history.clear()
         return

+    who = "Gemini Pro"
     # show something, make it more responsible
-    reply_id = bot_reply_first(message, "Geminipro", bot)
+    reply_id = bot_reply_first(message, who, bot)

     # keep the last 5, every has two ask and answer.
     if len(player.history) > 10:
@@ -130,51 +129,28 @@ def gemini_pro_handler(message: Message, bot: TeleBot) -> None:
         start = time.time()
         for e in r:
             s += e.text
-            print(s)
             if time.time() - start > 1.7:
                 start = time.time()
-                try:
-                    # maybe the same message
-                    if not reply_id:
-                        continue
-                    bot.edit_message_text(
-                        message_id=reply_id.message_id,
-                        chat_id=reply_id.chat.id,
-                        text=convert(s),
-                        parse_mode="MarkdownV2",
-                    )
-                except Exception as e:
-                    print(str(e))
+                bot_reply_markdown(reply_id, who, s, bot, split_text=False)
+
         try:
-            if not bot_reply_markdown(reply_id, who, s, bot):
-                # maybe not complete
-                # maybe the same message
-                bot.edit_message_text(
-                    message_id=reply_id.message_id,
-                    chat_id=reply_id.chat.id,
-                    text=convert(s),
-                    parse_mode="MarkdownV2",
-                )
+            bot_reply_markdown(reply_id, who, s, bot)
         except Exception as e:
             player.history.clear()
             print(str(e))
             return
-    except:
-        bot.reply_to(
-            message,
-            "Geminipro answer:\n" + "geminipro answer timeout",
-            parse_mode="MarkdownV2",
-        )
+    except Exception as e:
+        print(e)
+        bot_reply_markdown(reply_id, who, "answer wrong", bot)
         player.history.clear()
         return


 def gemini_photo_handler(message: Message, bot: TeleBot) -> None:
     s = message.caption
-    reply_message = bot.reply_to(
-        message,
-        "Generating google gemini vision answer please wait.",
-    )
     prompt = s.strip()
+    who = "Gemini Vision"
+    # show something, make it more responsible
+    reply_id = bot_reply_first(message, who, bot)
     # get the high quaility picture.
     max_size_photo = max(message.photo, key=lambda p: p.file_size)
     file_path = bot.get_file(max_size_photo.file_id).file_path
@@ -189,10 +165,19 @@ def gemini_photo_handler(message: Message, bot: TeleBot) -> None:
         "parts": [{"mime_type": "image/jpeg", "data": image_data}, {"text": prompt}]
     }
     try:
-        response = model.generate_content(contents=contents)
-        bot.reply_to(message, "Gemini vision answer:\n" + response.text)
-    finally:
-        bot.delete_message(reply_message.chat.id, reply_message.message_id)
+        r = model.generate_content(contents=contents, stream=True)
+        s = ""
+        start = time.time()
+        for e in r:
+            s += e.text
+            if time.time() - start > 1.7:
+                start = time.time()
+                bot_reply_markdown(reply_id, who, s, bot, split_text=False)
+
+        bot_reply_markdown(reply_id, who, s, bot)
+    except Exception as e:
+        print(e)
+        bot_reply_markdown(reply_id, who, "answer wrong", bot)


 def register(bot: TeleBot) -> None:
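
The vision handlers change in the same spirit: instead of posting a temporary "please wait" reply, calling the model in blocking mode, replying once and deleting the placeholder in a finally block, they now create the placeholder with bot_reply_first, stream the response (stream=True) and let bot_reply_markdown keep updating that same message, including the "answer wrong" error path.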
@@ -43,8 +43,9 @@ def yi_handler(message: Message, bot: TeleBot) -> None:
         player_message.clear()
         return

+    who = "Yi"
     # show something, make it more responsible
-    reply_id = bot_reply_first(message, "Yi", bot)
+    reply_id = bot_reply_first(message, who, bot)

     player_message.append({"role": "user", "content": m})
     # keep the last 5, every has two ask and answer.
@@ -61,7 +62,7 @@ def yi_handler(message: Message, bot: TeleBot) -> None:

         content = r.choices[0].message.content.encode("utf8").decode()
         if not content:
-            yi_reply_text = "yi did not answer."
+            yi_reply_text = f"{who} did not answer."
             player_message.pop()
         else:
             yi_reply_text = content
@@ -74,17 +75,13 @@ def yi_handler(message: Message, bot: TeleBot) -> None:

     except Exception as e:
         print(e)
-        bot.reply_to(
-            message,
-            "yi answer:\n" + "yi answer timeout",
-            parse_mode="MarkdownV2",
-        )
+        bot_reply_markdown(reply_id, who, "answer wrong", bot)
         # pop my user
         player_message.pop()
         return

     # reply back as Markdown and fallback to plain text if failed.
-    bot_reply_markdown(reply_id, "Yi", yi_reply_text, bot)
+    bot_reply_markdown(reply_id, who, yi_reply_text, bot)


 def _image_to_data_uri(file_path):
@@ -95,11 +92,10 @@ def _image_to_data_uri(file_path):

 def yi_photo_handler(message: Message, bot: TeleBot) -> None:
     s = message.caption
-    bot.reply_to(
-        message,
-        "Generating yi vision answer please wait.",
-    )
     prompt = s.strip()
+    who = "Yi Vision"
+    # show something, make it more responsible
+    reply_id = bot_reply_first(message, who, bot)
     # get the high quaility picture.
     max_size_photo = max(message.photo, key=lambda p: p.file_size)
     file_path = bot.get_file(max_size_photo.file_id).file_path
@@ -136,14 +132,10 @@ def yi_photo_handler(message: Message, bot: TeleBot) -> None:
     ).json()
     try:
         text = response["choices"][0]["message"]["content"].encode("utf8").decode()
-        bot.reply_to(message, "yi vision answer:\n" + text)
+        bot_reply_markdown(reply_id, who, text, bot)
     except Exception as e:
         print(e)
-        bot.reply_to(
-            message,
-            "yi vision answer:\n" + "yi vision answer wrong",
-            parse_mode="MarkdownV2",
-        )
+        bot_reply_markdown(reply_id, who, "answer wrong", bot)


 def register(bot: TeleBot) -> None: