From 0b60ae2fbe2537f12b560bc0db86ba839e3679a9 Mon Sep 17 00:00:00 2001 From: Frost Ming Date: Tue, 8 Jul 2025 11:41:57 +0800 Subject: [PATCH] feat: add summary and search commands (#54) * feat: add summary and search commands Signed-off-by: Frost Ming * fix formats Signed-off-by: Frost Ming * fix: clean up Signed-off-by: Frost Ming --- .env.example | 16 +- .gitignore | 2 + config.py | 35 ++ handlers/__init__.py | 485 +------------- handlers/_telegraph.py | 252 ++++++++ handlers/{tts.py => _tts.py} | 0 handlers/_utils.py | 209 +++++++ handlers/{yi.py => _yi.py} | 8 +- handlers/chatgpt.py | 48 +- handlers/claude.py | 9 +- handlers/cohere.py | 23 +- handlers/dify.py | 12 +- handlers/fake_liuneng.py | 9 +- handlers/gemini.py | 19 +- handlers/github.py | 21 +- handlers/kling.py | 24 +- handlers/llama.py | 19 +- handlers/map.py | 7 +- handlers/qwen.py | 19 +- handlers/sd.py | 61 +- handlers/summary/__init__.py | 139 ++++ handlers/summary/__main__.py | 49 ++ handlers/summary/messages.py | 164 +++++ handlers/summary/utils.py | 48 ++ handlers/tweet.py | 4 +- handlers/useful.py | 1147 ---------------------------------- pdm.lock | 928 ++++++--------------------- pyproject.toml | 12 +- requirements.txt | 49 +- setup.sh | 20 +- tg.py | 29 +- 31 files changed, 1279 insertions(+), 2588 deletions(-) create mode 100644 config.py create mode 100644 handlers/_telegraph.py rename handlers/{tts.py => _tts.py} (100%) create mode 100644 handlers/_utils.py rename handlers/{yi.py => _yi.py} (99%) create mode 100644 handlers/summary/__init__.py create mode 100644 handlers/summary/__main__.py create mode 100644 handlers/summary/messages.py create mode 100644 handlers/summary/utils.py delete mode 100644 handlers/useful.py diff --git a/.env.example b/.env.example index 373cdcc..eaf7273 100644 --- a/.env.example +++ b/.env.example @@ -1,8 +1,8 @@ -Google_Gemini_API_Key="your_gemini_api_key" -Telegram_Bot_Token="your_telegram_bot_token" -Anthropic_API_Key="your_anthropic_api_key" -Openai_API_Key="your_openai_api_key" -Yi_API_Key="your_yi_api_key" -Yi_Base_Url="your_yi_base_url" -Python_Bin_Path="" -Python_Venv_Path="venv" +GOOGLE_GEMINI_API_KEY="your_gemini_api_key" +TELEGRAM_BOT_TOKEN="your_telegram_bot_token" +ANTHROPIC_API_KEY="your_anthropic_api_key" +OPENAI_API_KEY="your_openai_api_key" +YI_API_KEY="your_yi_api_key" +YI_BASE_URL="your_yi_base_url" +PYTHON_BIN_PATH="" +PYTHON_VENV_PATH="venv" diff --git a/.gitignore b/.gitignore index d4d2196..3fccb7f 100644 --- a/.gitignore +++ b/.gitignore @@ -170,3 +170,5 @@ nohup.out .pdm-python *.wav token_key.json +messages.db +*.session diff --git a/config.py b/config.py new file mode 100644 index 0000000..89607ad --- /dev/null +++ b/config.py @@ -0,0 +1,35 @@ +from functools import cached_property + +import openai +from pydantic_settings import BaseSettings, SettingsConfigDict + + +class Settings(BaseSettings): + model_config = SettingsConfigDict(env_file=".env") + + telegram_bot_token: str + timezone: str = "Asia/Shanghai" + + openai_api_key: str | None = None + openai_model: str = "gpt-4o-mini" + openai_base_url: str = "https://api.openai.com/v1" + + google_gemini_api_key: str | None = None + anthropic_api_key: str | None = None + telegra_ph_token: str | None = None + + @cached_property + def openai_client(self) -> openai.OpenAI: + return openai.OpenAI( + api_key=self.openai_api_key, + base_url=self.openai_base_url, + ) + + @cached_property + def telegraph_client(self): + from handlers._telegraph import TelegraphAPI + + return TelegraphAPI(self.telegra_ph_token) + + 
+settings = Settings() # type: ignore diff --git a/handlers/__init__.py b/handlers/__init__.py index 0c1ad02..69a2011 100644 --- a/handlers/__init__.py +++ b/handlers/__init__.py @@ -1,173 +1,22 @@ -from __future__ import annotations - -import base64 import importlib -import re -import traceback -from functools import update_wrapper -from mimetypes import guess_type from pathlib import Path -from typing import Any, Callable, TypeVar -import requests from telebot import TeleBot -from telebot.types import BotCommand, Message -from telebot.util import smart_split -import telegramify_markdown -from telegramify_markdown.customize import markdown_symbol -from urlextract import URLExtract -from expiringdict import ExpiringDict +from telebot.types import BotCommand -markdown_symbol.head_level_1 = "๐Ÿ“Œ" # If you want, Customizing the head level 1 symbol -markdown_symbol.link = "๐Ÿ”—" # If you want, Customizing the link symbol - -T = TypeVar("T", bound=Callable) +from ._utils import logger, wrap_handler DEFAULT_LOAD_PRIORITY = 10 -BOT_MESSAGE_LENGTH = 4000 -REPLY_MESSAGE_CACHE = ExpiringDict(max_len=1000, max_age_seconds=600) - - -def bot_reply_first(message: Message, who: str, bot: TeleBot) -> Message: - """Create the first reply message which make user feel the bot is working.""" - return bot.reply_to( - message, f"*{who}* is _thinking_ \.\.\.", parse_mode="MarkdownV2" - ) - - -def bot_reply_markdown( - reply_id: Message, - who: str, - text: str, - bot: TeleBot, - split_text: bool = True, - disable_web_page_preview: bool = False, -) -> bool: - """ - reply the Markdown by take care of the message length. - it will fallback to plain text in case of any failure - """ - try: - cache_key = f"{reply_id.chat.id}_{reply_id.message_id}" - if cache_key in REPLY_MESSAGE_CACHE and REPLY_MESSAGE_CACHE[cache_key] == text: - print(f"Skipping duplicate message for {cache_key}") - return True - REPLY_MESSAGE_CACHE[cache_key] = text - if len(text.encode("utf-8")) <= BOT_MESSAGE_LENGTH or not split_text: - bot.edit_message_text( - f"*{who}*:\n{telegramify_markdown.convert(text)}", - chat_id=reply_id.chat.id, - message_id=reply_id.message_id, - parse_mode="MarkdownV2", - disable_web_page_preview=disable_web_page_preview, - ) - return True - - # Need a split of message - msgs = smart_split(text, BOT_MESSAGE_LENGTH) - bot.edit_message_text( - f"*{who}* \[1/{len(msgs)}\]:\n{telegramify_markdown.convert(msgs[0])}", - chat_id=reply_id.chat.id, - message_id=reply_id.message_id, - parse_mode="MarkdownV2", - disable_web_page_preview=disable_web_page_preview, - ) - for i in range(1, len(msgs)): - bot.reply_to( - reply_id.reply_to_message, - f"*{who}* \[{i+1}/{len(msgs)}\]:\n{telegramify_markdown.convert(msgs[i])}", - parse_mode="MarkdownV2", - ) - - return True - except Exception as e: - print(traceback.format_exc()) - # print(f"wrong markdown format: {text}") - bot.edit_message_text( - f"*{who}*:\n{text}", - chat_id=reply_id.chat.id, - message_id=reply_id.message_id, - disable_web_page_preview=disable_web_page_preview, - ) - return False - - -def extract_prompt(message: str, bot_name: str) -> str: - """ - This function filters messages for prompts. - - Returns: - str: If it is not a prompt, return None. Otherwise, return the trimmed prefix of the actual prompt. - """ - # remove '@bot_name' as it is considered part of the command when in a group chat. 
- message = re.sub(re.escape(f"@{bot_name}"), "", message).strip() - # add a whitespace after the first colon as we separate the prompt from the command by the first whitespace. - message = re.sub(":", ": ", message, count=1).strip() - try: - left, message = message.split(maxsplit=1) - except ValueError: - return "" - if ":" not in left: - # the replacement happens in the right part, restore it. - message = message.replace(": ", ":", 1) - return message.strip() - - -def remove_prompt_prefix(message: str) -> str: - """ - Remove "/cmd" or "/cmd@bot_name" or "cmd:" - """ - message += " " - # Explanation of the regex pattern: - # ^ - Match the start of the string - # ( - Start of the group - # / - Literal forward slash - # [a-zA-Z] - Any letter (start of the command) - # [a-zA-Z0-9_]* - Any number of letters, digits, or underscores - # (@\w+)? - Optionally match @ followed by one or more word characters (for bot name) - # \s - A single whitespace character (space or newline) - # | - OR - # [a-zA-Z] - Any letter (start of the command) - # [a-zA-Z0-9_]* - Any number of letters, digits, or underscores - # :\s - Colon followed by a single whitespace character - # ) - End of the group - pattern = r"^(/[a-zA-Z][a-zA-Z0-9_]*(@\w+)?\s|[a-zA-Z][a-zA-Z0-9_]*:\s)" - - return re.sub(pattern, "", message).strip() - - -def wrap_handler(handler: T, bot: TeleBot) -> T: - def wrapper(message: Message, *args: Any, **kwargs: Any) -> None: - try: - m = "" - - if message.text and message.text.find("answer_it") != -1: - # for answer_it no args - return handler(message, *args, **kwargs) - elif message.text is not None: - m = message.text = extract_prompt(message.text, bot.get_me().username) - elif message.caption is not None: - m = message.caption = extract_prompt( - message.caption, bot.get_me().username - ) - elif message.location and message.location.latitude is not None: - # for location map handler just return - return handler(message, *args, **kwargs) - if not m: - bot.reply_to(message, "Please provide info after start words.") - return - return handler(message, *args, **kwargs) - except Exception as e: - traceback.print_exc() - # handle more here - if str(e).find("RECITATION") > 0: - bot.reply_to(message, "Your prompt `RECITATION` please check the log") - else: - bot.reply_to(message, "Something wrong, please check the log") - - return update_wrapper(wrapper, handler) +def list_available_commands() -> list[str]: + commands = [] + this_path = Path(__file__).parent + for child in this_path.iterdir(): + if child.name.startswith("_"): + continue + commands.append(child.stem) + return commands def load_handlers(bot: TeleBot, disable_commands: list[str]) -> None: @@ -183,16 +32,13 @@ def load_handlers(bot: TeleBot, disable_commands: list[str]) -> None: modules_with_priority.sort(key=lambda x: x[-1]) for module, name, priority in modules_with_priority: if hasattr(module, "register"): - print(f"Loading {name} handlers with priority {priority}.") + logger.debug(f"Loading {name} handlers with priority {priority}.") module.register(bot) - print("Loading handlers done.") + logger.info("Loading handlers done.") all_commands: list[BotCommand] = [] for handler in bot.message_handlers: help_text = getattr(handler["function"], "__doc__", "") - # tricky ignore the latest_handle_messages - if help_text and help_text == "ignore": - continue # Add pre-processing and error handling to all callbacks handler["function"] = wrap_handler(handler["function"], bot) for command in handler["filters"].get("commands", []): @@ -200,309 +46,4 
@@ def load_handlers(bot: TeleBot, disable_commands: list[str]) -> None: if all_commands: bot.set_my_commands(all_commands) - print("Setting commands done.") - - -def list_available_commands() -> list[str]: - commands = [] - this_path = Path(__file__).parent - for child in this_path.iterdir(): - if child.name.startswith("_"): - continue - commands.append(child.stem) - return commands - - -def extract_url_from_text(text: str) -> list[str]: - extractor = URLExtract() - urls = extractor.find_urls(text) - return urls - - -def get_text_from_jina_reader(url: str): - try: - r = requests.get(f"https://r.jina.ai/{url}") - return r.text - except Exception as e: - print(e) - return None - - -def enrich_text_with_urls(text: str) -> str: - urls = extract_url_from_text(text) - for u in urls: - try: - url_text = get_text_from_jina_reader(u) - url_text = f"\n```markdown\n{url_text}\n```\n" - text = text.replace(u, url_text) - except Exception as e: - # just ignore the error - pass - - return text - - -def image_to_data_uri(file_path): - content_type = guess_type(file_path)[0] - with open(file_path, "rb") as image_file: - encoded_image = base64.b64encode(image_file.read()).decode("utf-8") - return f"data:{content_type};base64,{encoded_image}" - - -import json -import requests -import os -from bs4 import BeautifulSoup -import markdown - - -class TelegraphAPI: - def __init__( - self, - access_token=None, - short_name="tg_bot_collections", - author_name="Telegram Bot Collections", - author_url=None, - ): - self.access_token = ( - access_token - if access_token - else self._create_ph_account(short_name, author_name, author_url) - ) - self.base_url = "https://api.telegra.ph" - - # Get account info on initialization - account_info = self.get_account_info() - self.short_name = account_info.get("short_name") - self.author_name = account_info.get("author_name") - self.author_url = account_info.get("author_url") - - def _create_ph_account(self, short_name, author_name, author_url): - Store_Token = False - TELEGRAPH_API_URL = "https://api.telegra.ph/createAccount" - TOKEN_FILE = "token_key.json" - - # Try to load existing token information - try: - with open(TOKEN_FILE, "r") as f: - tokens = json.load(f) - if "TELEGRA_PH_TOKEN" in tokens and tokens["TELEGRA_PH_TOKEN"] != "example": - return tokens["TELEGRA_PH_TOKEN"] - except FileNotFoundError: - tokens = {} - - # If no existing valid token in TOKEN_FILE, create a new account - data = { - "short_name": short_name, - "author_name": author_name, - "author_url": author_url, - } - - # Make API request - response = requests.post(TELEGRAPH_API_URL, data=data) - response.raise_for_status() - - account = response.json() - access_token = account["result"]["access_token"] - - # Update the token in the dictionary - tokens["TELEGRA_PH_TOKEN"] = access_token - - # Store the updated tokens - if Store_Token: - with open(TOKEN_FILE, "w") as f: - json.dump(tokens, f, indent=4) - else: - print(f"Token not stored to file, but here is your token:\n{access_token}") - - # Store it to the environment variable - os.environ["TELEGRA_PH_TOKEN"] = access_token - - return access_token - - def create_page( - self, title, content, author_name=None, author_url=None, return_content=False - ): - url = f"{self.base_url}/createPage" - data = { - "access_token": self.access_token, - "title": title, - "content": json.dumps(content), - "return_content": return_content, - "author_name": author_name if author_name else self.author_name, - "author_url": author_url if author_url else self.author_url, - } - - 
# Max 65,536 characters/64KB. - if len(json.dumps(content)) > 65536: - content = content[:64000] - data["content"] = json.dumps(content) - - try: - response = requests.post(url, data=data) - response.raise_for_status() - response = response.json() - page_url = response["result"]["url"] - return page_url - except: - return "https://telegra.ph/api" - - def get_account_info(self): - url = f'{self.base_url}/getAccountInfo?access_token={self.access_token}&fields=["short_name","author_name","author_url","auth_url"]' - response = requests.get(url) - - if response.status_code == 200: - return response.json()["result"] - else: - print(f"Fail getting telegra.ph token info: {response.status_code}") - return None - - def edit_page( - self, - path, - title, - content, - author_name=None, - author_url=None, - return_content=False, - ): - url = f"{self.base_url}/editPage" - data = { - "access_token": self.access_token, - "path": path, - "title": title, - "content": json.dumps(content), - "return_content": return_content, - "author_name": author_name if author_name else self.author_name, - "author_url": author_url if author_url else self.author_url, - } - - response = requests.post(url, data=data) - response.raise_for_status() - response = response.json() - - page_url = response["result"]["url"] - return page_url - - def get_page(self, path): - url = f"{self.base_url}/getPage/{path}?return_content=true" - response = requests.get(url) - response.raise_for_status() - return response.json()["result"]["content"] - - def create_page_md( - self, - title, - markdown_text, - author_name=None, - author_url=None, - return_content=False, - ): - content = self._md_to_dom(markdown_text) - return self.create_page(title, content, author_name, author_url, return_content) - - def edit_page_md( - self, - path, - title, - markdown_text, - author_name=None, - author_url=None, - return_content=False, - ): - content = self._md_to_dom(markdown_text) - return self.edit_page( - path, title, content, author_name, author_url, return_content - ) - - def authorize_browser(self): - url = f'{self.base_url}/getAccountInfo?access_token={self.access_token}&fields=["auth_url"]' - response = requests.get(url) - response.raise_for_status() - return response.json()["result"]["auth_url"] - - def _md_to_dom(self, markdown_text): - html = markdown.markdown( - markdown_text, - extensions=["markdown.extensions.extra", "markdown.extensions.sane_lists"], - ) - - soup = BeautifulSoup(html, "html.parser") - - def parse_element(element): - tag_dict = {"tag": element.name} - if element.name in ["h1", "h2", "h3", "h4", "h5", "h6"]: - if element.name == "h1": - tag_dict["tag"] = "h3" - elif element.name == "h2": - tag_dict["tag"] = "h4" - else: - tag_dict["tag"] = "p" - tag_dict["children"] = [ - {"tag": "strong", "children": element.contents} - ] - - if element.attrs: - tag_dict["attrs"] = element.attrs - if element.contents: - children = [] - for child in element.contents: - if isinstance(child, str): - children.append(child.strip()) - else: - children.append(parse_element(child)) - tag_dict["children"] = children - else: - if element.attrs: - tag_dict["attrs"] = element.attrs - if element.contents: - children = [] - for child in element.contents: - if isinstance(child, str): - children.append(child.strip()) - else: - children.append(parse_element(child)) - if children: - tag_dict["children"] = children - return tag_dict - - new_dom = [] - for element in soup.contents: - if isinstance(element, str) and not element.strip(): - continue - elif 
isinstance(element, str): - new_dom.append({"tag": "text", "content": element.strip()}) - else: - new_dom.append(parse_element(element)) - - return new_dom - - def upload_image(self, file_name: str) -> str: - base_url = "https://telegra.ph" - upload_url = f"{base_url}/upload" - - try: - content_type = guess_type(file_name)[0] - with open(file_name, "rb") as f: - response = requests.post( - upload_url, files={"file": ("blob", f, content_type)} - ) - response.raise_for_status() - # [{'src': '/file/xx.jpg'}] - response = response.json() - image_url = f"{base_url}{response[0]['src']}" - return image_url - except Exception as e: - print(f"upload image: {e}") - return "https://telegra.ph/api" - - -# `import *` will give you these -__all__ = [ - "bot_reply_first", - "bot_reply_markdown", - "remove_prompt_prefix", - "enrich_text_with_urls", - "image_to_data_uri", - "TelegraphAPI", -] + logger.info("Setting commands done.") diff --git a/handlers/_telegraph.py b/handlers/_telegraph.py new file mode 100644 index 0000000..c72cc78 --- /dev/null +++ b/handlers/_telegraph.py @@ -0,0 +1,252 @@ +import json +import os +from mimetypes import guess_type + +import markdown +import requests +from bs4 import BeautifulSoup + +from ._utils import logger + + +class TelegraphAPI: + def __init__( + self, + access_token=None, + short_name="tg_bot_collections", + author_name="Telegram Bot Collections", + author_url=None, + ): + self.access_token = ( + access_token + if access_token + else self._create_ph_account(short_name, author_name, author_url) + ) + self.base_url = "https://api.telegra.ph" + + # Get account info on initialization + account_info = self.get_account_info() + self.short_name = account_info.get("short_name") + self.author_name = account_info.get("author_name") + self.author_url = account_info.get("author_url") + + def _create_ph_account(self, short_name, author_name, author_url): + Store_Token = False + TELEGRAPH_API_URL = "https://api.telegra.ph/createAccount" + TOKEN_FILE = "token_key.json" + + # Try to load existing token information + try: + with open(TOKEN_FILE, "r") as f: + tokens = json.load(f) + if "TELEGRA_PH_TOKEN" in tokens and tokens["TELEGRA_PH_TOKEN"] != "example": + return tokens["TELEGRA_PH_TOKEN"] + except FileNotFoundError: + tokens = {} + + # If no existing valid token in TOKEN_FILE, create a new account + data = { + "short_name": short_name, + "author_name": author_name, + "author_url": author_url, + } + + # Make API request + response = requests.post(TELEGRAPH_API_URL, data=data) + response.raise_for_status() + + account = response.json() + access_token = account["result"]["access_token"] + + # Update the token in the dictionary + tokens["TELEGRA_PH_TOKEN"] = access_token + + # Store the updated tokens + if Store_Token: + with open(TOKEN_FILE, "w") as f: + json.dump(tokens, f, indent=4) + else: + logger.info( + f"Token not stored to file, but here is your token:\n{access_token}" + ) + + # Store it to the environment variable + os.environ["TELEGRA_PH_TOKEN"] = access_token + + return access_token + + def create_page( + self, title, content, author_name=None, author_url=None, return_content=False + ): + url = f"{self.base_url}/createPage" + data = { + "access_token": self.access_token, + "title": title, + "content": json.dumps(content), + "return_content": return_content, + "author_name": author_name if author_name else self.author_name, + "author_url": author_url if author_url else self.author_url, + } + + # Max 65,536 characters/64KB. 
+ if len(json.dumps(content)) > 65536: + content = content[:64000] + data["content"] = json.dumps(content) + + try: + response = requests.post(url, data=data) + response.raise_for_status() + response = response.json() + page_url = response["result"]["url"] + return page_url + except requests.exceptions.RequestException: + return "https://telegra.ph/api" + + def get_account_info(self): + url = f'{self.base_url}/getAccountInfo?access_token={self.access_token}&fields=["short_name","author_name","author_url","auth_url"]' + response = requests.get(url) + + if response.status_code == 200: + return response.json()["result"] + else: + logger.info(f"Fail getting telegra.ph token info: {response.status_code}") + return None + + def edit_page( + self, + path, + title, + content, + author_name=None, + author_url=None, + return_content=False, + ): + url = f"{self.base_url}/editPage" + data = { + "access_token": self.access_token, + "path": path, + "title": title, + "content": json.dumps(content), + "return_content": return_content, + "author_name": author_name if author_name else self.author_name, + "author_url": author_url if author_url else self.author_url, + } + + response = requests.post(url, data=data) + response.raise_for_status() + response = response.json() + + page_url = response["result"]["url"] + return page_url + + def get_page(self, path): + url = f"{self.base_url}/getPage/{path}?return_content=true" + response = requests.get(url) + response.raise_for_status() + return response.json()["result"]["content"] + + def create_page_md( + self, + title, + markdown_text, + author_name=None, + author_url=None, + return_content=False, + ): + content = self._md_to_dom(markdown_text) + return self.create_page(title, content, author_name, author_url, return_content) + + def edit_page_md( + self, + path, + title, + markdown_text, + author_name=None, + author_url=None, + return_content=False, + ): + content = self._md_to_dom(markdown_text) + return self.edit_page( + path, title, content, author_name, author_url, return_content + ) + + def authorize_browser(self): + url = f'{self.base_url}/getAccountInfo?access_token={self.access_token}&fields=["auth_url"]' + response = requests.get(url) + response.raise_for_status() + return response.json()["result"]["auth_url"] + + def _md_to_dom(self, markdown_text): + html = markdown.markdown( + markdown_text, + extensions=["markdown.extensions.extra", "markdown.extensions.sane_lists"], + ) + + soup = BeautifulSoup(html, "html.parser") + + def parse_element(element): + tag_dict = {"tag": element.name} + if element.name in ["h1", "h2", "h3", "h4", "h5", "h6"]: + if element.name == "h1": + tag_dict["tag"] = "h3" + elif element.name == "h2": + tag_dict["tag"] = "h4" + else: + tag_dict["tag"] = "p" + tag_dict["children"] = [ + {"tag": "strong", "children": element.contents} + ] + + if element.attrs: + tag_dict["attrs"] = element.attrs + if element.contents: + children = [] + for child in element.contents: + if isinstance(child, str): + children.append(child.strip()) + else: + children.append(parse_element(child)) + tag_dict["children"] = children + else: + if element.attrs: + tag_dict["attrs"] = element.attrs + if element.contents: + children = [] + for child in element.contents: + if isinstance(child, str): + children.append(child.strip()) + else: + children.append(parse_element(child)) + if children: + tag_dict["children"] = children + return tag_dict + + new_dom = [] + for element in soup.contents: + if isinstance(element, str) and not element.strip(): + continue + elif 
isinstance(element, str): + new_dom.append({"tag": "text", "content": element.strip()}) + else: + new_dom.append(parse_element(element)) + + return new_dom + + def upload_image(self, file_name: str) -> str: + base_url = "https://telegra.ph" + upload_url = f"{base_url}/upload" + + try: + content_type = guess_type(file_name)[0] + with open(file_name, "rb") as f: + response = requests.post( + upload_url, files={"file": ("blob", f, content_type)} + ) + response.raise_for_status() + # [{'src': '/file/xx.jpg'}] + response = response.json() + image_url = f"{base_url}{response[0]['src']}" + return image_url + except Exception as e: + logger.info(f"upload image: {e}") + return "https://telegra.ph/api" diff --git a/handlers/tts.py b/handlers/_tts.py similarity index 100% rename from handlers/tts.py rename to handlers/_tts.py diff --git a/handlers/_utils.py b/handlers/_utils.py new file mode 100644 index 0000000..440e811 --- /dev/null +++ b/handlers/_utils.py @@ -0,0 +1,209 @@ +from __future__ import annotations + +import base64 +import logging +import re +from functools import update_wrapper +from mimetypes import guess_type +from typing import Any, Callable, TypeVar + +import requests +import telegramify_markdown +from expiringdict import ExpiringDict +from telebot import TeleBot +from telebot.types import Message +from telebot.util import smart_split +from telegramify_markdown.customize import markdown_symbol +from urlextract import URLExtract + +markdown_symbol.head_level_1 = "๐Ÿ“Œ" # If you want, Customizing the head level 1 symbol +markdown_symbol.link = "๐Ÿ”—" # If you want, Customizing the link symbol + +T = TypeVar("T", bound=Callable) +logger = logging.getLogger("bot") + + +BOT_MESSAGE_LENGTH = 4000 + +REPLY_MESSAGE_CACHE = ExpiringDict(max_len=1000, max_age_seconds=600) + + +def bot_reply_first(message: Message, who: str, bot: TeleBot) -> Message: + """Create the first reply message which make user feel the bot is working.""" + return bot.reply_to( + message, f"*{who}* is _thinking_ \.\.\.", parse_mode="MarkdownV2" + ) + + +def bot_reply_markdown( + reply_id: Message, + who: str, + text: str, + bot: TeleBot, + split_text: bool = True, + disable_web_page_preview: bool = False, +) -> bool: + """ + reply the Markdown by take care of the message length. 
+ it will fallback to plain text in case of any failure + """ + try: + cache_key = f"{reply_id.chat.id}_{reply_id.message_id}" + if cache_key in REPLY_MESSAGE_CACHE and REPLY_MESSAGE_CACHE[cache_key] == text: + logger.info(f"Skipping duplicate message for {cache_key}") + return True + REPLY_MESSAGE_CACHE[cache_key] = text + if len(text.encode("utf-8")) <= BOT_MESSAGE_LENGTH or not split_text: + bot.edit_message_text( + f"*{who}*:\n{telegramify_markdown.convert(text)}", + chat_id=reply_id.chat.id, + message_id=reply_id.message_id, + parse_mode="MarkdownV2", + disable_web_page_preview=disable_web_page_preview, + ) + return True + + # Need a split of message + msgs = smart_split(text, BOT_MESSAGE_LENGTH) + bot.edit_message_text( + f"*{who}* \[1/{len(msgs)}\]:\n{telegramify_markdown.convert(msgs[0])}", + chat_id=reply_id.chat.id, + message_id=reply_id.message_id, + parse_mode="MarkdownV2", + disable_web_page_preview=disable_web_page_preview, + ) + for i in range(1, len(msgs)): + bot.reply_to( + reply_id.reply_to_message, + f"*{who}* \[{i + 1}/{len(msgs)}\\]:\n{telegramify_markdown.convert(msgs[i])}", + parse_mode="MarkdownV2", + ) + + return True + except Exception: + logger.exception("Error in bot_reply_markdown") + # logger.info(f"wrong markdown format: {text}") + bot.edit_message_text( + f"*{who}*:\n{text}", + chat_id=reply_id.chat.id, + message_id=reply_id.message_id, + disable_web_page_preview=disable_web_page_preview, + ) + return False + + +def extract_prompt(message: str, bot_name: str) -> str: + """ + This function filters messages for prompts. + + Returns: + str: If it is not a prompt, return None. Otherwise, return the trimmed prefix of the actual prompt. + """ + # remove '@bot_name' as it is considered part of the command when in a group chat. + message = re.sub(re.escape(f"@{bot_name}"), "", message).strip() + # add a whitespace after the first colon as we separate the prompt from the command by the first whitespace. + message = re.sub(":", ": ", message, count=1).strip() + try: + left, message = message.split(maxsplit=1) + except ValueError: + return "" + if ":" not in left: + # the replacement happens in the right part, restore it. + message = message.replace(": ", ":", 1) + return message.strip() + + +def remove_prompt_prefix(message: str) -> str: + """ + Remove "/cmd" or "/cmd@bot_name" or "cmd:" + """ + message += " " + # Explanation of the regex pattern: + # ^ - Match the start of the string + # ( - Start of the group + # / - Literal forward slash + # [a-zA-Z] - Any letter (start of the command) + # [a-zA-Z0-9_]* - Any number of letters, digits, or underscores + # (@\w+)? 
- Optionally match @ followed by one or more word characters (for bot name) + # \s - A single whitespace character (space or newline) + # | - OR + # [a-zA-Z] - Any letter (start of the command) + # [a-zA-Z0-9_]* - Any number of letters, digits, or underscores + # :\s - Colon followed by a single whitespace character + # ) - End of the group + pattern = r"^(/[a-zA-Z][a-zA-Z0-9_]*(@\w+)?\s|[a-zA-Z][a-zA-Z0-9_]*:\s)" + + return re.sub(pattern, "", message).strip() + + +def non_llm_handler(handler: T) -> T: + handler.__is_llm_handler__ = False + return handler + + +def wrap_handler(handler: T, bot: TeleBot) -> T: + def wrapper(message: Message, *args: Any, **kwargs: Any) -> None: + try: + if getattr(handler, "__is_llm_handler__", True): + m = "" + + if message.text is not None: + m = message.text = extract_prompt( + message.text, bot.get_me().username + ) + elif message.caption is not None: + m = message.caption = extract_prompt( + message.caption, bot.get_me().username + ) + elif message.location and message.location.latitude is not None: + # for location map handler just return + return handler(message, *args, **kwargs) + if not m: + bot.reply_to(message, "Please provide info after start words.") + return + return handler(message, *args, **kwargs) + except Exception as e: + logger.exception("Error in handler %s: %s", handler.__name__, e) + # handle more here + if str(e).find("RECITATION") > 0: + bot.reply_to(message, "Your prompt `RECITATION` please check the log") + else: + bot.reply_to(message, "Something wrong, please check the log") + + return update_wrapper(wrapper, handler) + + +def extract_url_from_text(text: str) -> list[str]: + extractor = URLExtract() + urls = extractor.find_urls(text) + return urls + + +def get_text_from_jina_reader(url: str): + try: + r = requests.get(f"https://r.jina.ai/{url}") + return r.text + except Exception as e: + logger.exception("Error fetching text from Jina reader: %s", e) + return None + + +def enrich_text_with_urls(text: str) -> str: + urls = extract_url_from_text(text) + for u in urls: + try: + url_text = get_text_from_jina_reader(u) + url_text = f"\n```markdown\n{url_text}\n```\n" + text = text.replace(u, url_text) + except Exception: + # just ignore the error + pass + + return text + + +def image_to_data_uri(file_path): + content_type = guess_type(file_path)[0] + with open(file_path, "rb") as image_file: + encoded_image = base64.b64encode(image_file.read()).decode("utf-8") + return f"data:{content_type};base64,{encoded_image}" diff --git a/handlers/yi.py b/handlers/_yi.py similarity index 99% rename from handlers/yi.py rename to handlers/_yi.py index 2aa1e1b..5498e69 100644 --- a/handlers/yi.py +++ b/handlers/_yi.py @@ -1,12 +1,12 @@ -from os import environ import time +from os import environ -from openai import OpenAI import requests +from expiringdict import ExpiringDict +from openai import OpenAI from telebot import TeleBot from telebot.types import Message from telegramify_markdown import convert -from expiringdict import ExpiringDict from . 
import * @@ -197,7 +197,7 @@ def yi_photo_handler(message: Message, bot: TeleBot) -> None: } response = requests.post( - f"https://api.lingyiwanwu.com/v1/chat/completions", + "https://api.lingyiwanwu.com/v1/chat/completions", headers=headers, json=payload, ).json() diff --git a/handlers/chatgpt.py b/handlers/chatgpt.py index a5a7947..38e4656 100644 --- a/handlers/chatgpt.py +++ b/handlers/chatgpt.py @@ -1,27 +1,29 @@ -from os import environ import time -from openai import OpenAI +from expiringdict import ExpiringDict from telebot import TeleBot from telebot.types import Message -from expiringdict import ExpiringDict -from rich import print - -from . import * - from telegramify_markdown import convert from telegramify_markdown.customize import markdown_symbol +from config import settings + +from ._utils import ( + bot_reply_first, + bot_reply_markdown, + enrich_text_with_urls, + image_to_data_uri, + logger, +) + markdown_symbol.head_level_1 = "๐Ÿ“Œ" # If you want, Customizing the head level 1 symbol markdown_symbol.link = "๐Ÿ”—" # If you want, Customizing the link symbol -CHATGPT_API_KEY = environ.get("OPENAI_API_KEY") -CHATGPT_BASE_URL = environ.get("OPENAI_API_BASE") or "https://api.openai.com/v1" -CHATGPT_MODEL = "gpt-4o-mini-2024-07-18" -CHATGPT_PRO_MODEL = "gpt-4o-mini-2024-07-18" +CHATGPT_MODEL = settings.openai_model +CHATGPT_PRO_MODEL = settings.openai_model -client = OpenAI(api_key=CHATGPT_API_KEY, base_url=CHATGPT_BASE_URL) +client = settings.openai_client # Global history cache @@ -31,7 +33,7 @@ chatgpt_pro_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600) def chatgpt_handler(message: Message, bot: TeleBot) -> None: """gpt : /gpt """ - print(message) + logger.debug(message) m = message.text.strip() player_message = [] @@ -81,8 +83,8 @@ def chatgpt_handler(message: Message, bot: TeleBot) -> None: } ) - except Exception as e: - print(e) + except Exception: + logger.exception("ChatGPT handler error") bot.reply_to(message, "answer wrong maybe up to the max token") # pop my user player_message.pop() @@ -134,7 +136,7 @@ def chatgpt_pro_handler(message: Message, bot: TeleBot) -> None: s = "" start = time.time() for chunk in r: - print(chunk) + logger.debug(chunk) if chunk.choices: if chunk.choices[0].delta.content is None: break @@ -145,7 +147,7 @@ def chatgpt_pro_handler(message: Message, bot: TeleBot) -> None: # maybe not complete try: bot_reply_markdown(reply_id, who, s, bot, split_text=True) - except: + except Exception: pass player_message.append( @@ -155,8 +157,8 @@ def chatgpt_pro_handler(message: Message, bot: TeleBot) -> None: } ) - except Exception as e: - print(e) + except Exception: + logger.exception("ChatGPT handler error") # bot.reply_to(message, "answer wrong maybe up to the max token") player_message.clear() return @@ -205,15 +207,15 @@ def chatgpt_photo_handler(message: Message, bot: TeleBot) -> None: # maybe not complete try: bot_reply_markdown(reply_id, who, s, bot) - except: + except Exception: pass - except Exception as e: - print(e) + except Exception: + logger.exception("ChatGPT handler error") bot.reply_to(message, "answer wrong maybe up to the max token") -if CHATGPT_API_KEY: +if settings.openai_api_key: def register(bot: TeleBot) -> None: bot.register_message_handler(chatgpt_handler, commands=["gpt"], pass_bot=True) diff --git a/handlers/claude.py b/handlers/claude.py index e8a53f2..250246a 100644 --- a/handlers/claude.py +++ b/handlers/claude.py @@ -1,17 +1,16 @@ +import time from os import environ from pathlib import Path -import time from 
anthropic import Anthropic, APITimeoutError +from expiringdict import ExpiringDict from telebot import TeleBot from telebot.types import Message -from expiringdict import ExpiringDict - -from . import * - from telegramify_markdown import convert from telegramify_markdown.customize import markdown_symbol +from ._utils import bot_reply_first, bot_reply_markdown, enrich_text_with_urls + markdown_symbol.head_level_1 = "๐Ÿ“Œ" # If you want, Customizing the head level 1 symbol markdown_symbol.link = "๐Ÿ”—" # If you want, Customizing the link symbol diff --git a/handlers/cohere.py b/handlers/cohere.py index bfa16a5..ca4bc45 100644 --- a/handlers/cohere.py +++ b/handlers/cohere.py @@ -1,18 +1,19 @@ -from os import environ -import time import datetime import re - -from telebot import TeleBot -from telebot.types import Message -from expiringdict import ExpiringDict - -from . import * +import time +from os import environ import cohere +from expiringdict import ExpiringDict +from telebot import TeleBot +from telebot.types import Message from telegramify_markdown import convert from telegramify_markdown.customize import markdown_symbol +from config import settings + +from ._utils import bot_reply_first, bot_reply_markdown, enrich_text_with_urls + markdown_symbol.head_level_1 = "๐Ÿ“Œ" # If you want, Customizing the head level 1 symbol markdown_symbol.link = "๐Ÿ”—" # If you want, Customizing the link symbol @@ -21,8 +22,6 @@ COHERE_MODEL = "command-r-plus" # command-r may cause Chinese garbled code, and if COHERE_API_KEY: co = cohere.Client(api_key=COHERE_API_KEY) -TELEGRA_PH_TOKEN = environ.get("TELEGRA_PH_TOKEN") -ph = TelegraphAPI(TELEGRA_PH_TOKEN) # Global history cache cohere_player_dict = ExpiringDict(max_len=1000, max_age_seconds=600) @@ -140,7 +139,7 @@ def cohere_handler(message: Message, bot: TeleBot) -> None: + source + f"\nLast Update{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')} at UTC+8\n" ) - ph_s = ph.create_page_md( + ph_s = settings.telegraph_client.create_page_md( title="Cohere", markdown_text=content ) # or edit_page with get_page so not producing massive pages s += f"\n\n[View]({ph_s})" @@ -149,7 +148,7 @@ def cohere_handler(message: Message, bot: TeleBot) -> None: bot_reply_markdown( reply_id, who, s, bot, split_text=True, disable_web_page_preview=True ) - except: + except Exception: pass player_message.append( diff --git a/handlers/dify.py b/handlers/dify.py index c78d8d5..4fd15a4 100644 --- a/handlers/dify.py +++ b/handlers/dify.py @@ -1,18 +1,16 @@ import json -import time import re - -from telebot import TeleBot -from telebot.types import Message - -from . import * - +import time # TODO: update requirements.txt and setup tools # pip install dify-client from dify_client import ChatClient +from telebot import TeleBot +from telebot.types import Message from telegramify_markdown.customize import markdown_symbol +from ._utils import bot_reply_first, bot_reply_markdown, enrich_text_with_urls + # If you want, Customizing the head level 1 symbol markdown_symbol.head_level_1 = "๐Ÿ“Œ" markdown_symbol.link = "๐Ÿ”—" # If you want, Customizing the link symbol diff --git a/handlers/fake_liuneng.py b/handlers/fake_liuneng.py index 0630d71..d03df36 100644 --- a/handlers/fake_liuneng.py +++ b/handlers/fake_liuneng.py @@ -1,11 +1,10 @@ import random -from PIL import Image, ImageDraw, ImageFont +import re from os import listdir + +from PIL import Image, ImageDraw, ImageFont from telebot import TeleBot from telebot.types import Message -import re - -from . 
import * def split_lines(text, max_length=30): @@ -157,7 +156,7 @@ def fake_photo_handler(message: Message, bot: TeleBot) -> None: s = s.replace("/fake", "").strip() s = s.replace("fake:", "").strip() prompt = s.strip() - bot.reply_to(message, f"Generating LiuNeng's fake image") + bot.reply_to(message, "Generating LiuNeng's fake image") # get the high quaility picture. max_size_photo = max(message.photo, key=lambda p: p.file_size) file_path = bot.get_file(max_size_photo.file_id).file_path diff --git a/handlers/gemini.py b/handlers/gemini.py index beabade..4b3bd10 100644 --- a/handlers/gemini.py +++ b/handlers/gemini.py @@ -1,17 +1,16 @@ -from os import environ import re import time +from os import environ import google.generativeai as genai +from expiringdict import ExpiringDict from google.generativeai import ChatSession from google.generativeai.types.generation_types import StopCandidateException from telebot import TeleBot from telebot.types import Message -from expiringdict import ExpiringDict - from telegramify_markdown.customize import markdown_symbol -from . import * +from ._utils import bot_reply_first, bot_reply_markdown, enrich_text_with_urls, logger markdown_symbol.head_level_1 = "๐Ÿ“Œ" # If you want, Customizing the head level 1 symbol markdown_symbol.link = "๐Ÿ”—" # If you want, Customizing the link symbol @@ -166,11 +165,11 @@ def gemini_pro_handler(message: Message, bot: TeleBot) -> None: player.history.clear() return except Exception as e: - print(e) + logger.exception("Gemini audio handler error") bot.reply_to(message, "answer wrong maybe up to the max token") try: player.history.clear() - except: + except Exception: print(f"\n------\n{who} history.clear() Error / Unstoppable\n------\n") return @@ -207,10 +206,10 @@ def gemini_photo_handler(message: Message, bot: TeleBot) -> None: # maybe not complete try: bot_reply_markdown(reply_id, who, s, bot) - except: + except Exception: pass except Exception as e: - print(e) + logger.exception("Gemini photo handler error") bot.reply_to(message, "answer wrong maybe up to the max token") @@ -248,11 +247,11 @@ def gemini_audio_handler(message: Message, bot: TeleBot) -> None: player.history.clear() return except Exception as e: - print(e) + logger.exception("Gemini audio handler error") bot.reply_to(message, "answer wrong maybe up to the max token") try: player.history.clear() - except: + except Exception: print(f"\n------\n{who} history.clear() Error / Unstoppable\n------\n") return diff --git a/handlers/github.py b/handlers/github.py index e26dcf0..5212929 100644 --- a/handlers/github.py +++ b/handlers/github.py @@ -15,18 +15,15 @@ def github_poster_handler(message: Message, bot: TeleBot): cmd_list.append("--year") cmd_list.append(years.strip()) r = subprocess.check_output(cmd_list).decode("utf-8") - try: - if "done" in r: - # TODO windows path - r = subprocess.check_output( - ["cairosvg", "OUT_FOLDER/github.svg", "-o", f"github_{name}.png"] - ).decode("utf-8") - with open(f"github_{name}.png", "rb") as photo: - bot.send_photo( - message.chat.id, photo, reply_to_message_id=message.message_id - ) - except: - bot.reply_to(message, "github poster error") + if "done" in r: + # TODO windows path + r = subprocess.check_output( + ["cairosvg", "OUT_FOLDER/github.svg", "-o", f"github_{name}.png"] + ).decode("utf-8") + with open(f"github_{name}.png", "rb") as photo: + bot.send_photo( + message.chat.id, photo, reply_to_message_id=message.message_id + ) def register(bot: TeleBot) -> None: diff --git a/handlers/kling.py b/handlers/kling.py index 
c86b09a..6f04b6e 100644
--- a/handlers/kling.py
+++ b/handlers/kling.py
@@ -1,13 +1,13 @@
 import re
-from telebot import TeleBot
-from telebot.types import Message
-from telebot.types import InputMediaPhoto
 from os import environ
+
+import requests
 from expiringdict import ExpiringDict
 from kling import ImageGen, VideoGen
-import requests
+from telebot import TeleBot
+from telebot.types import InputMediaPhoto, Message
 
-from . import *
+from ._utils import logger
 
 KLING_COOKIE = environ.get("KLING_COOKIE")
 pngs_link_dict = ExpiringDict(max_len=100, max_age_seconds=60 * 10)
@@ -17,7 +17,7 @@ def kling_handler(message: Message, bot: TeleBot):
     """kling: /kling <prompt>
""" bot.reply_to( message, - f"Generating pretty kling image may take some time please wait", + "Generating pretty kling image may take some time please wait", ) m = message.text.strip() prompt = m.strip() @@ -47,7 +47,7 @@ def kling_pro_handler(message: Message, bot: TeleBot): """kling: /kling
""" bot.reply_to( message, - f"Generating pretty kling video may take a long time about 2mins to 5mins please wait", + "Generating pretty kling video may take a long time about 2mins to 5mins please wait", ) m = message.text.strip() prompt = m.strip() @@ -98,7 +98,7 @@ def kling_photo_handler(message: Message, bot: TeleBot) -> None: downloaded_file = bot.download_file(file_path) bot.reply_to( message, - f"Generating pretty kling image using your photo may take some time please wait", + "Generating pretty kling image using your photo may take some time please wait", ) with open("kling.jpg", "wb") as temp_file: temp_file.write(downloaded_file) @@ -109,10 +109,10 @@ def kling_photo_handler(message: Message, bot: TeleBot) -> None: # set the dict try: pngs_link_dict[str(message.from_user.id)] = links - except Exception as e: - print(str(e)) - except Exception as e: - print(str(e)) + except Exception: + logger.exception("Kling photo handler error") + except Exception: + logger.exception("Kling photo handler error") bot.reply_to(message, "kling error maybe block the prompt") return photos_list = [InputMediaPhoto(i) for i in links] diff --git a/handlers/llama.py b/handlers/llama.py index d8c0cac..7ea2f02 100644 --- a/handlers/llama.py +++ b/handlers/llama.py @@ -1,16 +1,15 @@ -from os import environ import time +from os import environ +from expiringdict import ExpiringDict +from groq import Groq from telebot import TeleBot from telebot.types import Message -from expiringdict import ExpiringDict - -from . import * - -from groq import Groq from telegramify_markdown import convert from telegramify_markdown.customize import markdown_symbol +from ._utils import bot_reply_first, bot_reply_markdown, enrich_text_with_urls, logger + markdown_symbol.head_level_1 = "๐Ÿ“Œ" # If you want, Customizing the head level 1 symbol markdown_symbol.link = "๐Ÿ”—" # If you want, Customizing the link symbol @@ -75,8 +74,8 @@ def llama_handler(message: Message, bot: TeleBot) -> None: } ) - except Exception as e: - print(e) + except Exception: + logger.exception("Llama handler error") bot.reply_to(message, "answer wrong maybe up to the max token") # pop my user player_message.pop() @@ -148,8 +147,8 @@ def llama_pro_handler(message: Message, bot: TeleBot) -> None: } ) - except Exception as e: - print(e) + except Exception: + logger.exception("Llama Pro handler error") bot.reply_to(message, "answer wrong maybe up to the max token") player_message.clear() return diff --git a/handlers/map.py b/handlers/map.py index 7b70ab9..819745e 100644 --- a/handlers/map.py +++ b/handlers/map.py @@ -1,12 +1,11 @@ import gc -import shutil import random +import shutil from tempfile import SpooledTemporaryFile import numpy as np -import PIL +import PIL.Image from matplotlib import figure -from PIL import Image from prettymapp.geo import get_aoi from prettymapp.osm import get_osm_geometries from prettymapp.plotting import Plot as PrettyPlot @@ -58,7 +57,7 @@ def sizeof_image(image): def compress_image(input_image, output_image, target_size): quality = 95 factor = 1.0 - with Image.open(input_image) as img: + with PIL.Image.open(input_image) as img: while sizeof_image(img) > target_size: factor -= 0.05 width, height = img.size diff --git a/handlers/qwen.py b/handlers/qwen.py index 7c0d42b..70069dc 100644 --- a/handlers/qwen.py +++ b/handlers/qwen.py @@ -1,16 +1,15 @@ # qwen use https://api.together.xyz -from os import environ import time +from os import environ +from expiringdict import ExpiringDict from telebot import TeleBot from telebot.types 
import Message -from expiringdict import ExpiringDict - -from . import * - -from together import Together from telegramify_markdown import convert from telegramify_markdown.customize import markdown_symbol +from together import Together + +from ._utils import bot_reply_first, bot_reply_markdown, enrich_text_with_urls, logger markdown_symbol.head_level_1 = "๐Ÿ“Œ" # If you want, Customizing the head level 1 symbol markdown_symbol.link = "๐Ÿ”—" # If you want, Customizing the link symbol @@ -77,8 +76,8 @@ def qwen_handler(message: Message, bot: TeleBot) -> None: } ) - except Exception as e: - print(e) + except Exception: + logger.exception("Qwen handler error") bot.reply_to(message, "answer wrong maybe up to the max token") # pop my user player_message.pop() @@ -150,8 +149,8 @@ def qwen_pro_handler(message: Message, bot: TeleBot) -> None: } ) - except Exception as e: - print(e) + except Exception: + logger.exception("Qwen Pro handler error") bot.reply_to(message, "answer wrong maybe up to the max token") player_message.clear() return diff --git a/handlers/sd.py b/handlers/sd.py index 890f635..bd4a3f9 100644 --- a/handlers/sd.py +++ b/handlers/sd.py @@ -1,20 +1,15 @@ -from telebot import TeleBot -from telebot.types import Message -import requests -from openai import OpenAI from os import environ -from . import * +import requests +from telebot import TeleBot +from telebot.types import Message +from config import settings SD_API_KEY = environ.get("SD3_KEY") # TODO refactor this shit to __init__ -CHATGPT_API_KEY = environ.get("OPENAI_API_KEY") -CHATGPT_BASE_URL = environ.get("OPENAI_API_BASE") or "https://api.openai.com/v1" -CHATGPT_PRO_MODEL = "gpt-4o-2024-05-13" - -client = OpenAI(api_key=CHATGPT_API_KEY, base_url=CHATGPT_BASE_URL) +CHATGPT_PRO_MODEL = settings.openai_model def get_user_balance(): @@ -33,7 +28,7 @@ def get_user_balance(): def generate_sd3_image(prompt): response = requests.post( - f"https://api.stability.ai/v2beta/stable-image/generate/sd3", + "https://api.stability.ai/v2beta/stable-image/generate/sd3", headers={"authorization": f"Bearer {SD_API_KEY}", "accept": "image/*"}, files={"none": ""}, data={ @@ -61,18 +56,14 @@ def sd_handler(message: Message, bot: TeleBot): ) m = message.text.strip() prompt = m.strip() - try: - r = generate_sd3_image(prompt) - if r: - with open(f"sd3.jpeg", "rb") as photo: - bot.send_photo( - message.chat.id, photo, reply_to_message_id=message.message_id - ) - else: - bot.reply_to(message, "prompt error") - except Exception as e: - print(e) - bot.reply_to(message, "sd3 error") + r = generate_sd3_image(prompt) + if r: + with open("sd3.jpeg", "rb") as photo: + bot.send_photo( + message.chat.id, photo, reply_to_message_id=message.message_id + ) + else: + bot.reply_to(message, "prompt error") def sd_pro_handler(message: Message, bot: TeleBot): @@ -83,7 +74,7 @@ def sd_pro_handler(message: Message, bot: TeleBot): rewrite_prompt = ( f"revise `{prompt}` to a DALL-E prompt only return the prompt in English." 
) - completion = client.chat.completions.create( + completion = settings.openai_client.chat.completions.create( messages=[{"role": "user", "content": rewrite_prompt}], max_tokens=2048, model=CHATGPT_PRO_MODEL, @@ -95,21 +86,17 @@ def sd_pro_handler(message: Message, bot: TeleBot): message, f"Generating pretty sd3-turbo image may take some time please left credits {credits} every try will cost 4 criedits wait:\n the real prompt is: {sd_prompt}", ) - try: - r = generate_sd3_image(sd_prompt) - if r: - with open(f"sd3.jpeg", "rb") as photo: - bot.send_photo( - message.chat.id, photo, reply_to_message_id=message.message_id - ) - else: - bot.reply_to(message, "prompt error") - except Exception as e: - print(e) - bot.reply_to(message, "sd3 error") + r = generate_sd3_image(sd_prompt) + if r: + with open("sd3.jpeg", "rb") as photo: + bot.send_photo( + message.chat.id, photo, reply_to_message_id=message.message_id + ) + else: + bot.reply_to(message, "prompt error") -if SD_API_KEY and CHATGPT_API_KEY: +if SD_API_KEY and settings.openai_api_key: def register(bot: TeleBot) -> None: bot.register_message_handler(sd_handler, commands=["sd3"], pass_bot=True) diff --git a/handlers/summary/__init__.py b/handlers/summary/__init__.py new file mode 100644 index 0000000..d3271b4 --- /dev/null +++ b/handlers/summary/__init__.py @@ -0,0 +1,139 @@ +from __future__ import annotations + +import logging +from datetime import datetime, timezone +from functools import partial + +import telegramify_markdown +from telebot import TeleBot +from telebot.types import Message + +from config import settings +from handlers._utils import non_llm_handler + +from .messages import ChatMessage, MessageStore +from .utils import PROMPT, filter_message, parse_date + +logger = logging.getLogger("bot") +store = MessageStore("data/messages.db") + + +@non_llm_handler +def handle_message(message: Message): + logger.debug( + "Received message: %s, chat_id=%d, from=%s", + message.text, + message.chat.id, + message.from_user.id, + ) + # ่ฟ™้‡ŒๅฏไปฅๆทปๅŠ ๅค„็†ๆถˆๆฏ็š„้€ป่พ‘ + store.add_message( + ChatMessage( + chat_id=message.chat.id, + message_id=message.id, + content=message.text or "", + user_id=message.from_user.id, + user_name=message.from_user.full_name, + timestamp=datetime.fromtimestamp(message.date, tz=timezone.utc), + ) + ) + + +@non_llm_handler +def summary_command(message: Message, bot: TeleBot): + """็”Ÿๆˆๆถˆๆฏๆ‘˜่ฆใ€‚็คบไพ‹๏ผš/summary today; /summary 2d""" + text_parts = message.text.split(maxsplit=1) + if len(text_parts) < 2: + date = "today" + else: + date = text_parts[1].strip() + since, now = parse_date(date, settings.timezone) + messages = store.get_messages_since(message.chat.id, since) + messages_text = "\n".join( + f"{msg.timestamp.isoformat()} - @{msg.user_name}: {msg.content}" + for msg in messages + ) + if not messages_text: + bot.reply_to(message, "ๆฒกๆœ‰ๆ‰พๅˆฐๆŒ‡ๅฎšๆ—ถ้—ด่Œƒๅ›ดๅ†…็š„ๅކๅฒๆถˆๆฏใ€‚") + return + new_message = bot.reply_to(message, "ๆญฃๅœจ็”Ÿๆˆๆ‘˜่ฆ๏ผŒ่ฏท็จๅ€™...") + response = settings.openai_client.chat.completions.create( + model=settings.openai_model, + messages=[ + {"role": "user", "content": PROMPT.format(messages=messages_text)}, + ], + ) + reply_text = f"""*๐Ÿ‘‡ ๅ‰ๆƒ…ๆ่ฆ ๐Ÿ‘‡ \\({since.strftime("%Y/%m/%d %H:%M")} \\- {now.strftime("%Y/%m/%d %H:%M")}\\)* + +{telegramify_markdown.convert(response.choices[0].message.content)} +""" + logger.debug("Generated summary:\n%s", reply_text) + bot.edit_message_text( + chat_id=new_message.chat.id, + message_id=new_message.message_id, + 
text=reply_text,
+        parse_mode="MarkdownV2",
+    )
+
+
+@non_llm_handler
+def stats_command(message: Message, bot: TeleBot):
+    """่Žทๅ–็พค็ป„ๆถˆๆฏ็ปŸ่ฎกไฟกๆฏ"""
+    stats = store.get_stats(message.chat.id)
+    if not stats:
+        bot.reply_to(message, "ๆฒกๆœ‰ๆ‰พๅˆฐไปปไฝ•็ปŸ่ฎกไฟกๆฏใ€‚")
+        return
+    stats_text = "\n".join(
+        f"{entry.date}: {entry.message_count} messages" for entry in stats
+    )
+    bot.reply_to(
+        message,
+        f"๐Ÿ“Š ็พค็ป„ๆถˆๆฏ็ปŸ่ฎกไฟกๆฏ:\n```\n{stats_text}\n```",
+        parse_mode="MarkdownV2",
+    )
+
+
+@non_llm_handler
+def search_command(message: Message, bot: TeleBot):
+    """ๆœ็ดข็พค็ป„ๆถˆๆฏ๏ผˆ็คบไพ‹๏ผš/search ๅ…ณ้”ฎ่ฏ [N]๏ผ‰"""
+    text_parts = message.text.split(maxsplit=2)
+    if len(text_parts) < 2:
+        bot.reply_to(message, "่ฏทๆไพ›่ฆๆœ็ดข็š„ๅ…ณ้”ฎ่ฏใ€‚")
+        return
+    keyword = text_parts[1].strip()
+    if len(text_parts) > 2 and text_parts[2].isdigit():
+        limit = int(text_parts[2])
+    else:
+        limit = 10
+    messages = store.search_messages(message.chat.id, keyword, limit=limit)
+    if not messages:
+        bot.reply_to(message, "ๆฒกๆœ‰ๆ‰พๅˆฐๅŒน้…็š„ๆถˆๆฏใ€‚")
+        return
+    chat_id = str(message.chat.id)
+    if chat_id.startswith("-100"):
+        chat_id = chat_id[4:]
+    items = []
+    for msg in messages:
+        link = f"https://t.me/c/{chat_id}/{msg.message_id}"
+        items.append(f"{link}\n```\n{msg.content}\n```")
+    message_text = telegramify_markdown.convert("\n".join(items))
+    bot.reply_to(
+        message,
+        f"๐Ÿ” *ๆœ็ดข็ป“ๆžœ(ๅชๆ˜พ็คบๅ‰ {limit} ไธช):*\n{message_text}",
+        parse_mode="MarkdownV2",
+    )
+
+
+load_priority = 5
+if settings.openai_api_key:
+
+    def register(bot: TeleBot):
+        """ๆณจๅ†Œๅ‘ฝไปคๅค„็†ๅ™จ"""
+        bot.register_message_handler(
+            summary_command, commands=["summary"], pass_bot=True
+        )
+        bot.register_message_handler(stats_command, commands=["stats"], pass_bot=True)
+        bot.register_message_handler(search_command, commands=["search"], pass_bot=True)
+        bot.register_message_handler(
+            handle_message, func=partial(filter_message, bot=bot)
+        )
diff --git a/handlers/summary/__main__.py b/handlers/summary/__main__.py
new file mode 100644
index 0000000..5258b8a
--- /dev/null
+++ b/handlers/summary/__main__.py
@@ -0,0 +1,49 @@
+from __future__ import annotations
+
+import asyncio
+import os
+import sys
+
+from .messages import ChatMessage, MessageStore
+
+
+async def fetch_messages(chat_id: int) -> None:
+    from telethon import TelegramClient
+    from telethon.tl.types import Message
+
+    store = MessageStore("data/messages.db")
+
+    api_id = int(os.getenv("TELEGRAM_API_ID"))
+    api_hash = os.getenv("TELEGRAM_API_HASH")
+    async with TelegramClient("test", api_id, api_hash) as client:
+        assert isinstance(client, TelegramClient)
+        with store.connect() as conn:
+            async for message in client.iter_messages(chat_id, reverse=True):
+                if not isinstance(message, Message) or not message.message:
+                    continue
+                if not message.from_id:
+                    continue
+                print(message.pretty_format(message))
+                user = await client.get_entity(message.from_id)
+                fullname = user.first_name
+                if user.last_name:
+                    fullname += f" {user.last_name}"
+                store.add_message(
+                    ChatMessage(
+                        chat_id=chat_id,
+                        message_id=message.id,
+                        content=message.message,
+                        user_id=message.from_id.user_id,
+                        user_name=fullname,
+                        timestamp=message.date,
+                    ),
+                    conn=conn,
+                )
+
+
+if __name__ == "__main__":
+    if len(sys.argv) != 2:
+        print("Usage: python -m handlers.summary <chat_id>")
+        sys.exit(1)
+    chat_id = int(sys.argv[1])
+    asyncio.run(fetch_messages(chat_id))  # ๆ›ฟๆขไธบๅฎž้™…็š„็พค็ป„ID
diff --git a/handlers/summary/messages.py b/handlers/summary/messages.py
new file mode 
diff --git a/handlers/summary/messages.py b/handlers/summary/messages.py
new file mode 100644
index 0000000..212c125
--- /dev/null
+++ b/handlers/summary/messages.py
@@ -0,0 +1,164 @@
+import os
+import sqlite3
+from dataclasses import dataclass
+from datetime import datetime, timedelta, timezone
+
+
+@dataclass(frozen=True)
+class ChatMessage:
+    chat_id: int
+    message_id: int
+    content: str
+    user_id: int
+    user_name: str
+    timestamp: datetime
+
+
+@dataclass(frozen=True)
+class StatsEntry:
+    date: str
+    message_count: int
+
+
+class MessageStore:
+    def __init__(self, db_file: str):
+        parent_folder = os.path.dirname(db_file)
+        if parent_folder and not os.path.exists(parent_folder):
+            os.makedirs(parent_folder)
+        self._db_file = db_file
+        self._init_db()
+
+    def connect(self) -> sqlite3.Connection:
+        """Create a new database connection."""
+        return sqlite3.connect(self._db_file)
+
+    def _init_db(self):
+        with self.connect() as conn:
+            conn.execute(
+                """
+                CREATE TABLE IF NOT EXISTS messages (
+                    chat_id INTEGER,
+                    message_id INTEGER,
+                    content TEXT,
+                    user_id INTEGER,
+                    user_name TEXT,
+                    timestamp TEXT,
+                    PRIMARY KEY (chat_id, message_id)
+                );
+                """
+            )
+            conn.execute(
+                """
+                CREATE INDEX IF NOT EXISTS idx_chat_timestamp ON messages (chat_id, timestamp);
+                """
+            )
+            conn.commit()
+
+    def add_message(
+        self, message: ChatMessage, conn: sqlite3.Connection | None = None
+    ) -> None:
+        need_close = False
+        if conn is None:
+            conn = self.connect()
+            need_close = True
+        try:
+            conn.execute(
+                """
+                INSERT OR REPLACE INTO messages (chat_id, message_id, content, user_id, user_name, timestamp)
+                VALUES (?, ?, ?, ?, ?, ?);
+                """,
+                (
+                    message.chat_id,
+                    message.message_id,
+                    message.content,
+                    message.user_id,
+                    message.user_name,
+                    message.timestamp.isoformat(),
+                ),
+            )
+            self._clean_old_messages(message.chat_id, conn)
+            conn.commit()
+        finally:
+            if need_close:
+                conn.close()
+
+    def get_messages_since(self, chat_id: int, since: datetime) -> list[ChatMessage]:
+        with self.connect() as conn:
+            cursor = conn.cursor()
+            cursor.execute(
+                """
+                SELECT chat_id, message_id, content, user_id, user_name, timestamp
+                FROM messages
+                WHERE chat_id = ? AND timestamp >= ?
+                ORDER BY timestamp ASC;
+                """,
+                (chat_id, since.isoformat()),
+            )
+            rows = cursor.fetchall()
+            return [
+                ChatMessage(
+                    chat_id=row[0],
+                    message_id=row[1],
+                    content=row[2],
+                    user_id=row[3],
+                    user_name=row[4],
+                    timestamp=datetime.fromisoformat(row[5]),
+                )
+                for row in rows
+            ]
+
+    def get_stats(self, chat_id: int) -> list[StatsEntry]:
+        with self.connect() as conn:
+            self._clean_old_messages(chat_id, conn)
+            cursor = conn.cursor()
+            cursor.execute(
+                """
+                SELECT DATE(timestamp), COUNT(*)
+                FROM messages
+                WHERE chat_id = ?
+                GROUP BY DATE(timestamp)
+                ORDER BY DATE(timestamp) ASC;
+                """,
+                (chat_id,),
+            )
+            rows = cursor.fetchall()
+            return [StatsEntry(date=row[0], message_count=row[1]) for row in rows]
+
+    def search_messages(
+        self, chat_id: int, keyword: str, limit: int = 10
+    ) -> list[ChatMessage]:
+        # TODO: Fuzzy search with full-text search or similar
+        with self.connect() as conn:
+            cursor = conn.cursor()
+            cursor.execute(
+                """
+                SELECT chat_id, message_id, content, user_id, user_name, timestamp
+                FROM messages
+                WHERE chat_id = ? AND content LIKE ?
+                ORDER BY timestamp DESC
+                LIMIT ?;
+                """,
+                (chat_id, f"%{keyword}%", limit),
+            )
+            rows = cursor.fetchall()
+            return [
+                ChatMessage(
+                    chat_id=row[0],
+                    message_id=row[1],
+                    content=row[2],
+                    user_id=row[3],
+                    user_name=row[4],
+                    timestamp=datetime.fromisoformat(row[5]),
+                )
+                for row in rows
+            ]
+
+    def _clean_old_messages(
+        self, chat_id: int, conn: sqlite3.Connection, days: int = 7
+    ) -> None:
+        cursor = conn.cursor()
+        threshold_date = datetime.now(tz=timezone.utc) - timedelta(days=days)
+        cursor.execute(
+            "DELETE FROM messages WHERE chat_id = ? AND timestamp < ?;",
+            (chat_id, threshold_date.isoformat()),
+        )
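For orientation, a minimal round-trip through the MessageStore API defined above, assuming it is run from the repository root (the path and values are illustrative). Note that add_message also prunes rows older than seven days via _clean_old_messages, so only recent history stays queryable:

from datetime import datetime, timezone

from handlers.summary.messages import ChatMessage, MessageStore

store = MessageStore("data/demo.db")  # parent directory is created if missing
store.add_message(
    ChatMessage(
        chat_id=-1001234567890,
        message_id=1,
        content="the release is out",
        user_id=42,
        user_name="Alice",
        timestamp=datetime.now(tz=timezone.utc),
    )
)
print(store.search_messages(-1001234567890, "release"))  # LIKE %keyword% match
print(store.get_stats(-1001234567890))  # one StatsEntry per day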
diff --git a/handlers/summary/utils.py b/handlers/summary/utils.py
new file mode 100644
index 0000000..39207b4
--- /dev/null
+++ b/handlers/summary/utils.py
@@ -0,0 +1,48 @@
+import re
+import zoneinfo
+from datetime import datetime, timedelta
+
+from telebot import TeleBot
+from telebot.types import Message
+
+PROMPT = """\
+่ฏทๅฐ†ไธ‹้ข็š„่Šๅคฉ่ฎฐๅฝ•่ฟ›่กŒๆ€ป็ป“๏ผŒๅŒ…ๅซ่ฎจ่ฎบไบ†ๅ“ชไบ›่ฏ้ข˜๏ผŒๆœ‰ๅ“ชไบ›ไบฎ็‚นๅ‘่จ€ๅ’Œไธป่ฆ่ง‚็‚นใ€‚
+ๅผ•็”จ็”จๆˆทๅ่ฏทๅŠ ็ฒ—ใ€‚็›ดๆŽฅ่ฟ”ๅ›žๅ†…ๅฎนๅณๅฏ๏ผŒไธ่ฆๅŒ…ๅซๅผ•ๅฏผ่ฏๅ’Œๆ ‡้ข˜ใ€‚
+--- Messages Start ---
+{messages}
+--- Messages End ---
+"""
+
+
+def filter_message(message: Message, bot: TeleBot) -> bool:
+    """Filter out non-text messages, command messages, and the bot's own messages"""
+    if not message.text:
+        return False
+    if not message.from_user:
+        return False
+    if message.from_user.id == bot.get_me().id:
+        return False
+    if message.text.startswith("/"):
+        return False
+    return True
+
+
+date_regex = re.compile(r"^(\d+)([dhm])$")
+
+
+def parse_date(date_str: str, locale: str) -> tuple[datetime, datetime]:
+    date_str = date_str.strip().lower()
+    now = datetime.now(tz=zoneinfo.ZoneInfo(locale))
+    if date_str == "today":
+        return now.replace(hour=0, minute=0, second=0, microsecond=0), now
+    elif m := date_regex.match(date_str):
+        number = int(m.group(1))
+        unit = m.group(2)
+        match unit:
+            case "d":
+                return now - timedelta(days=number), now
+            case "h":
+                return now - timedelta(hours=number), now
+            case "m":
+                return now - timedelta(minutes=number), now
+    raise ValueError(f"Unsupported date format: {date_str}")
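And a small sketch of the relative-window grammar parse_date accepts ("today", or a number followed by d, h, or m); both endpoints come back timezone-aware in the given zone:

import zoneinfo

from handlers.summary.utils import parse_date

start, end = parse_date("2d", "Asia/Shanghai")  # (now - 2 days, now)
assert (end - start).days == 2
start, end = parse_date("today", "Asia/Shanghai")  # midnight today -> now
assert start.hour == 0 and start.tzinfo == zoneinfo.ZoneInfo("Asia/Shanghai")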
diff --git a/handlers/tweet.py b/handlers/tweet.py
index 0f1745f..08196b9 100644
--- a/handlers/tweet.py
+++ b/handlers/tweet.py
@@ -1,8 +1,8 @@
-from urlextract import URLExtract
 from telebot import TeleBot
 from telebot.types import Message
+from urlextract import URLExtract
 
-from . import *
+from ._utils import bot_reply_first, bot_reply_markdown
 
 
 def tweet_handler(message: Message, bot: TeleBot):
diff --git a/handlers/useful.py b/handlers/useful.py
deleted file mode 100644
index ff99e46..0000000
--- a/handlers/useful.py
+++ /dev/null
@@ -1,1147 +0,0 @@
-# useful md for myself and you. - -from telebot import TeleBot -from telebot.types import Message -from expiringdict import ExpiringDict -from os import environ -import time -import datetime -from concurrent.futures import ThreadPoolExecutor, as_completed -from threading import Lock -import re - -from . import * - -from telegramify_markdown.customize import markdown_symbol - -# Define the load priority, lower numbers have higher priority -load_priority = 1000 - -# If you want, Customizing the head level 1 symbol -markdown_symbol.head_level_1 = "๐Ÿ“Œ" -markdown_symbol.link = "๐Ÿ”—" # If you want, Customizing the link symbol -chat_message_dict = ExpiringDict(max_len=100, max_age_seconds=120) -chat_user_dict = ExpiringDict(max_len=100, max_age_seconds=20) - -#### Telegra.ph init #### -# Will auto generate a token if not provided, restart will lose all TODO -TELEGRA_PH_TOKEN = environ.get("TELEGRA_PH_TOKEN") -# Edit "Store_Token = False" in "__init__.py" to True to store it -ph = TelegraphAPI(TELEGRA_PH_TOKEN) - -##################################################################### -#### Customization ################################################## -Language = "zh-cn" # "en" or "zh-cn". -SUMMARY = None # "cohere" or "gemini" or None -General_clean = True # Will Delete LLM message -Extra_clean = True # Will Delete command message too -Link_Clean = False # True will disable Instant View / Web Preview -Stream_Thread = 2 # How many stream LLM will stream at the same time -Complete_Thread = 3 # How many non-stream LLM will run at the same time -Stream_Timeout = ( - 240 # If not complete in 4 mins, will stop wait or raise Exception timeout -) -MESSAGE_MAX_LENGTH = 4096 # Message after url enrich may too long -Hint = ( - "\n(Need answer? Type or tap /answer_it after a message)" - if Language == "en" - else "\n(้œ€่ฆๅ›ž็ญ”? ๅœจไธ€ๆกๆถˆๆฏไน‹ๅŽ, ่พ“ๅ…ฅๆˆ–็‚นๅ‡ป /answer_it )" -) -#### LLMs #### -GEMINI_USE = True - -CHATGPT_USE = True -CLADUE_USE = True -QWEN_USE = False -COHERE_USE = False # Slow, but web search -LLAMA_USE = False # prompted for Language - -CHATGPT_COMPLETE = False # sync mode -CLADUE_COMPLETE = False # Only display in telegra.ph -COHERE_COMPLETE = False -LLAMA_COMPLETE = False - -GEMINI_USE_THREAD = False # Maybe not work - -CHATGPT_APPEND = True # Update later to ph -CLADUE_APPEND = True -COHERE_APPEND = True -LLAMA_APPEND = True -QWEN_APPEND = True - -#### Customization End ############################################## -##################################################################### - -#### LLMs init #### -#### OpenAI init #### -CHATGPT_API_KEY = environ.get("OPENAI_API_KEY") -CHATGPT_BASE_URL = environ.get("OPENAI_API_BASE") or "https://api.openai.com/v1" -if (CHATGPT_USE or CHATGPT_COMPLETE or CHATGPT_APPEND) and CHATGPT_API_KEY: - from openai import OpenAI - - CHATGPT_PRO_MODEL = "gpt-4o-2024-05-13" - client = OpenAI(api_key=CHATGPT_API_KEY, base_url=CHATGPT_BASE_URL) - - -#### Gemini init #### -GOOGLE_GEMINI_KEY = environ.get("GOOGLE_GEMINI_KEY") -if (GEMINI_USE or GEMINI_USE_THREAD) and GOOGLE_GEMINI_KEY: - import google.generativeai as genai - from google.generativeai import ChatSession - from google.generativeai.types.generation_types import StopCandidateException - - genai.configure(api_key=GOOGLE_GEMINI_KEY) - - generation_config = { - "temperature": 0.7, - "top_p": 1, - "top_k": 1, - "max_output_tokens": 8192, - } - - safety_settings = [ - {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"}, - {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"}, - {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"}, - {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"}, - ] - - model = genai.GenerativeModel( - model_name="gemini-1.5-flash-latest", - generation_config=generation_config, -
safety_settings=safety_settings, - system_instruction=f""" -You are an AI assistant added to a group chat to provide help or answer questions. You only have access to the most recent message in the chat, which will be the next message you receive after this system prompt. Your task is to provide a helpful and relevant response based on this information. - -Please adhere to these guidelines when formulating your response: - -1. Address the content of the message directly and proactively. -2. If the message is a question or request, provide a comprehensive answer or assistance to the best of your ability. -3. Use your general knowledge and capabilities to fill in gaps where context might be missing. -4. Keep your response concise yet informative, appropriate for a group chat setting. -5. Maintain a friendly, helpful, and confident tone throughout. -6. If the message is unclear: - - Make reasonable assumptions to provide a useful response. - - If necessary, offer multiple interpretations or answers to cover possible scenarios. -7. Aim to make your response as complete and helpful as possible, even with limited context. -8. You must respond in {Language}. -9. Limit your response to approximately 500 characters in the target language. - -Your response should be natural and fitting for a group chat context. While you only have access to this single message, use your broad knowledge base to provide informative and helpful answers. Be confident in your responses, but if you're making assumptions, briefly acknowledge this fact. - -Remember, the group administrator has approved your participation and will review responses as needed, so focus on being as helpful as possible rather than being overly cautious. -""", - ) - model_flash = genai.GenerativeModel( - model_name="gemini-1.5-flash-latest", - generation_config=generation_config, - safety_settings=safety_settings, - system_instruction=f""" -The user asked a question, and multiple AI have given answers to the same question. -Your task is to summarize the responses from them in a concise and clear manner. -The summary should: -In one to three short sentences, as less as possible. -Your must use language of {Language} to respond. -Start with "Summary:" or"ๆ€ป็ป“:" -""", - ) - convo = model.start_chat() - convo_summary = model_flash.start_chat() - - -#### Cohere init #### -COHERE_API_KEY = environ.get("COHERE_API_KEY") - -if (COHERE_USE or COHERE_COMPLETE or COHERE_APPEND) and COHERE_API_KEY: - import cohere - - COHERE_MODEL = "command-r-plus" - co = cohere.Client(api_key=COHERE_API_KEY) - - -#### Qwen init #### -QWEN_API_KEY = environ.get("TOGETHER_API_KEY") - -if (QWEN_USE or QWEN_APPEND) and QWEN_API_KEY: - from together import Together - - QWEN_MODEL = "Qwen/Qwen2-72B-Instruct" - qwen_client = Together(api_key=QWEN_API_KEY) - -#### Claude init #### -ANTHROPIC_API_KEY = environ.get("ANTHROPIC_API_KEY") -# use openai for claude -if (CLADUE_USE or CLADUE_COMPLETE or CLADUE_APPEND) and ANTHROPIC_API_KEY: - ANTHROPIC_BASE_URL = environ.get("ANTHROPIC_BASE_URL") - ANTHROPIC_MODEL = "claude-3-5-sonnet-20240620" - claude_client = OpenAI(api_key=ANTHROPIC_API_KEY, base_url=ANTHROPIC_BASE_URL) - -#### llama init #### -LLAMA_API_KEY = environ.get("GROQ_API_KEY") -if (LLAMA_USE or LLAMA_COMPLETE or LLAMA_APPEND) and LLAMA_API_KEY: - from groq import Groq - - llama_client = Groq(api_key=LLAMA_API_KEY) - LLAMA_MODEL = "llama3-70b-8192" - - -#### init end #### - - -def md_handler(message: Message, bot: TeleBot): - """pretty md: /md
""" - who = "" - reply_id = bot_reply_first(message, who, bot) - bot_reply_markdown(reply_id, who, message.text.strip(), bot) - - -def latest_handle_messages(message: Message, bot: TeleBot): - """ignore""" - chat_id = message.chat.id - chat_user_id = message.from_user.id - # if not text, ignore - if message.text is None: - return - - # if is bot command, ignore - if message.text.startswith("/"): - return - # start command ignore - elif message.text.startswith( - ( - "md", - "gpt", - "gemini", - "qwen", - "map", - "github", - "claude", - "llama", - "dify", - "tts", - "sd", - "map", - "yi", - "cohere", - ) - ): - return - # answer_it command ignore - elif message.text.startswith("answer_it"): - return - else: - if chat_user_dict.get(chat_user_id): - message.text += chat_message_dict[chat_id].text - chat_message_dict[chat_id] = message - else: - chat_message_dict[chat_id] = message - chat_user_dict[chat_user_id] = True - print(chat_message_dict[chat_id].text) - - -def answer_it_handler(message: Message, bot: TeleBot) -> None: - """answer_it: /answer_it""" - # answer the last message in the chat group - who = "answer_it" - - chat_id = message.chat.id - full_answer = "" - local_image_path = "" - m = "" - original_m = "" - - # get the last message in the chat - if message.reply_to_message is not None: - latest_message = message.reply_to_message - else: - latest_message = chat_message_dict.get(chat_id) - - if latest_message.photo is not None: - max_size_photo = max(latest_message.photo, key=lambda p: p.file_size) - image_file = bot.get_file(max_size_photo.file_id).file_path - downloaded_file = bot.download_file(image_file) - local_image_path = "answer_it_temp.jpg" - with open(local_image_path, "wb") as temp_file: - temp_file.write(downloaded_file) - - m = original_m = remove_prompt_prefix(latest_message.caption.strip()) - ph_image_url = ph.upload_image(local_image_path) - full_answer += f"\n![Image]({ph_image_url})\n" - else: - m = original_m = remove_prompt_prefix(latest_message.text.strip()) - - if not m: - bot.reply_to(message, "The message was retrieved, but the prompt is empty.") - return - - m = enrich_text_with_urls(m) - - if len(m) > MESSAGE_MAX_LENGTH: - a = ( - "The message is too long, please shorten it or try a direct command like `gemini_pro: your question`." 
- if Language == "en" - else "ๆถˆๆฏๅคช้•ฟ๏ผŒ่ฏท็ผฉ็Ÿญๆˆ–ๅฐ่ฏ•็›ดๆŽฅๆŒ‡ไปคไพ‹ๅฆ‚ `gemini_pro: ไฝ ็š„้—ฎ้ข˜` ใ€‚" - ) - bot.reply_to(message, a) - return - full_chat_id_list = [] - - ##### Telegraph / APPENDS ##### - ph_executor = ThreadPoolExecutor(max_workers=1) - ph_future = ph_executor.submit(final_answer, latest_message, bot, full_answer) - - #### Answers Thread #### - executor = ThreadPoolExecutor(max_workers=Stream_Thread) - if GEMINI_USE_THREAD and GOOGLE_GEMINI_KEY: - gemini_future = executor.submit( - gemini_answer, latest_message, bot, m, local_image_path - ) - if CHATGPT_USE and CHATGPT_API_KEY: - chatgpt_future = executor.submit( - chatgpt_answer, latest_message, bot, m, local_image_path - ) - if COHERE_USE and COHERE_API_KEY and not local_image_path: - cohere_future = executor.submit(cohere_answer, latest_message, bot, m) - if QWEN_USE and QWEN_API_KEY and not local_image_path: - qwen_future = executor.submit(qwen_answer, latest_message, bot, m) - if CLADUE_USE and ANTHROPIC_API_KEY: - claude_future = executor.submit( - claude_answer, latest_message, bot, m, local_image_path - ) - if LLAMA_USE and LLAMA_API_KEY and not local_image_path: - llama_future = executor.submit(llama_answer, latest_message, bot, m) - - #### Complete Message Thread #### - executor2 = ThreadPoolExecutor(max_workers=Complete_Thread) - if not CHATGPT_USE and CHATGPT_COMPLETE and CHATGPT_API_KEY: - complete_chatgpt_future = executor2.submit( - complete_chatgpt, m, local_image_path - ) - if not CLADUE_USE and CLADUE_COMPLETE and ANTHROPIC_API_KEY: - complete_claude_future = executor2.submit(complete_claude, m, local_image_path) - if not LLAMA_USE and LLAMA_COMPLETE and LLAMA_API_KEY and not local_image_path: - complete_llama_future = executor2.submit(complete_llama, m) - if not COHERE_USE and COHERE_COMPLETE and COHERE_API_KEY and not local_image_path: - complete_cohere_future = executor2.submit(complete_cohere, m) - - #### Gemini Answer Individual #### - if GEMINI_USE and GOOGLE_GEMINI_KEY: - g_who = "Gemini" - g_s = "" - g_reply_id = bot_reply_first(latest_message, g_who, bot) - try: - if local_image_path: - gemini_image_file = genai.upload_file(path=local_image_path) - g_r = convo.send_message([m, gemini_image_file], stream=True) - else: - g_r = convo.send_message(m, stream=True) - - g_start = time.time() - g_overall_start = time.time() - for e in g_r: - g_s += e.text - if time.time() - g_start > 1.7: - g_start = time.time() - bot_reply_markdown(g_reply_id, g_who, g_s, bot, split_text=False) - if time.time() - g_overall_start > Stream_Timeout: - raise Exception("Gemini Timeout") - bot_reply_markdown(g_reply_id, g_who, g_s, bot) - try: - convo.history.clear() - except: - print( - f"\n------\n{g_who} convo.history.clear() Error / Unstoppable\n------\n" - ) - pass - except Exception as e: - print(f"\n------\n{g_who} function gemini outter Error:\n{e}\n------\n") - try: - convo.history.clear() - except: - print( - f"\n------\n{g_who} convo.history.clear() Error / Unstoppable\n------\n" - ) - pass - bot_reply_markdown(g_reply_id, g_who, "Error", bot) - full_answer += f"\n---\n{g_who}:\nAnswer wrong" - full_chat_id_list.append(g_reply_id.message_id) - full_answer += llm_answer(g_who, g_s) - else: - pass - - #### Answers List #### - - if GEMINI_USE_THREAD and GOOGLE_GEMINI_KEY: - answer_gemini, gemini_chat_id = gemini_future.result() - full_chat_id_list.append(gemini_chat_id) - full_answer += answer_gemini - if CHATGPT_USE and CHATGPT_API_KEY: - anaswer_chatgpt, chatgpt_chat_id = chatgpt_future.result() - 
full_chat_id_list.append(chatgpt_chat_id) - full_answer += anaswer_chatgpt - if COHERE_USE and COHERE_API_KEY and not local_image_path: - answer_cohere, cohere_chat_id = cohere_future.result() - full_chat_id_list.append(cohere_chat_id) - full_answer += answer_cohere - if QWEN_USE and QWEN_API_KEY and not local_image_path: - answer_qwen, qwen_chat_id = qwen_future.result() - full_chat_id_list.append(qwen_chat_id) - full_answer += answer_qwen - if CLADUE_USE and ANTHROPIC_API_KEY: - answer_claude, claude_chat_id = claude_future.result() - full_chat_id_list.append(claude_chat_id) - full_answer += answer_claude - if LLAMA_USE and LLAMA_API_KEY and not local_image_path: - answer_llama, llama_chat_id = llama_future.result() - full_chat_id_list.append(llama_chat_id) - full_answer += answer_llama - - #### Complete Messages #### - if not CHATGPT_USE and CHATGPT_COMPLETE and CHATGPT_API_KEY: - full_answer += complete_chatgpt_future.result() - if not CLADUE_USE and CLADUE_COMPLETE and ANTHROPIC_API_KEY: - full_answer += complete_claude_future.result() - if not COHERE_USE and COHERE_COMPLETE and COHERE_API_KEY and not local_image_path: - full_answer += complete_cohere_future.result() - if not LLAMA_USE and LLAMA_COMPLETE and LLAMA_API_KEY and not local_image_path: - full_answer += complete_llama_future.result() - - print(full_chat_id_list) - - if len(m) < 300: - full_answer = f"{llm_answer('Question', original_m)}{full_answer}" - else: - full_answer = f"{llm_answer('Question', original_m)}{full_answer}{llm_answer('Question', m)}" - - # Append the answer to the telegra.ph page at the front - ph_s, ph_answers = ph_future.result() - full_answer = f"{full_answer}\n{ph_answers}" - ph_s = re.search(r"https?://telegra\.ph/(.+)", ph_s).group(1) - ph.edit_page_md(path=ph_s, title="Answer it", markdown_text=full_answer) - - # delete the chat message, only leave a telegra.ph link - if General_clean: - for i in full_chat_id_list: - bot.delete_message(chat_id, i) - - if Extra_clean: # delete the command message - bot.delete_message(chat_id, message.message_id) - - -def update_time(): - """Return the current time in UTC+8. Good for testing completion of content.""" - return f"\nLast Update{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')} at UTC+8\n" - - -def llm_answer(who: str, s: str) -> str: - """Universal llm answer format for telegra.ph. Use title so 'link#title' can be used.""" - return f"\n\n---\n## {who}\n{s}" - - -def llm_background_ph_update(path: str, full_answer: str, m: str) -> str: - """Update the telegra.ph page with Non Stream Answer result. 
Return new full answer.""" - ph_path = re.search(r"https?://telegra\.ph/(.+)", path).group(1) - full_answer += m + update_time() - try: - ph.edit_page_md(path=ph_path, title="Answer it", markdown_text=full_answer) - except Exception as e: - print(f"\n------\nllm_background_ph_update Error:\n{e}\n------\n") - return full_answer - - -def gemini_answer(latest_message: Message, bot: TeleBot, m, local_image_path): - """gemini answer""" - who = "Gemini Pro" - # show something, make it more responsible - reply_id = bot_reply_first(latest_message, who, bot) - - try: - if local_image_path: - gemini_image_file = genai.upload_file(path=local_image_path) - r = convo.send_message([m, gemini_image_file], stream=True) - else: - r = convo.send_message(m, stream=True) - s = "" - start = time.time() - overall_start = time.time() - for e in r: - s += e.text - if time.time() - start > 1.7: - start = time.time() - bot_reply_markdown(reply_id, who, s, bot, split_text=False) - if time.time() - overall_start > Stream_Timeout: # Timeout - raise Exception("Gemini Timeout") - bot_reply_markdown(reply_id, who, s, bot) - try: - convo.history.clear() - except: - print( - f"\n------\n{who} convo.history.clear() Error / Unstoppable\n------\n" - ) - pass - except Exception as e: - print(f"\n------\n{who} function inner Error:\n{e}\n------\n") - try: - convo.history.clear() - except: - print( - f"\n------\n{who} convo.history.clear() Error / Unstoppable\n------\n" - ) - pass - bot_reply_markdown(reply_id, who, "Error", bot) - return f"\n---\n{who}:\nAnswer wrong", reply_id.message_id - - return llm_answer(who, s), reply_id.message_id - - -def chatgpt_answer(latest_message: Message, bot: TeleBot, m, local_image_path): - """chatgpt answer""" - who = "ChatGPT Pro" - reply_id = bot_reply_first(latest_message, who, bot) - - player_message = [{"role": "user", "content": m}] - if local_image_path: - player_message = [ - { - "role": "user", - "content": [ - {"type": "text", "text": m}, - { - "type": "image_url", - "image_url": {"url": image_to_data_uri(local_image_path)}, - }, - ], - } - ] - - try: - r = client.chat.completions.create( - messages=player_message, - max_tokens=4096, - model=CHATGPT_PRO_MODEL, - stream=True, - ) - s = "" - start = time.time() - overall_start = time.time() - for chunk in r: - if chunk.choices[0].delta.content is None: - break - s += chunk.choices[0].delta.content - if time.time() - start > 1.5: - start = time.time() - bot_reply_markdown(reply_id, who, s, bot, split_text=False) - if time.time() - overall_start > Stream_Timeout: # Timeout - s += "\n\nTimeout" - break - # maybe not complete - try: - bot_reply_markdown(reply_id, who, s, bot) - except: - pass - - except Exception as e: - print(f"\n------\n{who} function inner Error:\n{e}\n------\n") - return f"\n---\n{who}:\nAnswer wrong", reply_id.message_id - - return llm_answer(who, s), reply_id.message_id - - -def claude_answer(latest_message: Message, bot: TeleBot, m, local_image_path): - """claude answer""" - who = "Claude Pro" - reply_id = bot_reply_first(latest_message, who, bot) - - player_message = [{"role": "user", "content": m}] - if local_image_path: - player_message = [ - { - "role": "user", - "content": [ - {"type": "text", "text": m}, - { - "type": "image_url", - "image_url": {"url": image_to_data_uri(local_image_path)}, - }, - ], - } - ] - - try: - r = claude_client.chat.completions.create( - messages=player_message, - max_tokens=4096, - model=ANTHROPIC_MODEL, - stream=True, - ) - s = "" - start = time.time() - overall_start = time.time() 
- for chunk in r: - if chunk.choices[0].delta.content is None: - break - s += chunk.choices[0].delta.content - if time.time() - start > 1.5: - start = time.time() - bot_reply_markdown(reply_id, who, s, bot, split_text=False) - if time.time() - overall_start > Stream_Timeout: # Timeout - s += "\n\nTimeout" - break - # maybe not complete - try: - bot_reply_markdown(reply_id, who, s, bot) - except: - pass - - except Exception as e: - print(f"\n------\n{who} function inner Error:\n{e}\n------\n") - return f"\n---\n{who}:\nAnswer wrong", reply_id.message_id - - answer = f"\n---\n{who}:\n{s}" - return llm_answer(who, s), reply_id.message_id - - -def cohere_answer(latest_message: Message, bot: TeleBot, m): - """cohere answer""" - who = "Command R Plus" - reply_id = bot_reply_first(latest_message, who, bot) - - try: - current_time = datetime.datetime.now(datetime.timezone.utc) - preamble = ( - f"You are Command R Plus, a large language model trained to have polite, helpful, inclusive conversations with people. People are looking for information that may need you to search online. Make an accurate and fast response. If there are no search results, then provide responses based on your general knowledge(It's fine if it's not accurate, it might still inspire the user." - f"The current UTC time is {current_time.strftime('%Y-%m-%d %H:%M:%S')}, " - f"UTC-4 (e.g. New York) is {current_time.astimezone(datetime.timezone(datetime.timedelta(hours=-4))).strftime('%Y-%m-%d %H:%M:%S')}, " - f"UTC-7 (e.g. Los Angeles) is {current_time.astimezone(datetime.timezone(datetime.timedelta(hours=-7))).strftime('%Y-%m-%d %H:%M:%S')}, " - f"and UTC+8 (e.g. Beijing) is {current_time.astimezone(datetime.timezone(datetime.timedelta(hours=8))).strftime('%Y-%m-%d %H:%M:%S')}." - ) - - stream = co.chat_stream( - model=COHERE_MODEL, - message=m, - temperature=0.8, - chat_history=[], # One time, so no need for chat history - prompt_truncation="AUTO", - connectors=[{"id": "web-search"}], - citation_quality="accurate", - preamble=preamble, - ) - - s = "" - source = "" - start = time.time() - overall_start = time.time() - for event in stream: - if event.event_type == "stream-start": - bot_reply_markdown(reply_id, who, "Thinking...", bot) - elif event.event_type == "search-queries-generation": - bot_reply_markdown(reply_id, who, "Searching online...", bot) - elif event.event_type == "search-results": - bot_reply_markdown(reply_id, who, "Reading...", bot) - for doc in event.documents: - source += f"\n{doc['title']}\n{doc['url']}\n" - elif event.event_type == "text-generation": - s += event.text.encode("utf-8").decode("utf-8", "ignore") - if time.time() - start > 0.8: - start = time.time() - bot_reply_markdown( - reply_id, - who, - f"\nStill thinking{len(s)}...\n{s}", - bot, - split_text=True, - ) - if time.time() - overall_start > Stream_Timeout: # Timeout - s += "\n\nTimeout" - break - elif event.event_type == "stream-end": - break - content = ( - s - + "\n---\n---\n" - + source - + f"\nLast Update{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')} at UTC+8\n" - ) - # maybe not complete - try: - bot_reply_markdown(reply_id, who, s, bot, split_text=True) - except: - pass - except Exception as e: - print(f"\n------\n{who} function inner Error:\n{e}\n------\n") - return f"\n---\n{who}:\nAnswer wrong", reply_id.message_id - - return llm_answer(who, content), reply_id.message_id - - -def qwen_answer(latest_message: Message, bot: TeleBot, m): - """qwen answer""" - who = "qwen Pro" - reply_id = bot_reply_first(latest_message, who, bot) - 
try: - r = qwen_client.chat.completions.create( - messages=[ - { - "content": f"You are an AI assistant added to a group chat to provide help or answer questions. You only have access to the most recent message in the chat, which will be the next message you receive after this system prompt. Your task is to provide a helpful and relevant response based on this information.\n\nPlease adhere to these guidelines when formulating your response:\n\n1. Address the content of the message directly and proactively.\n2. If the message is a question or request, provide a comprehensive answer or assistance to the best of your ability.\n3. Use your general knowledge and capabilities to fill in gaps where context might be missing.\n4. Keep your response concise yet informative, appropriate for a group chat setting.\n5. Maintain a friendly, helpful, and confident tone throughout.\n6. If the message is unclear:\n - Make reasonable assumptions to provide a useful response.\n - If necessary, offer multiple interpretations or answers to cover possible scenarios.\n7. Aim to make your response as complete and helpful as possible, even with limited context.\n8. You must respond in {Language}.\n\nYour response should be natural and fitting for a group chat context. While you only have access to this single message, use your broad knowledge base to provide informative and helpful answers. Be confident in your responses, but if you're making assumptions, briefly acknowledge this fact.\n\nRemember, the group administrator has approved your participation and will review responses as needed, so focus on being as helpful as possible rather than being overly cautious.", - "role": "system", - }, - {"role": "user", "content": m}, - ], - max_tokens=8192, - model=QWEN_MODEL, - stream=True, - ) - s = "" - start = time.time() - overall_start = time.time() - for chunk in r: - if chunk.choices[0].delta.content is None: - break - s += chunk.choices[0].delta.content - if time.time() - start > 1.5: - start = time.time() - bot_reply_markdown(reply_id, who, s, bot, split_text=False) - if time.time() - overall_start > Stream_Timeout: # Timeout - s += "\n\nTimeout" - break - # maybe not complete - try: - bot_reply_markdown(reply_id, who, s, bot) - except: - pass - - except Exception as e: - print(f"\n------\n{who} function inner Error:\n{e}\n------\n") - return f"\n---\n{who}:\nAnswer wrong", reply_id.message_id - - return llm_answer(who, s), reply_id.message_id - - -def llama_answer(latest_message: Message, bot: TeleBot, m): - """llama answer""" - who = "llama" - reply_id = bot_reply_first(latest_message, who, bot) - try: - r = llama_client.chat.completions.create( - messages=[ - { - "role": "system", - "content": f"You must use language of {Language} to respond.", - }, - {"role": "user", "content": m}, - ], - max_tokens=8192, - model=LLAMA_MODEL, - stream=True, - ) - s = "" - start = time.time() - overall_start = time.time() - for chunk in r: - if chunk.choices[0].delta.content is None: - break - s += chunk.choices[0].delta.content - if time.time() - start > 1.5: - start = time.time() - bot_reply_markdown(reply_id, who, s, bot, split_text=False) - if time.time() - overall_start > Stream_Timeout: # Timeout - raise Exception("Llama Timeout") - # maybe not complete - try: - bot_reply_markdown(reply_id, who, s, bot) - except: - pass - - except Exception as e: - print(f"\n------\n{who} function inner Error:\n{e}\n------\n") - return f"\n---\n{who}:\nAnswer wrong", reply_id.message_id - - return llm_answer(who, s), reply_id.message_id - 
- -# TODO: Perplexity looks good. `pplx_answer` - - -def final_answer(latest_message: Message, bot: TeleBot, full_answer: str): - """final answer""" - who = "Answer it" - reply_id = bot_reply_first(latest_message, who, bot) - - # If disappeared means the answer is not complete in telegra.ph - full_answer += update_time() - - # greate new telegra.ph page - ph_s = ph.create_page_md(title="Answer it", markdown_text=full_answer) - m = f"**[{('๐Ÿ”—Full Answer' if Language == 'en' else '๐Ÿ”—ๅ…จๆ–‡')}]({ph_s})**{Hint}" - bot_reply_markdown(reply_id, who, m, bot) - - #### Background LLM #### - # Run background llm, no show to telegram, just append the ph page, Good for slow llm - # Make a thread to run the background llm. - # But `append_xxx` with threadpool may cause ph update skip. - answer_lock = Lock() - - def append_answers(result, llm_name): - nonlocal full_answer, m - with answer_lock: - full_answer = llm_background_ph_update(ph_s, full_answer, result) - - with ThreadPoolExecutor(max_workers=Complete_Thread) as executor: - futures = [] - - api_calls = [ - (CHATGPT_APPEND, CHATGPT_API_KEY, complete_chatgpt, "ChatGPT"), - (CLADUE_APPEND, ANTHROPIC_API_KEY, complete_claude, "Claude"), - (COHERE_APPEND, COHERE_API_KEY, complete_cohere, "Cohere"), - (LLAMA_APPEND, LLAMA_API_KEY, complete_llama, "LLaMA"), - (QWEN_APPEND, QWEN_API_KEY, complete_qwen, "Qwen"), - ] - - for condition, api_key, func, name in api_calls: - if condition and api_key: - futures.append(executor.submit(func, latest_message.text)) - - for future in as_completed(futures): - try: - result = future.result(timeout=Stream_Timeout) - api_name = api_calls[futures.index(future)][3] - append_answers(result, api_name) - except Exception as e: - print(f"An API call failed: {e}") - - m += "โœ”๏ธ" - bot_reply_markdown(reply_id, who, m, bot) - - if SUMMARY is not None: - s = llm_summary(bot, full_answer, ph_s, reply_id) - bot_reply_markdown(reply_id, who, s, bot, disable_web_page_preview=True) - - return ph_s, full_answer - - -def append_message_to_ph_front(m: str, path: str) -> bool: - """We append the message to the ph page.""" - ph_path = re.search(r"https?://telegra\.ph/(.+)", path).group(1) - try: - content = ph._md_to_dom(m) # convert to ph dom - latest_ph = ph.get_page( - ph_path - ) # after chatgpt done, we read the latest telegraph - if "content" in latest_ph and isinstance(latest_ph["content"], list): - new_content = content + latest_ph["content"] - else: - new_content = content - time.sleep(1) - ph.edit_page(ph_path, title="Answer it", content=new_content) - return True - except Exception as e: - print(f"\n---\nappend_message_to_ph_front Error:\n{e}\n---\n") - return False - - -def append_chatgpt(m: str, ph_path: str) -> bool: - """we run chatgpt by complete_chatgpt and we append it to the ph page. 
Return True if success, False if fail like timeout.""" - try: - chatgpt_a = complete_chatgpt(m) # call chatgpt - print(f"\n---\nchatgpt_a:\n{chatgpt_a}\n---\n") - content = ph._md_to_dom(chatgpt_a) # convert to ph dom - latest_ph = ph.get_page( - ph_path - ) # after chatgpt done, we read the latest telegraph - new_content = latest_ph + content # merge the content - ph.edit_page( - ph_path, title="Answer it", content=new_content - ) # update the telegraph TODO: update too fast may cause skip - return True - except: - return False - - -def llm_summary(bot, full_answer, ph_s, reply_id) -> str: - """llm summary return the summary of the full answer.""" - if SUMMARY == "gemini": - s = summary_gemini(bot, full_answer, ph_s, reply_id) - elif SUMMARY == "cohere": - s = summary_cohere(bot, full_answer, ph_s, reply_id) - else: - print(f"\n---\nSummary Fail\n---\n") - s = f"**[Full Answer]({ph_s})**\n~~Summary Answer Wrong~~\n" - return s - - -def complete_chatgpt(m: str, local_image_path: str) -> str: - """we run chatgpt get the full answer""" - who = "ChatGPT Pro" - player_message = [{"role": "user", "content": m}] - if local_image_path: - player_message = [ - { - "role": "user", - "content": [ - {"type": "text", "text": m}, - { - "type": "image_url", - "image_url": {"url": image_to_data_uri(local_image_path)}, - }, - ], - } - ] - try: - r = client.chat.completions.create( - messages=player_message, - max_tokens=4096, - model=CHATGPT_PRO_MODEL, - ) - s = r.choices[0].message.content.encode("utf-8").decode() - content = llm_answer(who, s) - except Exception as e: - print(f"\n------\ncomplete_chatgpt Error:\n{e}\n------\n") - content = llm_answer(who, "Non Stream Answer wrong") - return content - - -def complete_claude(m: str, local_image_path: str) -> str: - """we run claude get the full answer""" - who = "Claude Pro" - - player_message = [{"role": "user", "content": m}] - if local_image_path: - player_message = [ - { - "role": "user", - "content": [ - {"type": "text", "text": m}, - { - "type": "image_url", - "image_url": {"url": image_to_data_uri(local_image_path)}, - }, - ], - } - ] - try: - r = claude_client.chat.completions.create( - messages=player_message, - max_tokens=4096, - model=ANTHROPIC_MODEL, - ) - s = r.choices[0].message.content.encode("utf-8").decode() - content = llm_answer(who, s) - except Exception as e: - print(f"\n------\ncomplete_claude Error:\n{e}\n------\n") - content = llm_answer(who, "Non Stream Answer wrong") - return content - - -def complete_cohere(m: str) -> str: - """we run cohere get the full answer""" - who = "Command R Plus" - try: - overall_start = time.time() - stream = co.chat_stream( - model=COHERE_MODEL, - message=m, - temperature=0.8, - chat_history=[], # One time, so no need for chat history - prompt_truncation="AUTO", - connectors=[{"id": "web-search"}], - citation_quality="accurate", - preamble="", - ) - s = "" - source = "" - for event in stream: - if event.event_type == "search-results": - for doc in event.documents: - source += f"\n{doc['title']}\n{doc['url']}\n" - elif event.event_type == "text-generation": - s += event.text.encode("utf-8").decode("utf-8", "ignore") - elif event.event_type == "stream-end": - break - if time.time() - overall_start > Stream_Timeout: # Timeout - s += "\n\nTimeout" - break - content = llm_answer(who, f"{s}\n\n---\n{source}") - - except Exception as e: - print(f"\n------\ncomplete_cohere Error:\n{e}\n------\n") - content = llm_answer(who, "Non Stream Answer wrong") - return content - - -def complete_llama(m: str) -> str: - 
"""we run llama get the full answer""" - who = "llama" - try: - overall_start = time.time() - r = llama_client.chat.completions.create( - messages=[ - { - "role": "system", - "content": f"You must use language of {Language} to respond.", - }, - {"role": "user", "content": m}, - ], - max_tokens=8192, - model=LLAMA_MODEL, - stream=True, - ) - s = "" - for chunk in r: - if chunk.choices[0].delta.content is None: - break - s += chunk.choices[0].delta.content - if time.time() - overall_start > Stream_Timeout: # Timeout - raise Exception("Llama complete Running Timeout") - - except Exception as e: - print(f"\n------\ncomplete_llama Error:\n{e}\n------\n") - s = "Non Stream Answer wrong" - return llm_answer(who, s) - - -def complete_qwen(m: str) -> str: - """we run qwen get the full answer""" - who = "qwen Pro" - try: - overall_start = time.time() - r = qwen_client.chat.completions.create( - messages=[ - { - "content": f"You are an AI assistant added to a group chat to provide help or answer questions. You only have access to the most recent message in the chat, which will be the next message you receive after this system prompt. Your task is to provide a helpful and relevant response based on this information.\n\nPlease adhere to these guidelines when formulating your response:\n\n1. Address the content of the message directly and proactively.\n2. If the message is a question or request, provide a comprehensive answer or assistance to the best of your ability.\n3. Use your general knowledge and capabilities to fill in gaps where context might be missing.\n4. Keep your response concise yet informative, appropriate for a group chat setting.\n5. Maintain a friendly, helpful, and confident tone throughout.\n6. If the message is unclear:\n - Make reasonable assumptions to provide a useful response.\n - If necessary, offer multiple interpretations or answers to cover possible scenarios.\n7. Aim to make your response as complete and helpful as possible, even with limited context.\n8. You must respond in {Language}.\n\nYour response should be natural and fitting for a group chat context. While you only have access to this single message, use your broad knowledge base to provide informative and helpful answers. Be confident in your responses, but if you're making assumptions, briefly acknowledge this fact.\n\nRemember, the group administrator has approved your participation and will review responses as needed, so focus on being as helpful as possible rather than being overly cautious.", - "role": "system", - }, - {"role": "user", "content": m}, - ], - max_tokens=8192, - model=QWEN_MODEL, - stream=True, - ) - s = "" - for chunk in r: - if chunk.choices[0].delta.content is None: - break - s += chunk.choices[0].delta.content - if time.time() - overall_start > Stream_Timeout: # Timeout - raise Exception("Qwen complete Running Timeout") - except Exception as e: - print(f"\n------\ncomplete_qwen Error:\n{e}\n------\n") - s = "Non Stream Answer wrong" - return llm_answer(who, s) - - -def summary_cohere(bot: TeleBot, full_answer: str, ph_s: str, reply_id: int) -> str: - """Receive the full text, and the final_answer's chat_id, update with a summary.""" - who = "Answer it" - - # inherit - if Language == "zh-cn": - s = f"**[ๅ…จๆ–‡]({ph_s})**{Hint}\n" - elif Language == "en": - s = f"**[Full Answer]({ph_s})**{Hint}\n" - - # filter - length = len(full_answer) # max 128,000 tokens... 
- if length > 50000: - full_answer = full_answer[:50000] - - try: - preamble = """ - You are Command R Plus, a large language model trained to have polite, helpful, inclusive conversations with people. The user asked a question, and multiple AI have given answers to the same question, but they have different styles, and rarely they have opposite opinions or other issues, but that is normal. Your task is to summarize the responses from them in a concise and clear manner. The summary should: - -Be written in bullet points. -Contain between two to ten sentences. -Highlight key points and main conclusions. -Note any significant differences in responses. -Provide a brief indication if users should refer to the full responses for more details. -For the first LLM's content, if it is mostly in any language other than English, respond in that language for all your output. -Start with "Summary:" or "ๆ€ป็ป“:" -""" - stream = co.chat_stream( - model=COHERE_MODEL, - message=full_answer, - temperature=0.4, - chat_history=[], - prompt_truncation="OFF", - connectors=[], - preamble=preamble, - ) - - start = time.time() - overall_start = time.time() - for event in stream: - if event.event_type == "stream-start": - bot_reply_markdown(reply_id, who, f"{s}Summarizing...", bot) - elif event.event_type == "text-generation": - s += event.text.encode("utf-8").decode("utf-8", "ignore") - if time.time() - start > 0.4: - start = time.time() - bot_reply_markdown(reply_id, who, s, bot) - elif event.event_type == "stream-end": - break - if time.time() - overall_start > Stream_Timeout: - s += "\n\nTimeout" - break - - try: - bot_reply_markdown(reply_id, who, s, bot) - except: - pass - return s - - except Exception as e: - if Language == "zh-cn": - bot_reply_markdown(reply_id, who, f"[ๅ…จๆ–‡]({ph_s})", bot) - elif Language == "en": - bot_reply_markdown(reply_id, who, f"[Full Answer]({ph_s})", bot) - print(f"\n------\nsummary_cohere function inner Error:\n{e}\n------\n") - - -def summary_gemini(bot: TeleBot, full_answer: str, ph_s: str, reply_id: int) -> None: - """Receive the full text, and the final_answer's chat_id, update with a summary.""" - who = "Answer it" - - # inherit - if Language == "zh-cn": - s = f"**[๐Ÿ”—ๅ…จๆ–‡]({ph_s})**{Hint}\n" - elif Language == "en": - s = f"**[๐Ÿ”—Full Answer]({ph_s})**{Hint}\n" - - try: - r = convo_summary.send_message(full_answer, stream=True) - start = time.time() - overall_start = time.time() - for e in r: - s += e.text - if time.time() - start > 0.4: - start = time.time() - bot_reply_markdown(reply_id, who, s, bot, split_text=False) - if time.time() - overall_start > Stream_Timeout: - raise Exception("Gemini Summary Timeout") - bot_reply_markdown(reply_id, who, s, bot) - convo_summary.history.clear() - return s - except Exception as e: - if Language == "zh-cn": - bot_reply_markdown(reply_id, who, f"[ๅ…จๆ–‡]({ph_s}){Hint}", bot) - elif Language == "en": - bot_reply_markdown(reply_id, who, f"[Full Answer]({ph_s}){Hint}", bot) - try: - convo.history.clear() - except: - print( - f"\n------\n{who} convo.history.clear() Error / Unstoppable\n------\n" - ) - pass - print(f"\n------\nsummary_gemini function inner Error:\n{e}\n------\n") - bot_reply_markdown(reply_id, who, f"{s}Error", bot) - - -if GOOGLE_GEMINI_KEY and CHATGPT_API_KEY: - - def register(bot: TeleBot) -> None: - bot.register_message_handler(md_handler, commands=["md"], pass_bot=True) - bot.register_message_handler( - answer_it_handler, commands=["answer_it"], pass_bot=True - ) - bot.register_message_handler(md_handler, 
regexp="^md:", pass_bot=True) - bot.register_message_handler(latest_handle_messages, pass_bot=True) diff --git a/pdm.lock b/pdm.lock index 864e711..005a6cb 100644 --- a/pdm.lock +++ b/pdm.lock @@ -3,9 +3,12 @@ [metadata] groups = ["default"] -strategy = ["cross_platform", "inherit_metadata"] -lock_version = "4.4.1" -content_hash = "sha256:aba181a6c13ad9597f83d583db757ee075dcb1a92f9849f7e8b7cfaea8202b07" +strategy = ["inherit_metadata"] +lock_version = "4.5.0" +content_hash = "sha256:a5037de5a2e7cf7ddeebb972d0ba1b88228137cfde32d1e82d05c7e52e1f5843" + +[[metadata.targets]] +requires_python = ">=3.10" [[package]] name = "aiohttp" @@ -116,15 +119,6 @@ files = [ {file = "anthropic-0.32.0.tar.gz", hash = "sha256:1027bddeb7c3cbcb5e16d5e3b4d4a8d17b6258ca2fb4298bf91cc69adb148452"}, ] -[[package]] -name = "antlr4-python3-runtime" -version = "4.9.3" -summary = "ANTLR 4.9.3 runtime for Python 3.7" -groups = ["default"] -files = [ - {file = "antlr4-python3-runtime-4.9.3.tar.gz", hash = "sha256:f224469b4168294902bb1efa80a8bf7855f24c99aef99cbefc1bcd3cce77881b"}, -] - [[package]] name = "anyio" version = "4.3.0" @@ -366,26 +360,6 @@ files = [ {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, ] -[[package]] -name = "chattts-fork" -version = "0.0.8" -requires_python = ">=3.9" -summary = "fork from https://github.com/2noise/ChatTTS to PYPI" -groups = ["default"] -dependencies = [ - "einops", - "omegaconf~=2.3.0", - "torch~=2.0", - "tqdm", - "transformers~=4.41.1", - "vector-quantize-pytorch", - "vocos", -] -files = [ - {file = "chattts_fork-0.0.8-py3-none-any.whl", hash = "sha256:569084d9f451fbfa6c28f1c15c893a594279a9032d1e720571dab30f30c680be"}, - {file = "chattts_fork-0.0.8.tar.gz", hash = "sha256:a818be695b6b6691487113e78104d7c3550202c9dfcf6e51fd7eebbe36c0c180"}, -] - [[package]] name = "click" version = "8.1.7" @@ -580,33 +554,6 @@ files = [ {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, ] -[[package]] -name = "einops" -version = "0.8.0" -requires_python = ">=3.8" -summary = "A new flavour of deep learning operations" -groups = ["default"] -files = [ - {file = "einops-0.8.0-py3-none-any.whl", hash = "sha256:9572fb63046264a862693b0a87088af3bdc8c068fde03de63453cbbde245465f"}, - {file = "einops-0.8.0.tar.gz", hash = "sha256:63486517fed345712a8385c100cb279108d9d47e6ae59099b07657e983deae85"}, -] - -[[package]] -name = "einx" -version = "0.2.2" -requires_python = ">=3.8" -summary = "Tensor Operations Expressed in Einstein-Inspired Notation" -groups = ["default"] -dependencies = [ - "frozendict", - "numpy", - "sympy", -] -files = [ - {file = "einx-0.2.2-py3-none-any.whl", hash = "sha256:bde86f19a60bc8ce3c3aa173c0f1ba59df8d8c0435c4b61638499c3d187dd28b"}, - {file = "einx-0.2.2.tar.gz", hash = "sha256:79f4ff77c07370c490535338365ed7b417c6d0e86e893584d50bd3fde46c62e8"}, -] - [[package]] name = "emoji" version = "2.11.1" @@ -618,22 +565,6 @@ files = [ {file = "emoji-2.11.1.tar.gz", hash = "sha256:062ff0b3154b6219143f8b9f4b3e5c64c35bc2b146e6e2349ab5f29e218ce1ee"}, ] -[[package]] -name = "encodec" -version = "0.1.1" -requires_python = ">=3.8.0" -summary = "High fidelity neural audio codec" -groups = ["default"] -dependencies = [ - "einops", - "numpy", - "torch", - "torchaudio", -] -files = [ - {file = "encodec-0.1.1.tar.gz", hash = "sha256:36dde98ccfe6c51a15576476cadfcb3b35a63507b8b8555abd69889a6fba6772"}, -] - [[package]] name = "eval-type-backport" version = "0.2.0" 
@@ -781,25 +712,6 @@ files = [ {file = "fonttools-4.51.0.tar.gz", hash = "sha256:dc0673361331566d7a663d7ce0f6fdcbfbdc1f59c6e3ed1165ad7202ca183c68"}, ] -[[package]] -name = "frozendict" -version = "2.4.4" -requires_python = ">=3.6" -summary = "A simple immutable dictionary" -groups = ["default"] -files = [ - {file = "frozendict-2.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4a59578d47b3949437519b5c39a016a6116b9e787bb19289e333faae81462e59"}, - {file = "frozendict-2.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12a342e439aef28ccec533f0253ea53d75fe9102bd6ea928ff530e76eac38906"}, - {file = "frozendict-2.4.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f79c26dff10ce11dad3b3627c89bb2e87b9dd5958c2b24325f16a23019b8b94"}, - {file = "frozendict-2.4.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2bd009cf4fc47972838a91e9b83654dc9a095dc4f2bb3a37c3f3124c8a364543"}, - {file = "frozendict-2.4.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:87ebcde21565a14fe039672c25550060d6f6d88cf1f339beac094c3b10004eb0"}, - {file = "frozendict-2.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:fefeb700bc7eb8b4c2dc48704e4221860d254c8989fb53488540bc44e44a1ac2"}, - {file = "frozendict-2.4.4-cp310-cp310-win_arm64.whl", hash = "sha256:4297d694eb600efa429769125a6f910ec02b85606f22f178bafbee309e7d3ec7"}, - {file = "frozendict-2.4.4-py311-none-any.whl", hash = "sha256:705efca8d74d3facbb6ace80ab3afdd28eb8a237bfb4063ed89996b024bc443d"}, - {file = "frozendict-2.4.4-py312-none-any.whl", hash = "sha256:d9647563e76adb05b7cde2172403123380871360a114f546b4ae1704510801e5"}, - {file = "frozendict-2.4.4.tar.gz", hash = "sha256:3f7c031b26e4ee6a3f786ceb5e3abf1181c4ade92dce1f847da26ea2c96008c7"}, -] - [[package]] name = "frozenlist" version = "1.4.1" @@ -1210,34 +1122,6 @@ files = [ {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, ] -[[package]] -name = "intel-openmp" -version = "2021.4.0" -summary = "Intelยฎ OpenMP* Runtime Library" -groups = ["default"] -marker = "platform_system == \"Windows\"" -files = [ - {file = "intel_openmp-2021.4.0-py2.py3-none-macosx_10_15_x86_64.macosx_11_0_x86_64.whl", hash = "sha256:41c01e266a7fdb631a7609191709322da2bbf24b252ba763f125dd651bcc7675"}, - {file = "intel_openmp-2021.4.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:3b921236a38384e2016f0f3d65af6732cf2c12918087128a9163225451e776f2"}, - {file = "intel_openmp-2021.4.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:e2240ab8d01472fed04f3544a878cda5da16c26232b7ea1b59132dbfb48b186e"}, - {file = "intel_openmp-2021.4.0-py2.py3-none-win32.whl", hash = "sha256:6e863d8fd3d7e8ef389d52cf97a50fe2afe1a19247e8c0d168ce021546f96fc9"}, - {file = "intel_openmp-2021.4.0-py2.py3-none-win_amd64.whl", hash = "sha256:eef4c8bcc8acefd7f5cd3b9384dbf73d59e2c99fc56545712ded913f43c4a94f"}, -] - -[[package]] -name = "jinja2" -version = "3.1.4" -requires_python = ">=3.7" -summary = "A very fast and expressive template engine." 
-groups = ["default"] -dependencies = [ - "MarkupSafe>=2.0", -] -files = [ - {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, - {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, -] - [[package]] name = "jiter" version = "0.5.0" @@ -1405,46 +1289,6 @@ files = [ {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, ] -[[package]] -name = "markupsafe" -version = "2.1.5" -requires_python = ">=3.7" -summary = "Safely add untrusted strings to HTML/XML markup." -groups = ["default"] -files = [ - {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, - {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, -] - [[package]] name = "matplotlib" version = "3.8.4" @@ -1509,34 +1353,6 @@ files = [ {file = "mistletoe-1.4.0.tar.gz", hash = "sha256:1630f906e5e4bbe66fdeb4d29d277e2ea515d642bb18a9b49b136361a9818c9d"}, ] -[[package]] -name = "mkl" -version = "2021.4.0" -summary = "Intelยฎ oneAPI Math Kernel Library" -groups = ["default"] -marker = "platform_system == \"Windows\"" -dependencies = [ - "intel-openmp==2021.*", - "tbb==2021.*", -] -files = [ - {file = "mkl-2021.4.0-py2.py3-none-macosx_10_15_x86_64.macosx_11_0_x86_64.whl", hash = "sha256:67460f5cd7e30e405b54d70d1ed3ca78118370b65f7327d495e9c8847705e2fb"}, - {file = "mkl-2021.4.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:636d07d90e68ccc9630c654d47ce9fdeb036bb46e2b193b3a9ac8cfea683cce5"}, - {file = "mkl-2021.4.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:398dbf2b0d12acaf54117a5210e8f191827f373d362d796091d161f610c1ebfb"}, - {file = "mkl-2021.4.0-py2.py3-none-win32.whl", hash = "sha256:439c640b269a5668134e3dcbcea4350459c4a8bc46469669b2d67e07e3d330e8"}, - {file = "mkl-2021.4.0-py2.py3-none-win_amd64.whl", hash = "sha256:ceef3cafce4c009dd25f65d7ad0d833a0fbadc3d8903991ec92351fe5de1e718"}, -] - -[[package]] -name = "mpmath" -version = "1.3.0" -summary = "Python library for arbitrary-precision floating-point arithmetic" -groups = ["default"] -files = [ - {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, - {file = "mpmath-1.3.0.tar.gz", hash = 
"sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, -] - [[package]] name = "multidict" version = "6.0.5" @@ -1641,175 +1457,6 @@ files = [ {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, ] -[[package]] -name = "nvidia-cublas-cu12" -version = "12.1.3.1" -requires_python = ">=3" -summary = "CUBLAS native runtime libraries" -groups = ["default"] -marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" -files = [ - {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:ee53ccca76a6fc08fb9701aa95b6ceb242cdaab118c3bb152af4e579af792728"}, - {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-win_amd64.whl", hash = "sha256:2b964d60e8cf11b5e1073d179d85fa340c120e99b3067558f3cf98dd69d02906"}, -] - -[[package]] -name = "nvidia-cuda-cupti-cu12" -version = "12.1.105" -requires_python = ">=3" -summary = "CUDA profiling tools runtime libs." -groups = ["default"] -marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" -files = [ - {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:e54fde3983165c624cb79254ae9818a456eb6e87a7fd4d56a2352c24ee542d7e"}, - {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:bea8236d13a0ac7190bd2919c3e8e6ce1e402104276e6f9694479e48bb0eb2a4"}, -] - -[[package]] -name = "nvidia-cuda-nvrtc-cu12" -version = "12.1.105" -requires_python = ">=3" -summary = "NVRTC native runtime libraries" -groups = ["default"] -marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" -files = [ - {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:339b385f50c309763ca65456ec75e17bbefcbbf2893f462cb8b90584cd27a1c2"}, - {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:0a98a522d9ff138b96c010a65e145dc1b4850e9ecb75a0172371793752fd46ed"}, -] - -[[package]] -name = "nvidia-cuda-runtime-cu12" -version = "12.1.105" -requires_python = ">=3" -summary = "CUDA Runtime native Libraries" -groups = ["default"] -marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" -files = [ - {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:6e258468ddf5796e25f1dc591a31029fa317d97a0a94ed93468fc86301d61e40"}, - {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:dfb46ef84d73fababab44cf03e3b83f80700d27ca300e537f85f636fac474344"}, -] - -[[package]] -name = "nvidia-cudnn-cu12" -version = "8.9.2.26" -requires_python = ">=3" -summary = "cuDNN runtime libraries" -groups = ["default"] -marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" -dependencies = [ - "nvidia-cublas-cu12", -] -files = [ - {file = "nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl", hash = "sha256:5ccb288774fdfb07a7e7025ffec286971c06d8d7b4fb162525334616d7629ff9"}, -] - -[[package]] -name = "nvidia-cufft-cu12" -version = "11.0.2.54" -requires_python = ">=3" -summary = "CUFFT native runtime libraries" -groups = ["default"] -marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" -files = [ - {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl", hash = "sha256:794e3948a1aa71fd817c3775866943936774d1c14e7628c74f6f7417224cdf56"}, - {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-win_amd64.whl", hash = "sha256:d9ac353f78ff89951da4af698f80870b1534ed69993f10a4cf1d96f21357e253"}, -] - -[[package]] -name = "nvidia-curand-cu12" 
-version = "10.3.2.106" -requires_python = ">=3" -summary = "CURAND native runtime libraries" -groups = ["default"] -marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" -files = [ - {file = "nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:9d264c5036dde4e64f1de8c50ae753237c12e0b1348738169cd0f8a536c0e1e0"}, - {file = "nvidia_curand_cu12-10.3.2.106-py3-none-win_amd64.whl", hash = "sha256:75b6b0c574c0037839121317e17fd01f8a69fd2ef8e25853d826fec30bdba74a"}, -] - -[[package]] -name = "nvidia-cusolver-cu12" -version = "11.4.5.107" -requires_python = ">=3" -summary = "CUDA solver native runtime libraries" -groups = ["default"] -marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" -dependencies = [ - "nvidia-cublas-cu12", - "nvidia-cusparse-cu12", - "nvidia-nvjitlink-cu12", -] -files = [ - {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd"}, - {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-win_amd64.whl", hash = "sha256:74e0c3a24c78612192a74fcd90dd117f1cf21dea4822e66d89e8ea80e3cd2da5"}, -] - -[[package]] -name = "nvidia-cusparse-cu12" -version = "12.1.0.106" -requires_python = ">=3" -summary = "CUSPARSE native runtime libraries" -groups = ["default"] -marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" -dependencies = [ - "nvidia-nvjitlink-cu12", -] -files = [ - {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c"}, - {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-win_amd64.whl", hash = "sha256:b798237e81b9719373e8fae8d4f091b70a0cf09d9d85c95a557e11df2d8e9a5a"}, -] - -[[package]] -name = "nvidia-nccl-cu12" -version = "2.20.5" -requires_python = ">=3" -summary = "NVIDIA Collective Communication Library (NCCL) Runtime" -groups = ["default"] -marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" -files = [ - {file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1fc150d5c3250b170b29410ba682384b14581db722b2531b0d8d33c595f33d01"}, - {file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_x86_64.whl", hash = "sha256:057f6bf9685f75215d0c53bf3ac4a10b3e6578351de307abad9e18a99182af56"}, -] - -[[package]] -name = "nvidia-nvjitlink-cu12" -version = "12.5.40" -requires_python = ">=3" -summary = "Nvidia JIT LTO Library" -groups = ["default"] -marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" -files = [ - {file = "nvidia_nvjitlink_cu12-12.5.40-py3-none-manylinux2014_x86_64.whl", hash = "sha256:d9714f27c1d0f0895cd8915c07a87a1d0029a0aa36acaf9156952ec2a8a12189"}, - {file = "nvidia_nvjitlink_cu12-12.5.40-py3-none-win_amd64.whl", hash = "sha256:c3401dc8543b52d3a8158007a0c1ab4e9c768fcbd24153a48c86972102197ddd"}, -] - -[[package]] -name = "nvidia-nvtx-cu12" -version = "12.1.105" -requires_python = ">=3" -summary = "NVIDIA Tools Extension" -groups = ["default"] -marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\"" -files = [ - {file = "nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:dc21cf308ca5691e7c04d962e213f8a4aa9bbfa23d95412f452254c2caeb09e5"}, - {file = "nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82"}, -] - -[[package]] -name = "omegaconf" -version = "2.3.0" -requires_python = ">=3.6" -summary = "A flexible 
configuration library" -groups = ["default"] -dependencies = [ - "PyYAML>=5.1.0", - "antlr4-python3-runtime==4.9.*", -] -files = [ - {file = "omegaconf-2.3.0-py3-none-any.whl", hash = "sha256:7b4df175cdb08ba400f45cae3bdcae7ba8365db4d165fc65fd04b050ab63b46b"}, - {file = "omegaconf-2.3.0.tar.gz", hash = "sha256:d5d4b6d29955cc50ad50c46dc269bcd92c6e00f5f90d23ab5fee7bfca4ba4cc7"}, -] - [[package]] name = "openai" version = "1.37.2" @@ -2095,6 +1742,15 @@ files = [ {file = "protobuf-4.25.3.tar.gz", hash = "sha256:25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c"}, ] +[[package]] +name = "pyaes" +version = "1.6.1" +summary = "Pure-Python Implementation of the AES block-cipher and common modes of operation" +groups = ["default"] +files = [ + {file = "pyaes-1.6.1.tar.gz", hash = "sha256:02c1b1405c38d3c370b085fb952dd8bea3fadcee6411ad99f312cc129c536d8f"}, +] + [[package]] name = "pyarrow" version = "16.0.0" @@ -2167,85 +1823,124 @@ files = [ [[package]] name = "pydantic" -version = "2.7.1" -requires_python = ">=3.8" +version = "2.11.7" +requires_python = ">=3.9" summary = "Data validation using Python type hints" groups = ["default"] dependencies = [ - "annotated-types>=0.4.0", - "pydantic-core==2.18.2", - "typing-extensions>=4.6.1", + "annotated-types>=0.6.0", + "pydantic-core==2.33.2", + "typing-extensions>=4.12.2", + "typing-inspection>=0.4.0", ] files = [ - {file = "pydantic-2.7.1-py3-none-any.whl", hash = "sha256:e029badca45266732a9a79898a15ae2e8b14840b1eabbb25844be28f0b33f3d5"}, - {file = "pydantic-2.7.1.tar.gz", hash = "sha256:e9dbb5eada8abe4d9ae5f46b9939aead650cd2b68f249bb3a8139dbe125803cc"}, + {file = "pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b"}, + {file = "pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db"}, ] [[package]] name = "pydantic-core" -version = "2.18.2" -requires_python = ">=3.8" +version = "2.33.2" +requires_python = ">=3.9" summary = "Core functionality for Pydantic validation and serialization" groups = ["default"] dependencies = [ "typing-extensions!=4.7.0,>=4.6.0", ] files = [ - {file = "pydantic_core-2.18.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9e08e867b306f525802df7cd16c44ff5ebbe747ff0ca6cf3fde7f36c05a59a81"}, - {file = "pydantic_core-2.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f0a21cbaa69900cbe1a2e7cad2aa74ac3cf21b10c3efb0fa0b80305274c0e8a2"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0680b1f1f11fda801397de52c36ce38ef1c1dc841a0927a94f226dea29c3ae3d"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95b9d5e72481d3780ba3442eac863eae92ae43a5f3adb5b4d0a1de89d42bb250"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fcf5cd9c4b655ad666ca332b9a081112cd7a58a8b5a6ca7a3104bc950f2038"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b5155ff768083cb1d62f3e143b49a8a3432e6789a3abee8acd005c3c7af1c74"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:553ef617b6836fc7e4df130bb851e32fe357ce36336d897fd6646d6058d980af"}, - {file = "pydantic_core-2.18.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b89ed9eb7d616ef5714e5590e6cf7f23b02d0d539767d33561e3675d6f9e3857"}, - {file = 
"pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:75f7e9488238e920ab6204399ded280dc4c307d034f3924cd7f90a38b1829563"}, - {file = "pydantic_core-2.18.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ef26c9e94a8c04a1b2924149a9cb081836913818e55681722d7f29af88fe7b38"}, - {file = "pydantic_core-2.18.2-cp310-none-win32.whl", hash = "sha256:182245ff6b0039e82b6bb585ed55a64d7c81c560715d1bad0cbad6dfa07b4027"}, - {file = "pydantic_core-2.18.2-cp310-none-win_amd64.whl", hash = "sha256:e23ec367a948b6d812301afc1b13f8094ab7b2c280af66ef450efc357d2ae543"}, - {file = "pydantic_core-2.18.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:219da3f096d50a157f33645a1cf31c0ad1fe829a92181dd1311022f986e5fbe3"}, - {file = "pydantic_core-2.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc1cfd88a64e012b74e94cd00bbe0f9c6df57049c97f02bb07d39e9c852e19a4"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b7133a6e6aeb8df37d6f413f7705a37ab4031597f64ab56384c94d98fa0e90"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:224c421235f6102e8737032483f43c1a8cfb1d2f45740c44166219599358c2cd"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b14d82cdb934e99dda6d9d60dc84a24379820176cc4a0d123f88df319ae9c150"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2728b01246a3bba6de144f9e3115b532ee44bd6cf39795194fb75491824a1413"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:470b94480bb5ee929f5acba6995251ada5e059a5ef3e0dfc63cca287283ebfa6"}, - {file = "pydantic_core-2.18.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:997abc4df705d1295a42f95b4eec4950a37ad8ae46d913caeee117b6b198811c"}, - {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75250dbc5290e3f1a0f4618db35e51a165186f9034eff158f3d490b3fed9f8a0"}, - {file = "pydantic_core-2.18.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4456f2dca97c425231d7315737d45239b2b51a50dc2b6f0c2bb181fce6207664"}, - {file = "pydantic_core-2.18.2-cp311-none-win32.whl", hash = "sha256:269322dcc3d8bdb69f054681edff86276b2ff972447863cf34c8b860f5188e2e"}, - {file = "pydantic_core-2.18.2-cp311-none-win_amd64.whl", hash = "sha256:800d60565aec896f25bc3cfa56d2277d52d5182af08162f7954f938c06dc4ee3"}, - {file = "pydantic_core-2.18.2-cp311-none-win_arm64.whl", hash = "sha256:1404c69d6a676245199767ba4f633cce5f4ad4181f9d0ccb0577e1f66cf4c46d"}, - {file = "pydantic_core-2.18.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:fb2bd7be70c0fe4dfd32c951bc813d9fe6ebcbfdd15a07527796c8204bd36242"}, - {file = "pydantic_core-2.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6132dd3bd52838acddca05a72aafb6eab6536aa145e923bb50f45e78b7251043"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d904828195733c183d20a54230c0df0eb46ec746ea1a666730787353e87182"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9bd70772c720142be1020eac55f8143a34ec9f82d75a8e7a07852023e46617f"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8ed04b3582771764538f7ee7001b02e1170223cf9b75dff0bc698fadb00cf3"}, - {file = 
"pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6dac87ddb34aaec85f873d737e9d06a3555a1cc1a8e0c44b7f8d5daeb89d86f"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca4ae5a27ad7a4ee5170aebce1574b375de390bc01284f87b18d43a3984df72"}, - {file = "pydantic_core-2.18.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:886eec03591b7cf058467a70a87733b35f44707bd86cf64a615584fd72488b7c"}, - {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ca7b0c1f1c983e064caa85f3792dd2fe3526b3505378874afa84baf662e12241"}, - {file = "pydantic_core-2.18.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b4356d3538c3649337df4074e81b85f0616b79731fe22dd11b99499b2ebbdf3"}, - {file = "pydantic_core-2.18.2-cp312-none-win32.whl", hash = "sha256:8b172601454f2d7701121bbec3425dd71efcb787a027edf49724c9cefc14c038"}, - {file = "pydantic_core-2.18.2-cp312-none-win_amd64.whl", hash = "sha256:b1bd7e47b1558ea872bd16c8502c414f9e90dcf12f1395129d7bb42a09a95438"}, - {file = "pydantic_core-2.18.2-cp312-none-win_arm64.whl", hash = "sha256:98758d627ff397e752bc339272c14c98199c613f922d4a384ddc07526c86a2ec"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1874c6dd4113308bd0eb568418e6114b252afe44319ead2b4081e9b9521fe75"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:ccdd111c03bfd3666bd2472b674c6899550e09e9f298954cfc896ab92b5b0e6d"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e18609ceaa6eed63753037fc06ebb16041d17d28199ae5aba0052c51449650a9"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e5c584d357c4e2baf0ff7baf44f4994be121e16a2c88918a5817331fc7599d7"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43f0f463cf89ace478de71a318b1b4f05ebc456a9b9300d027b4b57c1a2064fb"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e1b395e58b10b73b07b7cf740d728dd4ff9365ac46c18751bf8b3d8cca8f625a"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0098300eebb1c837271d3d1a2cd2911e7c11b396eac9661655ee524a7f10587b"}, - {file = "pydantic_core-2.18.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:36789b70d613fbac0a25bb07ab3d9dba4d2e38af609c020cf4d888d165ee0bf3"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3f9a801e7c8f1ef8718da265bba008fa121243dfe37c1cea17840b0944dfd72c"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a6515ebc6e69d85502b4951d89131ca4e036078ea35533bb76327f8424531ce"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20aca1e2298c56ececfd8ed159ae4dde2df0781988c97ef77d5c16ff4bd5b400"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:223ee893d77a310a0391dca6df00f70bbc2f36a71a895cecd9a0e762dc37b349"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2334ce8c673ee93a1d6a65bd90327588387ba073c17e61bf19b4fd97d688d63c"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:cbca948f2d14b09d20268cda7b0367723d79063f26c4ffc523af9042cad95592"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b3ef08e20ec49e02d5c6717a91bb5af9b20f1805583cb0adfe9ba2c6b505b5ae"}, - {file = "pydantic_core-2.18.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6fdc8627910eed0c01aed6a390a252fe3ea6d472ee70fdde56273f198938374"}, - {file = "pydantic_core-2.18.2.tar.gz", hash = "sha256:2e29d20810dfc3043ee13ac7d9e25105799817683348823f305ab3f349b9386e"}, + {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, + {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"}, + {file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"}, + {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"}, + {file = "pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"}, + {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"}, + 
{file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"}, + {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"}, +] + +[[package]] +name = "pydantic-settings" +version = "2.10.1" +requires_python = ">=3.9" +summary = "Settings management using Pydantic" +groups = ["default"] +dependencies = [ + "pydantic>=2.7.0", + "python-dotenv>=0.21.0", + "typing-inspection>=0.4.0", +] +files = [ + {file = "pydantic_settings-2.10.1-py3-none-any.whl", hash = "sha256:a60952460b99cf661dc25c29c0ef171721f98bfcb52ef8d9ea4c943d7c8cc796"}, + {file = "pydantic_settings-2.10.1.tar.gz", hash = "sha256:06f0062169818d0f5524420a360d632d5857b83cffd4d42fe29597807a1614ee"}, ] [[package]] @@ -2332,6 +2027,17 @@ files = [ {file = "pyproj-3.6.1.tar.gz", hash = "sha256:44aa7c704c2b7d8fb3d483bbf75af6cb2350d30a63b144279a09b75fead501bf"}, ] +[[package]] +name = "pysocks" +version = "1.7.1" +requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +summary = "A Python SOCKS client module. See https://github.com/Anorov/PySocks for more information." 
+groups = ["default"] +files = [ + {file = "PySocks-1.7.1-py3-none-any.whl", hash = "sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5"}, + {file = "PySocks-1.7.1.tar.gz", hash = "sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0"}, +] + [[package]] name = "pytelegrambotapi" version = "4.21.0" @@ -2360,6 +2066,17 @@ files = [ {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, ] +[[package]] +name = "python-dotenv" +version = "1.1.1" +requires_python = ">=3.9" +summary = "Read key-value pairs from a .env file and set them as environment variables" +groups = ["default"] +files = [ + {file = "python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc"}, + {file = "python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab"}, +] + [[package]] name = "pytz" version = "2024.1" @@ -2403,62 +2120,6 @@ files = [ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, ] -[[package]] -name = "regex" -version = "2024.5.15" -requires_python = ">=3.8" -summary = "Alternative regular expression module, to replace re." -groups = ["default"] -files = [ - {file = "regex-2024.5.15-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a81e3cfbae20378d75185171587cbf756015ccb14840702944f014e0d93ea09f"}, - {file = "regex-2024.5.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7b59138b219ffa8979013be7bc85bb60c6f7b7575df3d56dc1e403a438c7a3f6"}, - {file = "regex-2024.5.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0bd000c6e266927cb7a1bc39d55be95c4b4f65c5be53e659537537e019232b1"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5eaa7ddaf517aa095fa8da0b5015c44d03da83f5bd49c87961e3c997daed0de7"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba68168daedb2c0bab7fd7e00ced5ba90aebf91024dea3c88ad5063c2a562cca"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6e8d717bca3a6e2064fc3a08df5cbe366369f4b052dcd21b7416e6d71620dca1"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1337b7dbef9b2f71121cdbf1e97e40de33ff114801263b275aafd75303bd62b5"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9ebd0a36102fcad2f03696e8af4ae682793a5d30b46c647eaf280d6cfb32796"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9efa1a32ad3a3ea112224897cdaeb6aa00381627f567179c0314f7b65d354c62"}, - {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1595f2d10dff3d805e054ebdc41c124753631b6a471b976963c7b28543cf13b0"}, - {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b802512f3e1f480f41ab5f2cfc0e2f761f08a1f41092d6718868082fc0d27143"}, - {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:a0981022dccabca811e8171f913de05720590c915b033b7e601f35ce4ea7019f"}, - {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:19068a6a79cf99a19ccefa44610491e9ca02c2be3305c7760d3831d38a467a6f"}, - {file = 
"regex-2024.5.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1b5269484f6126eee5e687785e83c6b60aad7663dafe842b34691157e5083e53"}, - {file = "regex-2024.5.15-cp310-cp310-win32.whl", hash = "sha256:ada150c5adfa8fbcbf321c30c751dc67d2f12f15bd183ffe4ec7cde351d945b3"}, - {file = "regex-2024.5.15-cp310-cp310-win_amd64.whl", hash = "sha256:ac394ff680fc46b97487941f5e6ae49a9f30ea41c6c6804832063f14b2a5a145"}, - {file = "regex-2024.5.15-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f5b1dff3ad008dccf18e652283f5e5339d70bf8ba7c98bf848ac33db10f7bc7a"}, - {file = "regex-2024.5.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c6a2b494a76983df8e3d3feea9b9ffdd558b247e60b92f877f93a1ff43d26656"}, - {file = "regex-2024.5.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a32b96f15c8ab2e7d27655969a23895eb799de3665fa94349f3b2fbfd547236f"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10002e86e6068d9e1c91eae8295ef690f02f913c57db120b58fdd35a6bb1af35"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ec54d5afa89c19c6dd8541a133be51ee1017a38b412b1321ccb8d6ddbeb4cf7d"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10e4ce0dca9ae7a66e6089bb29355d4432caed736acae36fef0fdd7879f0b0cb"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e507ff1e74373c4d3038195fdd2af30d297b4f0950eeda6f515ae3d84a1770f"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1f059a4d795e646e1c37665b9d06062c62d0e8cc3c511fe01315973a6542e40"}, - {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0721931ad5fe0dda45d07f9820b90b2148ccdd8e45bb9e9b42a146cb4f695649"}, - {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:833616ddc75ad595dee848ad984d067f2f31be645d603e4d158bba656bbf516c"}, - {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:287eb7f54fc81546346207c533ad3c2c51a8d61075127d7f6d79aaf96cdee890"}, - {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:19dfb1c504781a136a80ecd1fff9f16dddf5bb43cec6871778c8a907a085bb3d"}, - {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:119af6e56dce35e8dfb5222573b50c89e5508d94d55713c75126b753f834de68"}, - {file = "regex-2024.5.15-cp311-cp311-win32.whl", hash = "sha256:1c1c174d6ec38d6c8a7504087358ce9213d4332f6293a94fbf5249992ba54efa"}, - {file = "regex-2024.5.15-cp311-cp311-win_amd64.whl", hash = "sha256:9e717956dcfd656f5055cc70996ee2cc82ac5149517fc8e1b60261b907740201"}, - {file = "regex-2024.5.15-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:632b01153e5248c134007209b5c6348a544ce96c46005d8456de1d552455b014"}, - {file = "regex-2024.5.15-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e64198f6b856d48192bf921421fdd8ad8eb35e179086e99e99f711957ffedd6e"}, - {file = "regex-2024.5.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68811ab14087b2f6e0fc0c2bae9ad689ea3584cad6917fc57be6a48bbd012c49"}, - {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8ec0c2fea1e886a19c3bee0cd19d862b3aa75dcdfb42ebe8ed30708df64687a"}, - {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d0c0c0003c10f54a591d220997dd27d953cd9ccc1a7294b40a4be5312be8797b"}, - 
{file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2431b9e263af1953c55abbd3e2efca67ca80a3de8a0437cb58e2421f8184717a"}, - {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a605586358893b483976cffc1723fb0f83e526e8f14c6e6614e75919d9862cf"}, - {file = "regex-2024.5.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:391d7f7f1e409d192dba8bcd42d3e4cf9e598f3979cdaed6ab11288da88cb9f2"}, - {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9ff11639a8d98969c863d4617595eb5425fd12f7c5ef6621a4b74b71ed8726d5"}, - {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4eee78a04e6c67e8391edd4dad3279828dd66ac4b79570ec998e2155d2e59fd5"}, - {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8fe45aa3f4aa57faabbc9cb46a93363edd6197cbc43523daea044e9ff2fea83e"}, - {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:d0a3d8d6acf0c78a1fff0e210d224b821081330b8524e3e2bc5a68ef6ab5803d"}, - {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c486b4106066d502495b3025a0a7251bf37ea9540433940a23419461ab9f2a80"}, - {file = "regex-2024.5.15-cp312-cp312-win32.whl", hash = "sha256:c49e15eac7c149f3670b3e27f1f28a2c1ddeccd3a2812cba953e01be2ab9b5fe"}, - {file = "regex-2024.5.15-cp312-cp312-win_amd64.whl", hash = "sha256:673b5a6da4557b975c6c90198588181029c60793835ce02f497ea817ff647cb2"}, - {file = "regex-2024.5.15.tar.gz", hash = "sha256:d3ee02d9e5f482cc8309134a91eeaacbdd2261ba111b0fef3748eeb4913e6a2c"}, -] - [[package]] name = "requests" version = "2.32.3" @@ -2519,110 +2180,6 @@ files = [ {file = "s3transfer-0.10.2.tar.gz", hash = "sha256:0711534e9356d3cc692fdde846b4a1e4b0cb6519971860796e6bc4c7aea00ef6"}, ] -[[package]] -name = "safetensors" -version = "0.4.3" -requires_python = ">=3.7" -summary = "" -groups = ["default"] -files = [ - {file = "safetensors-0.4.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:dcf5705cab159ce0130cd56057f5f3425023c407e170bca60b4868048bae64fd"}, - {file = "safetensors-0.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bb4f8c5d0358a31e9a08daeebb68f5e161cdd4018855426d3f0c23bb51087055"}, - {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70a5319ef409e7f88686a46607cbc3c428271069d8b770076feaf913664a07ac"}, - {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fb9c65bd82f9ef3ce4970dc19ee86be5f6f93d032159acf35e663c6bea02b237"}, - {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:edb5698a7bc282089f64c96c477846950358a46ede85a1c040e0230344fdde10"}, - {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:efcc860be094b8d19ac61b452ec635c7acb9afa77beb218b1d7784c6d41fe8ad"}, - {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d88b33980222085dd6001ae2cad87c6068e0991d4f5ccf44975d216db3b57376"}, - {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5fc6775529fb9f0ce2266edd3e5d3f10aab068e49f765e11f6f2a63b5367021d"}, - {file = "safetensors-0.4.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9c6ad011c1b4e3acff058d6b090f1da8e55a332fbf84695cf3100c649cc452d1"}, - {file = 
"safetensors-0.4.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8c496c5401c1b9c46d41a7688e8ff5b0310a3b9bae31ce0f0ae870e1ea2b8caf"}, - {file = "safetensors-0.4.3-cp310-none-win32.whl", hash = "sha256:38e2a8666178224a51cca61d3cb4c88704f696eac8f72a49a598a93bbd8a4af9"}, - {file = "safetensors-0.4.3-cp310-none-win_amd64.whl", hash = "sha256:393e6e391467d1b2b829c77e47d726f3b9b93630e6a045b1d1fca67dc78bf632"}, - {file = "safetensors-0.4.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:22f3b5d65e440cec0de8edaa672efa888030802e11c09b3d6203bff60ebff05a"}, - {file = "safetensors-0.4.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c4fa560ebd4522adddb71dcd25d09bf211b5634003f015a4b815b7647d62ebe"}, - {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9afd5358719f1b2cf425fad638fc3c887997d6782da317096877e5b15b2ce93"}, - {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d8c5093206ef4b198600ae484230402af6713dab1bd5b8e231905d754022bec7"}, - {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0b2104df1579d6ba9052c0ae0e3137c9698b2d85b0645507e6fd1813b70931a"}, - {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8cf18888606dad030455d18f6c381720e57fc6a4170ee1966adb7ebc98d4d6a3"}, - {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0bf4f9d6323d9f86eef5567eabd88f070691cf031d4c0df27a40d3b4aaee755b"}, - {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:585c9ae13a205807b63bef8a37994f30c917ff800ab8a1ca9c9b5d73024f97ee"}, - {file = "safetensors-0.4.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faefeb3b81bdfb4e5a55b9bbdf3d8d8753f65506e1d67d03f5c851a6c87150e9"}, - {file = "safetensors-0.4.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:befdf0167ad626f22f6aac6163477fcefa342224a22f11fdd05abb3995c1783c"}, - {file = "safetensors-0.4.3-cp311-none-win32.whl", hash = "sha256:a7cef55929dcbef24af3eb40bedec35d82c3c2fa46338bb13ecf3c5720af8a61"}, - {file = "safetensors-0.4.3-cp311-none-win_amd64.whl", hash = "sha256:840b7ac0eff5633e1d053cc9db12fdf56b566e9403b4950b2dc85393d9b88d67"}, - {file = "safetensors-0.4.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:22d21760dc6ebae42e9c058d75aa9907d9f35e38f896e3c69ba0e7b213033856"}, - {file = "safetensors-0.4.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d22c1a10dff3f64d0d68abb8298a3fd88ccff79f408a3e15b3e7f637ef5c980"}, - {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1648568667f820b8c48317c7006221dc40aced1869908c187f493838a1362bc"}, - {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:446e9fe52c051aeab12aac63d1017e0f68a02a92a027b901c4f8e931b24e5397"}, - {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fef5d70683643618244a4f5221053567ca3e77c2531e42ad48ae05fae909f542"}, - {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a1f4430cc0c9d6afa01214a4b3919d0a029637df8e09675ceef1ca3f0dfa0df"}, - {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d603846a8585b9432a0fd415db1d4c57c0f860eb4aea21f92559ff9902bae4d"}, - {file = 
"safetensors-0.4.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a844cdb5d7cbc22f5f16c7e2a0271170750763c4db08381b7f696dbd2c78a361"}, - {file = "safetensors-0.4.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:88887f69f7a00cf02b954cdc3034ffb383b2303bc0ab481d4716e2da51ddc10e"}, - {file = "safetensors-0.4.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ee463219d9ec6c2be1d331ab13a8e0cd50d2f32240a81d498266d77d07b7e71e"}, - {file = "safetensors-0.4.3-cp312-none-win32.whl", hash = "sha256:d0dd4a1db09db2dba0f94d15addc7e7cd3a7b0d393aa4c7518c39ae7374623c3"}, - {file = "safetensors-0.4.3-cp312-none-win_amd64.whl", hash = "sha256:d14d30c25897b2bf19b6fb5ff7e26cc40006ad53fd4a88244fdf26517d852dd7"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1b89381517891a7bb7d1405d828b2bf5d75528299f8231e9346b8eba092227f9"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:cd6fff9e56df398abc5866b19a32124815b656613c1c5ec0f9350906fd798aac"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:840caf38d86aa7014fe37ade5d0d84e23dcfbc798b8078015831996ecbc206a3"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9650713b2cfa9537a2baf7dd9fee458b24a0aaaa6cafcea8bdd5fb2b8efdc34"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e4119532cd10dba04b423e0f86aecb96cfa5a602238c0aa012f70c3a40c44b50"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e066e8861eef6387b7c772344d1fe1f9a72800e04ee9a54239d460c400c72aab"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:90964917f5b0fa0fa07e9a051fbef100250c04d150b7026ccbf87a34a54012e0"}, - {file = "safetensors-0.4.3-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c41e1893d1206aa7054029681778d9a58b3529d4c807002c156d58426c225173"}, - {file = "safetensors-0.4.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae7613a119a71a497d012ccc83775c308b9c1dab454806291427f84397d852fd"}, - {file = "safetensors-0.4.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9bac020faba7f5dc481e881b14b6425265feabb5bfc552551d21189c0eddc3"}, - {file = "safetensors-0.4.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:420a98f593ff9930f5822560d14c395ccbc57342ddff3b463bc0b3d6b1951550"}, - {file = "safetensors-0.4.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f5e6883af9a68c0028f70a4c19d5a6ab6238a379be36ad300a22318316c00cb0"}, - {file = "safetensors-0.4.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:cdd0a3b5da66e7f377474599814dbf5cbf135ff059cc73694de129b58a5e8a2c"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9bfb92f82574d9e58401d79c70c716985dc049b635fef6eecbb024c79b2c46ad"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3615a96dd2dcc30eb66d82bc76cda2565f4f7bfa89fcb0e31ba3cea8a1a9ecbb"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:868ad1b6fc41209ab6bd12f63923e8baeb1a086814cb2e81a65ed3d497e0cf8f"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:b7ffba80aa49bd09195145a7fd233a7781173b422eeb995096f2b30591639517"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0acbe31340ab150423347e5b9cc595867d814244ac14218932a5cf1dd38eb39"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19bbdf95de2cf64f25cd614c5236c8b06eb2cfa47cbf64311f4b5d80224623a3"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b852e47eb08475c2c1bd8131207b405793bfc20d6f45aff893d3baaad449ed14"}, - {file = "safetensors-0.4.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5d07cbca5b99babb692d76d8151bec46f461f8ad8daafbfd96b2fca40cadae65"}, - {file = "safetensors-0.4.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1ab6527a20586d94291c96e00a668fa03f86189b8a9defa2cdd34a1a01acc7d5"}, - {file = "safetensors-0.4.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02318f01e332cc23ffb4f6716e05a492c5f18b1d13e343c49265149396284a44"}, - {file = "safetensors-0.4.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec4b52ce9a396260eb9731eb6aea41a7320de22ed73a1042c2230af0212758ce"}, - {file = "safetensors-0.4.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:018b691383026a2436a22b648873ed11444a364324e7088b99cd2503dd828400"}, - {file = "safetensors-0.4.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:309b10dbcab63269ecbf0e2ca10ce59223bb756ca5d431ce9c9eeabd446569da"}, - {file = "safetensors-0.4.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b277482120df46e27a58082df06a15aebda4481e30a1c21eefd0921ae7e03f65"}, - {file = "safetensors-0.4.3.tar.gz", hash = "sha256:2f85fc50c4e07a21e95c24e07460fe6f7e2859d0ce88092838352b798ce711c2"}, -] - -[[package]] -name = "scipy" -version = "1.13.1" -requires_python = ">=3.9" -summary = "Fundamental algorithms for scientific computing in Python" -groups = ["default"] -dependencies = [ - "numpy<2.3,>=1.22.4", -] -files = [ - {file = "scipy-1.13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:20335853b85e9a49ff7572ab453794298bcf0354d8068c5f6775a0eabf350aca"}, - {file = "scipy-1.13.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:d605e9c23906d1994f55ace80e0125c587f96c020037ea6aa98d01b4bd2e222f"}, - {file = "scipy-1.13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cfa31f1def5c819b19ecc3a8b52d28ffdcc7ed52bb20c9a7589669dd3c250989"}, - {file = "scipy-1.13.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26264b282b9da0952a024ae34710c2aff7d27480ee91a2e82b7b7073c24722f"}, - {file = "scipy-1.13.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:eccfa1906eacc02de42d70ef4aecea45415f5be17e72b61bafcfd329bdc52e94"}, - {file = "scipy-1.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:2831f0dc9c5ea9edd6e51e6e769b655f08ec6db6e2e10f86ef39bd32eb11da54"}, - {file = "scipy-1.13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:27e52b09c0d3a1d5b63e1105f24177e544a222b43611aaf5bc44d4a0979e32f9"}, - {file = "scipy-1.13.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:54f430b00f0133e2224c3ba42b805bfd0086fe488835effa33fa291561932326"}, - {file = "scipy-1.13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e89369d27f9e7b0884ae559a3a956e77c02114cc60a6058b4e5011572eea9299"}, - {file = "scipy-1.13.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a78b4b3345f1b6f68a763c6e25c0c9a23a9fd0f39f5f3d200efe8feda560a5fa"}, - {file = "scipy-1.13.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:45484bee6d65633752c490404513b9ef02475b4284c4cfab0ef946def50b3f59"}, - {file = "scipy-1.13.1-cp311-cp311-win_amd64.whl", hash = "sha256:5713f62f781eebd8d597eb3f88b8bf9274e79eeabf63afb4a737abc6c84ad37b"}, - {file = "scipy-1.13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5d72782f39716b2b3509cd7c33cdc08c96f2f4d2b06d51e52fb45a19ca0c86a1"}, - {file = "scipy-1.13.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:017367484ce5498445aade74b1d5ab377acdc65e27095155e448c88497755a5d"}, - {file = "scipy-1.13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:949ae67db5fa78a86e8fa644b9a6b07252f449dcf74247108c50e1d20d2b4627"}, - {file = "scipy-1.13.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de3ade0e53bc1f21358aa74ff4830235d716211d7d077e340c7349bc3542e884"}, - {file = "scipy-1.13.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2ac65fb503dad64218c228e2dc2d0a0193f7904747db43014645ae139c8fad16"}, - {file = "scipy-1.13.1-cp312-cp312-win_amd64.whl", hash = "sha256:cdd7dacfb95fea358916410ec61bbc20440f7860333aee6d882bb8046264e949"}, - {file = "scipy-1.13.1.tar.gz", hash = "sha256:095a87a0312b08dfd6a6155cbbd310a8c51800fc931b8c0b84003014b874ed3c"}, -] - [[package]] name = "shapely" version = "2.0.4" @@ -2712,20 +2269,6 @@ files = [ {file = "svgwrite-1.4.3.zip", hash = "sha256:a8fbdfd4443302a6619a7f76bc937fc683daf2628d9b737c891ec08b8ce524c3"}, ] -[[package]] -name = "sympy" -version = "1.12" -requires_python = ">=3.8" -summary = "Computer algebra system (CAS) in Python" -groups = ["default"] -dependencies = [ - "mpmath>=0.19", -] -files = [ - {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"}, - {file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"}, -] - [[package]] name = "tabulate" version = "0.9.0" @@ -2737,19 +2280,6 @@ files = [ {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, ] -[[package]] -name = "tbb" -version = "2021.12.0" -summary = "Intelยฎ oneAPI Threading Building Blocks (oneTBB)" -groups = ["default"] -marker = "platform_system == \"Windows\"" -files = [ - {file = "tbb-2021.12.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:f2cc9a7f8ababaa506cbff796ce97c3bf91062ba521e15054394f773375d81d8"}, - {file = "tbb-2021.12.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:a925e9a7c77d3a46ae31c34b0bb7f801c4118e857d137b68f68a8e458fcf2bd7"}, - {file = "tbb-2021.12.0-py3-none-win32.whl", hash = "sha256:b1725b30c174048edc8be70bd43bb95473f396ce895d91151a474d0fa9f450a8"}, - {file = "tbb-2021.12.0-py3-none-win_amd64.whl", hash = "sha256:fc2772d850229f2f3df85f1109c4844c495a2db7433d38200959ee9265b34789"}, -] - [[package]] name = "telegramify-markdown" version = "0.1.9" @@ -2766,6 +2296,21 @@ files = [ {file = "telegramify_markdown-0.1.9.tar.gz", hash = "sha256:27828bcc8a68bcfc4ff67f8450c0b5300831f59b2a4b2d8b81ead6e40f00be94"}, ] +[[package]] +name = "telethon" +version = "1.40.0" +requires_python = ">=3.5" +summary = "Full-featured Telegram client library for Python 3" +groups = ["default"] +dependencies = [ + "pyaes", + "rsa", +] +files = [ + {file = "Telethon-1.40.0-py3-none-any.whl", hash = "sha256:146fd4cb2a7afa66bc67f9c2167756096a37b930f65711a3e7399ec9874dcfa7"}, + {file = 
"telethon-1.40.0.tar.gz", hash = "sha256:40e83326877a2e68b754d4b6d0d1ca5ac924110045b039e02660f2d67add97db"}, +] + [[package]] name = "time-machine" version = "2.14.1" @@ -2930,71 +2475,6 @@ files = [ {file = "tokenizers-0.19.1.tar.gz", hash = "sha256:ee59e6680ed0fdbe6b724cf38bd70400a0c1dd623b07ac729087270caeac88e3"}, ] -[[package]] -name = "torch" -version = "2.3.0" -requires_python = ">=3.8.0" -summary = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" -groups = ["default"] -dependencies = [ - "filelock", - "fsspec", - "jinja2", - "mkl<=2021.4.0,>=2021.1.1; platform_system == \"Windows\"", - "networkx", - "nvidia-cublas-cu12==12.1.3.1; platform_system == \"Linux\" and platform_machine == \"x86_64\"", - "nvidia-cuda-cupti-cu12==12.1.105; platform_system == \"Linux\" and platform_machine == \"x86_64\"", - "nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == \"Linux\" and platform_machine == \"x86_64\"", - "nvidia-cuda-runtime-cu12==12.1.105; platform_system == \"Linux\" and platform_machine == \"x86_64\"", - "nvidia-cudnn-cu12==8.9.2.26; platform_system == \"Linux\" and platform_machine == \"x86_64\"", - "nvidia-cufft-cu12==11.0.2.54; platform_system == \"Linux\" and platform_machine == \"x86_64\"", - "nvidia-curand-cu12==10.3.2.106; platform_system == \"Linux\" and platform_machine == \"x86_64\"", - "nvidia-cusolver-cu12==11.4.5.107; platform_system == \"Linux\" and platform_machine == \"x86_64\"", - "nvidia-cusparse-cu12==12.1.0.106; platform_system == \"Linux\" and platform_machine == \"x86_64\"", - "nvidia-nccl-cu12==2.20.5; platform_system == \"Linux\" and platform_machine == \"x86_64\"", - "nvidia-nvtx-cu12==12.1.105; platform_system == \"Linux\" and platform_machine == \"x86_64\"", - "sympy", - "triton==2.3.0; platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.12\"", - "typing-extensions>=4.8.0", -] -files = [ - {file = "torch-2.3.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:d8ea5a465dbfd8501f33c937d1f693176c9aef9d1c1b0ca1d44ed7b0a18c52ac"}, - {file = "torch-2.3.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:09c81c5859a5b819956c6925a405ef1cdda393c9d8a01ce3851453f699d3358c"}, - {file = "torch-2.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:1bf023aa20902586f614f7682fedfa463e773e26c58820b74158a72470259459"}, - {file = "torch-2.3.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:758ef938de87a2653bba74b91f703458c15569f1562bf4b6c63c62d9c5a0c1f5"}, - {file = "torch-2.3.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:493d54ee2f9df100b5ce1d18c96dbb8d14908721f76351e908c9d2622773a788"}, - {file = "torch-2.3.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:bce43af735c3da16cc14c7de2be7ad038e2fbf75654c2e274e575c6c05772ace"}, - {file = "torch-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:729804e97b7cf19ae9ab4181f91f5e612af07956f35c8b2c8e9d9f3596a8e877"}, - {file = "torch-2.3.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:d24e328226d8e2af7cf80fcb1d2f1d108e0de32777fab4aaa2b37b9765d8be73"}, - {file = "torch-2.3.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:b0de2bdc0486ea7b14fc47ff805172df44e421a7318b7c4d92ef589a75d27410"}, - {file = "torch-2.3.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:a306c87a3eead1ed47457822c01dfbd459fe2920f2d38cbdf90de18f23f72542"}, - {file = "torch-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:f9b98bf1a3c8af2d4c41f0bf1433920900896c446d1ddc128290ff146d1eb4bd"}, - {file = "torch-2.3.0-cp312-none-macosx_11_0_arm64.whl", hash = 
"sha256:dca986214267b34065a79000cee54232e62b41dff1ec2cab9abc3fc8b3dee0ad"}, -] - -[[package]] -name = "torchaudio" -version = "2.3.0" -summary = "An audio package for PyTorch" -groups = ["default"] -dependencies = [ - "torch==2.3.0", -] -files = [ - {file = "torchaudio-2.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:342108da83aa19a457c9a128b1206fadb603753b51cca022b9f585aac2f4754c"}, - {file = "torchaudio-2.3.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:73fedb2c631e01fa10feaac308540b836aefe758e55ca3ee026335e5d01e8e30"}, - {file = "torchaudio-2.3.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:e5bb50b7a4874ed97086c9e516dd90b103d954edcb5ed4b36f4fc22c4000a5a7"}, - {file = "torchaudio-2.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:b4cc9cef5c98ed37e9405c4e0b0e6413bc101f3f49d45dc4f1d4e927757fe41e"}, - {file = "torchaudio-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:341ca3048ce6edcc731519b30187f0b13acb245c4efe16f925f69f9d533546e1"}, - {file = "torchaudio-2.3.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:8f2e0a28740bb0ee66369f92c811f33c0a47e6fcfc2de9cee89746472d713906"}, - {file = "torchaudio-2.3.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:61edb02ae9c0efea4399f9c1f899601136b24f35d430548284ea8eaf6ccbe3be"}, - {file = "torchaudio-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:04bc960cf1aef3b469b095a432a25496bc28197850fc2d90b7b52d6b5255487b"}, - {file = "torchaudio-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:535144a2fbba95fbb3b883224ffcf44788e4cecbabbe49c4a1ae3e7a74f71485"}, - {file = "torchaudio-2.3.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:fb3f52ed1d63b272c240d9bf051705312cb172212051b8a6a2f64d42e3cc1633"}, - {file = "torchaudio-2.3.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:668a8b694e5522cff28cd5e02d01aa1b75ce940aa9fb40480892bdc623b1735d"}, - {file = "torchaudio-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:6c1f538018b85d7766835d042e555de2f096f7a69bba6b16031bf42a914dd9e1"}, -] - [[package]] name = "tqdm" version = "4.66.4" @@ -3009,44 +2489,6 @@ files = [ {file = "tqdm-4.66.4.tar.gz", hash = "sha256:e4d936c9de8727928f3be6079590e97d9abfe8d39a590be678eb5919ffc186bb"}, ] -[[package]] -name = "transformers" -version = "4.41.1" -requires_python = ">=3.8.0" -summary = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" -groups = ["default"] -dependencies = [ - "filelock", - "huggingface-hub<1.0,>=0.23.0", - "numpy>=1.17", - "packaging>=20.0", - "pyyaml>=5.1", - "regex!=2019.12.17", - "requests", - "safetensors>=0.4.1", - "tokenizers<0.20,>=0.19", - "tqdm>=4.27", -] -files = [ - {file = "transformers-4.41.1-py3-none-any.whl", hash = "sha256:f0680e0b1a01067eccd11f62f0522409422c7d6f91d532fe0f50b136a406129d"}, - {file = "transformers-4.41.1.tar.gz", hash = "sha256:fa859e4c66f0896633a3bf534e0d9a29a9a88478a49f94c5d8270537dc61cc42"}, -] - -[[package]] -name = "triton" -version = "2.3.0" -summary = "A language and compiler for custom Deep Learning operations" -groups = ["default"] -marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.12\"" -dependencies = [ - "filelock", -] -files = [ - {file = "triton-2.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ce4b8ff70c48e47274c66f269cce8861cf1dc347ceeb7a67414ca151b1822d8"}, - {file = "triton-2.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c3d9607f85103afdb279938fc1dd2a66e4f5999a58eb48a346bd42738f986dd"}, - {file = 
"triton-2.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:218d742e67480d9581bafb73ed598416cc8a56f6316152e5562ee65e33de01c0"}, -] - [[package]] name = "typer" version = "0.12.3" @@ -3080,13 +2522,27 @@ files = [ [[package]] name = "typing-extensions" -version = "4.11.0" -requires_python = ">=3.8" -summary = "Backported and Experimental Type Hints for Python 3.8+" +version = "4.14.1" +requires_python = ">=3.9" +summary = "Backported and Experimental Type Hints for Python 3.9+" groups = ["default"] files = [ - {file = "typing_extensions-4.11.0-py3-none-any.whl", hash = "sha256:c1f94d72897edaf4ce775bb7558d5b79d8126906a14ea5ed1635921406c0387a"}, - {file = "typing_extensions-4.11.0.tar.gz", hash = "sha256:83f085bd5ca59c80295fc2a82ab5dac679cbe02b9f33f7d83af68e241bea51b0"}, + {file = "typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76"}, + {file = "typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36"}, +] + +[[package]] +name = "typing-inspection" +version = "0.4.1" +requires_python = ">=3.9" +summary = "Runtime typing introspection tools" +groups = ["default"] +dependencies = [ + "typing-extensions>=4.12.0", +] +files = [ + {file = "typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51"}, + {file = "typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28"}, ] [[package]] @@ -3149,42 +2605,6 @@ files = [ {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, ] -[[package]] -name = "vector-quantize-pytorch" -version = "1.14.24" -requires_python = ">=3.9" -summary = "Vector Quantization - Pytorch" -groups = ["default"] -dependencies = [ - "einops>=0.8.0", - "einx>=0.2.2", - "torch>=2.0", -] -files = [ - {file = "vector_quantize_pytorch-1.14.24-py3-none-any.whl", hash = "sha256:5c5c0c02d9e8a1b08952254af5d73ebfbf57e64486d8a2e0f1068e4053b62636"}, - {file = "vector_quantize_pytorch-1.14.24.tar.gz", hash = "sha256:8f42d7825582851c3420f8e0967da1d4c3fd8117a494145dfde78b75bf41af26"}, -] - -[[package]] -name = "vocos" -version = "0.1.0" -summary = "Fourier-based neural vocoder for high-quality audio synthesis" -groups = ["default"] -dependencies = [ - "einops", - "encodec==0.1.1", - "huggingface-hub", - "numpy", - "pyyaml", - "scipy", - "torch", - "torchaudio", -] -files = [ - {file = "vocos-0.1.0-py3-none-any.whl", hash = "sha256:0ac13eaef68596074301e912d781399b3defa4b4ca60b6bc52c8a4b9209ca235"}, - {file = "vocos-0.1.0.tar.gz", hash = "sha256:b488224dbe398ff7d4790a027ad659478b4bc02e465db992c62c12b32ca043d8"}, -] - [[package]] name = "webencodings" version = "0.5.1" diff --git a/pyproject.toml b/pyproject.toml index 7bd152c..ad669c7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,6 @@ [project] +name = "tg_bot_collections" # PEP 621 project metadata # See https://www.python.org/dev/peps/pep-0621/ dependencies = [ @@ -17,11 +18,20 @@ dependencies = [ "groq", "together>=1.1.5", "dify-client>=0.1.10", - "chattts-fork>=0.0.1", "expiringdict>=1.2.2", "beautifulsoup4>=4.12.3", "Markdown>=3.6", "cohere>=5.5.8", "kling-creator>=0.0.3", + "pydantic-settings>=2.10.1", + "pydantic>=2.11.7", + "telethon>=1.40.0", + "pysocks>=1.7.1", ] requires-python = ">=3.10" + +[tool.pdm] +distribution = false + +[tool.pdm.scripts] +dev = "python tg.py --debug" \ No newline 
at end of file diff --git a/requirements.txt b/requirements.txt index 4da479c..f4018b2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,7 +5,6 @@ aiohttp==3.9.5 aiosignal==1.3.1 annotated-types==0.6.0 anthropic==0.32.0 -antlr4-python3-runtime==4.9.3 anyio==4.3.0 async-timeout==4.0.3; python_version < "3.11" attrs==23.2.0 @@ -18,7 +17,6 @@ cairosvg==2.7.1 certifi==2024.2.2 cffi==1.16.0 charset-normalizer==3.3.2 -chattts-fork==0.0.8 click==8.1.7 click-plugins==1.1.1 cligj==0.7.2 @@ -31,10 +29,7 @@ cycler==0.12.1 defusedxml==0.7.1 dify-client==0.1.10 distro==1.9.0 -einops==0.8.0 -einx==0.2.2 emoji==2.11.1 -encodec==0.1.1 eval-type-backport==0.2.0 exceptiongroup==1.2.1; python_version < "3.11" expiringdict==1.2.2 @@ -43,13 +38,12 @@ fastavro==1.9.4 filelock==3.14.0 fiona==1.9.6 fonttools==4.51.0 -frozendict==2.4.4 frozenlist==1.4.1 fsspec==2024.3.1 geopandas==0.14.4 github-poster==2.7.4 google-ai-generativelanguage==0.6.6 -google-api-core==2.19.0 +google-api-core[grpc]==2.19.0 google-api-python-client==2.128.0 google-auth==2.29.0 google-auth-httplib2==0.2.0 @@ -65,36 +59,18 @@ httpx==0.27.0 httpx-sse==0.4.0 huggingface-hub==0.23.0 idna==3.7 -intel-openmp==2021.4.0; platform_system == "Windows" -jinja2==3.1.4 jiter==0.5.0 jmespath==1.0.1 kiwisolver==1.4.5 kling-creator==0.3.0 markdown==3.6 markdown-it-py==3.0.0 -markupsafe==2.1.5 matplotlib==3.8.4 mdurl==0.1.2 mistletoe==1.4.0 -mkl==2021.4.0; platform_system == "Windows" -mpmath==1.3.0 multidict==6.0.5 networkx==3.3 numpy==1.26.4 -nvidia-cublas-cu12==12.1.3.1; platform_system == "Linux" and platform_machine == "x86_64" -nvidia-cuda-cupti-cu12==12.1.105; platform_system == "Linux" and platform_machine == "x86_64" -nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == "Linux" and platform_machine == "x86_64" -nvidia-cuda-runtime-cu12==12.1.105; platform_system == "Linux" and platform_machine == "x86_64" -nvidia-cudnn-cu12==8.9.2.26; platform_system == "Linux" and platform_machine == "x86_64" -nvidia-cufft-cu12==11.0.2.54; platform_system == "Linux" and platform_machine == "x86_64" -nvidia-curand-cu12==10.3.2.106; platform_system == "Linux" and platform_machine == "x86_64" -nvidia-cusolver-cu12==11.4.5.107; platform_system == "Linux" and platform_machine == "x86_64" -nvidia-cusparse-cu12==12.1.0.106; platform_system == "Linux" and platform_machine == "x86_64" -nvidia-nccl-cu12==2.20.5; platform_system == "Linux" and platform_machine == "x86_64" -nvidia-nvjitlink-cu12==12.5.40; platform_system == "Linux" and platform_machine == "x86_64" -nvidia-nvtx-cu12==12.1.105; platform_system == "Linux" and platform_machine == "x86_64" -omegaconf==2.3.0 openai==1.37.2 osmnx==1.9.2 packaging==24.0 @@ -106,55 +82,50 @@ platformdirs==4.2.1 prettymapp==0.3.0 proto-plus==1.23.0 protobuf==4.25.3 +pyaes==1.6.1 pyarrow==16.0.0 pyasn1==0.6.0 pyasn1-modules==0.4.0 pycparser==2.22 -pydantic==2.7.1 -pydantic-core==2.18.2 +pydantic==2.11.7 +pydantic-core==2.33.2 +pydantic-settings==2.10.1 pygments==2.18.0 pyogrio==0.7.2 pyparsing==3.1.2 pyproj==3.6.1 +pysocks==1.7.1 pytelegrambotapi==4.21.0 python-dateutil==2.9.0.post0 +python-dotenv==1.1.1 pytz==2024.1 pyyaml==6.0.1 -regex==2024.5.15 requests==2.32.3 rich==13.7.1 rsa==4.9 s3transfer==0.10.2 -safetensors==0.4.3 -scipy==1.13.1 shapely==2.0.4 shellingham==1.5.4 six==1.16.0 sniffio==1.3.1 soupsieve==2.5 svgwrite==1.4.3 -sympy==1.12 tabulate==0.9.0 -tbb==2021.12.0; platform_system == "Windows" telegramify-markdown==0.1.9 +telethon==1.40.0 time-machine==2.14.1; implementation_name != "pypy" tinycss2==1.3.0 
together==1.2.5 tokenizers==0.19.1 -torch==2.3.0 -torchaudio==2.3.0 tqdm==4.66.4 -transformers==4.41.1 -triton==2.3.0; platform_system == "Linux" and platform_machine == "x86_64" and python_version < "3.12" typer==0.12.3 types-requests==2.32.0.20240622 -typing-extensions==4.11.0 +typing-extensions==4.14.1 +typing-inspection==0.4.1 tzdata==2024.1 uritemplate==4.1.1 uritools==4.0.2 urlextract==1.9.0 urllib3==2.2.1 -vector-quantize-pytorch==1.14.24 -vocos==0.1.0 webencodings==0.5.1 yarl==1.9.4 diff --git a/setup.sh b/setup.sh index 87fb4df..09bdc60 100644 --- a/setup.sh +++ b/setup.sh @@ -7,20 +7,20 @@ service_name="tgbotyh" source .env -google_gemini_api_key="${Google_Gemini_API_Key}" -telegram_bot_token="${Telegram_Bot_Token}" -anthropic_api_key="${Anthropic_API_Key}" -openai_api_key="${Openai_API_Key}" -yi_api_key="${Yi_API_Key}" -yi_base_url="${Yi_Base_Url}" +google_gemini_api_key="${GOOGLE_GEMINI_API_KEY}" +telegram_bot_token="${TELEGRAM_BOT_TOKEN}" +anthropic_api_key="${ANTHROPIC_API_KEY}" +openai_api_key="${OPENAI_API_KEY}" +yi_api_key="${YI_API_KEY}" +yi_base_url="${YI_BASE_URL}" -if [ -n "$Python_Bin_Path" ]; then - python_bin_path="$Python_Bin_Path" +if [ -n "$PYTHON_BIN_PATH" ]; then + python_bin_path="$PYTHON_BIN_PATH" fi -if [ -n "$Python_Venv_Path" ]; then - venv_dir="${Python_Venv_Path}" +if [ -n "$PYTHON_VENV_PATH" ]; then + venv_dir="${PYTHON_VENV_PATH}" fi sudoCmd="" diff --git a/tg.py b/tg.py index 528ba88..a6b29e0 100644 --- a/tg.py +++ b/tg.py @@ -1,13 +1,34 @@ import argparse +import logging + from telebot import TeleBot +from config import settings from handlers import list_available_commands, load_handlers +logger = logging.getLogger("bot") + + +def setup_logging(debug: bool): + logger.setLevel(logging.DEBUG if debug else logging.INFO) + handler = logging.StreamHandler() + handler.setFormatter( + logging.Formatter( + "%(asctime)s - [%(levelname)s] - %(filename)s:%(lineno)d - %(message)s" + ) + ) + logger.addHandler(handler) + def main(): # Init args parser = argparse.ArgumentParser() - parser.add_argument("tg_token", help="tg token") + parser.add_argument( + "tg_token", help="tg token", default=settings.telegram_bot_token, nargs="?" + ) + parser.add_argument( + "--debug", "--verbose", "-v", action="store_true", help="Enable debug mode" + ) # 'disable-command' option # The action 'append' will allow multiple entries to be saved into a list @@ -22,15 +43,15 @@ def main(): ) options = parser.parse_args() - print("Arg parse done.") + setup_logging(options.debug) # Init bot bot = TeleBot(options.tg_token) load_handlers(bot, options.disable_commands) - print("Bot init done.") + logger.info("Bot init done.") # Start bot - print("Starting tg collections bot.") + logger.info("Starting tg collections bot.") bot.infinity_polling(timeout=10, long_polling_timeout=5)
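
Usage sketch (editor's note, not part of the patch): with the tg.py changes above, the positional token argument becomes optional and falls back to settings.telegram_bot_token loaded from .env, and debug logging is enabled via the new flag. A minimal example, assuming TELEGRAM_BOT_TOKEN is populated in .env as shown in .env.example:

    # start the bot with debug logging; the token is read from .env
    python tg.py --debug

    # equivalently, via the pdm script added in pyproject.toml
    pdm run dev

    # an explicit token still takes precedence over the .env fallback
    python tg.py "123456:ABC-your-token"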