fix: gpt token

This commit is contained in:
yihong0618
2024-05-18 10:05:36 +08:00
parent 323adb9953
commit 870a1db1fc
5 changed files with 26 additions and 35 deletions

View File

@ -117,14 +117,15 @@ def chatgpt_pro_handler(message: Message, bot: TeleBot) -> None:
reply_id = bot_reply_first(message, who, bot)
player_message.append({"role": "user", "content": m})
# keep the last 5 exchanges; each exchange has two messages: a question and an answer.
if len(player_message) > 10:
# keep the last 3 exchanges; each exchange has two messages: a question and an answer.
# save me some money
if len(player_message) > 6:
player_message = player_message[2:]
try:
r = client.chat.completions.create(
messages=player_message,
max_tokens=2048,
max_tokens=8192,
model=CHATGPT_PRO_MODEL,
stream=True,
)
@ -134,7 +135,7 @@ def chatgpt_pro_handler(message: Message, bot: TeleBot) -> None:
if chunk.choices[0].delta.content is None:
break
s += chunk.choices[0].delta.content
if time.time() - start > 2.0:
if time.time() - start > 1.7:
start = time.time()
bot_reply_markdown(reply_id, who, s, bot, split_text=False)

View File

@ -40,7 +40,7 @@ gemini_file_player_dict = {}
def make_new_gemini_convo(is_pro=False) -> ChatSession:
model_name = "models/gemini-1.0-pro-latest"
model_name = "gemini-1.5-flash-latest"
if is_pro:
model_name = "models/gemini-1.5-pro-latest"