@@ -27,7 +27,7 @@ TEMPERATURE = float(os.getenv('TEMPERATURE', "1.1"))
 # Initialiser le client OpenAI asynchrone ici
 openai_client = AsyncOpenAI(api_key=OPENAI_API_KEY, base_url=URL_OPENAI_API)

-BOT_VERSION = "2.4.3-penta"
+BOT_VERSION = "2.5.0-penta"

 # Vérifier que les tokens et le prompt de personnalité sont récupérés
 if DISCORD_TOKEN is None or OPENAI_API_KEY is None or DISCORD_CHANNEL_ID is None:
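Note on the client initialised above: AsyncOpenAI is the asynchronous client from the openai Python package (v1+), here pointed at a configurable base_url. Below is a minimal, hypothetical sketch of how such a client is typically awaited for a chat completion; it is not the bot's actual call_openai_api, and the placeholder key, URL, model and prompt are illustrative only.

import asyncio
from openai import AsyncOpenAI

# Illustrative stand-ins for the values read from the environment earlier in the file.
openai_client = AsyncOpenAI(api_key="sk-...", base_url="https://api.openai.com/v1")
MODEL = "gpt-4o"
TEMPERATURE = 1.1

async def demo_completion(user_text: str) -> str:
    # One single-turn chat completion through the async client.
    response = await openai_client.chat.completions.create(
        model=MODEL,
        temperature=TEMPERATURE,
        messages=[{"role": "user", "content": user_text}],
    )
    return response.choices[0].message.content

# Example: asyncio.run(demo_completion("Bonjour"))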
@@ -203,7 +203,6 @@ def calculate_cost(usage, model=MODEL):

     # Obtenir les tarifs du modèle spécifié
     if model not in model_costs:
-        logger.warning(f"Modèle inconnu '{model}'. Utilisation des tarifs par défaut pour 'gpt-4o'.")
         model = 'gpt-4o'

     input_rate = model_costs[model]['input_rate']
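With the warning removed above, any model missing from model_costs now falls back silently to the 'gpt-4o' rates. For orientation, here is a minimal sketch of how a calculate_cost-style helper can combine per-token rates; the 'output_rate' key, the rates themselves and the helper name are assumptions, not taken from this diff.

# Hypothetical per-token rates (expressed per single token for simplicity).
model_costs = {
    'gpt-4o': {'input_rate': 2.50 / 1_000_000, 'output_rate': 10.00 / 1_000_000},
}

def estimate_cost(input_tokens: int, output_tokens: int, model: str = 'gpt-4o') -> float:
    # Unknown models fall back to the 'gpt-4o' rates, mirroring the behaviour above.
    if model not in model_costs:
        model = 'gpt-4o'
    rates = model_costs[model]
    return input_tokens * rates['input_rate'] + output_tokens * rates['output_rate']

# Example: estimate_cost(1200, 350) -> 0.0065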
@@ -434,7 +433,8 @@ async def call_openai_api(user_text, user_name, image_data=None, detail='high'):
         })

         # Afficher dans la console
-        logging.info(f"Coût de la réponse : ${total_cost:.4f} / Input: {input_tokens} / Output: {output_tokens} / Total: {input_tokens + output_tokens}")
+        logging.info("Réponse envoyée.")
+        #logging.info(f"Coût de la réponse : ${total_cost:.4f} / Input: {input_tokens} / Output: {output_tokens} / Total: {input_tokens + output_tokens}")

         return response
     except OpenAIError as e:
@@ -609,7 +609,14 @@ async def on_message(message):
         result = await call_openai_api(user_text, message.author.name, image_data)
         if result:
             reply = result.choices[0].message.content
-            await message.channel.send(reply.rstrip("</s>"))
+            reply = reply.rstrip("</s>")
+
+            if len(reply.split('</think>')) > 1:
+                reply = reply.split('</think>')[1]
+            elif len(reply.split('</response>')) > 1:
+                reply = reply.split('</response>')[1]
+
+            await message.channel.send(reply)

 async def add_to_conversation_history(new_message):
     global conversation_history, last_analysis_index, messages_since_last_analysis
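The new on_message logic trims trailing "</s>" characters from the reply, then keeps only the text after a closing </think> or </response> tag before sending it to the channel. The same post-processing, pulled into a standalone helper for illustration (the helper name and the sample string are mine, not part of the diff):

def clean_reply(raw: str) -> str:
    # Strip trailing characters belonging to an end-of-sequence token such as "</s>".
    reply = raw.rstrip("</s>")

    # If the model emitted a reasoning or wrapper block, keep only what follows the closing tag.
    if len(reply.split('</think>')) > 1:
        reply = reply.split('</think>')[1]
    elif len(reply.split('</response>')) > 1:
        reply = reply.split('</response>')[1]

    return reply

# Example: clean_reply("<think>raisonnement interne</think>Bonjour !</s>") -> "Bonjour !"

Note that rstrip("</s>") removes a trailing run of the characters '<', '/', 's' and '>' rather than the literal substring "</s>".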