@@ -1,10 +1,16 @@
 import os
 import logging
 import discord
-from dotenv import load_dotenv
-from openai import AsyncOpenAI, OpenAIError
 import json
 import urllib3
+import base64
+import pytz
+
+from dotenv import load_dotenv
+from openai import AsyncOpenAI, OpenAIError
+from PIL import Image
+from io import BytesIO
+from datetime import datetime, timezone, timedelta
 
 # Charger les variables d'environnement depuis le fichier .env
 load_dotenv()
@@ -13,16 +19,16 @@ OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
 DISCORD_CHANNEL_ID = os.getenv('DISCORD_CHANNEL_ID')
 PERSONALITY_PROMPT_FILE = os.getenv('PERSONALITY_PROMPT_FILE', 'personality_prompt.txt')
 CONVERSATION_HISTORY_FILE = os.getenv('CONVERSATION_HISTORY_FILE', 'conversation_history.json')
-CONVERSATION_HISTORY_SIZE = int(os.getenv('CONVERSATION_HISTORY_SIZE', '50'))
+CONVERSATION_HISTORY_SIZE = int(os.getenv('CONVERSATION_HISTORY_SIZE', '100'))
 BOT_NAME = os.getenv('BOT_NAME', 'ChatBot')
-MODEL = os.getenv('MODEL', 'gpt-4')
+MODEL = os.getenv('MODEL', 'llama:3.2')
 URL_OPENAI_API = os.getenv('URL_OPENAI_API', 'http://localai.localai.svc.cluster.local:8080/v1')
 TEMPERATURE = float(os.getenv('TEMPERATURE', "1.0"))
 
 # Initialiser le client OpenAI asynchrone ici
 openai_client = AsyncOpenAI(api_key=OPENAI_API_KEY, base_url=URL_OPENAI_API)
 
-BOT_VERSION = "2.7.0-penta"
+BOT_VERSION = "2.8.0-beta"
 
 # Vérifier que les tokens et le prompt de personnalité sont récupérés
 if DISCORD_TOKEN is None or OPENAI_API_KEY is None or DISCORD_CHANNEL_ID is None:
@@ -40,16 +46,16 @@ log_format = '%(asctime)-13s : %(name)-15s : %(levelname)-8s : %(message)s'
 logging.basicConfig(handlers=[logging.FileHandler("./chatbot.log", 'a', 'utf-8')], format=log_format, level="INFO")
 
 console = logging.StreamHandler()
-console.setLevel(logging.INFO)
+console.setLevel(logging.DEBUG)
 console.setFormatter(logging.Formatter(log_format))
 
 logger = logging.getLogger(BOT_NAME)
-logger.setLevel("INFO")
+logger.setLevel("DEBUG")
 
 logging.getLogger('').addHandler(console)
 
 httpx_logger = logging.getLogger('httpx')
-httpx_logger.setLevel(logging.WARNING)
+httpx_logger.setLevel(logging.DEBUG)
 
 urllib3.disable_warnings()
 
@@ -146,6 +152,52 @@ def has_text(text):
     """
     return bool(text.strip())
 
+def resize_image(image_bytes, mode='high', attachment_filename=None):
+    """Redimensionne l'image selon le mode spécifié."""
+    try:
+        with Image.open(BytesIO(image_bytes)) as img:
+            original_format = img.format # Stocker le format original
+
+            if mode == 'high':
+                img.thumbnail((2000, 2000))
+                if min(img.size) < 768:
+                    scale = 768 / min(img.size)
+                    new_size = tuple(int(x * scale) for x in img.size)
+                    img = img.resize(new_size, Image.Resampling.LANCZOS)
+            elif mode == 'low':
+                img = img.resize((512, 512))
+
+            buffer = BytesIO()
+            img_format = img.format or _infer_image_format(attachment_filename)
+            img.save(buffer, format=img_format)
+            return buffer.getvalue()
+    except Exception as e:
+        logger.error(f"Erreur lors du redimensionnement de l'image : {e}")
+        raise
+
+async def encode_image_from_attachment(attachment, mode='high'):
+    """Encode une image depuis une pièce jointe en base64 après redimensionnement."""
+    image_data = await attachment.read()
+    resized_image = resize_image(image_data, mode=mode, attachment_filename=attachment.filename)
+    return base64.b64encode(resized_image).decode('utf-8')
+
+def _infer_image_format(filename):
+    """Déduit le format de l'image basé sur l'extension du fichier."""
+    if filename:
+        _, ext = os.path.splitext(filename)
+        ext = ext.lower()
+        format_mapping = {
+            '.jpg': 'JPEG',
+            '.jpeg': 'JPEG',
+            '.png': 'PNG',
+            '.gif': 'GIF',
+            '.bmp': 'BMP',
+            '.tiff': 'TIFF'
+        }
+
+        return format_mapping.get(ext, 'PNG')
+    return 'PNG'
+
 # Fonction de sauvegarde de l'historique
 def save_conversation_history():
     try:
@@ -156,7 +208,7 @@ def save_conversation_history():
 
 # Convertir l'ID du channel en entier
 try:
-    chatgpt_channel_id = int(DISCORD_CHANNEL_ID)
+    discord_channel_id = int(DISCORD_CHANNEL_ID)
 except ValueError:
     raise ValueError("L'ID du channel Discord est invalide. Assurez-vous qu'il s'agit d'un entier.")
 
@@ -174,28 +226,45 @@ client_discord = MyDiscordClient(intents=intents)
 # Appeler la fonction pour charger l'historique au démarrage
 load_conversation_history()
 
-def extract_text_from_message(message):
-    content = message.get("content", "")
-    if isinstance(content, list):
-        # Extraire le texte de chaque élément de la liste
-        texts = []
-        for part in content:
-            if isinstance(part, dict):
-                text = part.get("text", "")
-                if text:
-                    texts.append(text)
-        return ' '.join(texts)
-    elif isinstance(content, str):
-        return content
+async def call_for_image_analysis(image_data, user_name, user_text=None, detail='high'):
+    """Appelle l'API pour analyser une image."""
+    prompt = PERSONALITY_PROMPT
+    if user_text:
+        user_content = f"{user_name} a envoyé une image avec le message : \"{user_text}\"."
     else:
-        return ""
+        user_content = f"{user_name} a envoyé une image:"
+
+    prompt += user_content
+
+    message_to_send = {
+        "role": "user",
+        "content": [
+            {"type": "text", "text": prompt},
+            {
+                "type": "image_url",
+                "image_url": {
+                    "url": f"data:image/jpeg;base64,{image_data}",
+                    "detail": detail
+                }
+            }
+        ]
+    }
 
-async def read_text_file(attachment):
-    file_bytes = await attachment.read()
-    return file_bytes.decode('utf-8')
+    user_message = {"role": "user", "content": user_content}
+    messages = [user_message] + [message_to_send] + conversation_history
 
-async def call_openai_api(user_text, user_name, detail='high'):
+    analysis = await openai_client.chat.completions.create(
+        model=MODEL,
+        messages=messages,
+        temperature=TEMPERATURE
+    )
 
+    if analysis:
+        logger.info(f"Analyse de l'image par l'API : {analysis.choices[0].message.content}")
+        return analysis.choices[0].message.content
+
+
+async def call_openai_api(user_text, user_name, detail='high'):
     # Préparer le contenu pour l'appel API
     message_to_send = {
         "role": "user",
@@ -242,7 +311,7 @@ async def on_ready():
         logger.info("Aucun historique trouvé. L'historique commence vide.")
 
     # Envoyer un message de version dans le canal Discord
-    channel = client_discord.get_channel(chatgpt_channel_id)
+    channel = client_discord.get_channel(discord_channel_id)
     if channel:
         try:
             embed = discord.Embed(
@@ -251,20 +320,20 @@ async def on_ready():
                 color=0x2222aa # Bleu
             )
             await channel.send(embed=embed)
-            logger.info(f"Message de connexion envoyé dans le canal ID {chatgpt_channel_id}")
+            logger.info(f"Message de connexion envoyé dans le canal ID {discord_channel_id}")
         except discord.Forbidden:
-            logger.error(f"Permissions insuffisantes pour envoyer des messages dans le canal ID {chatgpt_channel_id}.")
+            logger.error(f"Permissions insuffisantes pour envoyer des messages dans le canal ID {discord_channel_id}.")
         except discord.HTTPException as e:
             logger.error(f"Erreur lors de l'envoi du message de connexion : {e}")
     else:
-        logger.error(f"Canal avec ID {chatgpt_channel_id} non trouvé.")
+        logger.error(f"Canal avec ID {discord_channel_id} non trouvé.")
 
 @client_discord.event
 async def on_message(message):
     global conversation_history
 
     # Vérifier si le message provient du canal autorisé
-    if message.channel.id != chatgpt_channel_id:
+    if message.channel.id != discord_channel_id:
         return
 
     # Ignorer les messages du bot lui-même
@@ -293,6 +362,7 @@ async def on_message(message):
 
     # Variable pour stocker si le message contient un fichier
     has_file = False
+    image_data = None
 
     # Vérifier s'il y a une pièce jointe
     if message.attachments:
@@ -302,6 +372,54 @@ async def on_message(message):
                 file_content = await read_text_file(attachment)
                 attachment_filename = attachment.filename
                 break
+            elif attachment.content_type and attachment.content_type.startswith('image/'):
+                image_data = await encode_image_from_attachment(attachment, mode='high')
+                break
+
+    # Traitement des images
+    if image_data:
+        logger.debug(image_data)
+        has_user_text = has_text(user_text)
+        user_text_to_use = user_text if has_user_text else None
+
+        temp_msg = await message.channel.send(f"*{BOT_NAME} observe l'image...*")
+
+        try:
+            # Analyser l'image avec l'API
+            analysis = await call_for_image_analysis(image_data, message.author.name, user_text=user_text_to_use)
+
+            if analysis:
+                await temp_msg.delete()
+                await message.channel.send(analysis)
+
+
+                if has_user_text:
+                    user_message_text = f"{message.author.name} a envoyé une image avec le message : \"{user_text}\"."
+                else:
+                    user_message_text = f"{message.author.name} a envoyé une image."
+
+                user_message = {
+                    "role": "user",
+                    "content": f"{user_message_text}"
+                }
+
+                assistant_message = {
+                    "role": "assistant",
+                    "content": analysis
+                }
+
+                await add_to_conversation_history(user_message)
+                await add_to_conversation_history(assistant_message)
+            else:
+                await temp_msg.delete()
+                await message.channel.send("Désolé, je n'ai pas pu analyser l'image.")
+
+        except Exception as e:
+            await temp_msg.delete()
+            await message.channel.send("Une erreur est survenue lors du traitement de l'image.")
+            logger.error(f"Erreur lors du traitement de l'image: {e}")
+
+        return # Ne pas continuer le traitement après une image
 
     # Ajouter le contenu du fichier à la requête si présent
     if file_content:
@@ -313,7 +431,7 @@ async def on_message(message):
 
     async with message.channel.typing():
         try:
-            # Appeler l'API OpenAI
+            # Appeler l'API
             result = await call_openai_api(user_text, message.author.name)
 
             if result: