import requests
import json
from guessit import guessit
from datetime import datetime
import time

# ✅ API configuration
# SECURITY: real API keys are hardcoded here — move them to environment
# variables (e.g. os.environ) before committing or sharing this file.
ALLDEBRID_API_KEY = "lUCRvOPYXci0DOVAz61u"
TMDB_API_KEY = "ff2a5e4ba27621d899afc6c49a2040e9"
JSON_FILE = "films.json"
PROXY_BASE_URL = "http://ibo.plex-stream.net/darki/V2/Films/debrid.php?id="

# ✅ Maximum number of attempts per HTTP request
MAX_RETRIES = 3

# ✅ Load previously saved films
def load_existing_films():
    """Return the list of films stored in JSON_FILE, or [] if the file is
    missing or not valid JSON."""
    try:
        with open(JSON_FILE, "r", encoding="utf-8") as handle:
            payload = json.load(handle)
    except (FileNotFoundError, json.JSONDecodeError):
        return []
    return payload.get("films", [])

# ✅ Save the films to the JSON file
def save_films(films):
    """Write *films* to JSON_FILE under the {"status", "films"} envelope.

    ensure_ascii=False keeps accented French titles human-readable in the
    file, consistent with the debug dump in get_alldebrid_links (the old
    code escaped them as \\uXXXX sequences).
    """
    with open(JSON_FILE, "w", encoding="utf-8") as f:
        json.dump({"status": "success", "films": films}, f, indent=4, ensure_ascii=False)

# ✅ Extract metadata with GuessIt
def extract_metadata(filename):
    """Return a (title, year, quality) tuple guessed from *filename*."""
    info = guessit(filename)
    return (
        info.get("title", "Inconnu"),
        info.get("year", "Inconnue"),
        info.get("screen_size", "Inconnue"),
    )

# ✅ Perform a GET request with retries
def make_request(url, params=None, headers=None, timeout=5):
    """GET *url* and return the decoded JSON body.

    Retries up to MAX_RETRIES times, sleeping 2s between attempts, and
    returns None once every attempt has failed. The .json() call stays
    inside the try block because requests' JSON decode errors are also
    RequestException subclasses.
    """
    attempt = 0
    while attempt < MAX_RETRIES:
        attempt += 1
        try:
            response = requests.get(url, params=params, headers=headers, timeout=timeout)
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            print(f"⚠️ Erreur de connexion ({url}) : {e} (tentative {attempt}/{MAX_RETRIES})")
            time.sleep(2)
    return None

# ✅ Fetch the user's saved links from AllDebrid
def get_alldebrid_links():
    """Return the list of saved links from the AllDebrid account, or [].

    Returns [] both when the request failed entirely (make_request gave
    None) and when the API answered with an error payload. The old code
    crashed with AttributeError when data was None, because it called
    data.get(...) inside the error branch.
    """
    url = "https://api.alldebrid.com/v4/user/links"
    headers = {"Authorization": f"Bearer {ALLDEBRID_API_KEY}"}
    data = make_request(url, headers=headers)

    print("🔍 Réponse AllDebrid:", json.dumps(data, indent=4, ensure_ascii=False))  # 🛠️ Debug

    if not data:
        # make_request returns None after exhausting its retries.
        print("❌ Erreur AllDebrid : Réponse vide")
        return []
    if "error" in data:
        print(f"❌ Erreur AllDebrid : {data.get('error', {}).get('message', 'Réponse vide')}")
        return []

    return data["data"].get("links", [])

# ✅ Look up a movie on TMDb (French locale)
def get_movie_info(title):
    """Search TMDb for *title* and return a dict with id, title,
    release_date and genres, or None when nothing matches."""
    data = make_request(
        "https://api.themoviedb.org/3/search/movie",
        params={"api_key": TMDB_API_KEY, "query": title, "language": "fr-FR"},
    )

    results = data.get("results") if data else None
    if not results:
        return None

    first_match = results[0]
    return {
        "id": first_match["id"],
        "title": first_match["title"],
        "release_date": first_match.get("release_date", "Inconnue"),
        "genres": get_movie_genres(first_match.get("genre_ids", [])),
    }

# ✅ Resolve TMDb genre IDs to French genre names
def get_movie_genres(genre_ids):
    """Map *genre_ids* to genre names; unknown ids become "Inconnu".

    The id→name table is fetched from TMDb once and memoized on the
    function itself; the old code re-downloaded the full genre list for
    every single film. Returns ["Inconnu"] when the table can't be
    fetched at all (failures are not cached, so a later call may still
    succeed).
    """
    genre_dict = getattr(get_movie_genres, "_cache", None)
    if genre_dict is None:
        url = "https://api.themoviedb.org/3/genre/movie/list"
        params = {"api_key": TMDB_API_KEY, "language": "fr-FR"}
        data = make_request(url, params=params)
        if not (data and "genres" in data):
            return ["Inconnu"]
        genre_dict = {genre["id"]: genre["name"] for genre in data["genres"]}
        get_movie_genres._cache = genre_dict
    return [genre_dict.get(genre_id, "Inconnu") for genre_id in genre_ids]

# ✅ Generate and refresh the JSON catalogue
def generate_json():
    """Rebuild films.json from the links currently saved on AllDebrid.

    Merges new links into existing entries, accumulates quality/url
    variants per film, and prunes urls (and whole films) that no longer
    exist on AllDebrid.

    Fix over the old version: films TMDb could not identify all shared
    tmdb_id None and collided on a single dict slot (and such existing
    films were filtered out at load time, losing their updates). They
    are now keyed by a title-based fallback key instead.
    """
    links = get_alldebrid_links()
    if not links:
        print("❌ Aucun lien trouvé dans AllDebrid.")
        return

    existing_films = load_existing_films()

    def film_key(tmdb_id, title):
        # Use the TMDb id when we have one; otherwise key by title so
        # unidentified films stay distinct entries.
        return tmdb_id if tmdb_id else f"no-tmdb:{title}"

    film_dict = {
        film_key(film.get("tmdb_id"), film.get("title", "")): film
        for film in existing_films
    }

    # Every proxy url seen during this run; anything absent is stale.
    all_proxy_links = set()

    for link in links:
        filename = link["filename"]
        file_id = link["link"].split("/")[-1]
        proxy_link = f"{PROXY_BASE_URL}{file_id}"
        all_proxy_links.add(proxy_link)

        title, year, quality = extract_metadata(filename)
        movie_info = get_movie_info(title)
        tmdb_id = movie_info["id"] if movie_info else None
        display_title = movie_info["title"] if movie_info else title
        key = film_key(tmdb_id, display_title)

        existing_film = film_dict.get(key)
        if existing_film:
            # Known film: just accumulate new quality/url variants,
            # keeping its original date_added.
            if quality not in existing_film["qualities"]:
                existing_film["qualities"].append(quality)
            if proxy_link not in existing_film["urls"]:
                existing_film["urls"].append(proxy_link)
        else:
            film_dict[key] = {
                "category": "film",
                "tmdb_id": tmdb_id,
                "title": display_title,
                "playlist": None,
                # get_movie_info always supplies a "release_date" key.
                "year": movie_info["release_date"][:4] if movie_info else year,
                "genres": movie_info["genres"] if movie_info else ["Inconnu"],
                "qualities": [quality],
                "urls": [proxy_link],
                "date_added": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            }

    # Drop urls that disappeared from AllDebrid, then films left empty.
    for key in list(film_dict.keys()):
        film = film_dict[key]
        film["urls"] = [url for url in film["urls"] if url in all_proxy_links]
        if not film["urls"]:
            del film_dict[key]

    save_films(list(film_dict.values()))
    print("✅ Fichier JSON mis à jour avec succès !")

# ✅ Script entry point: regenerate the catalogue, then echo the result
if __name__ == "__main__":
    generate_json()
    with open(JSON_FILE, "r", encoding="utf-8") as output_file:
        content = output_file.read()
    print(content)
