import json
import random  # NEU: Für Zufallswerte
from openai import OpenAI
import os
import sys
import time
import gc  # <--- NEU: Garbage Collector
import sqlite3 # NEU: Für DB Zugriff
import numpy as np
from PIL import Image, ImageEnhance # NEU: ImageEnhance für Sättigung
from moviepy.editor import *
from moviepy.config import change_settings

# --- COMPATIBILITY SHIM FOR PILLOW 10+ (AttributeError: ANTIALIAS) ---
# Pillow 10 removed Image.ANTIALIAS (renamed to LANCZOS), but older MoviePy
# releases still reference it; re-alias it so resize calls keep working.
import PIL.Image
if not hasattr(PIL.Image, 'ANTIALIAS'):
    PIL.Image.ANTIALIAS = PIL.Image.LANCZOS
# ------------------------------------------------------

# ----------------------------------------------------------
# CONFIGURATION & PATHS
# ----------------------------------------------------------

DB_PATH = 'database.sqlite' # path to the SQLite job/shop database

# 1. Absolute path to the ImageMagick binary (required by MoviePy's TextClip)
change_settings({"IMAGEMAGICK_BINARY": "/usr/bin/convert"})

# 2. OpenAI key
# SECURITY FIX: the API key was hard-coded (and therefore leaked) in source.
# Read it from the environment instead: export OPENAI_API_KEY=sk-...
# The previously committed key must be revoked/rotated in the OpenAI dashboard.
client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY")
)

# 3. Fonts & folders
FONT_OUTLINE = r"/var/www/SF-Pro-Rounded-Semibold.otf"  # used for the black outline layer
FONT_MAIN    = r"/var/www/SF-Pro-Display-Semibold.otf"  # used for the white main text layer
EMOJI_FOLDER = "emojis"
EMOJI_JSON   = "./emojis/emojis.json"
# The workflow JSON is now loaded dynamically from the database.

# Caption layout spacing (in pixels)
PADDING_TEXT_TO_EMOJI = 10
PADDING_BETWEEN_EMOJIS = 5


# ==========================================================
# DB & JOB HELPER (NEU)
# ==========================================================

def get_db_connection():
    """Open a connection to the worker database with name-based row access."""
    connection = sqlite3.connect(DB_PATH)
    # sqlite3.Row lets callers read columns as row['name'] instead of by index.
    connection.row_factory = sqlite3.Row
    return connection

def update_job(job_id, status=None, progress=None, log=None, output_file=None):
    """Update a row in the ``jobs`` table.

    Any combination of fields may be supplied; ``updated_at`` is refreshed
    whenever at least one field changes.  Log text is APPENDED to the
    existing ``log_text`` (and echoed to the console).  Database errors are
    printed and swallowed so a transient DB problem never kills the worker.

    Args:
        job_id: Primary key of the job row.
        status: New status string, or None to leave unchanged.
        progress: Progress percentage; checked with ``is not None`` so 0 is valid.
        log: Text to append to the job log.
        output_file: Path of the rendered output file.
    """
    try:
        conn = get_db_connection()
        try:
            c = conn.cursor()

            updates = []
            params = []

            if status:
                updates.append("status = ?")
                params.append(status)
            if progress is not None:
                updates.append("progress = ?")
                params.append(progress)
            if log:
                # Append to the current log text instead of overwriting it.
                c.execute("SELECT log_text FROM jobs WHERE id = ?", (job_id,))
                row = c.fetchone()
                current_log = row['log_text'] if row and row['log_text'] else ""
                new_log = current_log + str(log) + "\n"
                updates.append("log_text = ?")
                params.append(new_log)
                print(f"[JOB {job_id}] {log}") # echo to the console as well
            if output_file:
                updates.append("output_file = ?")
                params.append(output_file)

            if updates:
                updates.append("updated_at = CURRENT_TIMESTAMP")
                query = f"UPDATE jobs SET {', '.join(updates)} WHERE id = ?"
                params.append(job_id)
                c.execute(query, params)
                conn.commit()
        finally:
            # FIX: the original only closed on the success path, leaking the
            # connection whenever a statement raised mid-way.
            conn.close()
    except Exception as e:
        print(f"DB Error: {e}")

def check_cancel(job_id):
    """Return True if the user has set this job's status to 'canceled'."""
    conn = get_db_connection()
    cursor = conn.cursor()
    cursor.execute("SELECT status FROM jobs WHERE id = ?", (job_id,))
    result = cursor.fetchone()
    conn.close()
    return bool(result) and result['status'] == 'canceled'

# ==========================================================
# TEIL 1: KI & TEXT GENERIERUNG
# ==========================================================

def load_json_safe(path):
    try:
        with open(path, "r", encoding="utf-8") as f:
            return json.load(f)
    except UnicodeDecodeError:
        with open(path, "r", encoding="latin-1") as f:
            return json.load(f)

def build_master_prompt(emoji_dict):
    emoji_description_text = "\n".join([f"{name}: {desc}" for name, desc in emoji_dict.items()])
    emoji_names_list = list(emoji_dict.keys())
    emoji_names_str = ", ".join([f'"{n}"' for n in emoji_names_list])

    return f"""
Du bist ein professioneller Werbetexter für extrem kurze Instagram-Reel Hooks.
Schreibe eine Caption: schreib die so, dass sie natürlich wirkt und verkaufspsychologisch Sinn macht.
Maximal 1–2 Zeilen. Modern, TikTok-Style.

REGELN:
- Hooks max 75 Zeichen.
- Call-To-Actions max 25 Zeichen.
- Keine Hashtags.
- Benutz keine Gedankenstriche wie "- –"
- Benutz nicht immer den Produktnamen
- Gib IMMER reines JSON zurück.
- Variiere die Hooks und Call-To-Actions, schreibe NICHT immer das gleiche oder sehr 
- wenn es beispiele in der prompt gibt orientier dich stark an denen. Ähnliches.
  {{
    "text": "...",
    "emojis": ["emoji1.png", "emoji2.png"]
  }}

REGELN FÜR EMOJIS:
- Du darfst AUSSCHLIESSLICH folgende Emoji-Dateien verwenden: {emoji_names_str}
- Bedeutungen: {emoji_description_text}
- Bei "link in bio" oder CTA -> "emojis": []
- Sonst EXAKT zwei passende Emojis auswählen (Dateinamen!).
"""

def ask_chatgpt(instruction, master_prompt):
    """Send *instruction* to the chat model and return the parsed JSON reply.

    Returns ``{"text": "", "emojis": []}`` for blank instructions and on any
    API or JSON-parsing failure, so callers never need their own error path.
    """
    if not instruction.strip():
        return {"text": "", "emojis": []}

    try:
        response = client.chat.completions.create(
            model="gpt-5-chat-latest",
            messages=[
                {"role": "system", "content": master_prompt},
                {"role": "user", "content": instruction},
            ],
            temperature=1.2,
            max_tokens=120
        )
        reply = response.choices[0].message.content
        # Strip markdown code fences the model sometimes wraps around JSON.
        reply = reply.replace("```json", "").replace("```", "").strip()
        return json.loads(reply)
    except Exception as e:
        print(f"   ! Fehler bei ChatGPT oder JSON Parsing: {e}")
        return {"text": "", "emojis": []}

def get_captions_from_data(job_data, emoji_json_path, job_id):
    """Run the AI caption stage for every playlist entry of a job.

    Takes the job payload as a dict (instead of reading it from a file),
    asks the model for a new caption wherever ``ai_prompt`` is set, and
    carries the last generated caption forward to following entries.
    Raises if the user cancels the job mid-way.

    Returns:
        Tuple of (processed playlist entries, audio file path or None).
    """
    emoji_dict = load_json_safe(emoji_json_path)
    audio_file = job_data.get("audio_file")
    master_prompt = build_master_prompt(emoji_dict)

    playlist = job_data["playlist"]
    total_items = len(playlist)

    caption_text = ""
    caption_emojis = []
    processed_playlist = []

    update_job(job_id, log="--- Starte KI Generierung ---")

    for index, entry in enumerate(playlist):
        if check_cancel(job_id):
            raise Exception("Vom User abgebrochen")

        sequence_name = entry["sequence"]
        show_caption = entry.get("show_caption", True)
        prompt = entry.get("ai_prompt", "")

        # Only entries with a prompt get a fresh caption; all others reuse
        # the most recently generated one.
        if prompt.strip():
            result = ask_chatgpt(prompt, master_prompt)
            caption_text = result.get("text", "")
            caption_emojis = result.get("emojis", [])
            update_job(job_id, log=f"[{sequence_name}] Generiert: {caption_text}")

        processed_playlist.append({
            "sequence": sequence_name,
            "video_path": entry["path"],
            "caption": caption_text if show_caption else "",
            "emojis": caption_emojis if show_caption else [],
            "caption_y_pos": entry.get("custom_value", 60),
            "show_caption": show_caption
        })

        # The AI stage maps onto the 0-30% progress range.
        update_job(job_id, progress=int((index / total_items) * 30))

    return processed_playlist, audio_file


# ==========================================================
# TEIL 2: VIDEO RENDERING
# ==========================================================

def trim_transparent_clip(clip):
    """Crop fully transparent borders off a MoviePy clip.

    Reads the alpha mask of the first frame and crops the clip to the
    bounding box of its opaque pixels.  Clips without a mask, or any error
    during inspection, leave the clip unchanged.
    """
    try:
        frame = clip.get_frame(0)

        if clip.mask is None:
            return clip

        alpha = clip.mask.get_frame(0)

        # Mask frames may be float 0..1 or already byte-valued.
        if alpha.max() <= 1.0:
            alpha = (alpha * 255).astype(np.uint8)
        else:
            alpha = alpha.astype(np.uint8)

        # Drop a trailing singleton channel so PIL accepts mode 'L'.
        if len(alpha.shape) == 3:
            alpha = alpha.squeeze()

        rgba = Image.fromarray(frame)
        rgba.putalpha(Image.fromarray(alpha, mode='L'))

        box = rgba.getbbox()
        if box:
            return clip.crop(x1=box[0], y1=box[1], x2=box[2], y2=box[3])

    except Exception as e:
        print(f"Warnung beim Trimmen: {e}")
    return clip

def load_clean_emoji(path, height):
    """Load an emoji PNG as an ImageClip with trimmed borders and clean alpha.

    Crops surrounding empty space and forces fully transparent pixels to
    transparent black so no grey fringes survive compositing.  Returns None
    when the file cannot be found (after also trying the bare file name).
    """
    if not os.path.exists(path):
        fallback = os.path.basename(path)
        if not os.path.exists(fallback):
            print(f"WARNUNG: Emoji nicht gefunden: {path}")
            return None
        path = fallback

    img = Image.open(path).convert("RGBA")

    bbox = img.getbbox()
    if bbox:
        img = img.crop(bbox)

    pixels = np.array(img)
    # Zero out RGB wherever alpha is 0 (removes grey halo pixels).
    fully_transparent = pixels[:, :, 3] == 0
    pixels[fully_transparent] = [0, 0, 0, 0]
    cleaned = Image.fromarray(pixels, "RGBA")

    # The 999 s duration is a placeholder; callers set the real duration.
    emoji_clip = ImageClip(np.array(cleaned)).set_duration(999)
    return emoji_clip.resize(height=height)

def capcut_style_text(text, fontsize=50, stroke=3):
    """Render *text* CapCut-style: white fill with a black outline.

    The outline is faked by stacking eight black copies of the text, offset
    by *stroke* pixels in every direction, underneath the white main layer.
    Falls back to the default font when the custom fonts fail to load.

    Returns:
        A trimmed CompositeVideoClip of the styled text.
    """
    try:
        main = TextClip(txt=text, fontsize=fontsize, font=FONT_MAIN, color="white", method="label")
    except Exception as e:
        print(f"Font Error, nutze Standard. Fehler: {e}")
        main = TextClip(txt=text, fontsize=fontsize, color="white", method="label")

    # Canvas grows by the stroke width on every side.
    final_w = main.w + (stroke * 2)
    final_h = main.h + (stroke * 2)

    offsets = [
        (-stroke, 0), (stroke, 0), (0, -stroke), (0, stroke),
        (-stroke, -stroke), (-stroke, stroke), (stroke, -stroke), (stroke, stroke)
    ]

    shadow_clips = []
    for ox, oy in offsets:
        try:
            shadow = TextClip(txt=text, fontsize=fontsize, font=FONT_OUTLINE, color="black", method="label")
        except Exception:
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; fall back to the default font.
            shadow = TextClip(txt=text, fontsize=fontsize, color="black", method="label")

        shadow = shadow.set_position((ox + stroke, oy + stroke))
        shadow_clips.append(shadow)

    main = main.set_position((stroke, stroke))

    combined = CompositeVideoClip(shadow_clips + [main], size=(final_w, final_h))
    return trim_transparent_clip(combined)

def render_text_with_emojis(text, emoji_list, fontsize=55, max_chars=25, line_spacing=15):
    """Render a multi-line caption with optional emojis on the last line.

    The text is greedily word-wrapped at *max_chars* per line, each line is
    rendered via capcut_style_text, all lines are horizontally centered, and
    the emoji images are appended to the right of the LAST line.

    Args:
        text: Caption string; falsy input returns None.
        emoji_list: Emoji file names, looked up inside EMOJI_FOLDER.
        fontsize: Font size in px; also used as the emoji height.
        max_chars: Maximum characters per wrapped line.
        line_spacing: Vertical gap between lines in px.

    Returns:
        A CompositeVideoClip of the finished caption, or None for empty text.
    """
    if not text:
        return None

    # Greedy word-wrap: keep appending words while the line fits max_chars.
    words = text.split(" ")
    lines = []
    current = ""
    for w in words:
        if len((current + " " + w).strip()) <= max_chars:
            current = (current + " " + w).strip()
        else:
            lines.append(current)
            current = w
    if current: lines.append(current)

    # Render each line and track the widest line / stacked text height.
    rendered_lines = []
    widest_text = 0
    total_text_height = 0

    for ln in lines:
        clip = capcut_style_text(ln, fontsize)
        rendered_lines.append(clip)
        widest_text = max(widest_text, clip.w)
        total_text_height += clip.h + line_spacing

    # Remove the trailing spacing added after the final line.
    total_text_height -= line_spacing 

    # Load the emoji clips (scaled to the font size) and sum their widths.
    emoji_clips = []
    emoji_width_total = 0
    for emoji_name in emoji_list:
        path = os.path.join(EMOJI_FOLDER, emoji_name)
        em = load_clean_emoji(path, fontsize)
        if em:
            emoji_clips.append(em)
            emoji_width_total += em.w + PADDING_BETWEEN_EMOJIS
    
    if emoji_width_total > 0: emoji_width_total -= PADDING_BETWEEN_EMOJIS

    # The last line's effective width includes its trailing emoji row.
    last_line = rendered_lines[-1]
    last_line_width = last_line.w
    full_last_line_width = last_line_width + (PADDING_TEXT_TO_EMOJI + emoji_width_total if emoji_clips else 0)

    PADDING = 15
    final_width = max(widest_text, full_last_line_width) + (PADDING * 2)

    final_clips = []
    y = 0

    # Center every line except the last.
    for clip in rendered_lines[:-1]:
        cx = PADDING + (final_width - (PADDING*2) - clip.w) // 2
        final_clips.append(clip.set_position((cx, y)))
        y += clip.h + line_spacing

    # Center the last line together with its emojis as one combined row.
    last_row_content_width = full_last_line_width
    start_x = PADDING + (final_width - (PADDING*2) - last_row_content_width) // 2
    
    final_clips.append(last_line.set_position((start_x, y)))
    
    # Lay the emojis out left-to-right after the last line of text.
    emoji_x = start_x + last_line_width + PADDING_TEXT_TO_EMOJI
    for em in emoji_clips:
        final_clips.append(em.set_position((emoji_x, y)))
        emoji_x += em.w + PADDING_BETWEEN_EMOJIS

    # The composite height is driven by the tallest element of the last row.
    max_height_last_row = last_line.h
    for em in emoji_clips:
        if em.h > max_height_last_row:
            max_height_last_row = em.h
            
    total_comp_height = int(y + max_height_last_row + 20)
    
    return CompositeVideoClip(final_clips, size=(final_width, total_comp_height))


# ==========================================================
# NEU: RANDOM EFFECTS FUNKTION
# ==========================================================
def apply_random_effects(clip):
    """Apply three randomized, barely noticeable effects to a clip.

    1. Trim up to 0.15 s from the start and from the end.
    2. Zoom in up to 5% (center crop, then scale back to original size).
    3. Shift saturation by a CapCut-style value between -2 and +2.
    """
    # --- 1. Random trim ---
    head = random.uniform(0.0, 0.15)
    tail = random.uniform(0.0, 0.15)

    # Only trim when at least 0.1 s of material would remain.
    if clip.duration > (head + tail + 0.1):
        clip = clip.subclip(head, clip.duration - tail)

    # --- 2. Random zoom (0% - 5%) implemented as crop + resize ---
    zoom = random.uniform(0.0, 0.05)
    if zoom > 0:
        width, height = clip.size

        # Centered crop window at the reduced size.
        cropped_w = int(width * (1 - zoom))
        cropped_h = int(height * (1 - zoom))
        offset_x = int((width - cropped_w) / 2)
        offset_y = int((height - cropped_h) / 2)

        # Crop the borders away, then scale back up to the original size.
        clip = clip.crop(x1=offset_x, y1=offset_y, width=cropped_w, height=cropped_h)
        clip = clip.resize(newsize=(width, height))

    # --- 3. Random saturation shift ---
    capcut_value = random.uniform(-2, 2)            # CapCut-style scale
    saturation_factor = 1.0 + (capcut_value / 100.0)  # maps to 0.98 .. 1.02

    def _tweak_saturation(frame):
        enhanced = ImageEnhance.Color(Image.fromarray(frame)).enhance(saturation_factor)
        return np.array(enhanced)

    return clip.fl_image(_tweak_saturation)


# ==========================================================
# HAUPTPROGRAMM - JETZT ALS JOB PROZESSOR
# ==========================================================

def process_job(job):
    """Process one render job end-to-end.

    Pipeline: AI caption generation -> per-clip random effects & caption
    overlay -> concatenation -> optional background music -> MP4 encode ->
    register the result in the shop's ``videos`` table.  Progress, logs and
    the final status are written to the ``jobs`` table; any error marks the
    job as ``error`` instead of crashing the worker.  The ``finally`` block
    always closes all clips and forces a GC pass to keep RAM bounded.

    Args:
        job: A ``jobs`` table row with at least ``id`` and ``json_data``.
    """
    job_id = job['id']
    json_data = json.loads(job['json_data'])
    
    update_job(job_id, status='processing', progress=5, log="Starte Verarbeitung...")

    output_filename = f"output/final_video_{job_id}_{int(time.time())}.mp4"
    if not os.path.exists("output"): os.makedirs("output")

    # Initialized up front so the finally-block cleanup always has them.
    final_video = None
    final_clips_to_concat = []
    
    try:
        playlist_data, audio_path = get_captions_from_data(json_data, EMOJI_JSON, job_id)
        
        update_job(job_id, log="--- Starte Video Rendering ---")

        total_clips = len(playlist_data)
        
        for index, item in enumerate(playlist_data):
            if check_cancel(job_id): raise Exception("Vom User abgebrochen")

            update_job(job_id, log=f"Bearbeite Sequenz {index+1}/{total_clips}: {item['sequence']}")
            
            try:
                vid_path = item["video_path"]
                # Path fallback in case relative/absolute paths are mixed.
                if not os.path.exists(vid_path) and os.path.exists("uploads/"+os.path.basename(vid_path)):
                    vid_path = "uploads/"+os.path.basename(vid_path)

                # Load the source video
                video = VideoFileClip(vid_path)
            except OSError:
                update_job(job_id, log=f"!!! FEHLER: Video nicht gefunden: {item['video_path']}")
                continue

            # Apply the randomized uniqueness effects
            video = apply_random_effects(video)

            txt_clip = None
            if item["show_caption"] and item["caption"]:
                txt_clip = render_text_with_emojis(item["caption"], item["emojis"], fontsize=55)
            
            if txt_clip:
                y_pos = item["caption_y_pos"]
                txt_clip = txt_clip.set_duration(video.duration).set_position(("center", y_pos))
                combined = CompositeVideoClip([video, txt_clip])
                final_clips_to_concat.append(combined)
                
                # IMPORTANT: close text clips right away to save RAM.
                # NOTE(review): `combined` still references this clip — this
                # relies on CompositeVideoClip not re-reading after close; confirm.
                txt_clip.close() 
            else:
                final_clips_to_concat.append(video)
            
            # The rendering stage maps onto the 30-80% progress range.
            prog = 30 + int((index / total_clips) * 50)
            update_job(job_id, progress=prog)

        if final_clips_to_concat:
            update_job(job_id, progress=85, log="Rendere finales Video (Concatenation)...")
            final_video = concatenate_videoclips(final_clips_to_concat, method="chain")
            
            # --- AUDIO LOGIC ---
            if audio_path:
                aud_path = audio_path
                if not os.path.exists(aud_path) and os.path.exists("uploads/"+os.path.basename(aud_path)):
                    aud_path = "uploads/"+os.path.basename(aud_path)
                
                if os.path.exists(aud_path):
                    try:
                        music = AudioFileClip(aud_path)
                        # Loop the track if it is shorter than the video.
                        if music.duration < final_video.duration:
                            repeats = int(final_video.duration / music.duration) + 1
                            music = concatenate_audioclips([music] * repeats)
                        
                        music = music.set_duration(final_video.duration)
                        
                        # Mix the music under any existing clip audio.
                        if final_video.audio:
                            final_audio = CompositeAudioClip([final_video.audio, music])
                        else:
                            final_audio = music
                            
                        final_video = final_video.set_audio(final_audio)
                    except Exception as e:
                         update_job(job_id, log=f"!!! Fehler beim Audio: {e}")
            # -------------------

            update_job(job_id, progress=90, log=f"Speichere Datei...")
            final_video.write_videofile(
                output_filename,
                fps=30,
                codec="libx264",
                audio_codec="aac",
                bitrate="15M",
                ffmpeg_params=["-preset", "fast", "-pix_fmt", "yuv420p"],
                logger=None 
            )
            
            # Mark the job as finished
            update_job(job_id, status='completed', progress=100, output_file=output_filename, log="Fertiggestellt.")

            # ==========================================================
            # AUTOMATIC ENTRY IN THE VIDEOS TABLE (SHOP)
            # ==========================================================
            shop_id = json_data.get('shop_id')
            
            if shop_id:
                try:
                    conn_vid = get_db_connection()
                    c_vid = conn_vid.cursor()
                    
                    # Create the row: filename, shop_id, status='frei zur nutzung'
                    c_vid.execute("""
                        INSERT INTO videos (filename, shop_id, status, upload_date) 
                        VALUES (?, ?, 'frei zur nutzung', CURRENT_TIMESTAMP)
                    """, (output_filename, shop_id))
                    
                    conn_vid.commit()
                    conn_vid.close()
                    print(f"-> Video automatisch für Shop-ID {shop_id} registriert.")
                    update_job(job_id, log=f"Video wurde erfolgreich im Shop (ID {shop_id}) gespeichert.")
                    
                except Exception as db_e:
                    print(f"Fehler beim Eintragen in videos Tabelle: {db_e}")
                    update_job(job_id, log=f"Warnung: Konnte Video nicht dem Shop zuweisen: {db_e}")
            # ==========================================================

        else:
            update_job(job_id, status='error', log="Keine Clips zum Rendern gefunden.")

    except Exception as e:
        update_job(job_id, status='error', log=f"CRITICAL ERROR: {str(e)}")

    finally:
        # ==========================================
        # MEMORY LEAK CLEANUP
        # ==========================================
        print(f"[Cleanup] Räume RAM für Job {job_id} auf...")
        
        # 1. Close the final video (and its audio, if any)
        if final_video:
            try:
                final_video.close()
                if final_video.audio: final_video.audio.close()
            except: pass

        # 2. Close every individual clip, including composite sub-clips
        for clip in final_clips_to_concat:
            try:
                clip.close()
                if hasattr(clip, 'clips'):
                    for sub in clip.clips:
                        try: sub.close()
                        except: pass
            except: pass
            
        # 3. Drop the references and force the garbage collector to run
        final_video = None
        final_clips_to_concat = []
        gc.collect() 
        print("[Cleanup] Fertig.")

def main_loop():
    """Poll the jobs table forever and process pending jobs one at a time.

    Runs until interrupted with Ctrl+C; sleeps briefly between polls and
    backs off longer after unexpected errors (e.g. DB hiccups).
    """
    print("Worker gestartet (24/7 Modus). Drücke Strg+C zum Beenden.")

    while True:
        try:
            # Fetch the oldest pending job, if any.
            conn = get_db_connection()
            cursor = conn.cursor()
            cursor.execute("SELECT * FROM jobs WHERE status = 'pending' ORDER BY id ASC LIMIT 1")
            pending = cursor.fetchone()
            conn.close()

            if pending is None:
                # Nothing to do — wait briefly, then poll again.
                time.sleep(2)
                continue

            print(f"Job {pending['id']} gefunden. Starte...")
            process_job(pending)

            # Force a GC pass between jobs; since this process runs
            # indefinitely, reclaiming memory here is essential.
            gc.collect()

        except KeyboardInterrupt:
            # Allow a clean Ctrl+C shutdown from the terminal.
            print("\nWorker manuell beendet.")
            break

        except Exception as e:
            print(f"Loop Error: {e}")
            # Back off on errors so the log does not explode.
            time.sleep(5)

# Entry point: start the 24/7 worker loop when run as a script.
if __name__ == "__main__":
    main_loop()