#!/usr/bin/env python3
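"""Provision: LLM-powered server setup.

Reads a Markdown configuration (local path or URL), asks an LLM provider
(Anthropic, OpenAI, or Google) to turn it into an ordered task plan, then
executes the generated shell commands with verification and a self-repair
loop on failure.

Example invocation (script and config file names are illustrative):

    python3 provision.py --config ./server.md --api-key sk-... --yes

Use --plan to preview the task list without executing, or --mock to exercise
the flow with canned LLM responses.
"""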
import sys
import os
import subprocess
import argparse
import json
import time
import platform

# --- BOOTSTRAP ---
def bootstrap():
    """Ensure python 3.7+ and requests are available."""
    if sys.version_info < (3, 7):
        print("Error: Python 3.7+ is required.", flush=True)
        sys.exit(1)
    
    try:
        import requests
        return requests
    except ImportError:
        print("Installing requests module...", flush=True)
        try:
            subprocess.run([sys.executable, "-m", "pip", "install", "requests", "--break-system-packages", "-q"], check=True)
        except subprocess.CalledProcessError:
            # Older pip versions don't know --break-system-packages; retry without it.
            print("Trying older pip install...", flush=True)
            subprocess.run([sys.executable, "-m", "pip", "install", "requests", "-q"], check=True)
        # Make the freshly installed package visible to this interpreter
        import importlib
        importlib.invalidate_caches()
        import requests
        return requests

# --- CONFIG ---
def get_config(source):
    """Retrieve configuration from URL or local file."""
    print(f"Reading config from {source}...", flush=True)
    requests = bootstrap()
    if source.startswith("http://") or source.startswith("https://"):
        print(f"Downloading configuration from {source}...", flush=True)
        try:
            response = requests.get(source, timeout=10)
            response.raise_for_status()
            return response.text
        except requests.RequestException as e:
            raise ValueError(f"Error downloading config: {e}")
    else:
        if not os.path.exists(source):
            raise ValueError(f"Error: Configuration file not found at {source}")
        with open(source, "r") as f:
            return f.read()

# --- MODELS CONFIG ---
def get_models_config():
    """Retrieve models configuration from local file or default URL."""
    config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models.json")
    default_url = "https://provision.sh/models.json"
    
    if os.path.exists(config_path):
        #print(f"Loading models config from {config_path}...", flush=True)
        try:
            with open(config_path, "r") as f:
                return json.load(f)
        except json.JSONDecodeError as e:
            print(f"Warning: Error parsing local models.json: {e}", flush=True)
            # Fallthrough to download
    
    # print(f"Downloading models config from {default_url}...", flush=True)
    requests = bootstrap()
    try:
        response = requests.get(default_url, timeout=5)
        response.raise_for_status()
        return response.json()
    except Exception as e:
        print(f"Warning: Could not load models config from {default_url}: {e}", flush=True)
        return {}

# --- VARIABLES ---
def parse_and_prompt_variables(markdown_content):
    """Scan for {{VAR}} and {{secret:VAR}}, prompt user, substitute."""
    import re
    
    # Regex for {{VARIABLE}} and {{secret:VARIABLE}}
    # Captures the type (secret: or empty) and the variable name
    pattern = re.compile(r'\{\{(?:(secret):)?([A-Z0-9_]+)\}\}')
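    # e.g. "{{HOSTNAME}}" -> ('', 'HOSTNAME'); "{{secret:DB_PASSWORD}}" -> ('secret', 'DB_PASSWORD')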
    
    variables = {}
    matches = pattern.findall(markdown_content)
    
    # Preserve order, unique keys only
    ordered_vars = []
    seen = set()
    for is_secret, var_name in matches:
        if var_name not in seen:
            ordered_vars.append((is_secret, var_name))
            seen.add(var_name)
            
    if not ordered_vars:
        return markdown_content, {}

    print(f"\nFound {len(ordered_vars)} variables:", flush=True)
    
    replacements = {}
    secrets = {}  # placeholder -> secret value; substituted only when commands run
    
    for is_secret, var_name in ordered_vars:
        prompt_text = f"  {var_name}: "
        if is_secret:
            val = safe_getpass(prompt_text)
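            # Secrets are replaced with a placeholder in the markdown sent to the
            # LLM; the real value is only substituted into commands at execution time.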
            placeholder = f"__SECRET_{var_name}__"
            replacements[f"{{{{secret:{var_name}}}}}"] = placeholder
            secrets[placeholder] = val
        else:
            val = safe_input(prompt_text)
            replacements[f"{{{{{var_name}}}}}"] = val
            
    # Substitute in markdown
    substituted_markdown = markdown_content
    for target, value in replacements.items():
        substituted_markdown = substituted_markdown.replace(target, value)
        
    return substituted_markdown, secrets

# --- LLM CLIENT ---
def llm_call(prompt, system_instruction=None, model=None, api_key=None, provider="anthropic", mock=False, models_config=None):
    """Call LLM API."""
    if models_config is None:
        models_config = {}

    if mock:
        print(f"[MOCK] LLM Call (Provider: {provider})", flush=True)
        # Simple heuristic mock responses based on prompt content
        if "Parse the provided Markdown configuration" in (system_instruction or ""):
            print("[MOCK] Returning planned tasks", flush=True)
            return json.dumps([
                {"id": 1, "task": "Update apt package cache", "section": "System"},
                {"id": 2, "task": "Install curl", "section": "System"}
            ])
        elif "Generate the shell commands" in prompt:
             print("[MOCK] Returning commands for task", flush=True)
             if "Task: Update apt package cache" in prompt:
                 return json.dumps({
                     "commands": ["apt-get update"],
                     "verify": "true"
                 })
             elif "Task: Install curl" in prompt:
                 return json.dumps({
                     "commands": ["apt-get install -y curl"],
                     "verify": "command -v curl"
                 })
             else:
                 return json.dumps({
                     "commands": ["/bin/echo Mock command for unknown task"],
                     "verify": "true"
                 })
        elif "Diagnose the problem" in prompt:
            print("[MOCK] Returning self-repair fix", flush=True)
            return json.dumps({
                "action": "fix",
                "commands": ["/bin/echo Fixed"],
                "retry_original": True
            })
        return "Mock response"

    requests = bootstrap()
    
    if provider == "anthropic":
        if not model: model = models_config.get("anthropic", {}).get("default_model", "claude-3-5-sonnet-20240620")
        url = models_config.get("anthropic", {}).get("base_url", "https://api.anthropic.com/v1/messages")
        headers = {
            "x-api-key": api_key,
            "anthropic-version": "2023-06-01",
            "content-type": "application/json"
        }
        data = {
            "model": model,
            "max_tokens": 4096,
            "messages": [{"role": "user", "content": prompt}]
        }
        if system_instruction:
            data["system"] = system_instruction
            
        try:
            response = requests.post(url, headers=headers, json=data, timeout=60)
            response.raise_for_status()
            result = response.json()
            return result["content"][0]["text"]
        except requests.RequestException as e:
            print(f"LLM API Error: {e}", flush=True)
            if hasattr(e, 'response') and e.response is not None:
                 print(f"Response: {e.response.text}", flush=True)
            sys.exit(1)
            
    elif provider == "openai":
         if not model: model = models_config.get("openai", {}).get("default_model", "gpt-4o")
         url = models_config.get("openai", {}).get("base_url", "https://api.openai.com/v1/chat/completions")
         headers = {
             "Authorization": f"Bearer {api_key}",
             "Content-Type": "application/json"
         }
         messages = []
         if system_instruction:
             messages.append({"role": "system", "content": system_instruction})
         messages.append({"role": "user", "content": prompt})
         
         data = {
             "model": model,
             "messages": messages,
             "temperature": 0
         }
         try:
            response = requests.post(url, headers=headers, json=data, timeout=60)
            response.raise_for_status()
            return response.json()["choices"][0]["message"]["content"]
         except requests.RequestException as e:
            print(f"LLM API Error: {e}", flush=True)
            if hasattr(e, 'response') and e.response is not None:
                 print(f"Response: {e.response.text}", flush=True)
            sys.exit(1)

    elif provider == "google":
         # Simple implementation for Gemini API via REST
         if not model: model = models_config.get("google", {}).get("default_model", "gemini-1.5-pro")
         base_url = models_config.get("google", {}).get("base_url", "https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent")
         url = f"{base_url.replace('{model}', model)}?key={api_key}"
         headers = {"Content-Type": "application/json"}
         
         contents = []
         if system_instruction:
             # Gemini REST doesn't have a separate system field in the same way, usually prepended or sent as part of content
             # For simplicity, we'll prepend it to the prompt here or use the system_instruction if using the sophisticated client
             # But for single-file requests usage, prepending is safer.
             actual_prompt = f"System: {system_instruction}\n\nUser: {prompt}"
             contents.append({"parts": [{"text": actual_prompt}]})
         else:
             contents.append({"parts": [{"text": prompt}]})
             
         data = {"contents": contents}
         
         try:
            response = requests.post(url, headers=headers, json=data, timeout=60)
            response.raise_for_status()
            return response.json()["candidates"][0]["content"]["parts"][0]["text"]
         except requests.RequestException as e:
            print(f"LLM API Error: {e}", flush=True)
            sys.exit(1)
            
    else:
        print(f"Error: Unknown provider {provider}", flush=True)
        sys.exit(1)

# --- UTILS ---
def detect_provider(api_key):
    """Detect LLM provider from API key prefix."""
    if not api_key:
        return None
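    # Check the more specific Anthropic prefix before the generic "sk-" prefix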
    if api_key.startswith("sk-ant-"):
        return "anthropic"
    if api_key.startswith("sk-"):
        return "openai"
    if api_key.startswith("AIza"):
        return "google"
    return None

def safe_input(prompt=""):
    """Read input from TTY even if stdin is a pipe (curl | python)."""
    if sys.stdin.isatty():
        return input(prompt)
    
    # Stdin is not a TTY (likely a pipe). Try to open /dev/tty.
    try:
        # Use separate handles for reading and writing to avoid r+ issues
        with open("/dev/tty", "w") as tty_out, open("/dev/tty", "r") as tty_in:
            if prompt:
                tty_out.write(prompt)
                tty_out.flush()
            return tty_in.readline().rstrip('\n')
    except OSError:
        return ""

def safe_getpass(prompt="Password: "):
    """Read password from TTY even if stdin is a pipe, disabling echo."""
    # If stdin is a TTY, use standard getpass
    if sys.stdin.isatty():
        import getpass
        return getpass.getpass(prompt)
    
    # Try /dev/tty
    try:
        import termios
        # Open /dev/tty in r+ mode so one file descriptor serves the termios
        # echo toggling, the prompt write, and the password read.
        with open("/dev/tty", "r+") as tty:
            fd = tty.fileno()
            old = termios.tcgetattr(fd)
            new = termios.tcgetattr(fd)
            # Turn off ECHO
            new[3] = new[3] & ~termios.ECHO
            try:
                termios.tcsetattr(fd, termios.TCSADRAIN, new)
                if prompt:
                    tty.write(prompt)
                    tty.flush()
                passwd = tty.readline().rstrip('\n')
                tty.write('\n') # Input doesn't echo newline, so add one
                return passwd
            finally:
                termios.tcsetattr(fd, termios.TCSADRAIN, old)
    except (OSError, ImportError):
        # Fallback
        import getpass
        return getpass.getpass(prompt)

# --- PLANNER ---
def plan_tasks(markdown_content, api_key, provider, distro_info, mock=False, models_config=None):
    """Convert markdown to ordered task list."""
    system_prompt = f"""You are a system administration assistant.
    You are configuring a system running: {distro_info}

    Parse the provided Markdown configuration into an ordered list of discrete tasks.
    Respond with a JSON array of objects:
    [
      {{"id": 1, "task": "Update package cache", "section": "System"}},
      ...
    ]
    Only JSON. No commentary."""
    
    prompt = f"Markdown configuration:\n\n---\n{markdown_content}\n---"
    
    response_text = llm_call(prompt, system_instruction=system_prompt, api_key=api_key, provider=provider, mock=mock, models_config=models_config)
    
    # Defensive JSON parsing
    try:
        # Strip code fences if present
        if "```json" in response_text:
            response_text = response_text.split("```json")[1].split("```")[0].strip()
        elif "```" in response_text:
            response_text = response_text.split("```")[1].split("```")[0].strip()
            
        tasks = json.loads(response_text)
        return tasks
    except json.JSONDecodeError:
        print("Error parsing LLM response as JSON.", flush=True)
        print("Response was:", response_text, flush=True)
        sys.exit(1)


# --- EXECUTOR ---
def detect_system_info():
    """Detect the OS, distro, and version."""
    system = platform.system()
    
    if system == "Darwin":
        try:
            mac_ver = platform.mac_ver()
            return f"macOS {mac_ver[0]} ({platform.platform()})"
        except Exception:
            return "macOS"
            
    elif system == "Linux":
        # Try /etc/os-release
        try:
            with open("/etc/os-release") as f:
                data = {}
                for line in f:
                    if "=" in line:
                        k, v = line.strip().split("=", 1)
                        data[k] = v.strip('"')
            
            if "PRETTY_NAME" in data:
                return f"Linux ({data['PRETTY_NAME']})"
            elif "NAME" in data:
                return f"Linux ({data['NAME']} {data.get('VERSION_ID', '')})"
        except Exception:
            pass
            
        # Fallback to lsb_release
        try:
            return "Linux (" + subprocess.check_output(["lsb_release", "-ds"], stderr=subprocess.DEVNULL).decode().strip() + ")"
        except Exception:
            pass
            
    return f"{system} ({platform.platform()})"

def execute_command_with_retry(command, timeout=120):
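    """Run a single shell command with a timeout.

    Despite the name, retries are handled by the caller (execute_task);
    this returns (exit_code, stdout, stderr), with -1 on timeout.
    """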
    try:
        result = subprocess.run(
            command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            timeout=timeout
        )
        return result.returncode, result.stdout.decode(errors='replace'), result.stderr.decode(errors='replace')
    except subprocess.TimeoutExpired:
        return -1, "", "Command timed out"

def execute_task(task, history, secrets, api_key, provider, distro_info, mock=False, models_config=None):
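    """Generate commands for a task via the LLM, run them, and verify.

    Each failing command gets up to max_retries LLM-driven self-repair
    attempts. Returns (success, diagnostics_text).
    """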
    
    system_prompt = f"""You are a system administration assistant. You are configuring a
    {distro_info} system. You generate shell commands that are:
    - Idempotent where possible (safe to re-run)
    - Non-interactive (no prompts; use -y flags, DEBIAN_FRONTEND=noninteractive, etc.)
    - Minimal (do exactly what's asked, nothing more)
    - Appropriate for the specific OS detected.

    You never explain commands unless asked. You respond only in the requested format."""

    
    prompt = f"""Task: {task['task']}
    
    Previously completed tasks:
    {json.dumps(history, indent=2)}
    
    Generate the shell commands to accomplish this task on {distro_info}.
    Commands must be non-interactive and idempotent where possible.
    
    Respond with a JSON object:
    {{
      "commands": ["command1", "command2"],
      "verify": "command that returns exit code 0 if the task was successful"
    }}
    
    Only JSON. No commentary."""

    response_text = llm_call(prompt, system_instruction=system_prompt, api_key=api_key, provider=provider, mock=mock, models_config=models_config)
    
    try:
        if "```json" in response_text:
            response_text = response_text.split("```json")[1].split("```")[0].strip()
        elif "```" in response_text:
            response_text = response_text.split("```")[1].split("```")[0].strip()
        plan = json.loads(response_text)
    except json.JSONDecodeError:
        print(f"Error parsing task plan: {response_text}", flush=True)
        return False, f"Error parsing task plan. Response: {response_text}"

    print(f"\n [Task] {task['task']}", flush=True)
    
    full_diagnostics = f"Task: {task['task']}\nPlan: {json.dumps(plan, indent=2)}\n"
    
    for cmd in plan.get("commands", []):
        # Substitute secrets
        for placeholder, secret_val in secrets.items():
            cmd = cmd.replace(placeholder, secret_val)
            
        print(f"      $ {cmd}", flush=True)
        
        # Self-repair loop
        max_retries = 3
        diagnostic_context = ""
        
        for attempt in range(max_retries + 1):
            rc, out, err = execute_command_with_retry(cmd)
            full_diagnostics += f"\n>>> Command: {cmd}\nAttempt: {attempt+1}\nExit Code: {rc}\nStdout: {out}\nStderr: {err}\n"
            
            if rc == 0:
                print(f"      ✓", flush=True)
                break
            else:
                print(f"      ✗ (Exit code {rc})", flush=True)
                if attempt < max_retries:
                    print(f"      Attempting self-repair ({attempt+1}/{max_retries})...", flush=True)
                    # Diagnose
                    repair_prompt = f"""The following command failed:
                    
                    Command: {cmd}
                    Exit code: {rc}
                    Stdout: {out}
                    Stderr: {err}
                    
                    This was part of the task: {task['task']}
                    Running on: {distro_info}
                    
                    Diagnostic Context (outputs from previous repair attempts):
                    {diagnostic_context}
                    
                    Diagnose the problem. 
                    If you need more information to diagnose, provide "fix" commands that gather it (e.g. cat logs).
                    If you can fix it, respond with "fix" commands.
                    
                    Respond with JSON:
                    {{"action": "fix", "commands": ["fix_command_1", "fix_command_2"], "retry_original": true/false}}
                    
                    If the fix requires information from the user (e.g., a choice or credential) that you cannot gather via commands, respond with:
                    {{"action": "escalate", "reason": "description"}}
                    
                    Only JSON."""
                    
                    repair_json = llm_call(repair_prompt, system_instruction=system_prompt, api_key=api_key, provider=provider, mock=mock, models_config=models_config)
                    if not repair_json:
                        # Defensive default: an empty LLM response becomes an empty repair plan
                        repair_json = "{}"
                    try:
                        if "```json" in repair_json:
                            repair_json = repair_json.split("```json")[1].split("```")[0].strip()
                        elif "```" in repair_json:
                            repair_json = repair_json.split("```")[1].split("```")[0].strip()
                        repair_plan = json.loads(repair_json)

                        full_diagnostics += f"\nRepair Plan: {json.dumps(repair_plan, indent=2)}\n"

                        if repair_plan.get("action") == "fix":
                            for fix_cmd in repair_plan.get("commands", []):
                                print(f"      (Fix/Diag) $ {fix_cmd}", flush=True)
                                frc, fout, ferr = execute_command_with_retry(fix_cmd)

                                # Accumulate diagnostic context for later repair prompts
                                diagnostic_context += f"\n>>> Command: {fix_cmd}\nExit Code: {frc}\nStdout: {fout}\nStderr: {ferr}\n"
                                full_diagnostics += f"\n>>> Fix Command: {fix_cmd}\nExit Code: {frc}\nStdout: {fout}\nStderr: {ferr}\n"

                                if frc != 0:
                                    print(f"      (Fix Failed) {ferr}", flush=True)

                            if repair_plan.get("retry_original"):
                                continue  # Retry the original command
                            else:
                                # "retry_original": false means the fix itself completed the
                                # work; stop retrying this command and move on.
                                break
                        elif repair_plan.get("action") == "escalate":
                            print(f"      Escalation needed: {repair_plan.get('reason')}", flush=True)
                            choice = safe_input("      (R)etry / (S)kip / (A)bort? ").lower()
                            if choice == "s":
                                return True, full_diagnostics  # Treat as skipped/success
                            elif choice == "r":
                                continue  # Retry the original command
                            else:
                                # Return failure so the main loop handles the abort
                                return False, full_diagnostics
                    except Exception as e:
                        print(f"      Self-repair failed to generate valid JSON: {e}", flush=True)
                        full_diagnostics += f"\nSelf-repair JSON error: {e}\n"
                        # Fall through to the next retry attempt

        else:
             # Retries exhausted
             return False, full_diagnostics

    # Verify
    if "verify" in plan and plan["verify"]:
        vvr = plan["verify"]
        for placeholder, secret_val in secrets.items():
            vvr = vvr.replace(placeholder, secret_val)
            
        print(f"      (Verify) $ {plan['verify']}", flush=True) # Print verify cmd (no secret)
        vrc, vout, verr = execute_command_with_retry(vvr)
        full_diagnostics += f"\n>>> Verify Command: {vvr}\nExit Code: {vrc}\nStdout: {vout}\nStderr: {verr}\n"
        
        if vrc == 0:
             print(f"      ✓ (Verified)", flush=True)
             return True, full_diagnostics
        else:
             print(f"      ✗ Verification failed. (Exit code {vrc})", flush=True)
             if vout: print(f"      Stdout: {vout}", flush=True)
             if verr: print(f"      Stderr: {verr}", flush=True)
             
             # Attempt self-repair for verification failure!
             print(f"      Attempting self-repair for verification failure...", flush=True)
             
             verify_repair_prompt = f"""The verification command failed after tasks were executed.
             
             Verification Command: {plan['verify']}
             Exit code: {vrc}
             Stdout: {vout}
             Stderr: {verr}
             
             This is part of task: {task['task']}
             Running on: {distro_info}
             
             Diagnostic Context:
             {diagnostic_context}
             
             The task commands apparently ran (exit 0) but verification failed.
             Diagnose. If you can fix it (maybe wait longer, or fix config), respond with "fix".
             If you need diagnostics, respond with "fix" commands.
             
             Respond with JSON:
             {{"action": "fix", "commands": ["fix_command_1"], "retry_original": true/false}}
             
             Or escalate if needed.
             Only JSON."""
             
             verify_repair_response = llm_call(verify_repair_prompt, system_instruction=system_prompt, api_key=api_key, provider=provider, mock=mock, models_config=models_config)
             if verify_repair_response:
                 try:
                     if "```json" in verify_repair_response:
                         verify_repair_response = verify_repair_response.split("```json")[1].split("```")[0].strip()
                     elif "```" in verify_repair_response:
                         verify_repair_response = verify_repair_response.split("```")[1].split("```")[0].strip()
                     vr_plan = json.loads(verify_repair_response)

                     full_diagnostics += f"\nVerify Repair Plan: {json.dumps(vr_plan, indent=2)}\n"

                     if vr_plan.get("action") == "fix":
                         for fix_cmd in vr_plan.get("commands", []):
                             print(f"      (Fix/Verify) $ {fix_cmd}", flush=True)
                             vfrc, vfout, vferr = execute_command_with_retry(fix_cmd)
                             full_diagnostics += f"\n>>> Verify Fix Command: {fix_cmd}\nExit Code: {vfrc}\nStdout: {vfout}\nStderr: {vferr}\n"

                         # Verification is not re-run here: the task is reported as failed,
                         # and a manual Retry re-runs the whole (idempotent) task.
                 except Exception as e:
                     full_diagnostics += f"\nVerify repair JSON error: {e}\n"

             return False, full_diagnostics
             
    return True, full_diagnostics

def save_diagnostics(content):
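    """Write accumulated diagnostics to provision_error.log in the current directory."""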
    filename = "provision_error.log"
    try:
        with open(filename, "w") as f:
            f.write(content)
        print(f"\n  📄 Diagnostics saved to {os.path.abspath(filename)}", flush=True)
    except Exception as e:
        print(f"\n  ⚠️ Failed to save diagnostics: {e}", flush=True)

# --- MAIN ---
def main():
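    """Parse arguments, gather config and credentials, plan tasks, and execute them."""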
    parser = argparse.ArgumentParser(description="Provision - LLM-Powered Server Setup")
    parser.add_argument("--config", help="URL or local path to configuration file")
    parser.add_argument("--provider", help="LLM provider (default: auto-detect or anthropic)")
    parser.add_argument("--api-key", help="LLM API key")
    parser.add_argument("--plan", action="store_true", help="Show planned commands without executing")
    parser.add_argument("--yes", action="store_true", help="Skip confirmation")
    parser.add_argument("--log", help="Log file path", default="/var/log/provision.log")
    parser.add_argument("--verbose", action="store_true", help="Show full command output")
    parser.add_argument("--mock", action="store_true", help="Use mock LLM responses for testing")
    args = parser.parse_args()
    
    # Interactive Prompts if missing args
    if not args.config:
        print("╔══════════════════════════════════════════╗", flush=True)
        print("║  Provision — LLM-Powered Server Setup   ║", flush=True)
        print("╚══════════════════════════════════════════╝", flush=True)
    
    # Get config (loop until valid)
    raw_markdown = None
    while raw_markdown is None:
        if not args.config:
            args.config = safe_input("Config URL or path: ").strip()
        
        if not args.config:
             # User hit enter without input, try again
             continue

        try:
            raw_markdown = get_config(args.config)
        except ValueError as e:
            print(e, flush=True)
            # Reset so the loop prompts for a new source, even if the bad value
            # came from the --config flag.
            args.config = None

    if not args.api_key and "PROVISION_API_KEY" in os.environ:
        args.api_key = os.environ["PROVISION_API_KEY"]
        
    if not args.api_key and not args.mock:
        args.api_key = safe_getpass("LLM API Key: ").strip()

    if not args.config or (not args.api_key and not args.mock):
         print("Error: Config and API Key are required.", flush=True)
         sys.exit(1)

    # 1.5 Auto-detect provider if not set
    if not args.provider and args.api_key:
        detected = detect_provider(args.api_key)
        if detected:
            print(f"Auto-detected provider: {detected}", flush=True)
            args.provider = detected
    
    if not args.provider:
        args.provider = "anthropic" # Default fallback


    # 1. Get Config (already fetched above)
    
    # 2. Parse Variables
    substituted_markdown, secrets = parse_and_prompt_variables(raw_markdown)
    
    # 3. Load Models Config
    models_config = get_models_config()

    # 4. Detect System Info
    print("\nDetecting system information...", flush=True)
    distro_info = detect_system_info()
    print(f"Detected: {distro_info}", flush=True)

    # 5. Plan
    print("\nPlanning tasks...", flush=True)
    tasks = plan_tasks(substituted_markdown, args.api_key, args.provider, distro_info, mock=args.mock, models_config=models_config)
    
    print(f"\n 📋 Task Plan ({len(tasks)} tasks):", flush=True)
    for t in tasks:
        print(f"  {t['id']}. [{t.get('section', 'General')}] {t['task']}", flush=True)
        
    if args.plan:
        return
        
    if not args.yes:
        confirm = safe_input("\nProceed? [Y/n]: ").lower()
        if confirm not in ['y', 'yes', '']:
            print("Aborted.", flush=True)
            sys.exit(0)

    print("\nExecuting...", flush=True)
    history = []
    
    total_start = time.time()
    success_count = 0
    
    # Setup Logging
    log_file = None
    if args.log:
        try:
            log_file = open(args.log, "a")
            log_file.write(f"--- Session started at {time.ctime()} ---\n")
        except OSError as e:
            print(f"Warning: Cannot write to {args.log} ({e}). Running without log file.", flush=True)
    
    try:
        for t in tasks:
            start_time = time.time()
            
            # Manual Retry Loop
            while True:
                success, diagnostics = execute_task(t, history, secrets, args.api_key, args.provider, distro_info, mock=args.mock, models_config=models_config)
                
                if success:
                    duration = time.time() - start_time
                    success_count += 1
                    history.append({"task": t['task'], "status": "success"})
                    if log_file: log_file.write(f"[SUCCESS] {t['task']} ({duration:.2f}s)\n")
                    print(f"[SUCCESS] {t['task']}", flush=True)
                    break # Next task
                else:
                    print(f"\nTask failed: {t['task']}", flush=True)
                    choice = safe_input("(R)etry / (S)kip / (A)bort? ").lower()
                    if choice == "r":
                        print("Retrying task...", flush=True)
                        continue  # Retry same task
                    elif choice == "s":
                        print("Skipping...", flush=True)
                        history.append({"task": t['task'], "status": "skipped"})
                        if log_file: log_file.write(f"[SKIPPED] {t['task']}\n")
                        break  # Next task
                    else:
                        if log_file: log_file.write(f"[ABORTED] {t['task']}\n")

                        # Offer to save detailed diagnostics before aborting
                        save_log = safe_input("Save detailed error diagnostics to provision_error.log? [Y/n] ").lower()
                        if save_log in ['y', 'yes', '']:
                            save_diagnostics(diagnostics)

                        sys.exit(1)
    finally:
        if log_file:
            log_file.close()

    total_duration = time.time() - total_start
    print(f"\n══════════════════════════════════════════", flush=True)
    print(f" ✅ Complete: {success_count}/{len(tasks)} tasks succeeded", flush=True)
    print(f" ⏱  Total time: {int(total_duration // 60)}m {int(total_duration % 60)}s", flush=True)
    if args.log and log_file:
        print(f" 📄 Log saved to {args.log}", flush=True)
    print(f"══════════════════════════════════════════", flush=True)


if __name__ == "__main__":
    main()
