"""
APS BIM Upload Script
Upload Revit/IFC/AutoCAD files to Autodesk Platform Services,
convert to web format, and get viewer URN.
"""

import base64
import json
import os
import sys
import time
from pathlib import Path
from urllib.parse import quote

import requests
from dotenv import load_dotenv

# Pull APS credentials/config from a local .env file, if one exists.
load_dotenv()

# OAuth client credentials for Autodesk Platform Services (required).
APS_CLIENT_ID = os.getenv("APS_CLIENT_ID")
APS_CLIENT_SECRET = os.getenv("APS_CLIENT_SECRET")
# OSS bucket to upload into; overridable via the APS_BUCKET_KEY env var.
APS_BUCKET_KEY = os.getenv("APS_BUCKET_KEY", "dthub-bim-demo")

# Root of all APS REST endpoints used below.
BASE_URL = "https://developer.api.autodesk.com"


def get_access_token():
    """Step 3: Obtain an OAuth access token from APS (client-credentials flow).

    Returns the bearer token string; raises on any non-2xx response.
    """
    token_endpoint = f"{BASE_URL}/authentication/v2/token"
    payload = {
        "grant_type": "client_credentials",
        "scope": "data:read data:write data:create bucket:create bucket:read",
    }
    # Client id/secret go in HTTP Basic auth, form fields carry the grant.
    response = requests.post(
        token_endpoint,
        data=payload,
        auth=(APS_CLIENT_ID, APS_CLIENT_SECRET),
    )
    response.raise_for_status()
    body = response.json()
    print(f"[OK] Access token received (expires in {body['expires_in']}s)")
    return body["access_token"]


def create_bucket(token):
    """Step 4: Ensure the OSS bucket for BIM files exists.

    Returns True whether the bucket was created or already present.
    """
    endpoint = f"{BASE_URL}/oss/v2/buckets"
    request_headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
    }
    payload = {"bucketKey": APS_BUCKET_KEY, "policyKey": "persistent"}
    response = requests.post(endpoint, headers=request_headers, json=payload)
    # 409 Conflict means the bucket already exists — treat as success.
    if response.status_code == 409:
        print(f"[OK] Bucket '{APS_BUCKET_KEY}' already exists")
    else:
        response.raise_for_status()
        print(f"[OK] Bucket '{APS_BUCKET_KEY}' created")
    return True


CHUNK_SIZE = 20 * 1024 * 1024  # 20 MB per part for the multipart signed-URL upload


def upload_file(token, file_path):
    """Step 5: Upload a BIM file to the bucket via S3 signed URLs.

    Requests one signed URL per CHUNK_SIZE part, PUTs each chunk, then
    finalizes the upload with the returned uploadKey.

    Returns the padding-stripped URL-safe base64 URN of the uploaded
    object, or None when the file is missing or a part upload fails.
    """
    file_path = Path(file_path)
    if not file_path.exists():
        print(f"[ERROR] File not found: {file_path}")
        return None

    file_size = file_path.stat().st_size
    object_key = file_path.name
    # Percent-encode the object key for the URL path: filenames can contain
    # spaces, '#', '%' or non-ASCII characters that would otherwise corrupt
    # the endpoint route. The raw name is kept for display only.
    encoded_key = quote(object_key, safe="")
    total_parts = max(1, (file_size + CHUNK_SIZE - 1) // CHUNK_SIZE)

    print(f"[...] Uploading {object_key} ({file_size / 1024 / 1024:.1f} MB, {total_parts} part(s))...")

    # 1. Get one signed upload URL per part.
    sign_url = f"{BASE_URL}/oss/v2/buckets/{APS_BUCKET_KEY}/objects/{encoded_key}/signeds3upload"
    params = {"parts": total_parts}
    headers = {"Authorization": f"Bearer {token}"}
    resp = requests.get(sign_url, headers=headers, params=params)
    resp.raise_for_status()
    sign_data = resp.json()
    upload_key = sign_data["uploadKey"]
    urls = sign_data["urls"]

    # 2. Upload each chunk directly to S3 via its signed URL.
    with open(file_path, "rb") as f:
        for i, part_url in enumerate(urls):
            chunk = f.read(CHUNK_SIZE)
            part_resp = requests.put(
                part_url,
                data=chunk,
                headers={"Content-Type": "application/octet-stream"},
                timeout=300,
            )
            if part_resp.status_code not in (200, 201):
                print(f"[ERROR] Part {i+1}/{total_parts} failed: {part_resp.status_code} {part_resp.text[:200]}")
                return None
            pct = int((i + 1) * 100 / total_parts)
            print(f"      Part {i+1}/{total_parts} uploaded ({pct}%)")

    # 3. Finalize so OSS assembles the parts into a single object.
    complete_url = f"{BASE_URL}/oss/v2/buckets/{APS_BUCKET_KEY}/objects/{encoded_key}/signeds3upload"
    complete_headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
    }
    complete_body = {"uploadKey": upload_key}
    resp = requests.post(complete_url, headers=complete_headers, json=complete_body)
    resp.raise_for_status()

    # Model Derivative expects the objectId as unpadded URL-safe base64.
    object_id = resp.json()["objectId"]
    urn = base64.urlsafe_b64encode(object_id.encode()).decode().rstrip("=")
    print(f"[OK] Uploaded: {object_key}")
    print(f"     Object ID: {object_id}")
    print(f"     URN (base64): {urn}")
    return urn


def translate_model(token, urn):
    """Step 6: Kick off an SVF2 (web viewer) translation job for *urn*.

    Returns the same urn for chaining; raises on a rejected job.
    """
    job_endpoint = f"{BASE_URL}/modelderivative/v2/designdata/job"
    job_headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
    }
    # Request both 2D and 3D views in the SVF2 output.
    job_spec = {
        "input": {"urn": urn},
        "output": {"formats": [{"type": "svf2", "views": ["2d", "3d"]}]},
    }
    response = requests.post(job_endpoint, headers=job_headers, json=job_spec)
    response.raise_for_status()
    print(f"[OK] Translation job started for URN: {urn}")
    return urn


def check_translation_status(token, urn):
    """Poll the Model Derivative manifest for translation status.

    Returns a (status, progress) tuple of strings. Shortly after a job is
    submitted the manifest may not exist yet; a 404 is reported as
    ("pending", "0%") instead of raising, so the polling loop keeps going.
    Other HTTP errors still raise.
    """
    url = f"{BASE_URL}/modelderivative/v2/designdata/{urn}/manifest"
    headers = {"Authorization": f"Bearer {token}"}
    resp = requests.get(url, headers=headers)
    # Manifest not created yet — translation hasn't started producing output.
    if resp.status_code == 404:
        return "pending", "0%"
    resp.raise_for_status()
    result = resp.json()
    status = result.get("status", "unknown")
    progress = result.get("progress", "0%")
    return status, progress


def wait_for_translation(token, urn, timeout=600):
    """Poll until the translation succeeds, fails, or *timeout* seconds pass.

    Returns True on success, False on failure or timeout.
    """
    print("[...] Waiting for translation to complete...")
    deadline = time.time() + timeout
    while time.time() < deadline:
        status, progress = check_translation_status(token, urn)
        print(f"      Status: {status} | Progress: {progress}")
        if status == "success":
            print("[OK] Translation complete!")
            return True
        if status == "failed":
            print("[ERROR] Translation failed!")
            return False
        # Poll every 10 seconds to avoid hammering the manifest endpoint.
        time.sleep(10)
    print("[ERROR] Translation timed out")
    return False


def save_viewer_config(urns, merge=False):
    """Persist URN entries to viewer_config.json for the web viewer.

    *urns* is a list of (filename, urn) pairs. With merge=True, entries
    already present in the config are replaced in place when a new model
    has the same name; everything else is appended.
    """
    config_path = Path(__file__).parent / "viewer_config.json"

    models = []
    if merge and config_path.exists():
        with open(config_path, "r", encoding="utf-8") as f:
            models = json.load(f).get("models", [])

    # Map model name -> position so re-uploads overwrite their old entry.
    index_by_name = {entry["name"]: pos for pos, entry in enumerate(models)}
    for filename, urn in urns:
        entry = {"name": Path(filename).stem, "urn": urn}
        pos = index_by_name.get(entry["name"])
        if pos is None:
            models.append(entry)
        else:
            models[pos] = entry

    with open(config_path, "w", encoding="utf-8") as f:
        json.dump({"models": models}, f, indent=2, ensure_ascii=False)
    print(f"[OK] Viewer config saved to {config_path} ({len(models)} models)")


def main():
    """CLI entry point: find BIM files, upload new ones, translate, save URNs.

    Optional argv[1] is a directory to scan (relative paths resolve against
    this script's directory); otherwise the script's own directory is used.
    Exits 1 on missing credentials or no supported files; exits 0 early when
    every found file was already uploaded.
    """
    if not APS_CLIENT_ID or not APS_CLIENT_SECRET:
        print("[ERROR] Set APS_CLIENT_ID and APS_CLIENT_SECRET in .env file")
        sys.exit(1)

    # Find BIM files
    if len(sys.argv) > 1:
        bim_dir = Path(sys.argv[1])
        if not bim_dir.is_absolute():
            # Relative CLI paths are resolved next to this script, not the CWD.
            bim_dir = Path(__file__).parent / bim_dir
    else:
        bim_dir = Path(__file__).parent
    supported_exts = ["*.rvt", "*.ifc", "*.dwg", "*.dxf", "*.dwf", "*.dwfx"]
    bim_files = []
    for ext in supported_exts:
        bim_files.extend(bim_dir.glob(ext))
    if not bim_files:
        print("[ERROR] No supported files (.rvt/.ifc/.dwg/.dxf/.dwf) found in", bim_dir)
        sys.exit(1)

    print(f"Found {len(bim_files)} BIM file(s):")
    for f in bim_files:
        print(f"  - {f.name} ({f.stat().st_size / 1024 / 1024:.1f} MB)")
    print()

    # Load existing config to skip already uploaded files
    config_path = Path(__file__).parent / "viewer_config.json"
    existing_names = set()
    if config_path.exists():
        with open(config_path, "r", encoding="utf-8") as f:
            existing_names = {m["name"] for m in json.load(f).get("models", [])}

    # A file counts as already uploaded when its stem matches a config entry.
    new_files = [f for f in bim_files if f.stem not in existing_names]
    skipped = len(bim_files) - len(new_files)
    if skipped > 0:
        print(f"[SKIP] {skipped} file(s) already uploaded, skipping:")
        for f in bim_files:
            if f.stem in existing_names:
                print(f"  - {f.name}")
        print()

    if not new_files:
        print("[OK] All files already uploaded. Nothing to do.")
        sys.exit(0)

    print(f"Will upload {len(new_files)} new file(s):")
    for f in new_files:
        print(f"  - {f.name} ({f.stat().st_size / 1024 / 1024:.1f} MB)")
    print()

    # Step 3: Get token
    token = get_access_token()

    # Step 4: Create bucket
    create_bucket(token)

    # Step 5 & 6: Upload and translate each file
    urns = []
    for bim_file in new_files:
        print(f"\n{'='*60}")
        print(f"Processing: {bim_file.name}")
        print(f"{'='*60}")

        urn = upload_file(token, bim_file)
        if not urn:
            # Upload failed; skip translation and move on to the next file.
            continue

        translate_model(token, urn)
        success = wait_for_translation(token, urn)
        if success:
            urns.append((bim_file.name, urn))

    if urns:
        # merge=True only when a directory argument was given, so repeated
        # directory scans accumulate models instead of overwriting the config.
        save_viewer_config(urns, merge=len(sys.argv) > 1)
        print(f"\n{'='*60}")
        print("DONE! All models processed.")
        print(f"{'='*60}")
        print("\nOpen viewer.html in a browser to view models.")
        print("\nURNs for DTHub integration:")
        for name, urn in urns:
            print(f"  {name}: {urn}")
    else:
        print("\n[WARNING] No models were successfully processed.")


if __name__ == "__main__":
    main()
