﻿#!/usr/bin/env python3
from __future__ import annotations

import json
import os
import re
import subprocess
import urllib.error
import urllib.request
from datetime import datetime, timezone

# --- Repository paths and per-section output limits -------------------------
CONFIG_PATH = "app/config/config.yml"  # YAML file carrying version_numero / version_fecha
CHANGELOG_PATH = "changelog.md"  # generated changelog document
# Hard caps applied when composing a changelog entry and its AI prompt.
MAX_FILES_IN_ENTRY = 60
MAX_COMMITS_IN_ENTRY = 60
MAX_IMPACT_ITEMS = 4
MAX_FEATURE_ITEMS = 4
MAX_ANALYSIS_FILES = 50  # at most this many per-file diffs are analyzed
MAX_PATCH_LINES_PER_FILE = 120  # added/removed lines retained per analyzed file
MAX_EXECUTIVE_ITEMS = 4
MAX_TECHNICAL_ITEMS = 6
MAX_USER_ITEMS = 6
MAX_VALIDATION_ITEMS = 8
MAX_RISK_ITEMS = 4
MAX_ROLLBACK_ITEMS = 3
MAX_INTEGRATION_COMMITS = 6
MAX_AI_CONTEXT_COMMITS = 20
MAX_AI_CONTEXT_FILES = 35
MAX_AI_BULLET_LEN = 200
AI_REQUEST_TIMEOUT_SECONDS = 45  # presumably the urllib request timeout — confirm in the AI caller (not in this chunk)
# Candidate locations for the optional business-context notes file.
DEFAULT_BUSINESS_CONTEXT_PATHS = (
    "changelog_business_context.md",
    "docs/changelog_business_context.md",
    "scripts/changelog_business_context.md",
)

# Display order for the change categories produced by human_bucket();
# names not listed here sort after these, alphabetically (see bucket_sort_key).
BUCKET_PRIORITY = [
    "Configuracion del sistema",
    "Base de datos",
    "Flujos de negocio",
    "Logica de negocio y datos",
    "Pantallas y experiencia visual",
    "Comportamiento de interfaz",
    "Recursos y documentos",
    "Codigo de aplicacion",
    "Otros cambios",
]

# Maps git name-status codes to the symbol shown in the changelog.
# "T" (type change) is reported as a plain modification.
STATUS_SYMBOL = {
    "A": "A",
    "M": "M",
    "D": "D",
    "R": "R",
    "C": "C",
    "T": "M",
}

# Substrings that mark a diff line as a condition/filter/validation; mixes
# SQL/PHP operators with Spanish domain terms. Matched against a line padded
# with spaces on both sides (see line_has_condition_signal).
CONDITION_KEYWORDS = (
    " where ",
    " if ",
    " elseif",
    "&&",
    "||",
    "==",
    "!=",
    "<>",
    " and ",
    " or ",
    " validar",
    " valid",
    " filtro",
    " nit",
    " estado",
    " activo",
    " coddirectorio",
)

# Commit-subject keywords (English + Spanish) that suggest new functionality.
FEATURE_COMMIT_KEYWORDS = (
    "feat",
    "feature",
    "nueva",
    "nuevo",
    "agrega",
    "agregar",
    "implementa",
    "habilita",
    "soporte",
    "crear",
)

# Technical tokens that should not leak into management-facing summaries.
MANAGEMENT_TECH_TOKENS = (
    " commit",
    "hash",
    "sha",
    "git ",
    "pull request",
    "pr #",
    ".php",
    ".py",
    ".yml",
    ".yaml",
    ".sql",
    ".twig",
    "/controller/",
    "/scripts/",
    "workflow",
    "changelog.md",
)

# Generic filler phrases to detect/avoid in user-facing bullets.
GENERIC_USER_PHRASES = (
    "modulo afectado",
    "area afectada",
    "flujo habitual",
    "flujo principal del area",
    "puede notar cambios en pantallas o interacciones",
    "el mayor cambio se concentra",
    "se recomienda validar una operacion completa",
    "sin diferencias operativas",
)

# (verb, trigger substrings, human description) rules applied against diff
# content elsewhere in this script to produce readable change hints.
DIFF_HINT_PATTERNS = (
    ("agrego", ("playsoundsafe", "sonarok", "sonarnotok", "audok", "audnotok", "audio.play"), "sonidos de confirmacion (exito y error)"),
    ("modifico", ("normalizarhorareporte", "reporte_hora", "horareporte"), "lectura correcta de la hora programada del reporte"),
    ("corrigio", ("filter_validate_boolean", "filter_var(", "testparam", "resetparam"), "lectura correcta de opciones tipo si/no"),
    ("corrigio", ("fallidos", "intentos", "enviados"), "mensaje final mas claro con intentos, enviados y fallidos"),
    ("corrigio", ("reportes programados error", "tiposam", "guardarsam"), "registro separado entre ejecucion correcta y con error"),
    ("corrigio", ("no hay destinatarios", "destinatarios para el reporte"), "aviso claro cuando un reporte no tiene destinatarios"),
    ("modifico", (" where ", " join ", "select ", " from "), "forma de calcular resultados en consultas y reportes"),
    ("modifico", (" if ", " elseif", "validar", "filtro"), "reglas de control del proceso"),
)

# PHP-style method declarations; captures the function name.
METHOD_RE = re.compile(r"(?:public|protected|private)?\s*function\s+([A-Za-z_][A-Za-z0-9_]*)\s*\(")
# GitHub merge subjects: "Merge pull request #N from <branch>".
MERGE_PR_RE = re.compile(r"^merge pull request #(\d+)\s+from\s+(.+)$", re.IGNORECASE)
# "Merge branch 'x' [into 'y']".
MERGE_BRANCH_RE = re.compile(r"^merge branch\s+'?([^']+)'?(?:\s+into\s+'?([^']+)')?", re.IGNORECASE)
# "Merge remote-tracking branch 'origin/x'".
MERGE_REMOTE_RE = re.compile(r"^merge remote-tracking branch\s+'?([^']+)'?", re.IGNORECASE)
# Squash-merge suffix "(#N)" anywhere in a subject.
SQUASH_PR_RE = re.compile(r"\(#(\d+)\)")


def run_cmd(cmd: list[str], allow_fail: bool = False) -> str:
    """Run *cmd* and return its stripped stdout.

    On a non-zero exit, return "" when *allow_fail* is set, otherwise raise
    RuntimeError carrying the exit code, command line and captured stderr.
    """
    proc = subprocess.run(
        cmd,
        text=True,
        capture_output=True,
        encoding="utf-8",
        errors="replace",
    )
    if proc.returncode == 0:
        return (proc.stdout or "").strip()
    if allow_fail:
        return ""
    details = (proc.stderr or "").strip()
    raise RuntimeError(f"Command failed ({proc.returncode}): {' '.join(cmd)}\n{details}")


def git(*args: str, allow_fail: bool = False) -> str:
    """Invoke a git subcommand, delegating execution to run_cmd."""
    command = ["git"]
    command.extend(args)
    return run_cmd(command, allow_fail=allow_fail)


def read_text(path: str) -> str:
    """Read *path* as UTF-8 text, substituting undecodable bytes."""
    with open(path, "r", encoding="utf-8", errors="replace") as source:
        contents = source.read()
    return contents


def write_text(path: str, content: str) -> None:
    """Overwrite *path* with *content*, encoded as UTF-8."""
    with open(path, "w", encoding="utf-8") as sink:
        sink.write(content)


def normalize_spaces(text: str) -> str:
    """Collapse every whitespace run to a single space and trim the ends."""
    collapsed = re.sub(r"\s+", " ", text)
    return collapsed.strip()


def shorten_fragment(text: str, max_len: int = 120) -> str:
    """Normalize whitespace, swap backticks for quotes, cap at *max_len* chars.

    Backticks are replaced with single quotes so the fragment can be embedded
    safely inside a Markdown inline-code span. Over-long fragments are
    truncated with a trailing ellipsis.

    Fix: the previous truncation (`[:max_len - 1] + "..."`) returned up to
    max_len + 2 characters, exceeding the stated limit; now the result never
    exceeds *max_len*, matching sanitize_bullet's convention.
    """
    cleaned = normalize_spaces(text).replace("`", "'")
    if len(cleaned) > max_len:
        return cleaned[: max_len - 3].rstrip() + "..."
    return cleaned


def sanitize_bullet(text: str, max_len: int = 220) -> str:
    """Flatten whitespace, unwrap `code` spans, and truncate to *max_len*."""
    flattened = normalize_spaces(text)
    flattened = re.sub(r"`([^`]+)`", r"\1", flattened)
    if len(flattened) <= max_len:
        return flattened
    return flattened[: max_len - 3].rstrip() + "..."


def clean_yaml_value(raw_value: str) -> str:
    """Strip an inline ' #' comment and one layer of matching outer quotes."""
    value = raw_value.split(" #", 1)[0].strip()
    quoted = (
        len(value) >= 2
        and value[0] in {'"', "'"}
        and value[-1] == value[0]
    )
    if quoted:
        value = value[1:-1]
    return value.strip()


def extract_version_from_config(config_text: str) -> tuple[str, str]:
    """Extract (version_numero, version_fecha) from the config YAML text.

    First pass: a lightweight, indentation-aware scan restricted to the
    ``parameters:`` block. If either value is still missing, a second looser
    pass scans the whole file for direct assignments, skipping
    ``%placeholder%`` interpolations. Missing values come back as "".
    """
    version_number = ""
    version_date = ""

    in_parameters_block = False
    parameters_indent = 0

    for raw_line in config_text.splitlines():
        line = raw_line.rstrip()
        stripped = line.strip()
        if not stripped or stripped.startswith("#"):
            continue

        indent = len(line) - len(line.lstrip(" "))

        if stripped == "parameters:":
            in_parameters_block = True
            parameters_indent = indent
            continue

        # A new mapping key at (or above) the parameters indent ends the block.
        if in_parameters_block and indent <= parameters_indent and re.match(r"^[A-Za-z0-9_]+\s*:", stripped):
            break

        if not in_parameters_block:
            continue

        match = re.match(r"^(version_numero|version_fecha)\s*:\s*(.+?)\s*$", stripped)
        if not match:
            continue

        key = match.group(1)
        value = clean_yaml_value(match.group(2))

        if key == "version_numero":
            version_number = value
        elif key == "version_fecha":
            version_date = value

    if version_number and version_date:
        return version_number, version_date

    # Fallback: whole-file scan, ignoring %parameter% references.
    for raw_line in config_text.splitlines():
        stripped = raw_line.strip()
        if stripped.startswith("version_numero:") and "%version_numero%" not in stripped:
            version_number = clean_yaml_value(stripped.split(":", 1)[1])
        if stripped.startswith("version_fecha:") and "%version_fecha%" not in stripped:
            version_date = clean_yaml_value(stripped.split(":", 1)[1])

    return version_number, version_date


def short_sha(sha: str) -> str:
    """Return the first 8 chars of *sha*, or 'inicio' when it is empty."""
    if not sha:
        return "inicio"
    return sha[:8]


def human_bucket(filename: str) -> str:
    """Map a repository path to its human-readable change category."""
    path = filename.lower().replace("\\", "/")

    config_like = (
        path == CONFIG_PATH.lower()
        or "/config/" in path
        or path.endswith((".yml", ".yaml", ".ini", ".env", ".xml", ".json"))
    )
    if config_like:
        return "Configuracion del sistema"
    if path.endswith(".sql"):
        return "Base de datos"
    if "/controller/" in path:
        return "Flujos de negocio"
    if any(marker in path for marker in ("/entity/", "/repository/", "/clases/")):
        return "Logica de negocio y datos"
    if "/resources/views/" in path or path.endswith((".twig", ".html", ".css", ".scss", ".sass")):
        return "Pantallas y experiencia visual"
    if path.endswith((".js", ".ts", ".jsx", ".tsx", ".vue")):
        return "Comportamiento de interfaz"
    asset_extensions = (".png", ".jpg", ".jpeg", ".gif", ".svg", ".ico", ".pdf", ".xlsx", ".csv", ".mp3", ".wav")
    if path.startswith("web/") and path.endswith(asset_extensions):
        return "Recursos y documentos"
    if path.endswith(".php"):
        return "Codigo de aplicacion"

    return "Otros cambios"


def bucket_sort_key(name: str) -> tuple[int, int | str]:
    """Sort known buckets by priority; unknown ones alphabetically after."""
    try:
        return (0, BUCKET_PRIORITY.index(name))
    except ValueError:
        return (1, name.lower())


def parse_name_status(line: str) -> tuple[str, str] | None:
    parts = line.split("\t")
    if len(parts) < 2:
        return None

    status_token = parts[0].strip()
    if not status_token:
        return None

    status_code = status_token[0].upper()
    if status_code == "R":
        if len(parts) < 3:
            return None
        filename = parts[2].strip()
    else:
        filename = parts[1].strip()

    if not filename:
        return None

    return status_code, filename


def list_config_history() -> list[str]:
    """Return the SHAs of commits that touched CONFIG_PATH, newest first."""
    history = git("log", "--format=%H", "--", CONFIG_PATH, allow_fail=True)
    return [sha for sha in map(str.strip, history.splitlines()) if sha]


def is_ancestor(ancestor_sha: str, descendant_sha: str) -> bool:
    """True when git reports *ancestor_sha* as an ancestor of *descendant_sha*."""
    # `git merge-base --is-ancestor` signals the answer through its exit code.
    completed = subprocess.run(
        ["git", "merge-base", "--is-ancestor", ancestor_sha, descendant_sha],
        text=True,
        capture_output=True,
        encoding="utf-8",
        errors="replace",
    )
    return completed.returncode == 0


def resolve_range(previous_cut_sha: str, end_sha: str) -> tuple[str, str]:
    """Pick the (start_sha, git range spec) pair for this changelog cut.

    Prefers the previous cut commit when it is an ancestor of *end_sha*;
    otherwise falls back to end_sha's first parent, and finally — for a root
    commit — to end_sha alone with an empty start.
    """
    if previous_cut_sha and is_ancestor(previous_cut_sha, end_sha):
        return previous_cut_sha, f"{previous_cut_sha}..{end_sha}"

    parent = git("rev-parse", f"{end_sha}^", allow_fail=True)
    if parent:
        return parent, f"{parent}..{end_sha}"

    # Root commit: no parent exists, treat the single commit as the range.
    return "", end_sha


def list_changed_files(start_sha: str, end_sha: str, range_spec: str) -> list[dict]:
    """List files changed in the range as {status, filename, bucket} rows.

    The changelog itself and the version config file are excluded, and rows
    come back sorted by bucket priority and then by filename.
    """
    if start_sha:
        output = git("diff", "--name-status", "--find-renames", range_spec, allow_fail=True)
    else:
        # No start commit: show the single end commit's own file list.
        output = git("show", "--name-status", "--pretty=format:", "--find-renames", end_sha, allow_fail=True)

    excluded = {CHANGELOG_PATH.lower(), CONFIG_PATH.lower()}
    rows: list[dict] = []

    for raw in output.splitlines():
        parsed = parse_name_status(raw)
        if parsed is None:
            continue

        status_code, filename = parsed
        normalized = filename.replace("\\", "/")
        if normalized.lower() in excluded:
            continue

        rows.append(
            {
                "status": STATUS_SYMBOL.get(status_code, status_code),
                "filename": normalized,
                "bucket": human_bucket(normalized),
            }
        )

    rows.sort(key=lambda item: (bucket_sort_key(item["bucket"]), item["filename"]))
    return rows

def is_noise_line(line: str) -> bool:
    """True for lines with no analyzable content: blank, lone brackets, comments, imports."""
    stripped = line.strip()
    if not stripped:
        return True
    if stripped in {"{", "}", "};", "(", ")", "[", "]"}:
        return True
    return stripped.startswith(("//", "#", "*", "/*", "*/", "use ", "namespace "))


def is_textual_for_analysis(path: str) -> bool:
    """True when *path* has a text extension worth diff analysis.

    The version config file is always excluded, as are extensionless paths.
    """
    normalized = path.lower().replace("\\", "/")
    if normalized == CONFIG_PATH.lower():
        return False

    _, dot, ext = normalized.rpartition(".")
    if not dot or not ext:
        return False

    textual_extensions = {
        "php", "twig", "yml", "yaml", "sql", "js", "ts",
        "json", "xml", "ini", "txt", "md", "html", "css",
    }
    return ext in textual_extensions

def collect_patch(start_sha: str, end_sha: str, range_spec: str) -> str:
    """Return the zero-context unified diff for the range (or a single commit)."""
    if not start_sha:
        # Root commit: diff the single end commit against nothing.
        return git("show", "--pretty=format:", "--unified=0", "--find-renames", end_sha, allow_fail=True)
    return git("diff", "--unified=0", "--find-renames", range_spec, allow_fail=True)


def parse_patch_by_file(patch_text: str) -> list[dict]:
    """Split a unified diff into per-file records of added/removed lines.

    Each record is {"file", "analyze", "added", "removed"}. Only files that
    pass is_textual_for_analysis are kept, each side is capped at
    MAX_PATCH_LINES_PER_FILE lines, and at most MAX_ANALYSIS_FILES records
    are returned.

    Fix: the end-of-file flush duplicated the "diff --git" flush verbatim;
    both now share one local helper (behavior unchanged).
    """
    diffs: list[dict] = []
    current: dict | None = None

    def flush(entry: dict | None) -> None:
        # Keep only analyzable files whose content actually changed.
        if entry and entry.get("analyze") and entry.get("file"):
            if entry.get("added") or entry.get("removed"):
                diffs.append(entry)

    for raw_line in patch_text.splitlines():
        line = raw_line.rstrip("\n")

        if line.startswith("diff --git "):
            flush(current)
            match = re.match(r"^diff --git a/(.+?) b/(.+)$", line)
            file_path = match.group(2) if match else ""
            current = {
                "file": file_path,
                "analyze": is_textual_for_analysis(file_path),
                "added": [],
                "removed": [],
            }
            continue

        if current is None:
            continue

        if line.startswith("+++ b/"):
            # The "+++" header is authoritative for the post-image path.
            file_path = line[6:].strip()
            current["file"] = file_path
            current["analyze"] = is_textual_for_analysis(file_path)
            continue

        if not current.get("analyze"):
            continue

        if line.startswith("+") and not line.startswith("+++"):
            value = line[1:].strip()
            if value and len(current["added"]) < MAX_PATCH_LINES_PER_FILE:
                current["added"].append(value)
        elif line.startswith("-") and not line.startswith("---"):
            value = line[1:].strip()
            if value and len(current["removed"]) < MAX_PATCH_LINES_PER_FILE:
                current["removed"].append(value)

    flush(current)
    return diffs[:MAX_ANALYSIS_FILES]


def line_has_condition_signal(line: str) -> bool:
    """True when *line* contains any CONDITION_KEYWORDS token (space-padded match)."""
    padded = f" {line.lower()} "
    for token in CONDITION_KEYWORDS:
        if token in padded:
            return True
    return False


def first_condition_line(lines: list[str]) -> str:
    """Return the first non-noise line carrying a condition keyword, else ''."""
    candidates = (
        candidate
        for candidate in lines
        if not is_noise_line(candidate) and line_has_condition_signal(candidate)
    )
    return next(candidates, "")


def first_sql_line(lines: list[str]) -> str:
    """Return the first non-noise line that looks like SQL, else ''."""
    sql_markers = ("select ", " from ", " where ", " join ")
    for candidate in lines:
        if is_noise_line(candidate):
            continue
        lowered = candidate.lower()
        if any(marker in lowered for marker in sql_markers):
            return candidate
    return ""


def extract_method_names(lines: list[str]) -> set[str]:
    """Collect PHP-style function names declared anywhere in *lines*."""
    found: set[str] = set()
    for text in lines:
        hit = METHOD_RE.search(text)
        if hit and hit.group(1):
            found.add(hit.group(1))
    return found


def subject_is_version_update(subject: str) -> bool:
    """True for commit subjects that only bump version/changelog metadata."""
    lowered = subject.lower()
    return (
        lowered.startswith("docs(changelog):")
        or "version_numero" in lowered
        or "version_fecha" in lowered
    )


def list_commits(start_sha: str, end_sha: str, range_spec: str) -> dict[str, list[dict]]:
    """Collect the commits in the range for changelog rendering.

    Returns {"all": every parsed commit, "display": the subset suitable for
    the changelog body}. Version-bump subjects and changelog-bot commits are
    dropped entirely; merge commits are hidden from "display" whenever
    regular commits exist; duplicate subjects (case-insensitive) are
    collapsed keeping the first occurrence.
    """
    # Unit-separator (0x1f) delimited fields: sha, parents, author, email, date, subject.
    pretty = "%H%x1f%P%x1f%an%x1f%ae%x1f%ad%x1f%s"

    if start_sha:
        output = git(
            "log",
            "--reverse",
            "--date=format:%Y-%m-%d %H:%M",
            f"--pretty=format:{pretty}",
            range_spec,
            allow_fail=True,
        )
    else:
        # Root commit: list only the single end commit.
        output = git(
            "log",
            "--reverse",
            "-1",
            "--date=format:%Y-%m-%d %H:%M",
            f"--pretty=format:{pretty}",
            end_sha,
            allow_fail=True,
        )

    rows: list[dict] = []
    for line in output.splitlines():
        parts = line.split("\x1f")
        if len(parts) != 6:
            continue

        sha, parent_text, author, email, date_text, subject = [part.strip() for part in parts]
        if not subject:
            continue
        # Skip automated version bumps and the changelog bot's own commits.
        if subject_is_version_update(subject):
            continue

        if "changelog-bot" in email.lower():
            continue

        parents = [item for item in parent_text.split() if item]
        lowered = subject.lower()
        # Merge detection: multiple parents, or a conventional merge subject.
        is_merge = (
            len(parents) > 1
            or lowered.startswith("merge pull request")
            or lowered.startswith("merge branch")
            or lowered.startswith("merge remote-tracking branch")
        )

        rows.append(
            {
                "sha": sha,
                "parents": parents,
                "author": author,
                "email": email,
                "date": date_text,
                "subject": subject,
                "is_merge": is_merge,
            }
        )

    # Prefer non-merge commits for display; fall back to merges if that is all.
    non_merge = [row for row in rows if not row["is_merge"]]
    filtered = non_merge if non_merge else rows

    deduped: list[dict] = []
    seen_subjects: set[str] = set()

    for row in filtered:
        key = row["subject"].lower()
        if key in seen_subjects:
            continue
        seen_subjects.add(key)
        deduped.append(row)

    return {"all": rows, "display": deduped}


def normalize_branch_name(raw_branch: str) -> str:
    """Canonicalize a branch reference.

    Strips surrounding quotes, normalizes slashes, removes refs/heads/ and
    origin/ prefixes, and drops trailing punctuation.
    """
    branch = normalize_spaces(raw_branch.strip().strip("'\"")).replace("\\", "/")
    for prefix_pattern in (r"^refs/heads/", r"^origin/"):
        branch = re.sub(prefix_pattern, "", branch, flags=re.IGNORECASE)
    return branch.rstrip(".,)")


def extract_pr_numbers(subject: str) -> set[str]:
    """Collect PR numbers from merge subjects and '(#123)' squash suffixes."""
    found: set[str] = set(SQUASH_PR_RE.findall(subject))

    head = MERGE_PR_RE.match(subject.strip())
    if head:
        found.add(head.group(1))

    return found


def parse_integration_hints(subject: str) -> dict:
    """Inspect one commit subject for merge/PR metadata.

    Returns {"kind", "pr_numbers", "source_branches", "target_branches"};
    "kind" stays "" when the subject is not a recognized merge message.
    The PR and branch-merge patterns return early; the remote-tracking
    pattern falls through so squash "(#N)" suffixes are still collected.
    """
    hints = {
        "kind": "",
        "pr_numbers": set(),
        "source_branches": set(),
        "target_branches": set(),
    }

    text = subject.strip()

    # "Merge pull request #N from <branch>" — PR number plus source branch.
    match_pr = MERGE_PR_RE.match(text)
    if match_pr:
        hints["kind"] = "pull_request"
        hints["pr_numbers"].add(match_pr.group(1))
        source = normalize_branch_name(match_pr.group(2))
        if source:
            hints["source_branches"].add(source)
        return hints

    # "Merge branch 'x' [into 'y']" — may also carry a "(#N)" suffix.
    match_branch = MERGE_BRANCH_RE.match(text)
    if match_branch:
        hints["kind"] = "branch_merge"
        source = normalize_branch_name(match_branch.group(1))
        target = normalize_branch_name(match_branch.group(2) or "")
        if source:
            hints["source_branches"].add(source)
        if target:
            hints["target_branches"].add(target)
        hints["pr_numbers"].update(extract_pr_numbers(text))
        return hints

    # "Merge remote-tracking branch 'origin/x'".
    match_remote = MERGE_REMOTE_RE.match(text)
    if match_remote:
        hints["kind"] = "remote_tracking_merge"
        source = normalize_branch_name(match_remote.group(1))
        if source:
            hints["source_branches"].add(source)

    # Plain subjects may still carry squash-merge "(#N)" markers.
    hints["pr_numbers"].update(extract_pr_numbers(text))
    return hints


def detect_integration_trace(all_commits: list[dict]) -> dict:
    """Summarize how this range was integrated.

    Aggregates PR numbers, source/target branches and merge commits from the
    subjects, then labels the cut as a PR merge, a branch merge, or direct
    commits.
    """
    prs: set[str] = set()
    sources: set[str] = set()
    targets: set[str] = set()
    merges: list[dict] = []

    for commit in all_commits:
        hints = parse_integration_hints(commit["subject"])
        prs |= hints["pr_numbers"]
        sources |= hints["source_branches"]
        targets |= hints["target_branches"]
        if commit.get("is_merge"):
            merges.append(commit)

    if prs:
        integration_type = "Merge de Pull Request"
    elif merges or sources:
        integration_type = "Merge de rama"
    else:
        integration_type = "Commits directos"

    def pr_key(raw: str):
        # Numeric PR ids sort first, by value; anything else sorts lexically.
        return (0, int(raw)) if raw.isdigit() else (1, raw)

    return {
        "integration_type": integration_type,
        "pr_numbers": sorted(prs, key=pr_key),
        "source_branches": sorted(sources),
        "target_branches": sorted(targets),
        "merge_commits": merges,
        "merge_count": len(merges),
        "total_commits": len(all_commits),
    }


def build_bucket_map(files: list[dict]) -> dict[str, list[str]]:
    """Group filenames by their bucket label, preserving input order."""
    grouped: dict[str, list[str]] = {}
    for entry in files:
        bucket = entry["bucket"]
        if bucket not in grouped:
            grouped[bucket] = []
        grouped[bucket].append(entry["filename"])
    return grouped


def add_unique(lines: list[str], text: str) -> None:
    """Append *text* to *lines* unless an identical entry already exists."""
    if text in lines:
        return
    lines.append(text)


def build_impact_insights(files: list[dict], commits: list[dict], patch_diffs: list[dict]) -> list[str]:
    """Produce up to MAX_IMPACT_ITEMS bullets describing the cut's impact.

    Sources, in priority order: bucket distribution of changed files, the
    version bump in config.yml, changed validation/filter conditions in the
    diff, changed SQL fragments, and touched interface contracts. Falls back
    to a generic maintenance note when nothing matched. *commits* is
    currently unused here.
    """
    insights: list[str] = []
    # Prefer functional files for the bucket summary; fall back to all files.
    bucket_map = build_bucket_map(filter_functional_files(files) or files)

    if bucket_map:
        counts = sorted(((name, len(paths)) for name, paths in bucket_map.items()), key=lambda item: item[1], reverse=True)
        if len(counts) >= 2:
            add_unique(
                insights,
                f"El mayor impacto cae en {counts[0][0]} ({counts[0][1]} archivos) y {counts[1][0]} ({counts[1][1]} archivos).",
            )
        else:
            add_unique(insights, f"El impacto principal cae en {counts[0][0]} ({counts[0][1]} archivos).")

    if any(row["filename"] == CONFIG_PATH for row in files):
        add_unique(
            insights,
            "Se actualizo la version en app/config/config.yml; este corte consolida todos los cambios acumulados desde el corte anterior.",
        )

    # Report changed condition lines as old -> new fragments, one per file.
    for diff in patch_diffs:
        if len(insights) >= MAX_IMPACT_ITEMS:
            break

        removed_cond = first_condition_line(diff.get("removed", []))
        added_cond = first_condition_line(diff.get("added", []))

        if not removed_cond or not added_cond:
            continue

        # Skip pure whitespace/case reformatting.
        if normalize_spaces(removed_cond).lower() == normalize_spaces(added_cond).lower():
            continue

        old_fragment = shorten_fragment(removed_cond)
        new_fragment = shorten_fragment(added_cond)
        add_unique(
            insights,
            (
                f"En `{diff['file']}` se ajusto una regla de validacion/filtro: "
                f"`{old_fragment}` -> `{new_fragment}`. Esto cambia que casos pasan o se bloquean."
            ),
        )

    # Then changed SQL fragments, if room remains.
    if len(insights) < MAX_IMPACT_ITEMS:
        for diff in patch_diffs:
            if len(insights) >= MAX_IMPACT_ITEMS:
                break
            removed_sql = first_sql_line(diff.get("removed", []))
            added_sql = first_sql_line(diff.get("added", []))
            if not removed_sql or not added_sql:
                continue
            if normalize_spaces(removed_sql).lower() == normalize_spaces(added_sql).lower():
                continue

            add_unique(
                insights,
                (
                    f"En `{diff['file']}` hubo ajuste de consulta de datos: "
                    f"`{shorten_fragment(removed_sql)}` -> `{shorten_fragment(added_sql)}`. "
                    "Esto puede cambiar resultados, validaciones o conciliaciones."
                ),
            )

    interface_files = [
        row["filename"] for row in files if "/interfaces/" in row["filename"].lower().replace("\\", "/")
    ]
    if interface_files and len(insights) < MAX_IMPACT_ITEMS:
        add_unique(
            insights,
            f"Se modifico el contrato tecnico `{interface_files[0]}`; revisar consistencia en modulos que lo implementan.",
        )

    if not insights:
        add_unique(
            insights,
            "No se detecta un cambio funcional critico en el diff; el corte parece centrado en mantenimiento o refactor.",
        )

    return insights[:MAX_IMPACT_ITEMS]


def build_feature_highlights(files: list[dict], commits: list[dict], patch_diffs: list[dict]) -> list[str]:
    """Produce up to MAX_FEATURE_ITEMS bullets about new functionality.

    Sources: feature-flavored commit subjects, newly added PHP functions in
    the diff, and newly added functional files. When the cut only touches
    release machinery a single internal-adjustments bullet is returned.

    NOTE(review): relies on detect_change_signals(), defined elsewhere in
    this script; assumes its "internal_only" flag marks machinery-only cuts
    — confirm against that definition.
    """
    highlights: list[str] = []
    signals = detect_change_signals(files, patch_diffs)

    if signals["internal_only"]:
        return [
            "No se detecta funcionalidad nueva para usuarios finales; el corte corresponde a ajustes internos del proceso de liberacion."
        ]

    for commit in commits:
        subject = commit["subject"]
        lowered = subject.lower()
        if any(keyword in lowered for keyword in FEATURE_COMMIT_KEYWORDS):
            add_unique(highlights, f"Commit con foco funcional: {subject}.")
        if len(highlights) >= MAX_FEATURE_ITEMS:
            break

    if len(highlights) < MAX_FEATURE_ITEMS:
        # PHP functions added (and not merely moved) hint at new capabilities.
        for diff in patch_diffs:
            lowered_path = diff["file"].lower().replace("\\", "/")
            if not lowered_path.endswith(".php"):
                continue

            removed_methods = extract_method_names(diff.get("removed", []))
            added_methods = extract_method_names(diff.get("added", []))

            fresh_methods = [name for name in sorted(added_methods) if name not in removed_methods and name != "__construct"]
            if not fresh_methods:
                continue

            method_name = fresh_methods[0]
            if "/controller/" in lowered_path:
                text = f"Posible nuevo flujo en `{diff['file']}`: se detecta la funcion `{method_name}()` en el diff."
            else:
                text = f"Posible capacidad nueva en `{diff['file']}`: se detecta la funcion `{method_name}()` en el diff."

            add_unique(highlights, text)
            if len(highlights) >= MAX_FEATURE_ITEMS:
                break

    if len(highlights) < MAX_FEATURE_ITEMS:
        # Brand-new functional files (status "A", excluding release machinery).
        new_files = [
            row
            for row in files
            if row["status"] == "A"
            and row["filename"] != CHANGELOG_PATH
            and not is_internal_path(row["filename"])
        ]
        if new_files:
            add_unique(
                highlights,
                f"Se agregaron {len(new_files)} archivos funcionales nuevos en este corte, lo que sugiere ampliacion de capacidades visibles.",
            )

    if not highlights:
        highlights.append(
            "No se observa una funcionalidad completamente nueva; predominan mejoras o correcciones sobre funciones existentes."
        )

    return highlights[:MAX_FEATURE_ITEMS]
def to_executive_text(text: str) -> str:
    """Translate a technical insight bullet into management-friendly wording."""
    summary = sanitize_bullet(text, max_len=190)
    # Replace any remaining `inline code` span with a neutral placeholder.
    summary = re.sub(r"`[^`]+`", "un modulo", summary)

    rewrites = (
        ("Commit con foco funcional:", "Se incorporo una mejora funcional:"),
        ("Posible nuevo flujo en", "Se reforzo un proceso en"),
        ("Posible capacidad nueva en", "Se agrego capacidad en"),
        ("se detecta la funcion", "con nueva logica"),
        ("validacion/filtro", "regla de negocio"),
        ("Esto cambia que casos pasan o se bloquean.", "Esto puede cambiar el comportamiento en casos puntuales."),
        (
            "Esto puede cambiar resultados, validaciones o conciliaciones.",
            "Esto puede impactar reportes y resultados operativos.",
        ),
        (
            "No se observa una funcionalidad completamente nueva; predominan mejoras o correcciones sobre funciones existentes.",
            "El corte prioriza correcciones y mejoras sobre funciones existentes.",
        ),
    )

    for needle, substitute in rewrites:
        summary = summary.replace(needle, substitute)

    return summary


def normalize_path(path: str) -> str:
    """Lowercase *path* and convert backslashes to forward slashes."""
    return path.replace("\\", "/").lower()


def is_internal_path(filename: str) -> bool:
    """True for release-machinery files that do not count as functional changes."""
    normalized = normalize_path(filename)
    if normalized == CONFIG_PATH.lower():
        return True
    if normalized.startswith((".github/workflows/", "scripts/")):
        return True
    if normalized.endswith(".md"):
        return True
    return normalized in {"readme.md", ".gitignore"}


def build_internal_release_notes_summary(files: list[dict], commits: list[dict]) -> list[str]:
    """Summarize a machinery-only cut as up to four plain-Spanish bullets.

    Bullets are keyed off which release-machinery files changed; commit
    subjects mentioning "changelog" are the fallback, then a generic note.
    """
    normalized_files = [normalize_path(row.get("filename", "")) for row in files if row.get("filename")]
    # dict.fromkeys de-duplicates while preserving first-seen order.
    unique_files = list(dict.fromkeys(normalized_files))
    lines: list[str] = []

    if "scripts/update_changelog.py" in unique_files:
        add_unique(lines, "Se mejoro la generacion automatica del changelog para describir cambios de forma mas clara y concreta.")

    if any(path.endswith("changelog_business_context.md") for path in unique_files):
        add_unique(lines, "Se actualizaron reglas de redaccion para explicar mejor que se agrego, que se modifico y que se corrigio.")

    if ".github/workflows/changelog.yml" in unique_files:
        add_unique(lines, "Se ajusto el flujo automatico que crea y publica el changelog en cada corte.")

    if CONFIG_PATH.lower() in unique_files:
        add_unique(lines, "Se actualizo el numero y la fecha de version del corte liberado.")

    # Fallback 1: any commit subject that mentions the changelog.
    if not lines:
        for row in commits:
            subject = row.get("subject", "")
            if "changelog" in subject.lower():
                add_unique(lines, "Se hicieron mejoras internas en la calidad del resumen de cambios.")
                break

    # Fallback 2: generic internal-adjustments note.
    if not lines:
        lines.append("Se aplicaron ajustes internos del proceso de liberacion y documentacion.")

    return [sanitize_bullet(item, max_len=200) for item in lines[:4]]


def filter_functional_files(files: list[dict]) -> list[dict]:
    """Keep only rows whose filename exists and is not release machinery."""
    functional: list[dict] = []
    for row in files:
        name = row.get("filename")
        if name and not is_internal_path(name):
            functional.append(row)
    return functional


def humanize_identifier(raw: str) -> str:
    """Turn a file/class identifier into a human-readable title.

    Strips the path and known extensions, removes common class-name suffixes
    (controller/repository/entity/type), splits snake_case, kebab-case and
    camelCase into words, and capitalizes each word (all-caps words kept
    as-is). Returns "" when nothing readable remains.

    Fix: the word-capitalization append loop is now a comprehension (idiom;
    behavior unchanged).
    """
    text = raw.strip()
    if not text:
        return ""

    text = text.replace("\\", "/")
    text = text.split("/")[-1]
    text = re.sub(r"\.(php|twig|html|js|ts|css|scss|sass|sql|yml|yaml|json|md)$", "", text, flags=re.IGNORECASE)
    text = re.sub(r"(controller|repository|entity|type)$", "", text, flags=re.IGNORECASE)
    text = re.sub(r"[_\-]+", " ", text)
    # Insert a space at lower/digit -> upper transitions (camelCase split).
    text = re.sub(r"(?<=[a-z0-9])(?=[A-Z])", " ", text)
    text = normalize_spaces(text)

    if not text:
        return ""

    # Preserve acronyms (fully upper-case words); title-case everything else.
    words = [word.upper() if word.isupper() else word.capitalize() for word in text.split()]
    return " ".join(words)


def module_from_filename(filename: str) -> str:
    """Derive a human-readable module label from a changed file path."""
    raw = filename.replace("\\", "/")
    lowered = raw.lower()

    # Twig view folders name the module directly.
    view_match = re.search(r"/resources/views/([^/]+)", raw, flags=re.IGNORECASE)
    if view_match:
        label = humanize_identifier(view_match.group(1))
        if label:
            return label

    basename = raw.rsplit("/", 1)[-1]

    # Controllers and "clases" derive the label from the file name itself
    # (same computation as the fallback, so merging the branches is safe).
    if "/controller/" in lowered or "/clases/" in lowered:
        label = humanize_identifier(basename)
        if label:
            return label

    return humanize_identifier(basename) or "Modulo principal"


def pick_module_name(files: list[dict], predicate=None) -> str:
    """Return the first derivable module label, preferring functional files.

    *predicate*, when given, receives the normalized path and filters rows.
    """
    for candidate_rows in (filter_functional_files(files), files):
        for row in candidate_rows:
            filename = row.get("filename", "")
            if not filename:
                continue
            if predicate is not None and not predicate(normalize_path(filename)):
                continue
            label = module_from_filename(filename)
            if label:
                return label

    return "modulo principal"


def join_human_list(items: list[str], limit: int = 2) -> str:
    """Join up to *limit* unique cleaned items naturally ('a, b y c')."""
    picked: list[str] = []
    for entry in items:
        if len(picked) >= limit:
            break
        cleaned = normalize_spaces(entry).strip(" .")
        if cleaned and cleaned not in picked:
            picked.append(cleaned)

    if not picked:
        return ""
    if len(picked) == 1:
        return picked[0]
    return ", ".join(picked[:-1]) + " y " + picked[-1]


def classify_commit_kind(subject: str) -> str:
    """Classify a commit subject as 'agrego', 'corrigio' or 'modifico'."""
    lowered = subject.lower()
    addition_tokens = ("feat", "nueva", "nuevo", "agrega", "agregar", "implementa", "add ")
    fix_tokens = ("fix", "corr", "error", "bug", "hotfix", "revert")
    if any(token in lowered for token in addition_tokens):
        return "agrego"
    if any(token in lowered for token in fix_tokens):
        return "corrigio"
    return "modifico"


def commit_subject_for_people(subject: str) -> str:
    """Rewrite a raw commit subject as a short, reader-friendly Spanish phrase.

    Strips conventional-commit prefixes ("feat:", "fix(scope):", ...),
    translates common English fragments to Spanish, removes parentheses and
    colons, and truncates the result to 140 characters.
    """
    text = normalize_spaces(subject)
    # Drop conventional-commit type prefixes such as "feat(scope): ".
    text = re.sub(r"^(feat|fix|chore|refactor|docs|style|test)(\([^)]+\))?:\s*", "", text, flags=re.IGNORECASE)

    lowered = text.lower().replace("_", " ")
    # English -> Spanish substitutions, applied in order: longer phrases come
    # before their substrings (e.g. "error handling" before "handling") so
    # overlapping matches resolve correctly.
    # NOTE: removed the original ("ajax", "ajax") entry — replacing a string
    # with itself is a no-op and only obscured the table.
    replacements = (
        ("error handling", "manejo de errores"),
        ("error reporting", "mensajes de error"),
        ("improve", "mejora"),
        ("enhance", "mejora"),
        ("normalize", "normalizar"),
        ("handling", "manejo"),
        ("report hour", "hora de reporte"),
        ("closing bracket", "cierre de parentesis"),
        ("remove unnecessary", "eliminar elemento innecesario"),
        (" and ", " y "),
    )
    for source, target in replacements:
        lowered = lowered.replace(source, target)

    lowered = re.sub(r"[():]+", " ", lowered)
    lowered = normalize_spaces(lowered)
    return shorten_fragment(lowered.strip(" ."), max_len=140)


def infer_method_topic(method_name: str) -> str:
    """Describe, in user-facing terms, what a newly added method is about."""
    name = method_name.lower()

    if "normalizar" in name and "hora" in name:
        return "normalizacion de hora"
    if any(token in name for token in ("sonar", "audio", "play")):
        return "alertas sonoras"
    if "reporte" in name:
        return "proceso de reportes"
    if "valid" in name:
        return "validaciones adicionales"

    label = humanize_identifier(method_name).lower()
    return f"nuevo proceso {label}" if label else ""


def detect_patch_flags(patch_diffs: list[dict]) -> dict[str, bool]:
    """Scan added/removed diff lines for feature markers and raise flags."""
    flags = {
        "has_audio_feedback": False,
        "has_report_hour_normalization": False,
        "has_boolean_param_normalization": False,
        "has_error_count_message": False,
        "has_sam_log_split": False,
        "has_no_recipients_log": False,
    }
    # Flag -> tokens; the flag is raised when ANY token appears in the diff.
    any_token_rules = (
        ("has_audio_feedback", ("playsoundsafe", "sonarok", "sonarnotok", "audok", "audnotok", "audio.play")),
        ("has_report_hour_normalization", ("normalizarhorareporte", "reporte_hora", "horareporte")),
        ("has_boolean_param_normalization", ("filter_validate_boolean", "filter_var(", "testparam", "resetparam")),
        ("has_sam_log_split", ("guardarsam", "tiposam", "reportes programados error")),
        ("has_no_recipients_log", ("no hay destinatarios", "destinatarios para el reporte")),
    )

    for diff in patch_diffs:
        path = diff.get("file", "")
        if not path or is_internal_path(path):
            continue

        haystack = " " + " ".join(diff.get("added", []) + diff.get("removed", [])).lower() + " "

        for flag_name, tokens in any_token_rules:
            if any(token in haystack for token in tokens):
                flags[flag_name] = True
        # This one needs ALL three words to be present.
        if all(token in haystack for token in ("intentos", "enviados", "fallidos")):
            flags["has_error_count_message"] = True

    return flags


def collect_operational_evidence(files: list[dict], commits: list[dict], patch_diffs: list[dict]) -> dict[str, list[str]]:
    """Gather human-readable evidence of what was added, changed and fixed."""
    evidence: dict[str, list[str]] = {"agrego": [], "modifico": [], "corrigio": []}

    for diff in patch_diffs:
        path = diff.get("file", "")
        if not path or is_internal_path(path):
            continue

        module = module_from_filename(path)
        added_lines = diff.get("added", [])
        removed_lines = diff.get("removed", [])
        haystack = " " + " ".join(added_lines + removed_lines).lower() + " "

        # Keyword-driven hints from the diff content.
        for kind, keywords, phrase in DIFF_HINT_PATTERNS:
            if any(keyword in haystack for keyword in keywords):
                add_unique(evidence[kind], f"{phrase} en {module}")

        # A changed condition line suggests updated validation rules.
        before_cond = first_condition_line(removed_lines)
        after_cond = first_condition_line(added_lines)
        if before_cond and after_cond:
            if normalize_spaces(before_cond).lower() != normalize_spaces(after_cond).lower():
                add_unique(evidence["modifico"], f"reglas de validacion en {module}")

        # The first genuinely new method (ignoring constructors) marks an addition.
        previous_methods = extract_method_names(removed_lines)
        current_methods = extract_method_names(added_lines)
        brand_new = [
            name
            for name in sorted(current_methods)
            if name not in previous_methods and name != "__construct"
        ]
        if brand_new:
            topic = infer_method_topic(brand_new[0])
            if topic:
                add_unique(evidence["agrego"], f"{topic} en {module}")

    added_files = [
        entry
        for entry in files
        if entry.get("status") == "A" and entry.get("filename") and not is_internal_path(entry["filename"])
    ]
    for entry in added_files[:2]:
        add_unique(evidence["agrego"], f"nuevo archivo funcional en {module_from_filename(entry['filename'])}")

    # Top up each bucket (max 2 items) from commit subjects, skipping merges.
    for commit in commits:
        subject = commit.get("subject", "")
        if not subject:
            continue
        bucket = classify_commit_kind(subject)
        if len(evidence[bucket]) >= 2:
            continue
        phrase = commit_subject_for_people(subject)
        if not phrase or any(marker in phrase for marker in ("merge", "pull request", "branch")):
            continue
        add_unique(evidence[bucket], phrase)

    return evidence


def build_structured_change_lines(files: list[dict], commits: list[dict], patch_diffs: list[dict]) -> list[str]:
    """Render collected evidence as 'Se agrego/modifico/corrigio' bullets."""
    evidence = collect_operational_evidence(files, commits, patch_diffs)

    labeled_keys = (
        ("agrego", "Se agrego"),
        ("modifico", "Se modifico"),
        ("corrigio", "Se corrigio"),
    )

    bullets: list[str] = []
    for key, label in labeled_keys:
        summary = join_human_list(evidence.get(key, []), limit=2)
        if summary:
            add_unique(bullets, f"{label}: {summary}.")

    return [sanitize_bullet(text, max_len=200) for text in bullets[:3]]


def extract_main_bucket(files: list[dict]) -> str:
    """Return the most frequent change bucket, breaking ties by priority."""
    if not files:
        return "Sin cambios"

    source_files = filter_functional_files(files) or files

    tally: dict[str, int] = {}
    for entry in source_files:
        name = entry.get("bucket", "Otros cambios")
        tally[name] = tally.get(name, 0) + 1

    if not tally:
        return "Sin cambios"
    # Highest count first; ties resolved by the bucket priority order.
    best_bucket, _ = min(tally.items(), key=lambda pair: (-pair[1], bucket_sort_key(pair[0])))
    return best_bucket


def pick_first_file(files: list[dict], predicate) -> str:
    """Return the first filename whose normalized path satisfies *predicate*,
    preferring functional files; fall back to the first file or a placeholder."""
    for pool in (filter_functional_files(files), files):
        for entry in pool:
            candidate = entry.get("filename", "")
            if candidate and predicate(normalize_path(candidate)):
                return candidate

    return files[0].get("filename", "modulo afectado") if files else "modulo afectado"


def detect_change_signals(files: list[dict], patch_diffs: list[dict]) -> dict:
    """Derive high-level change signals for a release cut.

    Inspects changed file paths plus added/removed diff lines and returns a
    dict of booleans/lists (UI, controllers, reports, SQL, config, CI, rule
    changes, ...) consumed by the summary, validation and risk builders.
    """
    signals = {
        "internal_only": True,
        "has_ui": False,
        "has_controller": False,
        "has_reports": False,
        "has_repository_or_data": False,
        "has_migration": False,
        "has_config": False,
        "has_ci": False,
        "has_scripts": False,
        "has_docs": False,
        "has_condition_change": False,
        "has_sql_change": False,
        "sql_files": [],
        "condition_files": [],
        "logic_file_count": 0,
        "main_bucket": extract_main_bucket(files),
    }

    for row in files:
        filename = row.get("filename", "")
        normalized = normalize_path(filename)

        if not is_internal_path(normalized):
            signals["internal_only"] = False

        if normalized == CONFIG_PATH.lower():
            signals["has_config"] = True

        if normalized.startswith(".github/workflows/"):
            signals["has_ci"] = True

        if normalized.startswith("scripts/"):
            signals["has_scripts"] = True

        if normalized.endswith(".md"):
            signals["has_docs"] = True

        if "/controller/" in normalized:
            signals["has_controller"] = True

        # "report" also matches "/reportes/" and "reporte", so the original
        # three-way OR collapses to this single check.
        if "report" in normalized:
            signals["has_reports"] = True

        if "/resources/views/" in normalized or normalized.endswith((".twig", ".html", ".css", ".scss", ".sass", ".js", ".ts", ".jsx", ".tsx", ".vue")):
            signals["has_ui"] = True

        if "/repository/" in normalized or normalized.endswith(".sql"):
            signals["has_repository_or_data"] = True

        if "/clases/" in normalized or "/entity/" in normalized or "/repository/" in normalized:
            signals["logic_file_count"] += 1

        # "migration" also matches "/migrations/" and ".migration.php", so the
        # original three-way OR collapses to this single check.
        if "migration" in normalized:
            signals["has_migration"] = True

        if normalized.endswith(".sql"):
            signals["has_sql_change"] = True
            if filename not in signals["sql_files"]:
                signals["sql_files"].append(filename)

    for diff in patch_diffs:
        # A condition line rewritten between removed/added sides signals a
        # business-rule change.
        removed_cond = first_condition_line(diff.get("removed", []))
        added_cond = first_condition_line(diff.get("added", []))
        if removed_cond and added_cond:
            if normalize_spaces(removed_cond).lower() != normalize_spaces(added_cond).lower():
                signals["has_condition_change"] = True
                if diff.get("file") and diff["file"] not in signals["condition_files"]:
                    signals["condition_files"].append(diff["file"])

        # Likewise for rewritten SQL statements.
        removed_sql = first_sql_line(diff.get("removed", []))
        added_sql = first_sql_line(diff.get("added", []))
        if removed_sql and added_sql:
            if normalize_spaces(removed_sql).lower() != normalize_spaces(added_sql).lower():
                signals["has_sql_change"] = True
                if diff.get("file") and diff["file"] not in signals["sql_files"]:
                    signals["sql_files"].append(diff["file"])

    return signals


def build_user_visible_summary(
    files: list[dict],
    commits: list[dict],
    patch_diffs: list[dict],
    integration_trace: dict,
) -> list[str]:
    """Build the 'what changes for the user' bullet list.

    Falls back to internal release notes when only internal files changed;
    otherwise combines structured evidence with flag-driven highlights.
    """
    signals = detect_change_signals(files, patch_diffs)
    flags = detect_patch_flags(patch_diffs)
    lines: list[str] = []

    if signals["internal_only"]:
        lines.extend(build_internal_release_notes_summary(files, commits))
        add_unique(lines, "No hay cambios en pantallas ni en pasos operativos del sistema para este corte.")
        add_unique(lines, "Este ajuste mejora la comunicacion del release para que el equipo entienda mejor el alcance.")
        return [sanitize_bullet(item, max_len=200) for item in lines[:MAX_USER_ITEMS]]

    for item in build_structured_change_lines(files, commits, patch_diffs):
        add_unique(lines, item)

    main_module = pick_module_name(files)
    ui_module = pick_module_name(files, lambda path: "/resources/views/" in path or path.endswith((".twig", ".html", ".js", ".ts")))

    if flags["has_audio_feedback"]:
        add_unique(lines, f"En {ui_module}, ahora escuchara un sonido cuando la accion salga bien y otro cuando falle.")

    if flags["has_report_hour_normalization"]:
        add_unique(lines, f"En {main_module}, la hora de los reportes ahora se interpreta de forma consistente.")

    if flags["has_error_count_message"]:
        add_unique(lines, "Cuando falle un envio de reportes, el mensaje final mostrara intentos, enviados y fallidos.")

    if flags["has_boolean_param_normalization"]:
        add_unique(lines, "Las opciones tipo si/no ahora se interpretan correctamente, evitando respuestas inesperadas.")

    if flags["has_no_recipients_log"]:
        add_unique(lines, "Si un reporte no tiene destinatarios, el sistema lo indicara de forma clara.")

    add_unique(lines, f"Area con mayor impacto operativo: {main_module}.")

    if signals["has_ui"]:
        add_unique(lines, f"Pantalla a revisar primero: {ui_module}.")

    # BUGFIX: this was `while len(lines) < 4: add_unique(...)`, which spins
    # forever once the padding line is already present but the list is still
    # shorter than 4 (add_unique dedupes, so the loop stops making progress).
    # add_unique can only ever contribute this one distinct line, so a single
    # attempt is equivalent and always terminates.
    if len(lines) < 4:
        add_unique(lines, f"Validar una operacion completa en {main_module} para confirmar resultados esperados.")

    return [sanitize_bullet(item, max_len=200) for item in lines[:MAX_USER_ITEMS]]


def build_quick_validation(
    files: list[dict],
    commits: list[dict],
    patch_diffs: list[dict],
    integration_trace: dict,
) -> list[str]:
    """Build a short, concrete smoke-test checklist for this release cut.

    Internal-only cuts get a fixed pipeline/version checklist; otherwise the
    checks are driven by the detected change signals and patch flags.
    """
    signals = detect_change_signals(files, patch_diffs)
    flags = detect_patch_flags(patch_diffs)
    checks: list[str] = []

    if signals["internal_only"]:
        internal_notes = build_internal_release_notes_summary(files, commits)
        has_changelog_improvement = any("changelog" in normalize_spaces(item).lower() for item in internal_notes)

        checks = [
            "Confirmar que el pipeline del release termino sin errores.",
            "Verificar que changelog.md se genero para el corte actual.",
            "Validar que version_numero y version_fecha en config.yml coinciden con el despliegue.",
            "Abrir el sistema y confirmar acceso normal sin alertas nuevas.",
        ]

        if has_changelog_improvement:
            checks.insert(2, "Revisar la pantalla de changelog y confirmar que el texto explica claramente que se agrego, modifico y corrigio.")

        return [sanitize_bullet(item, max_len=200) for item in checks[:MAX_VALIDATION_ITEMS]]

    ui_module = pick_module_name(files, lambda path: "/resources/views/" in path or path.endswith((".twig", ".html", ".js", ".ts")))
    flow_module = pick_module_name(files, lambda path: "/controller/" in path or "/clases/" in path)

    if signals["has_ui"]:
        add_unique(checks, f"Abrir {ui_module} y validar carga, botones y mensajes sin errores visuales.")

    if flags["has_audio_feedback"]:
        add_unique(checks, f"En {ui_module}, confirmar sonido de exito cuando la accion termina bien y sonido de alerta cuando falla.")

    if signals["has_controller"]:
        add_unique(checks, f"Ejecutar en {flow_module} un caso exitoso y uno fallido para validar respuesta final al usuario.")

    if signals["has_reports"]:
        add_unique(checks, f"Generar un reporte real en {flow_module} y validar hora, detalle y contenido del resultado.")

    if flags["has_report_hour_normalization"] or flags["has_error_count_message"]:
        add_unique(checks, "Verificar que el resumen final muestre intentos, enviados y fallidos con hora consistente.")

    if flags["has_no_recipients_log"]:
        add_unique(checks, "Probar un reporte sin destinatarios y confirmar que queda registro claro en bitacora.")

    if signals["has_condition_change"]:
        add_unique(checks, "Probar un caso que antes pasaba y otro que antes se bloqueaba para validar reglas actualizadas.")

    add_unique(checks, "Revisar bitacora de errores despues de las pruebas para confirmar que no hay excepciones nuevas.")
    add_unique(checks, "Confirmar que la version mostrada en el sistema corresponde al corte liberado.")

    # BUGFIX: this was `while len(checks) < 4: add_unique(...)`, which spins
    # forever once the padding line is already present but the list is still
    # shorter than 4 (add_unique dedupes, so the loop stops making progress).
    # A single attempt is equivalent and always terminates.
    if len(checks) < 4:
        add_unique(checks, f"Validar filtros y busqueda basica en {flow_module}.")

    return [sanitize_bullet(item, max_len=200) for item in checks[:MAX_VALIDATION_ITEMS]]


def classify_risk_level(signals: dict, files: list[dict]) -> str:
    """Map change signals and change volume to 'Bajo', 'Medio' or 'Alto'."""
    if signals["internal_only"]:
        return "Bajo"

    wide_scope = len(files) >= 18 and signals["logic_file_count"] >= 8
    deep_logic = signals["has_migration"] or (
        signals["has_sql_change"]
        and signals["has_condition_change"]
        and (signals["has_controller"] or signals["logic_file_count"] >= 6)
    )
    if wide_scope or deep_logic:
        return "Alto"

    medium_triggers = (
        signals["has_sql_change"],
        signals["has_condition_change"],
        signals["has_controller"],
        signals["has_repository_or_data"],
    )
    return "Medio" if any(medium_triggers) else "Bajo"


def build_risks_and_symptoms(
    files: list[dict],
    commits: list[dict],
    patch_diffs: list[dict],
    integration_trace: dict,
) -> list[str]:
    """Build 'Riesgo ...: ... Sintoma esperado: ...' bullets for the cut."""
    signals = detect_change_signals(files, patch_diffs)
    risk_level = classify_risk_level(signals, files)
    bullets: list[str] = []

    if signals["internal_only"]:
        bullets.append(
            "Riesgo Bajo: puede existir confusion si el equipo no revisa el nuevo texto del changelog. Sintoma esperado: dudas sobre el alcance del corte."
        )
        bullets.append(
            "Riesgo Bajo: posible desalineacion de version publicada. Sintoma esperado: numero/fecha de version no coinciden con lo liberado."
        )
        return [sanitize_bullet(item, max_len=200) for item in bullets[:MAX_RISK_ITEMS]]

    main_module = pick_module_name(files)
    bullets.append(
        f"Riesgo {risk_level}: cambios en {main_module}. Sintoma esperado: variacion en pasos habituales del proceso. Donde mirar primero: modulo {main_module}."
    )

    if signals["has_sql_change"]:
        sql_module = pick_module_name(files, lambda path: path.endswith(".sql") or "/repository/" in path or "/reportes/" in path)
        # SQL risk inherits "Alto" only when the overall level is already high.
        sql_level = "Alto" if risk_level == "Alto" else "Medio"
        bullets.append(
            f"Riesgo {sql_level}: pueden variar totales o resultados de consulta. Sintoma esperado: diferencias en conciliaciones. Donde mirar primero: reportes de {sql_module}."
        )

    if signals["has_condition_change"]:
        cond_module = pick_module_name(files, lambda path: "/controller/" in path or "/clases/" in path)
        bullets.append(
            f"Riesgo Medio: cambiaron reglas del proceso. Sintoma esperado: casos que antes pasaban ahora se bloquean (o viceversa). Donde mirar primero: {cond_module}."
        )

    if signals["has_ui"]:
        ui_module = pick_module_name(files, lambda path: "/resources/views/" in path or path.endswith((".js", ".ts", ".twig", ".html")))
        bullets.append(
            f"Riesgo Bajo: ajuste visual en pantalla. Sintoma esperado: comportamiento inesperado en botones o mensajes. Donde mirar primero: {ui_module}."
        )

    # Pad to at least two bullets (the list already has one, so at most one
    # extra append happens here).
    while len(bullets) < 2:
        bullets.append(
            f"Riesgo {risk_level}: revisar el flujo principal en {main_module}. Sintoma esperado: respuesta fuera de lo habitual."
        )

    return [sanitize_bullet(item, max_len=200) for item in bullets[:MAX_RISK_ITEMS]]


def build_rollback_suggestion(
    start_sha: str,
    end_sha: str,
    files: list[dict],
    integration_trace: dict,
) -> list[str]:
    """Suggest git revert commands (and config follow-up) for this cut."""
    suggestions = [f"Rollback rapido: revertir el commit de corte con `git revert --no-edit {end_sha}`."]

    # A range revert only makes sense when the cut spans multiple commits.
    if start_sha and integration_trace.get("total_commits", 0) > 1:
        suggestions.append(
            f"Rollback completo del rango: `git revert --no-edit {start_sha}..{end_sha}` para deshacer lo integrado desde el corte anterior."
        )

    config_touched = any(entry.get("filename") == CONFIG_PATH for entry in files)
    if config_touched:
        suggestions.append("Despues del rollback, validar app/config/config.yml para ajustar version_numero y version_fecha si aplica.")

    return [sanitize_bullet(text, max_len=200) for text in suggestions[:MAX_ROLLBACK_ITEMS]]


def merge_section_with_fallback(primary: list[str], fallback: list[str], max_items: int, min_items: int) -> list[str]:
    """Combine AI-provided lines with fallback lines, enforcing item bounds."""
    merged: list[str] = []

    def absorb(source: list[str], target_count: int) -> None:
        # Append sanitized, de-duplicated lines until target_count is reached.
        for raw in source:
            if len(merged) >= target_count:
                return
            text = sanitize_bullet(raw, max_len=MAX_AI_BULLET_LEN)
            if text:
                add_unique(merged, text)

    absorb(primary, max_items)
    absorb(fallback, max_items)

    if len(merged) < min_items:
        absorb(fallback, min_items)

    if not merged and fallback:
        merged = [sanitize_bullet(fallback[0], max_len=MAX_AI_BULLET_LEN)]

    return merged[:max_items]


def load_business_context() -> str:
    """Load optional business context (max 8000 chars) from env or known files.

    Precedence: CHANGELOG_BUSINESS_CONTEXT env var, then the file named by
    CHANGELOG_BUSINESS_CONTEXT_FILE, then the default candidate paths.
    """
    inline = os.environ.get("CHANGELOG_BUSINESS_CONTEXT", "").strip()
    if inline:
        return inline[:8000]

    candidates: list[str] = []
    file_override = os.environ.get("CHANGELOG_BUSINESS_CONTEXT_FILE", "").strip()
    if file_override:
        candidates.append(file_override)
    candidates.extend(DEFAULT_BUSINESS_CONTEXT_PATHS)

    for path in candidates:
        if not path:
            continue
        try:
            text = read_text(path).strip()
        except OSError:
            # Missing/unreadable candidate: silently try the next one.
            continue
        if text:
            return text[:8000]

    return ""


def build_default_executive_summary(
    files: list[dict],
    commits: list[dict],
    patch_diffs: list[dict],
    impact_insights: list[str],
    feature_highlights: list[str],
    integration_trace: dict,
) -> list[str]:
    """Fallback management-facing bullets when the AI summary is unavailable."""
    signals = detect_change_signals(files, patch_diffs)
    risk_level = classify_risk_level(signals, files)
    bullets: list[str] = []

    if signals["internal_only"]:
        for note in build_internal_release_notes_summary(files, commits):
            add_unique(bullets, note)
        add_unique(bullets, "No se detectan cambios visibles en pantallas, reportes o procesos operativos para usuarios finales.")
        add_unique(bullets, "Impacto esperado: mejor comunicacion del alcance de cada version, con continuidad normal del servicio.")
        return [sanitize_bullet(item, max_len=200) for item in bullets[:MAX_EXECUTIVE_ITEMS]]

    main_module = pick_module_name(files)
    add_unique(
        bullets,
        f"Esta version impacta principalmente {main_module}, con efecto directo en operacion diaria del area usuaria.",
    )

    for change_line in build_structured_change_lines(files, commits, patch_diffs):
        add_unique(bullets, change_line)

    if signals["has_reports"] or signals["has_sql_change"]:
        add_unique(
            bullets,
            "Puede haber variaciones en reportes y conciliaciones; conviene validar una muestra conocida antes de cerrar el dia.",
        )

    add_unique(bullets, f"Sensibilidad operativa estimada: riesgo {risk_level}.")

    if not bullets:
        bullets.append("No hay evidencia suficiente para resumir este corte.")

    return [sanitize_bullet(item, max_len=200) for item in bullets[:MAX_EXECUTIVE_ITEMS]]

def build_default_technical_summary(
    files: list[dict],
    commits: list[dict],
    impact_insights: list[str],
    feature_highlights: list[str],
    integration_trace: dict,
) -> list[str]:
    """Fallback technical bullets when the AI summary is unavailable."""
    bullets: list[str] = []

    add_unique(bullets, f"Commits analizados: {len(commits)}; archivos con cambios: {len(files)}.")

    for insight in impact_insights[:3]:
        add_unique(bullets, sanitize_bullet(insight))
    for highlight in feature_highlights[:2]:
        add_unique(bullets, sanitize_bullet(highlight))

    merges = int(integration_trace.get("merge_count", 0) or 0)
    if merges > 0:
        add_unique(bullets, f"Se detectaron {merges} commit(s) de merge en el rango de integracion.")

    if not bullets:
        bullets.append("No hay evidencia suficiente para elaborar detalle tecnico.")

    return [sanitize_bullet(item, max_len=200) for item in bullets[:MAX_TECHNICAL_ITEMS]]


def repo_base_url() -> str:
    """Return '<server>/<owner>/<repo>' from GitHub Actions env vars, or ''."""
    server = os.environ.get("GITHUB_SERVER_URL", "").strip()
    repo = os.environ.get("GITHUB_REPOSITORY", "").strip()
    if server and repo:
        return f"{server}/{repo}"
    return ""


def format_pr_reference(pr_number: str) -> str:
    """Format a PR number as '#N (url)' when the repo URL is known, else '#N'."""
    base = repo_base_url()
    if not base:
        return f"#{pr_number}"
    return f"#{pr_number} ({base}/pull/{pr_number})"


def extract_json_object(text: str) -> str:
    """Extract the outermost JSON object from *text*, tolerating ``` fences.

    Returns the stripped input unchanged when no brace pair is found.
    """
    candidate = text.strip()

    # Peel off a markdown code fence (``` or ```json) if present.
    if candidate.startswith("```"):
        candidate = re.sub(r"^```(?:json)?\s*", "", candidate, flags=re.IGNORECASE)
        candidate = re.sub(r"\s*```$", "", candidate)

    opening = candidate.find("{")
    closing = candidate.rfind("}")
    if opening >= 0 and closing > opening:
        return candidate[opening : closing + 1]

    return candidate


def normalize_ai_lines(value: object, max_items: int, strip_checklist: bool = False) -> list[str]:
    """Coerce an AI-provided value into a bounded list of clean bullet strings."""
    if isinstance(value, str):
        value = [value]
    if not isinstance(value, list):
        return []

    cleaned_rows: list[str] = []
    for entry in value:
        if not isinstance(entry, str):
            continue

        text = sanitize_bullet(entry, max_len=MAX_AI_BULLET_LEN)
        if strip_checklist:
            # Drop markdown bullet/checklist prefixes such as "- [ ] " or "[x] ".
            text = re.sub(r"^\s*[-*]\s*(?:\[[ xX]\]\s*)?", "", text)
            text = re.sub(r"^\s*\[[ xX]\]\s*", "", text)
            text = text.strip()

        if text:
            add_unique(cleaned_rows, text)
            if len(cleaned_rows) >= max_items:
                break

    return cleaned_rows


def is_management_friendly_line(text: str) -> bool:
    """Return True when a bullet is free of git hashes and technical jargon."""
    padded = f" {normalize_spaces(text).lower()} "
    # Reject anything containing a hex run that looks like a commit hash.
    if re.search(r"\b[0-9a-f]{8,40}\b", padded):
        return False
    for token in MANAGEMENT_TECH_TOKENS:
        if token in padded:
            return False
    return True


def is_specific_user_line(text: str) -> bool:
    """Return True when a user-facing bullet carries concrete evidence, not filler."""
    normalized = normalize_spaces(text).lower()
    if not normalized:
        return False

    for phrase in GENERIC_USER_PHRASES:
        if phrase in normalized:
            return False

    structured_prefixes = ("se agrego:", "se modifico:", "se corrigio:")
    if normalized.startswith(structured_prefixes):
        return True

    concrete_markers = ("agrego", "modifico", "corrigio", "ahora", "ya no", "cuando")
    return any(marker in normalized for marker in concrete_markers)


def build_ai_context(
    version_number: str,
    version_date: str,
    start_sha: str,
    end_sha: str,
    files: list[dict],
    commits: list[dict],
    patch_diffs: list[dict],
    impact_insights: list[str],
    feature_highlights: list[str],
    integration_trace: dict,
    business_context: str,
) -> str:
    """Assemble the plain-text user prompt sent to the AI summarizer.

    Combines version/range metadata, detected change signals, optional
    business context, observed impact/feature bullets, and bounded lists of
    commits and files into one newline-joined document.
    """
    signals = detect_change_signals(files, patch_diffs)

    # Bounded reference lists so the prompt stays within a predictable size.
    commit_lines = [
        f"- {sanitize_bullet(row['subject'], max_len=180)} ({row['author']}, {row['date']})"
        for row in commits[:MAX_AI_CONTEXT_COMMITS]
    ]
    file_lines = [
        f"- [{row['status']}] {row['filename']} ({row['bucket']})"
        for row in files[:MAX_AI_CONTEXT_FILES]
    ]

    pr_numbers = integration_trace.get("pr_numbers", [])
    source_branches = integration_trace.get("source_branches", [])

    lines = [
        f"Version: {version_number}",
        f"Fecha de version: {version_date}",
        f"Rango git: {short_sha(start_sha)}..{short_sha(end_sha)}",
        # Falls back to 'main' when not running inside GitHub Actions.
        f"Rama de corte: {os.environ.get('GITHUB_REF_NAME', '').strip() or 'main'}",
        f"Tipo de integracion: {integration_trace.get('integration_type', 'Commits directos')}",
        f"Cantidad de commits considerados: {len(commits)}",
        f"Cantidad de archivos: {len(files)}",
        f"PR detectados: {', '.join('#' + pr for pr in pr_numbers) if pr_numbers else 'ninguno'}",
        f"Ramas origen detectadas: {', '.join(source_branches) if source_branches else 'ninguna'}",
        f"Bucket principal: {signals.get('main_bucket', 'Sin cambios')}",
        f"Hay cambios SQL: {'si' if signals.get('has_sql_change') else 'no'}",
        f"Hay cambios de condiciones/reglas: {'si' if signals.get('has_condition_change') else 'no'}",
        "Audiencia principal: soporte, implementacion y gerencia no tecnica.",
        "",
        "Contexto de negocio disponible:",
        business_context if business_context else "No se proporciono contexto de negocio explicito para este repositorio.",
        "",
        "Impacto tecnico observado:",
        *[f"- {sanitize_bullet(item)}" for item in impact_insights],
        "",
        "Mejoras observadas:",
        *[f"- {sanitize_bullet(item)}" for item in feature_highlights],
        "",
        "Commits de referencia:",
        *commit_lines,
        "",
        "Archivos de referencia:",
        *file_lines,
    ]

    return "\n".join(lines)


def request_ai_summaries(
    version_number: str,
    version_date: str,
    start_sha: str,
    end_sha: str,
    files: list[dict],
    commits: list[dict],
    patch_diffs: list[dict],
    impact_insights: list[str],
    feature_highlights: list[str],
    integration_trace: dict,
    business_context: str,
) -> tuple[dict[str, list[str]], str]:
    """Ask an OpenAI-compatible chat endpoint for the six changelog sections.

    Returns (sections, model_name). On any failure — missing API key, HTTP or
    network error, malformed JSON at either the transport or payload level —
    returns ({}, "") so callers fall back to the locally built summaries.
    The request is best-effort by design: every error path prints a short
    diagnostic instead of raising.
    """
    api_key = os.environ.get("OPENAI_API_KEY", "").strip()
    if not api_key:
        # AI summaries are optional; without a key this is a silent no-op.
        return {}, ""

    # Endpoint resolution: an explicit CHANGELOG_AI_ENDPOINT wins; otherwise
    # the chat-completions path is appended to the (default OpenAI) base URL.
    model = os.environ.get("CHANGELOG_AI_MODEL", "").strip() or "gpt-4o-mini"
    custom_endpoint = os.environ.get("CHANGELOG_AI_ENDPOINT", "").strip()
    base_url = os.environ.get("CHANGELOG_AI_BASE_URL", "").strip() or "https://api.openai.com/v1"
    endpoint = custom_endpoint or f"{base_url.rstrip('/')}/chat/completions"

    # The system prompt pins the exact JSON schema and editorial rules the
    # downstream parser (extract_json_object + normalize_ai_lines) relies on.
    system_prompt = (
        "Eres un analista senior de release notes en espanol para un sistema de inventario y operaciones comerciales. "
        "Responde SOLO con JSON valido, sin texto adicional, con este esquema exacto: "
        "{\"resumen_gerencial\": [\"...\"], \"que_cambia_para_usuario\": [\"...\"], \"validacion_rapida\": [\"...\"], \"riesgos_y_sintomas\": [\"...\"], \"detalle_tecnico\": [\"...\"], \"rollback_sugerido\": [\"...\"]}. "
        "Reglas obligatorias: no inventes datos, pantallas, modulos ni resultados; usa solo evidencia provista; "
        "si no hay evidencia escribe frases honestas como 'No se detecta cambio visible para el usuario final en este corte'; "
        "cada bullet debe tener maximo 200 caracteres; en 'validacion_rapida' no incluyas '- [ ]'; "
        "en 'resumen_gerencial' y 'que_cambia_para_usuario' evita tecnicismos (commit, hash, rutas de archivo, clases o codigo); "
        "en 'que_cambia_para_usuario' incluye evidencia concreta y, cuando exista, separa claramente lo agregado, lo modificado y lo corregido con prefijos: 'Se agrego:', 'Se modifico:' y 'Se corrigio:'; "
        "evita frases vagas como 'modulo afectado' o 'puede notar cambios'."
    )

    # The user prompt carries all the evidence (commits, files, signals,
    # business context) assembled by build_ai_context.
    user_prompt = build_ai_context(
        version_number=version_number,
        version_date=version_date,
        start_sha=start_sha,
        end_sha=end_sha,
        files=files,
        commits=commits,
        patch_diffs=patch_diffs,
        impact_insights=impact_insights,
        feature_highlights=feature_highlights,
        integration_trace=integration_trace,
        business_context=business_context,
    )

    # Low temperature keeps the sections close to the provided evidence.
    payload = {
        "model": model,
        "temperature": 0.2,
        "messages": [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
    }

    body = json.dumps(payload).encode("utf-8")
    request = urllib.request.Request(
        endpoint,
        data=body,
        headers={
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        },
        method="POST",
    )

    try:
        with urllib.request.urlopen(request, timeout=AI_REQUEST_TIMEOUT_SECONDS) as response:
            raw = response.read().decode("utf-8", errors="replace")
    except urllib.error.HTTPError as error:
        # HTTP-level failure (4xx/5xx): log a truncated body and fall back.
        error_body = error.read().decode("utf-8", errors="replace")
        print(f"AI summary unavailable ({error.code}): {shorten_fragment(error_body, 180)}")
        return {}, ""
    except (urllib.error.URLError, TimeoutError, OSError) as error:
        # Network/DNS/timeout failure: fall back to local summaries.
        print(f"AI summary unavailable: {error}")
        return {}, ""

    # First parse layer: the transport envelope returned by the API.
    try:
        data = json.loads(raw)
    except json.JSONDecodeError as error:
        print(f"AI summary unavailable (invalid JSON response): {error}")
        return {}, ""

    choices = data.get("choices") or []
    if not choices:
        return {}, ""

    content = choices[0].get("message", {}).get("content", "")
    if isinstance(content, list):
        # Some compatible backends return content as a list of typed chunks;
        # keep only the text parts and join them.
        parts: list[str] = []
        for chunk in content:
            if not isinstance(chunk, dict):
                continue
            text_value = chunk.get("text")
            if isinstance(text_value, str) and text_value:
                parts.append(text_value)
        content = "\n".join(parts)

    if not isinstance(content, str) or not content.strip():
        return {}, ""

    # Second parse layer: the model's own JSON payload (possibly fenced).
    candidate = extract_json_object(content)
    try:
        parsed = json.loads(candidate)
    except json.JSONDecodeError as error:
        print(f"AI summary unavailable (invalid JSON payload): {error}")
        return {}, ""

    if not isinstance(parsed, dict):
        print("AI summary unavailable (payload sin objeto JSON util).")
        return {}, ""

    # Normalize each section: dedupe, sanitize and cap item counts.
    sections = {
        "resumen_gerencial": normalize_ai_lines(parsed.get("resumen_gerencial"), MAX_EXECUTIVE_ITEMS),
        "que_cambia_para_usuario": normalize_ai_lines(parsed.get("que_cambia_para_usuario"), MAX_USER_ITEMS),
        "validacion_rapida": normalize_ai_lines(parsed.get("validacion_rapida"), MAX_VALIDATION_ITEMS, strip_checklist=True),
        "riesgos_y_sintomas": normalize_ai_lines(parsed.get("riesgos_y_sintomas"), MAX_RISK_ITEMS),
        "detalle_tecnico": normalize_ai_lines(parsed.get("detalle_tecnico"), MAX_TECHNICAL_ITEMS),
        "rollback_sugerido": normalize_ai_lines(parsed.get("rollback_sugerido"), MAX_ROLLBACK_ITEMS),
    }

    return sections, model


def build_release_sections(
    version_number: str,
    version_date: str,
    start_sha: str,
    end_sha: str,
    files: list[dict],
    commits: list[dict],
    patch_diffs: list[dict],
    impact_insights: list[str],
    feature_highlights: list[str],
    integration_trace: dict,
    business_context: str,
) -> tuple[list[str], list[str], list[str], list[str], list[str], list[str], str]:
    """Compose every narrative section of a changelog entry.

    Deterministic (heuristic) summaries are always computed first so the entry
    can be produced even when the AI request fails or returns nothing.  AI
    output is then blended on top via merge_section_with_fallback, except for
    internal-only cuts, where the heuristic sections are used verbatim.

    Returns the tuple (executive, user, validation, risks, technical,
    rollback, ai_model_name).
    """
    signals = detect_change_signals(files, patch_diffs)

    # Rule-based baselines, independent of any external service.
    heuristic_executive = build_default_executive_summary(
        files=files,
        commits=commits,
        patch_diffs=patch_diffs,
        impact_insights=impact_insights,
        feature_highlights=feature_highlights,
        integration_trace=integration_trace,
    )
    heuristic_user = build_user_visible_summary(
        files=files, commits=commits, patch_diffs=patch_diffs, integration_trace=integration_trace
    )
    heuristic_validation = build_quick_validation(
        files=files, commits=commits, patch_diffs=patch_diffs, integration_trace=integration_trace
    )
    heuristic_risks = build_risks_and_symptoms(
        files=files, commits=commits, patch_diffs=patch_diffs, integration_trace=integration_trace
    )
    heuristic_technical = build_default_technical_summary(
        files=files,
        commits=commits,
        impact_insights=impact_insights,
        feature_highlights=feature_highlights,
        integration_trace=integration_trace,
    )
    heuristic_rollback = build_rollback_suggestion(
        start_sha=start_sha, end_sha=end_sha, files=files, integration_trace=integration_trace
    )

    ai_sections, ai_model = request_ai_summaries(
        version_number=version_number,
        version_date=version_date,
        start_sha=start_sha,
        end_sha=end_sha,
        files=files,
        commits=commits,
        patch_diffs=patch_diffs,
        impact_insights=impact_insights,
        feature_highlights=feature_highlights,
        integration_trace=integration_trace,
        business_context=business_context,
    )

    # Drop AI bullets that are too technical (or too generic) for the
    # management/user facing sections before merging.
    filtered_ai_executive = [
        line
        for line in ai_sections.get("resumen_gerencial", [])
        if is_management_friendly_line(line)
    ]
    filtered_ai_user = [
        line
        for line in ai_sections.get("que_cambia_para_usuario", [])
        if is_management_friendly_line(line) and is_specific_user_line(line)
    ]

    if signals["internal_only"]:
        # Internal-only cuts: trust the deterministic summaries entirely.
        executive = heuristic_executive
        user = heuristic_user
        validation = heuristic_validation
        risks = heuristic_risks
        rollback = heuristic_rollback
    else:
        executive = merge_section_with_fallback(
            filtered_ai_executive, heuristic_executive, MAX_EXECUTIVE_ITEMS, 2
        )
        user = merge_section_with_fallback(
            filtered_ai_user, heuristic_user, MAX_USER_ITEMS, 3
        )
        validation = merge_section_with_fallback(
            ai_sections.get("validacion_rapida", []),
            heuristic_validation,
            MAX_VALIDATION_ITEMS,
            4,
        )
        risks = merge_section_with_fallback(
            ai_sections.get("riesgos_y_sintomas", []),
            heuristic_risks,
            MAX_RISK_ITEMS,
            2,
        )
        rollback = merge_section_with_fallback(
            ai_sections.get("rollback_sugerido", []),
            heuristic_rollback,
            MAX_ROLLBACK_ITEMS,
            1,
        )

    # The technical section always blends AI output with the heuristic baseline.
    technical = merge_section_with_fallback(
        ai_sections.get("detalle_tecnico", []),
        heuristic_technical,
        MAX_TECHNICAL_ITEMS,
        3,
    )

    return executive, user, validation, risks, technical, rollback, ai_model

def build_entry(
    marker: str,
    version_number: str,
    version_date: str,
    actor: str,
    branch_name: str,
    start_sha: str,
    end_sha: str,
    compare_url: str,
    files: list[dict],
    commits: list[dict],
    impact_insights: list[str],
    feature_highlights: list[str],
    executive_summary: list[str],
    user_visible_summary: list[str],
    quick_validation: list[str],
    risks_and_symptoms: list[str],
    rollback_suggestion: list[str],
    technical_summary: list[str],
    integration_trace: dict,
    ai_model: str,
) -> str:
    """Render the full markdown changelog entry for one cut.

    The entry opens with the hidden HTML ``marker`` comment (used later to
    detect duplicate runs), followed by metadata, the narrative sections,
    traceability details and the capped file/commit listings, and closes with
    a horizontal rule.
    """
    bucket_map = build_bucket_map(files)

    lines: list[str] = [marker, f"## Version {version_number}", ""]
    lines.append(f"**Fecha:** {version_date}")
    lines.append(f"**Autor del corte:** @{actor}" if actor else "**Autor del corte:** N/D")
    lines.append(f"**Rama del corte:** {branch_name}")
    lines.append(f"**RangoGit:** {short_sha(start_sha)}..{short_sha(end_sha)}")
    lines.append(f"**Commit corte:** {end_sha}")
    lines.append(f"**Tipo de integracion:** {integration_trace.get('integration_type', 'Commits directos')}")
    if ai_model:
        lines.append(f"**Resumen IA:** {ai_model}")
    if compare_url:
        lines.append(f"**Comparacion:** {compare_url}")

    def add_bullet_section(title: str, items: list[str]) -> None:
        # Shared renderer for the narrative sections: a blank separator line,
        # the bold title, then one sanitized bullet per item.
        lines.extend(["", title])
        for item in items:
            lines.append(f"- {sanitize_bullet(item, max_len=MAX_AI_BULLET_LEN)}")

    lines.extend(
        [
            "",
            "**Contexto del corte:**",
            f"- Commits totales en el rango: {integration_trace.get('total_commits', len(commits))}",
            f"- Commits funcionales considerados: {len(commits)}",
            f"- Archivos con cambios: {len(files)}",
            f"- Commits de merge detectados: {integration_trace.get('merge_count', 0)}",
            f"- Rama donde se genero el corte: {branch_name}",
        ]
    )

    add_bullet_section("**Que cambia para el usuario (operacion):**", user_visible_summary)

    lines.extend(["", "**Validacion rapida (5 minutos):**"])
    for item in quick_validation:
        # Strip any bullet and/or checkbox prefix the generator may have
        # emitted so the entry always renders a clean, unchecked checklist.
        plain = re.sub(r"^\s*[-*]\s*(?:\[[ xX]\]\s*)?", "", item).strip()
        plain = re.sub(r"^\s*\[[ xX]\]\s*", "", plain).strip()
        if not plain:
            continue
        lines.append(f"- [ ] {sanitize_bullet(plain, max_len=MAX_AI_BULLET_LEN)}")

    add_bullet_section("**Resumen ejecutivo (gerencia):**", executive_summary)
    add_bullet_section("**Riesgos y sintomas:**", risks_and_symptoms)
    add_bullet_section("**Rollback sugerido:**", rollback_suggestion)
    add_bullet_section("**Detalle tecnico (equipo):**", technical_summary)

    lines.extend(["", "**Trazabilidad (PR/Merge):**"])
    lines.append(f"- Tipo detectado: {integration_trace.get('integration_type', 'Commits directos')}.")

    pr_numbers = integration_trace.get("pr_numbers", [])
    if pr_numbers:
        rendered_prs = [format_pr_reference(value) for value in pr_numbers[:MAX_COMMITS_IN_ENTRY]]
        lines.append(f"- PR detectados: {', '.join(rendered_prs)}.")
        remaining_prs = len(pr_numbers) - len(rendered_prs)
        if remaining_prs > 0:
            lines.append(f"- (+{remaining_prs} PR adicionales)")
    else:
        lines.append("- PR detectados: ninguno en este rango.")

    source_branches = integration_trace.get("source_branches", [])
    if source_branches:
        shown = source_branches[:MAX_INTEGRATION_COMMITS]
        lines.append(f"- Ramas origen detectadas: {', '.join(shown)}.")
        remaining = len(source_branches) - len(shown)
        if remaining > 0:
            lines.append(f"- (+{remaining} ramas origen adicionales)")
    else:
        lines.append("- Ramas origen detectadas: ninguna.")

    target_branches = integration_trace.get("target_branches", [])
    if target_branches:
        shown = target_branches[:MAX_INTEGRATION_COMMITS]
        lines.append(f"- Ramas destino detectadas: {', '.join(shown)}.")

    merge_commits = integration_trace.get("merge_commits", [])
    if merge_commits:
        displayed = merge_commits[:MAX_INTEGRATION_COMMITS]
        for row in displayed:
            subject = sanitize_bullet(row["subject"], max_len=140)
            lines.append(f"- Merge commit: {short_sha(row['sha'])} - {subject} ({row['author']}, {row['date']})")

        remaining_merges = len(merge_commits) - len(displayed)
        if remaining_merges > 0:
            lines.append(f"- (+{remaining_merges} merge commits adicionales)")

    # Impact/feature bullets are already formatted upstream; no re-sanitizing.
    lines.extend(["", "**Impacto en el sistema (estimado):**"])
    for item in impact_insights:
        lines.append(f"- {item}")

    lines.extend(["", "**Nuevas funcionalidades o mejoras visibles:**"])
    for item in feature_highlights:
        lines.append(f"- {item}")

    lines.extend(["", "**Cambios por area:**"])

    if bucket_map:
        for bucket_name in sorted(bucket_map.keys(), key=bucket_sort_key):
            file_count = len(bucket_map[bucket_name])
            label = "archivo" if file_count == 1 else "archivos"
            lines.append(f"- {bucket_name}: {file_count} {label}.")
    else:
        lines.append("- No se detectaron archivos en el rango.")

    lines.extend(["", "**Archivos destacados:**"])

    if files:
        displayed_files = files[:MAX_FILES_IN_ENTRY]
        for row in displayed_files:
            lines.append(f"- [{row['status']}] [{row['bucket']}] {row['filename']}")

        remaining_files = len(files) - len(displayed_files)
        if remaining_files > 0:
            lines.append(f"- (+{remaining_files} archivos adicionales)")
    else:
        lines.append("- Sin archivos para mostrar.")

    lines.extend(["", "**Commits incluidos (referencia):**"])

    if commits:
        displayed_commits = commits[:MAX_COMMITS_IN_ENTRY]
        for row in displayed_commits:
            lines.append(
                f"- {short_sha(row['sha'])} [{branch_name}] {row['subject']} ({row['author']}, {row['date']})"
            )

        remaining_commits = len(commits) - len(displayed_commits)
        if remaining_commits > 0:
            lines.append(f"- (+{remaining_commits} commits adicionales)")
    else:
        lines.append("- Sin commits para mostrar.")

    lines.extend(["", "---", ""])
    return "\n".join(lines)


def current_branch(end_sha: str) -> str:
    """Best-effort resolution of the branch name for the cut.

    Precedence: GitHub Actions environment variables, then the local checked
    out branch, then ``git name-rev`` on the cut commit; defaults to "main".
    """
    # CI-provided refs take priority over local git state.
    for env_key in ("GITHUB_REF_NAME", "GITHUB_HEAD_REF"):
        raw_value = os.environ.get(env_key, "").strip()
        if not raw_value:
            continue
        candidate = normalize_branch_name(raw_value)
        if candidate:
            return candidate

    checked_out = git("branch", "--show-current", allow_fail=True)
    if checked_out:
        candidate = normalize_branch_name(checked_out)
        if candidate:
            return candidate

    resolved = git("name-rev", "--name-only", end_sha, allow_fail=True)
    if resolved:
        candidate = normalize_branch_name(resolved)
        # git name-rev prints the literal string "undefined" when no ref
        # names the commit; treat that as no answer.
        if candidate and candidate.lower() != "undefined":
            return candidate

    return "main"


def current_actor(end_sha: str) -> str:
    """Return the CI actor if set, otherwise the author name of the cut commit."""
    ci_actor = os.environ.get("GITHUB_ACTOR", "").strip()
    return ci_actor if ci_actor else git("show", "-s", "--format=%an", end_sha, allow_fail=True)


def compare_url(start_sha: str, end_sha: str) -> str:
    """Build the repository URL for the cut: a compare link when a start
    commit is known, a single-commit link otherwise; empty when the
    repository base URL cannot be determined."""
    base = repo_base_url()
    if not base:
        return ""
    suffix = f"compare/{start_sha}...{end_sha}" if start_sha else f"commit/{end_sha}"
    return f"{base}/{suffix}"


def main() -> None:
    """Generate the changelog entry for the current cut and prepend it to
    changelog.md.

    Flow: resolve the cut commit, read the version from the app config at that
    commit, derive the commit range since the previous cut, collect files,
    commits and patches, build the narrative sections (heuristics plus
    optional AI), then render and write the entry — unless an entry for this
    cut already exists or DRY_RUN=1 is set.
    """
    # Resolve the commit that defines this cut; CI provides GITHUB_SHA,
    # local runs fall back to HEAD.
    end_sha_env = os.environ.get("GITHUB_SHA", "").strip()
    end_sha = git("rev-parse", end_sha_env or "HEAD")

    # Prefer the config as committed at the cut; fall back to the working tree.
    config_text = git("show", f"{end_sha}:{CONFIG_PATH}", allow_fail=True)
    if not config_text:
        config_text = read_text(CONFIG_PATH)

    version_number, version_date = extract_version_from_config(config_text)
    if not version_number:
        version_number = "version-no-detectada"
    if not version_date:
        version_date = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M UTC")

    # The second most recent commit touching the config marks the previous
    # cut; an empty string means there is no prior cut to diff against.
    config_history = list_config_history()
    previous_cut_sha = config_history[1] if len(config_history) > 1 else ""

    start_sha, range_spec = resolve_range(previous_cut_sha, end_sha)

    files = list_changed_files(start_sha, end_sha, range_spec)
    commit_data = list_commits(start_sha, end_sha, range_spec)
    commits = commit_data["display"]
    integration_trace = detect_integration_trace(commit_data["all"])

    patch_text = collect_patch(start_sha, end_sha, range_spec)
    patch_diffs = parse_patch_by_file(patch_text)

    actor = current_actor(end_sha)
    branch_name = current_branch(end_sha)
    url = compare_url(start_sha, end_sha)
    # Hidden HTML comment used to detect whether this cut was already logged.
    marker = f"<!-- changelog-cut:{end_sha} -->"

    impact_insights = build_impact_insights(files, commits, patch_diffs)
    feature_highlights = build_feature_highlights(files, commits, patch_diffs)
    business_context = load_business_context()

    (
        executive_summary,
        user_visible_summary,
        quick_validation,
        risks_and_symptoms,
        technical_summary,
        rollback_suggestion,
        ai_model,
    ) = build_release_sections(
        version_number=version_number,
        version_date=version_date,
        start_sha=start_sha,
        end_sha=end_sha,
        files=files,
        commits=commits,
        patch_diffs=patch_diffs,
        impact_insights=impact_insights,
        feature_highlights=feature_highlights,
        integration_trace=integration_trace,
        business_context=business_context,
    )

    try:
        existing = read_text(CHANGELOG_PATH)
    except FileNotFoundError:
        existing = ""

    # Idempotency: never write the same cut twice.
    if marker in existing:
        print(f"Entry already exists for cut {end_sha}.")
        return

    entry = build_entry(
        marker=marker,
        version_number=version_number,
        version_date=version_date,
        actor=actor,
        branch_name=branch_name,
        start_sha=start_sha,
        end_sha=end_sha,
        compare_url=url,
        files=files,
        commits=commits,
        impact_insights=impact_insights,
        feature_highlights=feature_highlights,
        executive_summary=executive_summary,
        user_visible_summary=user_visible_summary,
        quick_validation=quick_validation,
        risks_and_symptoms=risks_and_symptoms,
        rollback_suggestion=rollback_suggestion,
        technical_summary=technical_summary,
        integration_trace=integration_trace,
        ai_model=ai_model,
    )

    # DRY_RUN=1 prints the rendered entry instead of touching the changelog.
    dry_run = os.environ.get("DRY_RUN", "").strip() == "1"
    if dry_run:
        print(entry)
        return

    # Newest entry goes on top; lstrip avoids a growing gap between entries.
    if existing:
        write_text(CHANGELOG_PATH, entry + existing.lstrip())
    else:
        write_text(CHANGELOG_PATH, entry)

    print(
        "Changelog actualizado "
        f"(version {version_number}, rango {short_sha(start_sha)}..{short_sha(end_sha)})."
    )


# Script entry point: generate and prepend the changelog entry for this cut.
if __name__ == "__main__":
    main()
