#!/usr/bin/env python3
"""
Voice-Linter v1.0 — Autismus-Stiftung
Maschinelle Validation gegen brand-context.md §27 (Banned Patterns).

Usage:
    python3 voice-lint.py [files...]
    python3 voice-lint.py --html https://autismusstiftung.de/spenden/
    python3 voice-lint.py --strict path/to/draft.md   # exit 1 on warnings too

Returns:
    Exit 0 = clean
    Exit 1 = errors found
    Exit 2 = warnings found (only with --strict)

Reference: https://autismusstiftung.de/brand-context.md
"""
import sys, re, pathlib, argparse, urllib.request

# Banned patterns from brand-context.md §27
RULES = {
    # ERROR-Level (Pflicht)
    'person-first-noun': {
        'level': 'error',
        'pattern': re.compile(r'\b(Autisten|Betroffene[rn]?|Behinderte[rn]?|die\s+Betroffenen|die\s+Behinderten)\b', re.I),
        'message': '§05/§27.4 Verbotene Distanz-Floskel — verwende „Mensch im Autismus-Spektrum"',
    },
    'person-first-adj': {
        'level': 'warning',
        'pattern': re.compile(r'\bautistisch[ers]?\s+(Kind|Mensch|Mann|Frau|Junge|Mädchen|Person)\b', re.I),
        'message': '§05 — bevorzugt „Kind im Autismus-Spektrum" statt „autistisches Kind"',
    },
    'mailto-link': {
        'level': 'error',
        'pattern': re.compile(r'(?:href=["\']mailto:|<\s*a\s+[^>]*href=["\']mailto:)', re.I),
        'message': '§06/§27.8 Mailto-Links verboten — verwende `[at]/[punkt]`-Obfuskation für Web',
    },
    'hype-language': {
        'level': 'error',
        'pattern': re.compile(r'\b(revolutionär|bahnbrechend|einzigartig|erstklassig|weltklasse|state[\s-]of[\s-]the[\s-]art|cutting[\s-]edge)\b', re.I),
        'message': '§27.1 Hype-Sprache — sachlich bleiben',
    },
    'pity-trigger': {
        'level': 'error',
        'pattern': re.compile(r'\b(arme\s+(Kinder|Familien|Menschen)|leiden\s+unter|Schicksal|verzweifelt|Hoffnungsschimmer|Tragödie)\b', re.I),
        'message': '§27.2 Mitleid-Trigger — verletzt Würde',
    },
    'pressure-hook': {
        'level': 'error',
        'pattern': re.compile(r'(letzte\s+Chance!?|Sie\s+müssen\s+jetzt|wenn\s+Sie\s+nicht\s+\w+,?\s+dann|jetzt\s+oder\s+nie|dringend!|Achtung!)', re.I),
        'message': '§27.3 Druck-Hooks — manipulativ',
    },
    'pseudo-empathy': {
        'level': 'warning',
        'pattern': re.compile(r'(verstehen\s+Sie\s+sicherlich|es\s+muss\s+schwer\s+(sein|gewesen\s+sein)\s+für\s+Sie|wir\s+können\s+nur\s+erahnen)', re.I),
        'message': '§27.5 Pseudo-Empathie — herablassend',
    },
    'ai-leak': {
        'level': 'warning',
        'pattern': re.compile(r'(im\s+Folgenden\s+werden|dieses\s+Dokument\s+behandelt|lassen\s+Sie\s+uns|im\s+Wesentlichen|es\s+ist\s+wichtig\s+zu\s+beachten,?\s+dass)', re.I),
        'message': '§27.6 AI-Verräter-Phrase — outet KI-Schreibstil',
    },
    'unjustified-conjunctive': {
        'level': 'warning',
        'pattern': re.compile(r'(könnten\s+Sie\s+eventuell|würde\s+ich\s+vorschlagen,?\s+dass|möglicherweise\s+wäre\s+es\s+ratsam)', re.I),
        'message': '§27.7 Floskel-Konjunktiv ohne Grund — wirkt zögerlich',
    },
    'autist-noun': {
        'level': 'error',
        'pattern': re.compile(r'\bAutist\b(?!ismus)', re.I),
        'message': '§05 ABSOLUTES VERBOT — verwende „Mensch im Autismus-Spektrum"',
    },
    # Strukturelle Prüfungen
    'too-many-h2': {
        'level': 'warning',
        'pattern': None,  # custom check
        'message': '§17 — maximal 5 H2 pro Seite',
        'check': lambda txt: max(0, len(re.findall(r'^##\s+(?!#)', txt, re.M)) - 5),
    },
    'redundant-headlines': {
        'level': 'warning',
        'pattern': None,
        'message': '§06 keine Doppelungen — Inhalte einmal an einem Ort',
        'check': lambda txt: _count_duplicate_headings(txt),
    },
}


def _count_duplicate_headings(text):
    """Count duplicate H2/H3 headings in a single document."""
    headings = re.findall(r'^#{2,3}\s+(.+?)$', text, re.M)
    seen = {}
    dups = 0
    for h in headings:
        key = h.strip().lower()
        if key in seen:
            dups += 1
        else:
            seen[key] = True
    return dups


def fetch_url(url):
    """Download *url* and return its HTML with <script>/<style> blocks removed.

    Sends a custom User-Agent, decodes the body as UTF-8 (undecodable
    bytes are dropped), and strips script/style content so only visible
    page text reaches the linter.
    """
    request = urllib.request.Request(url, headers={'User-Agent': 'voice-lint/1.0'})
    with urllib.request.urlopen(request, timeout=15) as response:
        page = response.read().decode('utf-8', errors='ignore')
    # Content-only check: drop executable/style payloads before linting.
    for tag in ('script', 'style'):
        page = re.sub(rf'<{tag}[^>]*>.*?</{tag}>', '', page, flags=re.DOTALL | re.I)
    return page


def strip_meta_blocks(text):
    """Remove spans that quote banned patterns rather than use them.

    Reference documents (brand-context.md and friends) legitimately
    contain forbidden phrases as negative examples; this strips fenced
    code blocks, inline code, and ❌-marked lines before linting so
    those examples are not reported as violations.
    """
    without_fences = re.sub(r'```.*?```', '', text, flags=re.DOTALL)
    without_inline = re.sub(r'`[^`\n]+`', '', without_fences)
    # Lines flagged with ❌ are explicit negative examples in tables/prose;
    # blank the line content but keep the newline so line numbers hold.
    return re.sub(r'^[^\n]*❌[^\n]*$', '', without_inline, flags=re.M)


def lint(text, source='<input>', strip_meta=False):
    """Run every rule in RULES against *text*.

    Returns a pair ``(errors, warnings)`` — two lists of 7-tuples
    ``(source, line, rule_name, match, message, snippet, level)``.
    With ``strip_meta=True`` the text is first passed through
    strip_meta_blocks() so reference docs that quote banned phrases
    are not flagged for them.
    """
    if strip_meta:
        text = strip_meta_blocks(text)

    buckets = {'error': [], 'warning': []}

    for name, rule in RULES.items():
        target = buckets['error'] if rule['level'] == 'error' else buckets['warning']
        pattern = rule['pattern']
        if pattern is not None:
            for match in pattern.finditer(text):
                # 1-based line number of the match start.
                line_no = text[:match.start()].count('\n') + 1
                lo = max(0, match.start() - 20)
                hi = min(len(text), match.end() + 20)
                snippet = text[lo:hi].replace('\n', ' ')
                target.append((source, line_no, name, match.group(),
                               rule['message'], snippet, rule['level']))
        elif 'check' in rule:
            # Structural rule: a callable returning a violation count.
            hits = rule['check'](text)
            if hits > 0:
                target.append((source, 1, name, f'count={hits}',
                               rule['message'], '', rule['level']))

    return buckets['error'], buckets['warning']


def format_entry(entry):
    """Render one lint finding as a human-readable line (plus optional context)."""
    src, line, rule, hit, msg, snippet, level = entry
    marker = '✗' if level == 'error' else '⚠'
    parts = [f'{marker} {src}:{line}: [{rule}] „{hit}" — {msg}']
    if snippet:
        parts.append(f'   Context: …{snippet.strip()}…')
    return '\n'.join(parts)


def main():
    parser = argparse.ArgumentParser(description='Voice-Linter for Autismus-Stiftung brand voice')
    parser.add_argument('paths', nargs='*', help='File paths or - for stdin')
    parser.add_argument('--html', dest='url', help='Fetch URL and lint its HTML body')
    parser.add_argument('--strict', action='store_true', help='Exit 1 also on warnings')
    parser.add_argument('--quiet', action='store_true', help='Only show errors, not warnings')
    parser.add_argument('--strip-meta', action='store_true', help='Skip code blocks and ❌-marked lines (use for brand-context.md and reference docs)')
    args = parser.parse_args()

    all_errors = []
    all_warnings = []

    if args.url:
        text = fetch_url(args.url)
        e, w = lint(text, source=args.url, strip_meta=args.strip_meta)
        all_errors.extend(e); all_warnings.extend(w)

    if not args.paths and not args.url:
        # Read from stdin
        text = sys.stdin.read()
        e, w = lint(text, source='<stdin>')
        all_errors.extend(e); all_warnings.extend(w)
    else:
        for p in args.paths:
            if p == '-':
                text = sys.stdin.read()
                e, w = lint(text, source='<stdin>', strip_meta=args.strip_meta)
            else:
                path = pathlib.Path(p)
                if not path.exists():
                    print(f'✗ {p}: file not found', file=sys.stderr)
                    sys.exit(2)
                text = path.read_text(encoding='utf-8', errors='ignore')
                