#!/usr/bin/env python3
"""
Build PHT Top 1000 Apple/Pear/Citrus Database - Version 2
Combines USA facilities + research companies + generates report
"""

import csv
import json
import re
from typing import Dict, List, Set
from collections import defaultdict
from datetime import datetime

# Scoring system
# Maps a facility's size classification string (as it appears in the
# source CSV's 'Size Classification' column) to a 0-100 priority score.
# Used as the direct lookup path in assign_score_from_size().
SCORE_MAP = {
    'XXLarge': 100,
    'XLarge': 90,
    'Large': 80,
    'Medium': 70,
    'Small': 60
}

def extract_fruit_types(produce_field: str) -> Set[str]:
    """Extract the target fruit categories mentioned in a produce field.

    Keywords are matched at word boundaries so unrelated words that merely
    contain a fruit name (e.g. 'pineapple' for 'apple') do not count.
    Singular patterns accept an optional plural 's'.

    Args:
        produce_field: Free-text produce description; may be empty or None.

    Returns:
        The subset of {'Apples', 'Pears', 'Citrus'} found in the text.
    """
    if not produce_field:
        return set()

    produce_lower = produce_field.lower()
    fruits = set()

    if re.search(r'\bapples?\b', produce_lower):
        fruits.add('Apples')
    if re.search(r'\bpears?\b', produce_lower):
        fruits.add('Pears')
    if re.search(r'\b(citrus|oranges?|lemons?|grapefruits?|limes?|mandarins?)\b',
                 produce_lower):
        fruits.add('Citrus')

    return fruits

def is_target_fruit(produce_field: str) -> bool:
    """Return True when the produce description mentions apples, pears, or citrus."""
    # A non-empty fruit set means at least one target category matched.
    return bool(extract_fruit_types(produce_field))

def assign_score_from_size(size_class: str, rooms: str, sqft: str, notes: str) -> int:
    """Derive a facility score from its size indicators.

    A recognised size classification maps directly through SCORE_MAP;
    otherwise the score is inferred from the first number found in the
    room-count and square-footage fields. *notes* is accepted for
    interface compatibility but is not currently consulted.

    Returns:
        An int score in {60, 70, 80, 90, 100}.
    """
    direct = SCORE_MAP.get(size_class)
    if direct is not None:
        return direct

    def _first_int(text: str) -> int:
        # Pull the first run of digits out of *text*; 0 when absent.
        found = re.search(r'(\d+)', text)
        return int(found.group(1)) if found else 0

    room_count = 0
    if rooms:
        room_count = _first_int(rooms.replace('+', '').replace('~', ''))

    sqft_num = 0
    if sqft:
        sqft_num = _first_int(sqft.replace(',', '').replace('Estimated ', ''))

    # Tier thresholds mirror SCORE_MAP: either indicator can qualify.
    tiers = (
        (50, 1000000, 100),
        (30, 500000, 90),
        (20, 200000, 80),
        (10, 100000, 70),
    )
    for min_rooms, min_sqft, score in tiers:
        if room_count >= min_rooms or sqft_num >= min_sqft:
            return score
    return 60

def normalize_company_name(name: str) -> str:
    """Lower-case *name* with trailing legal suffix and punctuation removed.

    Produces a stable deduplication key so variant spellings of the same
    company ('Acme Farms LLC' vs 'Acme Farms') collapse to one string.
    """
    if not name:
        return ""

    suffix_pattern = (
        r'\s+(LLC|Inc\.|Inc|Co\.|Co|Company|Corp\.|Corp|Ltd\.|Ltd'
        r'|LP|LLP|SA|GmbH|Pty|Limited)$'
    )
    without_suffix = re.sub(suffix_pattern, '', name, flags=re.IGNORECASE)
    # Strip every character that is neither word-class nor whitespace.
    cleaned = re.sub(r'[^\w\s]', '', without_suffix)
    return cleaned.lower().strip()

def extract_usa_facilities(csv_path: str) -> List[Dict]:
    """Extract apple/pear/citrus facilities from the USA facilities CSV.

    Args:
        csv_path: Path to the verified/scored facilities CSV.

    Returns:
        One dict per matching facility using the output schema
        (Company, Website, Country, Region, Fruit, CA Storage, Score,
        Size, Notes, Contacts).
    """
    facilities = []

    with open(csv_path, 'r', encoding='utf-8') as f:
        reader = csv.DictReader(f)

        for row in reader:
            produce = row.get('Primary Produce', '')
            if not is_target_fruit(produce):
                continue

            fruit_str = ', '.join(sorted(extract_fruit_types(produce)))

            # Prefer the explicit numeric score; fall back to deriving one
            # from size indicators when the field is blank or malformed.
            # (Narrowed from a bare except: only int() failures trigger
            # the fallback.)
            try:
                score = int(row.get('Score', ''))
            except (TypeError, ValueError):
                score = assign_score_from_size(
                    row.get('Size Classification', ''),
                    row.get('Total Rooms', ''),
                    row.get('Square Footage', ''),
                    row.get('Notes', '')
                )

            facilities.append({
                'Company': row.get('Company', '').strip(),
                'Website': row.get('Website', '').strip(),
                'Country': 'USA',
                'Region': row.get('Region', '').strip(),
                'Fruit': fruit_str,
                'CA Storage': row.get('CA/MA', '').strip(),
                'Score': score,
                'Size': row.get('Size Classification', '').strip(),
                'Notes': row.get('Notes', '').strip()[:500],  # cap note length
                'Contacts': ''
            })

    print(f"✓ Extracted {len(facilities)} apple/pear/citrus facilities from USA CSV")
    return facilities

def load_research_companies(json_path: str) -> List[Dict]:
    """Load the researched international companies from a JSON file.

    Args:
        json_path: Path to a JSON array of company record dicts.

    Returns:
        The parsed list of company records.
    """
    # Explicit UTF-8 so international company names decode consistently
    # regardless of the platform's default encoding.
    with open(json_path, 'r', encoding='utf-8') as f:
        companies = json.load(f)

    print(f"✓ Loaded {len(companies)} researched international companies")
    return companies

def deduplicate_facilities(facilities: List[Dict]) -> List[Dict]:
    """Drop facilities whose (normalized company name, country) repeats.

    The first occurrence wins; dropped duplicates are reported to stdout
    (at most ten names are listed).
    """
    unique: List[Dict] = []
    seen_keys: Set[tuple] = set()
    dropped: List[str] = []

    for entry in facilities:
        dedup_key = (normalize_company_name(entry['Company']), entry['Country'])
        if dedup_key in seen_keys:
            dropped.append(entry['Company'])
            continue
        seen_keys.add(dedup_key)
        unique.append(entry)

    if dropped:
        print(f"  Removed {len(dropped)} duplicates:")
        for name in dropped[:10]:
            print(f"    - {name}")
        if len(dropped) > 10:
            print(f"    ... and {len(dropped) - 10} more")

    return unique

def generate_gap_analysis(current_count: int, target: int = 1000) -> Dict:
    """Summarize how many more companies are needed to reach *target*.

    Args:
        current_count: Number of facilities already in the database.
        target: Desired total facility count (default 1000).

    Returns:
        Dict with 'gap', 'target', 'current', and a per-region
        'suggestions' dict whose values sum exactly to the gap.
    """
    # Clamp at zero so an over-target database yields an empty plan
    # instead of negative per-region suggestions.
    gap = max(0, target - current_count)

    # Suggested distribution based on major apple/pear/citrus producing regions
    suggestions = {
        'China': int(gap * 0.30),  # 30% - world's #1 apple producer
        'Europe (Poland, Italy, France, Spain)': int(gap * 0.20),  # 20%
        'Turkey': int(gap * 0.10),  # 10%
        'South America (Argentina, Chile, Brazil)': int(gap * 0.10),  # 10%
        'South Africa': int(gap * 0.08),  # 8%
        'Australia/New Zealand': int(gap * 0.08),  # 8%
        'India': int(gap * 0.07),  # 7%
        'Other (Japan, Canada, etc.)': int(gap * 0.07),  # 7%
    }

    # int() truncation can drop a few slots; assign the remainder to the
    # largest bucket so the suggestions sum exactly to the gap.
    shortfall = gap - sum(suggestions.values())
    if shortfall > 0:
        suggestions['China'] += shortfall

    return {
        'gap': gap,
        'target': target,
        'current': current_count,
        'suggestions': suggestions
    }

def export_to_csv(facilities: List[Dict], output_path: str):
    """Write *facilities* to *output_path* as CSV with the standard columns.

    Emits a warning and writes nothing when the list is empty.
    """
    if not facilities:
        print("⚠ No facilities to export")
        return

    columns = ['Company', 'Website', 'Country', 'Region', 'Fruit',
               'CA Storage', 'Score', 'Size', 'Notes', 'Contacts']

    with open(output_path, 'w', newline='', encoding='utf-8') as out:
        writer = csv.DictWriter(out, fieldnames=columns)
        writer.writeheader()
        for facility in facilities:
            writer.writerow(facility)

    print(f"✓ Exported to {output_path}")

def main():
    """Build, deduplicate, score-sort, analyze, and export the database.

    Pipeline: extract USA facilities from CSV, merge with researched
    international companies from JSON, deduplicate, sort by score,
    run a gap analysis against the 1000-company target, and write
    CSV/JSON/stats outputs to the postharvest workspace directory.
    """

    print("=" * 80)
    print("PHT TOP 1000 APPLE/PEAR/CITRUS DATABASE BUILDER v2")
    print(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print("=" * 80)

    # Step 1: Extract USA facilities
    print("\n[1/7] Extracting USA facilities...")
    usa_facilities = extract_usa_facilities('/Users/max/.openclaw/workspace/postharvest/verified-scored-facilities.csv')

    # Step 2: Load research companies
    print("\n[2/7] Loading researched international companies...")
    research_companies = load_research_companies('/Users/max/.openclaw/workspace/postharvest/research_companies.json')

    # Step 3: Combine all sources
    print("\n[3/7] Combining data sources...")
    all_facilities = usa_facilities + research_companies
    print(f"  Total before deduplication: {len(all_facilities)}")

    # Step 4: Deduplicate
    print("\n[4/7] Deduplicating...")
    deduped = deduplicate_facilities(all_facilities)
    print(f"✓ {len(all_facilities)} → {len(deduped)} after deduplication")

    # Guard: with no facilities, the top/bottom prints and the average
    # score computation below would raise IndexError / ZeroDivisionError.
    if not deduped:
        print("⚠ No facilities found in any source — nothing to process")
        return

    # Step 5: Sort by score
    print("\n[5/7] Sorting by score (descending)...")
    deduped.sort(key=lambda x: x['Score'], reverse=True)
    print(f"  Top score: {deduped[0]['Score']} - {deduped[0]['Company']} ({deduped[0]['Country']})")
    print(f"  Lowest score: {deduped[-1]['Score']} - {deduped[-1]['Company']} ({deduped[-1]['Country']})")

    # Step 6: Gap analysis
    print("\n[6/7] Gap analysis...")
    gap_analysis = generate_gap_analysis(len(deduped))

    print(f"  Current count: {gap_analysis['current']}")
    print(f"  Target: {gap_analysis['target']}")
    print(f"  Gap: {gap_analysis['gap']}")
    print(f"\n  Suggested research distribution:")
    for region, count in gap_analysis['suggestions'].items():
        print(f"    - {region}: {count} companies")

    # Step 7: Export results
    print("\n[7/7] Exporting results...")

    # Export current database
    export_to_csv(deduped, '/Users/max/.openclaw/workspace/postharvest/top_1000_current.csv')

    # Export JSON for processing
    with open('/Users/max/.openclaw/workspace/postharvest/top_1000_current.json', 'w') as f:
        json.dump(deduped, f, indent=2)

    # Generate statistics
    stats = {
        'generated_at': datetime.now().isoformat(),
        'total_facilities': len(deduped),
        'target': 1000,
        'gap': gap_analysis['gap'],
        'countries': {},
        'fruits': {},
        'avg_score': sum(f['Score'] for f in deduped) / len(deduped),
        'score_distribution': {
            '100+': len([f for f in deduped if f['Score'] >= 100]),
            '90-99': len([f for f in deduped if 90 <= f['Score'] < 100]),
            '80-89': len([f for f in deduped if 80 <= f['Score'] < 90]),
            '70-79': len([f for f in deduped if 70 <= f['Score'] < 80]),
            '<70': len([f for f in deduped if f['Score'] < 70]),
        }
    }

    # Country breakdown
    for f in deduped:
        country = f['Country']
        stats['countries'][country] = stats['countries'].get(country, 0) + 1

    # Fruit breakdown (a facility may contribute to several fruit counts)
    for f in deduped:
        for fruit in f['Fruit'].split(', '):
            if fruit:
                stats['fruits'][fruit] = stats['fruits'].get(fruit, 0) + 1

    with open('/Users/max/.openclaw/workspace/postharvest/top_1000_stats.json', 'w') as f:
        json.dump(stats, f, indent=2)

    print("\n" + "=" * 80)
    print("SUMMARY STATISTICS")
    print("=" * 80)
    print(f"Total facilities: {stats['total_facilities']}")
    print(f"Average score: {stats['avg_score']:.1f}")
    print(f"\nCountries ({len(stats['countries'])}):")
    for country, count in sorted(stats['countries'].items(), key=lambda x: x[1], reverse=True):
        print(f"  {country}: {count}")

    print(f"\nFruits:")
    for fruit, count in sorted(stats['fruits'].items(), key=lambda x: x[1], reverse=True):
        print(f"  {fruit}: {count}")

    print(f"\nScore distribution:")
    for range_name, count in stats['score_distribution'].items():
        print(f"  {range_name}: {count}")

    print("\n" + "=" * 80)
    print("FILES GENERATED:")
    print("  - top_1000_current.csv (ready for review)")
    print("  - top_1000_current.json (machine-readable)")
    print("  - top_1000_stats.json (statistics)")
    print("=" * 80)
    print(f"\n⚠ NEXT STEPS: Research {gap_analysis['gap']} more companies to reach 1,000")
    print("  Focus on: China, Europe (Poland, Italy, France, Spain), Turkey")
    print("=" * 80)

if __name__ == '__main__':
    main()
