#!/usr/bin/env python3
import csv
import json
import re
from difflib import SequenceMatcher

# Load the curated company-name -> website mapping (JSON object of
# name -> URL). Encoding is pinned to UTF-8 because JSON files are UTF-8
# by spec; relying on the platform's locale default (e.g. cp1252 on
# Windows) could mis-decode non-ASCII company names.
with open('website_mapping.json', 'r', encoding='utf-8') as f:
    website_mapping = json.load(f)

def similarity(a, b):
    """Return the case-insensitive similarity of two strings as a 0.0-1.0 ratio."""
    matcher = SequenceMatcher(None, a.lower(), b.lower())
    return matcher.ratio()

def fuzzy_match_company(company_name, existing_companies, threshold=0.85):
    """Find the closest existing company name at or above *threshold*.

    Compares case-insensitively. Returns a ``(best_match, best_score)``
    tuple; ``best_match`` is ``None`` (and ``best_score`` is 0) when no
    candidate clears the threshold.
    """
    query = company_name.lower()
    best_match, best_score = None, 0

    for candidate in existing_companies:
        score = SequenceMatcher(None, query, candidate.lower()).ratio()
        # Keep the highest-scoring candidate that also clears the threshold.
        if score >= threshold and score > best_score:
            best_match, best_score = candidate, score

    return best_match, best_score

def normalize_company_name(name):
    """Normalize a company name for fuzzy matching.

    Strips common legal suffixes (Inc, LLC, Ltd, Company, Corp,
    Corporation, Co — with or without a trailing period) and collapses
    runs of whitespace. Returns "" for empty/None input.
    """
    if not name:
        return ""
    # Use a zero-width lookahead for the trailing boundary. The previous
    # pattern consumed the boundary with a capturing group `(\s|$)`, which
    # (a) merged adjacent words ("Acme Inc. Products" -> "AcmeProducts")
    # and (b) left chained suffixes behind ("Acme Co. Inc." -> "AcmeInc."),
    # because the space separating two suffixes was eaten by the first match.
    name = re.sub(
        r'\s+(Inc\.?|LLC\.?|Ltd\.?|Company|Corp\.?|Corporation|Co\.?)(?=\s|$)',
        '',
        name,
        flags=re.IGNORECASE,
    )
    # Collapse any remaining internal runs of whitespace and trim the ends.
    return re.sub(r'\s+', ' ', name).strip()

def get_website_for_company(company_name, original_website):
    """Resolve the website to use for *company_name*.

    A real (non-placeholder) *original_website* wins outright. Otherwise the
    module-level ``website_mapping`` (curated name -> URL) is consulted with
    a fuzzy name comparison; if nothing matches, the original value is
    returned unchanged so placeholder entries can be reported for manual
    lookup later.
    """
    placeholder = "https://waapple.org/suppliers/"
    has_real_site = bool(original_website) and original_website != placeholder
    if has_real_site:
        return original_website

    # Fall back to the curated mapping, matched fuzzily on company name.
    for known_name, url in website_mapping.items():
        if similarity(company_name, known_name) > 0.8:
            return url

    # Nothing better found — hand back whatever we started with.
    return original_website

def process_csv_files():
    """Merge the three prospect CSVs into MASTER-LIST-FINAL.csv.

    Rows are normalized to one column set, de-duplicated across files via
    fuzzy company-name matching, sorted by Country/State/Company Name, and
    written out. Companies whose website is still the waapple.org supplier
    placeholder are printed at the end for manual follow-up.
    """
    # Final standardized columns
    final_columns = [
        "Company Name", "Website", "Country", "State/Region", "Primary Fruit",
        "CA Storage Confirmed", "Employee Count", "Est. Revenue ($M)",
        "Contact Name", "Contact Title", "Contact Email", "Contact Phone",
        "Notes", "Source", "Qualified"
    ]

    companies = {}      # Company name -> normalized row dict
    company_names = []  # Accepted names, used as the fuzzy-dedup corpus

    # All three inputs share the same schema, so a single loader handles
    # each of them (previously three copy-pasted loops).
    for filename in (
        'MASTER-LIST-MERGED.csv',
        'NEW-PROSPECTS-2026-02-07.csv',
        'RESEARCH-PROSPECTS-2026-02-07.csv',
    ):
        _ingest_csv(filename, companies, company_names)

    # Sort by Country, State, Company Name
    sorted_companies = sorted(companies.values(), key=lambda x: (
        x.get('Country', ''),
        x.get('State/Region', ''),
        x.get('Company Name', '')
    ))

    # Write final CSV
    print(f"Writing {len(sorted_companies)} companies to MASTER-LIST-FINAL.csv...")
    with open('MASTER-LIST-FINAL.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=final_columns)
        writer.writeheader()
        writer.writerows(sorted_companies)

    print(f"Merge complete! {len(sorted_companies)} companies in final list.")

    # Report companies with waapple.org that still need website lookup
    waapple_companies = [c for c in sorted_companies if c.get('Website') == 'https://waapple.org/suppliers/']
    if waapple_companies:
        print(f"\nCompanies still needing website lookup ({len(waapple_companies)}):")
        for company in waapple_companies:
            print(f"  - {company['Company Name']}")


def _ingest_csv(filename, companies, company_names):
    """Load one prospect CSV, adding non-duplicate rows to the accumulators.

    Mutates *companies* (name -> normalized row dict) and *company_names*
    (the fuzzy-match corpus) in place. Rows with a blank name are skipped;
    rows whose name fuzzy-matches an already-accepted company are skipped
    and reported as duplicates.
    """
    print(f"Processing {filename}...")
    with open(filename, 'r', encoding='utf-8') as f:
        for row in csv.DictReader(f):
            company_name = row.get('Company Name', '').strip()
            if not company_name:
                continue

            # Check for duplicates against everything accepted so far.
            match, score = fuzzy_match_company(company_name, company_names)
            if match:
                print(f"Found duplicate: {company_name} matches {match} (score: {score:.2f})")
                continue

            # Map the source row onto the standardized final columns.
            companies[company_name] = {
                "Company Name": company_name,
                "Website": get_website_for_company(company_name, row.get('Website', '')),
                "Country": row.get('Country', ''),
                "State/Region": row.get('State/Region', ''),
                "Primary Fruit": row.get('Primary Fruit', ''),
                "CA Storage Confirmed": row.get('CA Storage Confirmed', ''),
                "Employee Count": "",  # Not present in any source file
                "Est. Revenue ($M)": row.get('Est. Revenue ($M)', ''),
                "Contact Name": row.get('Contact 1 Name', ''),
                "Contact Title": row.get('Contact 1 Title', ''),
                "Contact Email": row.get('Contact 1 Email', ''),
                "Contact Phone": row.get('Contact 1 Phone', ''),
                "Notes": row.get('Notes', ''),
                "Source": row.get('Source', ''),
                "Qualified": row.get('Qualified', '')
            }
            company_names.append(company_name)

# Script entry point: run the full merge only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    process_csv_files()