import asyncio
import re
import time
import sys
import os
from datetime import datetime, timedelta, timezone

from telethon.sync import TelegramClient
from telethon.sessions import StringSession
from telethon.tl.functions.messages import GetHistoryRequest
from telethon.tl.types import MessageEntityTextUrl

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from API.jupAPI import JupAPI
from API.pumpAPI import PumpAPI
from utils.logger import *
from utils.config import *
from utils.ClickHouseManager import ClickHouseManager

# Telegram API
api_id = 26048456
api_hash = '27ae6b4c3d2d3f66d31c0e1022dbbec7'

# Paste a pre-generated session string here
string_session = '1BJWap1wBuxlI-5OXiBKpzh30xDfIzFniBzgEi0_VElmK0Ajw1mTMQe5qIQXWSDUZBhrhvc9ClxGBcNbOMx44HcKRpMTMlQ2fGeb3k3ByjvX4YqU_Q1M5fwxdqtLSBImne8F-AOLaIuPn-egKgSfRysxsv5GbqSsJ0nN9v0ueRTNIKzeSHAwfkZq1QPAQZrv3Md7qNpyb78XR4SX0pZ-9UQwNadVGN_bFyERU3Mk1Zc3pQHCfLU1i6rMHjfXg1Svkkrr-yw8XwZBpb98G9szTrgCubZr5Tp3OVA2zEpJBXQq8ZA6TtyyiUOUFbww5kilS07-MfXcdWzedmU0PpCqUrnuu8BieXn8='
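# One-off way to generate the session string above (a sketch using Telethon's
# StringSession; run it interactively once and paste the printed value into
# string_session):
#
#   from telethon.sync import TelegramClient
#   from telethon.sessions import StringSession
#   with TelegramClient(StringSession(), api_id, api_hash) as c:
#       print(c.session.save())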
channel_username = 'pumpfun_letsbonk_migrations'

# Create the client with the pre-generated session
client = TelegramClient(StringSession(string_session), api_id, api_hash)

def extract_urls(msg):
    urls = []
    if msg.entities:
        for entity in msg.entities:
            if isinstance(entity, MessageEntityTextUrl):
                urls.append(entity.url)
    return urls

def extract_solscan_account(urls):
    for url in urls:
        match = re.search(r'https://solscan\.io/account/([A-Za-z0-9]{32,})', url)
        if match:
            return match.group(1)
    return None

def parse_message(msg):
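    # Illustrative message layout this parser expects (an assumption reconstructed
    # from the regexes below, not a verbatim sample from the channel):
    #
    #   PUMPFUN MIGRATED ...
    #   Mint: <base58 token mint>
    #   Dev: <link whose URL contains profile/<base58 creator address>>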
    text = msg.message

    # Only process messages that mention "PUMPFUN MIGRATED" near the start
    if not text or "PUMPFUN MIGRATED" not in text[:50]:
        return None

    lines = text.splitlines()

    # Extract mint address from the line with 🧬 Mint:
    token_hash = None
    creator_hash = None

    for line in lines:
        # Look for mint address (🧬 Mint: followed by the address)
        mint_match = re.search(r'Mint:\s*([1-9A-HJ-NP-Za-km-z]{32,})', line)
        if mint_match:
            token_hash = mint_match.group(1)

        # Look for dev address in text (fallback method)
        dev_match = re.search(r'Dev:.*?profile/([1-9A-HJ-NP-Za-km-z]{32,})\)', line)
        if dev_match:
            creator_hash = dev_match.group(1)

    # Extract creator hash from URL entities (Telegram links)
    urls = extract_urls(msg)
    if not creator_hash:
        for url in urls:
            # Look for cielo.finance profile links
            profile_match = re.search(r'cielo\.finance/profile/([1-9A-HJ-NP-Za-km-z]{32,})', url)
            if profile_match:
                creator_hash = profile_match.group(1)
                break

    # Use message timestamp as migration_started
    migration_started = msg.date
    if hasattr(migration_started, 'replace'):
        migration_started = migration_started.replace(tzinfo=None)

    if token_hash:
        return {
            "token_hash": token_hash,
            "migration_started": migration_started,
            "creator_hash": creator_hash
        }
    return None

async def fetch_messages(days):
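    # Telegram returns channel history newest-first; we page backwards by setting
    # offset_id to the id of the oldest message in each batch and stop once a
    # message falls before start_date.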
    migrated = []  # Collect all parsed messages here

    # msg.date is UTC, so build the day-boundary window in UTC (as naive datetimes)
    today_utc = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=None)
    start_date = today_utc - timedelta(days=days)
    end_date = today_utc

    await client.start()
    entity = await client.get_entity(channel_username)

    offset_id = 0
    limit = 100
    stop_parsing = False

    while not stop_parsing:
        history = await client(GetHistoryRequest(
            peer=entity,
            offset_id=offset_id,
            offset_date=None,
            add_offset=0,
            limit=limit,
            max_id=0,
            min_id=0,
            hash=0
        ))

        messages = history.messages
        if not messages:
            break

        for msg in messages:
            msg_time = msg.date
            if hasattr(msg_time, 'replace'):
                msg_time = msg_time.replace(tzinfo=None)
            elif isinstance(msg_time, str):
                msg_time = datetime.strptime(msg_time, '%Y-%m-%d %H:%M:%S')

            if msg_time < start_date:
                stop_parsing = True
                break
            if msg_time >= end_date:
                continue

            parsed = parse_message(msg)
            if parsed:
                migrated.append({
                    "token_hash": parsed['token_hash'],
                    "migration_started": parsed['migration_started'],
                    "creator_hash": parsed['creator_hash']
                })

        offset_id = messages[-1].id
        await asyncio.sleep(1)  # throttle without blocking the event loop

    return migrated

def get_tokens_info(token_list):
    """
    Получает время миграций и создателей токенов.
    Комбинирует данные из Telegram сообщений и PumpAPI().get_pump_token_info()
    """
    migrated = []

    for i, token_data in enumerate(token_list):
        # Add delay between requests to avoid rate limiting
        if i > 0:
            time.sleep(0.1)  # 100ms delay between requests
            
        # If token_data is a dict (from fetch_messages)
        if isinstance(token_data, dict):
            token_mint = token_data.get("token_hash")
        else:
            # If it's a bare string (token hash)
            token_mint = token_data
            token_data = {"token_hash": token_mint}
            
        if not token_mint:
            continue
            
        # Fetch information from the pump.fun API
        pump_info = PumpAPI.get_pump_token_info(token_mint)
        if not pump_info:
            pump_logger.warning(f"No PumpAPI info for token {token_mint}")
            # No pump.fun data: use the Telegram data only and try JupAPI for the rest
            if token_data.get("migration_started"):
                migration_started = token_data["migration_started"]
                # If it's a string, try to parse it, otherwise assume it's already a datetime
                if isinstance(migration_started, str):
                    try:
                        migration_started = datetime.strptime(migration_started, '%m/%d/%Y %H:%M:%S')
                    except ValueError:
                        pump_logger.warning(f"Could not parse migration date for {token_mint}")
                        continue
                elif not isinstance(migration_started, datetime):
                    pump_logger.warning(f"Invalid migration_started type for {token_mint}")
                    continue
                    
                # Try to fetch the missing fields from JupAPI
                symbol = None
                icon = None
                creator_hash = token_data.get("creator_hash")
                
                pump_logger.info(f"Trying JupAPI for token {token_mint} (PumpAPI failed)...")
                jup_info = JupAPI().get_token_pool_info(token_mint)
                if jup_info:
                    symbol = jup_info.get("symbol")
                    icon = jup_info.get("icon")
                    creator_hash = creator_hash or jup_info.get("creator_hash")
                    pump_logger.info(f"Got data from JupAPI for {token_mint}")
                else:
                    pump_logger.warning(f"JupAPI also failed for {token_mint}")
                    
                migrated.append({
                    "token_hash": token_mint,
                    "migration_started": migration_started,
                    "creator_hash": creator_hash,
                    "symbol": symbol,
                    "icon": icon,
                    "name": None
                })
                pump_logger.info(f"Processed token {i+1}/{len(token_list)} from Telegram+JupAPI: {token_mint}")
            continue

        # Combine data from the two sources
        token_hash = pump_info.get("mint") or token_mint
        creator_hash = pump_info.get("creator") or token_data.get("creator_hash")
        
        # Parse the migration date from the Telegram data
        if token_data.get("migration_started"):
            migration_started = token_data["migration_started"]
            # If it's a string, try to parse it, otherwise assume it's already a datetime
            if isinstance(migration_started, str):
                try:
                    migration_started = datetime.strptime(migration_started, '%m/%d/%Y %H:%M:%S')
                except ValueError:
                    pump_logger.warning(f"Could not parse migration date for {token_mint}")
                    continue
            elif not isinstance(migration_started, datetime):
                pump_logger.warning(f"Invalid migration_started type for {token_mint}")
                continue
        else:
            pump_logger.warning(f"No migration_started for token {token_mint}")
            continue
        
        # Prefer PumpAPI data for token metadata
        symbol = pump_info.get("symbol")  # the pump.fun API may not include a symbol
        icon = pump_info.get("image_uri")
        name = pump_info.get("name")
        
        # If any fields are missing, try to fill them from JupAPI
        if not symbol or not icon or not creator_hash:
            if not symbol:
                pump_logger.info(f"Symbol missing for {token_mint}, trying JupAPI...")
            if not icon:
                pump_logger.info(f"Icon missing for {token_mint}, trying JupAPI...")
            if not creator_hash:
                pump_logger.info(f"creator_hash missing for {token_mint}, trying JupAPI...")
            jup_info = JupAPI().get_token_pool_info(token_mint)
            if jup_info:
                # Fill the missing fields from JupAPI
                symbol = symbol or jup_info.get("symbol")
                icon = icon or jup_info.get("icon")
                creator_hash = creator_hash or jup_info.get("creator_hash")
                pump_logger.info(f"Enhanced data from JupAPI for {token_mint}")
            else:
                pump_logger.warning(f"JupAPI also returned no data for {token_mint}")

        migrated.append({
            "token_hash": token_hash,
            "migration_started": migration_started,
            "creator_hash": creator_hash,
            "symbol": symbol,
            "icon": icon,
            "name": name
        })
        
        pump_logger.info(f"Processed token {i+1}/{len(token_list)}: {name or symbol or token_mint}")

    return migrated

def insert_tokens_to_database(migrated_tokens, pool='PUMP'):
    """
    Insert migrated tokens to the ClickHouse tokens database
    """
    if not migrated_tokens:
        pump_logger.warning("No tokens to insert into database")
        return False
    
    try:
        # Initialize ClickHouse manager
        db_manager = ClickHouseManager()
        
        # Check which tokens already exist in the database
        token_addresses = [token.get('token_hash', '') for token in migrated_tokens]
        existing_tokens = set()

        if token_addresses:
            # Batch check tokens to avoid query size limit (ClickHouse has 262KB query size limit)
            batch_size = 100  # Check 100 tokens at a time
            try:
                for i in range(0, len(token_addresses), batch_size):
                    batch = token_addresses[i:i + batch_size]
                    placeholders = ','.join([f"'{addr}'" for addr in batch])
                    check_query = f"SELECT token_address FROM tokens_info.tokens WHERE token_address IN ({placeholders})"

                    result = db_manager.execute_query(check_query)
                    if result:
                        existing_tokens.update({row[0] for row in result})

                pump_logger.info(f"Found {len(existing_tokens)} existing tokens in database")
            except Exception as e:
                pump_logger.warning(f"Could not check for existing tokens: {e}")
        
        # Prepare data for insertion according to tokens table schema
        data_to_insert = []
        current_time = int(time.time())
        skipped_count = 0
        
        for token in migrated_tokens:
            token_address = token.get('token_hash', '')
            
            # Skip if token already exists
            if token_address in existing_tokens:
                pump_logger.info(f"Skipping existing token: {token_address}")
                skipped_count += 1
                continue
            
            creator = token.get('creator_hash') or ''

            # Migration timestamp: convert from datetime to a unix timestamp.
            # The naive datetimes produced upstream are UTC (msg.date with tzinfo
            # stripped), so attach UTC before converting to an epoch value.
            migration_time = token.get('migration_started')
            if isinstance(migration_time, datetime):
                migrated = int(migration_time.replace(tzinfo=timezone.utc).timestamp())
            else:
                migrated = current_time

            processed_date = current_time
            processed = 0
            quality = 'unsorted'
            shortName = token.get('symbol') or ''
            image = token.get('icon') or ''
            
            data_to_insert.append((
                token_address,
                creator,
                pool,
                migrated,
                processed_date,
                processed,
                quality,
                shortName,
                image
            ))
        
        if not data_to_insert:
            pump_logger.info(f"No new tokens to insert. Skipped {skipped_count} existing tokens")
            return True
        
        # Define columns according to the tokens table schema
        columns = [
            'token_address',
            'creator',
            'pool',
            'migrated',
            'processed_date',
            'processed',
            'quality',
            'shortName',
            'image'
        ]

        # Execute bulk insert in batches to avoid query size limit
        batch_size = 100
        total_success = True
        inserted_count = 0

        for i in range(0, len(data_to_insert), batch_size):
            batch = data_to_insert[i:i + batch_size]
            success = db_manager.execute_insert('tokens_info.tokens', columns, batch)

            if success:
                inserted_count += len(batch)
                pump_logger.debug(f"Inserted batch {i//batch_size + 1}: {len(batch)} tokens")
            else:
                pump_logger.error(f"Failed to insert batch {i//batch_size + 1}")
                total_success = False

        if total_success and inserted_count > 0:
            pump_logger.info(f"✅ Successfully inserted {inserted_count} new migrated tokens into database (skipped {skipped_count} existing)")
            return True
        elif inserted_count > 0:
            pump_logger.warning(f"⚠️ Partially inserted {inserted_count}/{len(data_to_insert)} tokens (skipped {skipped_count} existing)")
            return True
        else:
            pump_logger.error("❌ Failed to insert tokens into database")
            return False
            
    except Exception as e:
        pump_logger.error(f"❌ Error inserting tokens to database: {e}")
        return False

async def pump_migrated(days=2):
    """
    Получает pump токены из Telegram канала, проверяет время миграции и создателя, 
    получает дополнительную информацию из pump.fun API
    """
    async with client:
        pump_tokens = await fetch_messages(days)
        pump_logger.info(f"PumpTokens {len(pump_tokens)} tokens from Telegram")

    # Process each token and fetch additional information
    migrated_tokens = get_tokens_info(pump_tokens)
    pump_logger.info(f"Successfully processed {len(migrated_tokens)} tokens with migration data")
    
    # Insert all migrated tokens into database
    pump_logger.info(f"Inserting {len(migrated_tokens)} tokens into database...")
    success = insert_tokens_to_database(migrated_tokens)
    
    if success:
        pump_logger.info("✅ All migrated tokens successfully added to database")
    else:
        pump_logger.error("❌ Failed to add some or all tokens to database")
    
    return migrated_tokens

if __name__ == "__main__":
    import asyncio
    asyncio.run(pump_migrated(days=7))  # Тестируем с 1 днем для ускорения