from django.core.management.base import BaseCommand, CommandError
from ml_service.tasks import schedule_catch_up_ml_patterns
import json
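
# Example invocations (a sketch; the actual command name is this file's name
# under <app>/management/commands/, assumed here to be "catch_up_ml_patterns"):
#
#   python manage.py catch_up_ml_patterns            # run synchronously
#   python manage.py catch_up_ml_patterns --async    # queue via Celery
#   python manage.py catch_up_ml_patterns --verbose  # print the full result dict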


class Command(BaseCommand):
    help = 'Manually trigger the ML patterns catch-up task, which analyzes patterns for all users who need it (users with stamps but missing or outdated patterns).'

    def add_arguments(self, parser):
        parser.add_argument(
            '--async',
            action='store_true',
            help='Run asynchronously via Celery (default: synchronous execution)',
        )
        parser.add_argument(
            '--verbose',
            action='store_true',
            help='Show detailed output',
        )

    def handle(self, *args, **options):
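        # Both flags are store_true options, so they arrive in the options
        # dict as booleans under their dest names ('async' and 'verbose').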
        is_async = options.get('async', False)
        verbose = options.get('verbose', False)

        if is_async:
            self.stdout.write(self.style.WARNING('Queuing ML catch-up task asynchronously...'))
            self.stdout.write('Note: Celery worker must be running for this to execute.')
            
            try:
                result = schedule_catch_up_ml_patterns.delay()
                self.stdout.write(
                    self.style.SUCCESS('✓ Task queued successfully!')
                )
                self.stdout.write(f'Task ID: {result.id}')
                self.stdout.write('')
                self.stdout.write('To check task status, use:')
                self.stdout.write('  from celery.result import AsyncResult')
                self.stdout.write(f'  result = AsyncResult("{result.id}")')
                self.stdout.write('  print(result.state)  # PENDING, SUCCESS, FAILURE, etc.')
                self.stdout.write('  print(result.get())  # Get result (blocks until complete)')
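                # Note: inspecting AsyncResult state/results as shown above
                # requires a Celery result backend to be configured.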
            except Exception as e:
                self.stdout.write(
                    self.style.ERROR(f'✗ Failed to queue task: {str(e)}')
                )
                self.stdout.write('Make sure Celery worker is running and Redis is accessible.')
                # Exit non-zero so callers (cron, CI) can detect the failure.
                raise CommandError('Failed to queue the ML catch-up task.')
        else:
            self.stdout.write(self.style.WARNING('Running ML catch-up task synchronously...'))
            self.stdout.write('This may take a while depending on the number of users.')
            self.stdout.write('All user tasks will be executed immediately (not queued).')
            self.stdout.write('')
            
            try:
                # Pass execute_synchronously=True to process users immediately
                result = schedule_catch_up_ml_patterns(execute_synchronously=True)
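                # The reporting below assumes the task returns a dict with a
                # 'status' key plus counters such as 'total_users_with_stamps',
                # 'users_needing_analysis', 'users_processed'/'users_failed'
                # (sync) or 'users_queued' (async), and an optional 'error'.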
                
                if verbose:
                    self.stdout.write('')
                    self.stdout.write(self.style.SUCCESS('Task completed successfully!'))
                    self.stdout.write('')
                    self.stdout.write('Result:')
                    self.stdout.write(json.dumps(result, indent=2, default=str))
                else:
                    status = result.get('status', 'unknown')
                    if status == 'success':
                        total_users = result.get('total_users_with_stamps', 0)
                        users_needing = result.get('users_needing_analysis', 0)
                        # No default for 'users_processed': its presence is what
                        # distinguishes the synchronous and async result shapes below.
                        users_processed = result.get('users_processed')
                        users_failed = result.get('users_failed', 0)
                        users_queued = result.get('users_queued', 0)  # For async mode
                        
                        self.stdout.write('')
                        self.stdout.write(self.style.SUCCESS('✓ Task completed successfully!'))
                        self.stdout.write('')
                        self.stdout.write(f'Total users with stamps: {total_users}')
                        self.stdout.write(f'Users needing analysis: {users_needing}')
                        
                        if users_processed is not None:
                            # Synchronous mode
                            self.stdout.write(f'Users processed: {users_processed}')
                            if users_failed > 0:
                                self.stdout.write(self.style.WARNING(f'Users failed: {users_failed}'))
                        else:
                            # Async mode
                            self.stdout.write(f'Users queued for analysis: {users_queued}')
                            if users_queued > 0:
                                self.stdout.write('')
                                self.stdout.write(self.style.WARNING(
                                    f'Note: {users_queued} user(s) have been queued for pattern analysis.'
                                ))
                                self.stdout.write('Make sure Celery worker is running to process these tasks.')
                    elif status == 'skipped':
                        self.stdout.write('')
                        self.stdout.write(self.style.WARNING('Task skipped: Another instance is already running.'))
                        self.stdout.write('This is normal if the scheduled task is currently executing.')
                    else:
                        self.stdout.write('')
                        self.stdout.write(self.style.ERROR(f'Task completed with status: {status}'))
                        if 'error' in result:
                            self.stdout.write(f'Error: {result.get("error")}')
                
            except Exception as e:
                self.stdout.write('')
                self.stdout.write(
                    self.style.ERROR(f'✗ Task failed with error: {str(e)}')
                )
                if verbose:
                    import traceback
                    self.stdout.write('')
                    self.stdout.write('Traceback:')
                    self.stdout.write(traceback.format_exc())
                # Exit non-zero so callers (cron, CI) can detect the failure.
                raise CommandError('ML catch-up task failed.')

