#!/usr/bin/env python

"""

Simplified fine-tuning script for DeepSeek-R1-Distill-Qwen-14B-unsloth-bnb-4bit

- Optimized for A100 GPU with pre-tokenized datasets

- Research training only (no inference)

- CLOUD BASED TRAINING - Hugging Face Spaces

"""

import os
import logging
import json
import torch
import argparse
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, Trainer, BitsAndBytesConfig
from transformers.data.data_collator import DataCollatorMixin
from peft import LoraConfig, get_peft_model
from dotenv import load_dotenv
from huggingface_hub import HfApi

# Basic environment setup for A100
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True,max_split_size_mb:512"
os.environ["NCCL_P2P_DISABLE"] = "1"  # Can help with A100 multi-GPU setups

# Default to GPU mode (the --force_cpu flag at the bottom of this file overrides this)
os.environ["FORCE_GPU"] = "1"

# Disable tokenizers parallelism warning
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Create triton directory to avoid warning
os.makedirs(os.path.expanduser("~/.triton/autotune"), exist_ok=True)

# Default dataset with proper namespace
DEFAULT_DATASET = "George-API/phi4-cognitive-dataset"

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Determine if we're running in HF Space
def is_running_in_space():
    """Check if we're running in a Hugging Face Space"""
    return os.environ.get("SPACE_ID") is not None

# Check if a model is pre-quantized (4-bit or 8-bit)
def is_model_pre_quantized(model_name):
    """Check if model is already pre-quantized based on name"""
    pre_quantized_keywords = ["bnb-4bit", "4bit", "8bit", "quantized", "unsloth"]
    return any(keyword in model_name.lower() for keyword in pre_quantized_keywords)
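
# For example, is_model_pre_quantized("DeepSeek-R1-Distill-Qwen-14B-unsloth-bnb-4bit") returns True.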

# Check if GPU is available
def is_gpu_available():
    """Simple check if CUDA is available according to PyTorch"""
    return torch.cuda.is_available()

# Check if fully compatible CUDA is available for training
def is_cuda_fully_available(model_name):
    """

    Check if CUDA is fully available for training with bitsandbytes.

    More strict than torch.cuda.is_available() - requires full GPU compatibility.

    """
    # If model is pre-quantized and we're in a Space with GPU selected, trust it
    if is_running_in_space() and is_model_pre_quantized(model_name) and is_gpu_available():
        logger.info("Pre-quantized model detected with GPU in Hugging Face Space - using GPU mode")
        return True
    
    # For non-Space environments, or non-pre-quantized models, do detailed checks
    
    # If FORCE_GPU is set, trust that
    if os.environ.get("FORCE_GPU") == "1":
        logger.info("GPU mode forced by environment variable")
        return True
    
    # If running in Space and FORCE_GPU not explicitly set, be cautious
    if is_running_in_space() and os.environ.get("FORCE_GPU") != "1":
        # Check if CUDA is actually available
        if is_gpu_available():
            logger.info("GPU detected in Hugging Face Space")
            return True
        else:
            logger.warning("No GPU detected in Hugging Face Space despite hardware selection")
            return False
    
    # If CUDA is not available according to PyTorch, we definitely can't use it
    if not is_gpu_available():
        logger.warning("CUDA not available according to PyTorch")
        return False
    
    # Only test bitsandbytes if necessary (not for pre-quantized models)
    if not is_model_pre_quantized(model_name):
        try:
            import bitsandbytes as bnb
            logger.info("BitsAndBytes package is installed")
            
            # Try to create a dummy 4-bit computation to verify compatibility
            try:
                dummy = torch.zeros(1, device="cuda")
                a = bnb.nn.Linear4bit(1, 1)
                a.to(device="cuda")
                result = a(dummy)
                logger.info("BitsAndBytes with CUDA is working correctly")
                return True
            except Exception as e:
                logger.warning(f"BitsAndBytes CUDA compatibility test failed: {str(e)}")
                return False
                
        except ImportError:
            logger.warning("BitsAndBytes package not installed - cannot use 4-bit quantization")
            return False
        except Exception as e:
            logger.warning(f"Unexpected error checking BitsAndBytes: {str(e)}")
            return False
    
    # For pre-quantized models without bitsandbytes test
    return is_gpu_available()

# Create a marker file to indicate training is active
def create_training_marker(output_dir):
    os.makedirs(output_dir, exist_ok=True)
    with open("TRAINING_ACTIVE", "w") as f:
        f.write(f"Training active in {output_dir}")
    
    with open(os.path.join(output_dir, "RESEARCH_TRAINING_ONLY"), "w") as f:
        f.write("This model is for research training only. No interactive outputs.")

# Remove the training marker file
def remove_training_marker():
    if os.path.exists("TRAINING_ACTIVE"):
        os.remove("TRAINING_ACTIVE")
        logger.info("Removed training active marker")

# Function to upload model to Hugging Face Hub
def upload_to_huggingface(output_dir, repo_name=None, private=False):
    """

    Upload the trained model to Hugging Face Hub

    

    Args:

        output_dir: Directory containing the model files

        repo_name: Name of the repository on HF Hub (default: derived from output_dir)

        private: Whether the repository should be private (default: False)

    

    Returns:

        str: URL of the uploaded model on HF Hub

    """
    logger.info(f"Uploading model from {output_dir} to Hugging Face Hub")
    
    # Get HF token from environment
    token = os.environ.get("HF_TOKEN")
    if not token:
        logger.error("HF_TOKEN environment variable not set. Please set it to upload to Hugging Face Hub.")
        logger.error("You can get a token from https://huggingface.co/settings/tokens")
        raise ValueError("HF_TOKEN not set")
    
    # Get or create repo name
    if not repo_name:
        # Use the output directory name as the repository name
        repo_name = os.path.basename(os.path.normpath(output_dir))
        logger.info(f"Using repository name: {repo_name}")
    
    # Get HF username
    api = HfApi(token=token)
    user_info = api.whoami()
    username = user_info["name"]
    
    # Create full repository name
    full_repo_name = f"{username}/{repo_name}"
    logger.info(f"Creating repository: {full_repo_name}")
    
    # Create repository if it doesn't exist
    api.create_repo(
        repo_id=full_repo_name,
        exist_ok=True,
        private=private
    )
    
    # Upload model files
    logger.info(f"Uploading files from {output_dir} to {full_repo_name}")
    api.upload_folder(
        folder_path=output_dir,
        repo_id=full_repo_name,
        commit_message="Upload model files"
    )
    
    # Create model card
    model_card = f"""

# {repo_name}



This model was fine-tuned using the script at https://github.com/George-API/phi4-cognitive-dataset.



## Model details

- Base model: DeepSeek-R1-Distill-Qwen-14B-unsloth-bnb-4bit

- Dataset: {DEFAULT_DATASET}

- Training: Research only

    """
    
    with open(os.path.join(output_dir, "README.md"), "w") as f:
        f.write(model_card)
    
    # Upload the model card
    api.upload_file(
        path_or_fileobj=os.path.join(output_dir, "README.md"),
        path_in_repo="README.md",
        repo_id=full_repo_name,
        commit_message="Add model card"
    )
    
    logger.info(f"Model successfully uploaded to https://huggingface.co/{full_repo_name}")
    return f"https://huggingface.co/{full_repo_name}"

# Custom data collator for pre-tokenized data
class PreTokenizedCollator(DataCollatorMixin):
    def __init__(self, pad_token_id=0, tokenizer=None):
        self.pad_token_id = pad_token_id
        self.tokenizer = tokenizer  # Keep reference to tokenizer for fallback
        
    def __call__(self, features):
        # Extract features properly from the batch
        processed_features = []
        for feature in features:
            # If input_ids is directly available, use it
            if 'input_ids' in feature and isinstance(feature['input_ids'], list):
                processed_features.append(feature)
                continue
                
            # If input_ids is not available, try to extract from conversations
            if 'input_ids' not in feature and 'conversations' in feature:
                conversations = feature['conversations']
                
                if isinstance(conversations, list) and len(conversations) > 0:
                    # Case 1: If conversations has 'input_ids' field (pre-tokenized)
                    if isinstance(conversations[0], dict) and 'input_ids' in conversations[0]:
                        feature['input_ids'] = conversations[0]['input_ids']
                    
                    # Case 2: If conversations itself contains input_ids
                    elif all(isinstance(x, int) for x in conversations):
                        feature['input_ids'] = conversations
                    
                    # Case 3: If conversations has 'content' field
                    elif isinstance(conversations[0], dict) and 'content' in conversations[0]:
                        content = conversations[0]['content']
                        
                        # If content is already tokens, use directly
                        if isinstance(content, list) and all(isinstance(x, int) for x in content):
                            feature['input_ids'] = content
                        # If content is a string and we have tokenizer, tokenize as fallback
                        elif isinstance(content, str) and self.tokenizer:
                            logger.warning("Tokenizing string content as fallback")
                            feature['input_ids'] = self.tokenizer.encode(content, add_special_tokens=False)
            
            # Ensure input_ids is present and is a list of integers
            if 'input_ids' in feature:
                if isinstance(feature['input_ids'], str) and self.tokenizer:
                    feature['input_ids'] = self.tokenizer.encode(feature['input_ids'], add_special_tokens=False)
                elif not isinstance(feature['input_ids'], list):
                    try:
                        feature['input_ids'] = list(feature['input_ids'])
                    except Exception as e:
                        logger.error(f"Could not convert input_ids to list: {e}")
                        continue
                
                processed_features.append(feature)
        
        if len(processed_features) == 0:
            raise ValueError("No valid examples found. Check dataset structure.")
            
        # Determine max length in this batch
        batch_max_len = max(len(x["input_ids"]) for x in processed_features)
        
        # Initialize batch tensors
        batch = {
            "input_ids": torch.ones((len(processed_features), batch_max_len), dtype=torch.long) * self.pad_token_id,
            "attention_mask": torch.zeros((len(processed_features), batch_max_len), dtype=torch.long),
            "labels": torch.ones((len(processed_features), batch_max_len), dtype=torch.long) * -100  # -100 is ignored in loss
        }
        
        # Fill batch tensors
        for i, feature in enumerate(processed_features):
            input_ids = feature["input_ids"]
            seq_len = len(input_ids)
            
            # Convert to tensor if it's a list
            if isinstance(input_ids, list):
                input_ids = torch.tensor(input_ids, dtype=torch.long)
                
            # Copy data to batch tensors
            batch["input_ids"][i, :seq_len] = input_ids
            batch["attention_mask"][i, :seq_len] = 1
            
            # If there are labels, use them, otherwise use input_ids
            if "labels" in feature:
                labels = feature["labels"]
                if isinstance(labels, list):
                    labels = torch.tensor(labels, dtype=torch.long)
                batch["labels"][i, :len(labels)] = labels
            else:
                batch["labels"][i, :seq_len] = input_ids
        
        return batch
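
# Illustrative feature layouts (example token ids only) that the collator above accepts:
#   {"input_ids": [101, 2023, ...]}                        # already tokenized
#   {"conversations": [{"input_ids": [101, 2023, ...]}]}   # pre-tokenized, nested in conversations
#   {"conversations": [101, 2023, ...]}                    # conversations is itself a token list
#   {"conversations": [{"content": [101, 2023, ...]}]}     # token list under "content"
#   {"conversations": [{"content": "raw text"}]}           # string content, tokenized as a fallback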

# Preprocess dataset to ensure all entries are pre-tokenized
def preprocess_dataset(dataset, tokenizer):
    """Ensure dataset is fully pre-tokenized to avoid tokenization during training"""
    logger.info("Pre-processing dataset to ensure all entries are tokenized")
    
    def process_example(example):
        # If already has input_ids as list of integers, keep as is
        if 'input_ids' in example and isinstance(example['input_ids'], list) and all(isinstance(x, int) for x in example['input_ids']):
            return example
            
        # If has conversations with content field
        if 'conversations' in example:
            conversations = example['conversations']
            if isinstance(conversations, list) and len(conversations) > 0:
                # If conversations has content field, tokenize it
                if isinstance(conversations[0], dict) and 'content' in conversations[0]:
                    content = conversations[0]['content']
                    if isinstance(content, str):
                        example['input_ids'] = tokenizer.encode(content, add_special_tokens=False)
                        return example
                
        # For any other format, try to extract text and tokenize
        text = None
        if 'text' in example:
            text = example['text']
        elif 'content' in example:
            text = example['content']
            
        if text and isinstance(text, str):
            example['input_ids'] = tokenizer.encode(text, add_special_tokens=False)
            
        return example
    
    return dataset.map(process_example)

# Load and prepare dataset with proper sorting
def load_and_prepare_dataset(dataset_name, config, tokenizer=None):
    """Load and prepare the dataset for fine-tuning with proper sorting"""
    # Use the default dataset if the provided one matches the default name without namespace
    if dataset_name == "phi4-cognitive-dataset":
        dataset_name = DEFAULT_DATASET
        logger.info(f"Using full dataset path: {dataset_name}")
    
    logger.info(f"Loading dataset: {dataset_name}")
    
    try:
        # Load dataset
        try:
            dataset = load_dataset(dataset_name)
        except Exception as e:
            if "doesn't exist on the Hub or cannot be accessed" in str(e):
                logger.error(f"Dataset '{dataset_name}' not found. Make sure it exists and is accessible.")
                logger.error(f"If using a private dataset, check your HF_TOKEN is set in your environment.")
                logger.error(f"If missing namespace, try using the full path: 'George-API/phi4-cognitive-dataset'")
            raise
        
        # Extract the split we want to use (usually 'train')
        if 'train' in dataset:
            dataset = dataset['train']
        
        # Get the dataset config
        dataset_config = config.get("dataset_config", {})
        sort_field = dataset_config.get("sort_by_field", "prompt_number")
        
        # Preprocess dataset to ensure all entries are pre-tokenized
        if tokenizer is not None:
            dataset = preprocess_dataset(dataset, tokenizer)
        
        # Sort in ascending order by specified field
        logger.info(f"Sorting dataset by {sort_field} in ascending order")
        dataset = dataset.sort(sort_field)
        
        # Print dataset info
        logger.info(f"Dataset loaded with {len(dataset)} entries")
        logger.info(f"Dataset columns: {dataset.column_names}")
        
        # Print sample for debugging
        if len(dataset) > 0:
            logger.info(f"Sample entry structure: {list(dataset[0].keys())}")
        
        return dataset
    
    except Exception as e:
        logger.error(f"Error loading dataset: {str(e)}")
        raise

# Load a simpler, smaller model for CPU mode
def get_small_model_name(original_model_name):
    """Get a smaller model name for CPU mode"""
    # If using DeepSeek-R1-Distill-Qwen-14B, use a smaller model
    if "DeepSeek" in original_model_name and "14B" in original_model_name:
        logger.info("Using smaller model for CPU mode")
        return "distilgpt2"  # Much smaller model
    
    # Otherwise just use the original model
    return original_model_name
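
# Sketch of a minimal transformers_config.json (illustrative values and model path, not an
# official template); only keys that train() below actually reads are shown:
# {
#   "model_config":        {"model_name_or_path": "unsloth/DeepSeek-R1-Distill-Qwen-14B-unsloth-bnb-4bit", "use_cache": false},
#   "quantization_config": {"load_in_4bit": true, "bnb_4bit_compute_dtype": "bfloat16",
#                           "bnb_4bit_quant_type": "nf4", "bnb_4bit_use_double_quant": true},
#   "lora_config":         {"r": 8, "lora_alpha": 32, "lora_dropout": 0.05, "bias": "none"},
#   "training_config":     {"num_train_epochs": 3, "per_device_train_batch_size": 3,
#                           "gradient_accumulation_steps": 2, "learning_rate": 2e-5},
#   "hardware_config":     {"bf16": true, "fp16": false, "gradient_checkpointing": true},
#   "dataset_config":      {"sort_by_field": "prompt_number"}
# }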

# Main training function
def train(config_path, dataset_name, output_dir, upload_to_hub=False, hub_repo_name=None, private_repo=False):
    # Load environment variables
    load_dotenv()
    
    # Load config
    with open(config_path, 'r') as f:
        config = json.load(f)
    
    # Create training marker
    create_training_marker(output_dir)
    
    try:
        # Extract configs
        model_config = config.get("model_config", {})
        training_config = config.get("training_config", {})
        hardware_config = config.get("hardware_config", {})
        lora_config = config.get("lora_config", {})
        dataset_config = config.get("dataset_config", {})
        
        # Log dataset info before loading
        logger.info(f"Will load dataset: {dataset_name}")
        if dataset_name != DEFAULT_DATASET and "phi4-cognitive-dataset" in dataset_name:
            logger.warning(f"Dataset name may need namespace prefix. Current: {dataset_name}")
        
        # Load model settings
        original_model_name = model_config.get("model_name_or_path")
            
        # Special handling for pre-quantized models like unsloth models
        is_pre_quantized = is_model_pre_quantized(original_model_name)
        if is_pre_quantized:
            logger.info(f"Detected pre-quantized model: {original_model_name}")
            
        # Determine if we can use CUDA with bitsandbytes
        can_use_4bit = is_cuda_fully_available(original_model_name)
        
        # For CPU mode, use a smaller model (unless pre-quantized)
        if not can_use_4bit and is_running_in_space() and not is_pre_quantized:
            model_name = get_small_model_name(original_model_name)
            logger.warning(f"Using smaller model {model_name} in CPU mode for Hugging Face Space")
        else:
            model_name = original_model_name
            
        logger.info(f"Using model: {model_name}")
        
        # Initialize tokenizer
        logger.info("Loading tokenizer")
        tokenizer = AutoTokenizer.from_pretrained(
            model_name, 
            trust_remote_code=True
        )
        tokenizer.pad_token = tokenizer.eos_token
        
        # Load and prepare dataset with proper sorting
        dataset = load_and_prepare_dataset(dataset_name, config, tokenizer)
        
        # Get quantization config
        quant_config = config.get("quantization_config", {})
        
        # Determine if we should use 4-bit quantization
        # Pre-quantized models always use their built-in quantization
        if is_pre_quantized:
            use_4bit = True
            logger.info("Using pre-quantized model with built-in quantization")
        elif can_use_4bit and quant_config.get("load_in_4bit", True):
            use_4bit = True
            logger.info("Using 4-bit quantization with CUDA")
        else:
            use_4bit = False
            logger.warning("Using CPU mode without quantization")
        
        # Determine compute dtype based on hardware config
        compute_dtype = torch.bfloat16 if hardware_config.get("bf16", False) else torch.float16
        logger.info(f"Using compute dtype: {compute_dtype}")
        
        # For pre-quantized models, always use device_map="auto"
        if is_pre_quantized and is_gpu_available():
            logger.info("Loading pre-quantized model with GPU support")
            model = AutoModelForCausalLM.from_pretrained(
                model_name,
                device_map="auto",
                torch_dtype=compute_dtype,
                trust_remote_code=True,
                use_cache=model_config.get("use_cache", False)
            )
        # Create model with proper configuration for non-pre-quantized models
        elif use_4bit and not is_pre_quantized:
            logger.info(f"Loading model with 4-bit quantization")
            
            # Create quantization config for GPU
            bnb_compute_dtype = torch.bfloat16 if quant_config.get("bnb_4bit_compute_dtype", "float16") == "bfloat16" else torch.float16
            bnb_config = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_compute_dtype=bnb_compute_dtype,
                bnb_4bit_quant_type=quant_config.get("bnb_4bit_quant_type", "nf4"),
                bnb_4bit_use_double_quant=quant_config.get("bnb_4bit_use_double_quant", True)
            )
            
            # Load 4-bit quantized model for GPU
            model = AutoModelForCausalLM.from_pretrained(
                model_name,
                quantization_config=bnb_config,
                device_map="auto",
                torch_dtype=compute_dtype,
                trust_remote_code=True,
                use_cache=model_config.get("use_cache", False),
                attn_implementation=hardware_config.get("attn_implementation", "flash_attention_2")
            )
        else:
            # CPU fallback mode (no quantization; this branch always pins the model to CPU)
            logger.warning("Loading model in CPU fallback mode (no 4-bit quantization)")
            
            # Force CPU (safest option in HF Spaces)
            device_map = "cpu"
            dtype = torch.float32
            logger.info("Forcing CPU mode for stability")
            
            # Load model without quantization
            model = AutoModelForCausalLM.from_pretrained(
                model_name,
                device_map=device_map,
                torch_dtype=dtype,
                trust_remote_code=True,
                use_cache=model_config.get("use_cache", False),
                low_cpu_mem_usage=True
            )
        
        # Apply rope scaling if configured and available
        if "rope_scaling" in model_config and hasattr(model.config, "rope_scaling"):
            logger.info(f"Applying rope scaling: {model_config['rope_scaling']}")
            model.config.rope_scaling = model_config["rope_scaling"]
        
        # Create LoRA config
        logger.info("Creating LoRA configuration")
        
        # For pre-quantized models, we need proper target modules
        default_target_modules = ["q_proj", "k_proj", "v_proj", "o_proj"]
        
        # For pre-quantized models, especially Unsloth ones, we need to be careful with the target modules
        if is_pre_quantized:
            # For Unsloth models, use special configuration
            if "unsloth" in model_name.lower():
                default_target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
                logger.info("Using Unsloth-specific LoRA target modules")
        
        lora_config_obj = LoraConfig(
            r=lora_config.get("r", 8),
            lora_alpha=lora_config.get("lora_alpha", 32),
            lora_dropout=lora_config.get("lora_dropout", 0.05),
            bias=lora_config.get("bias", "none"),
            task_type="CAUSAL_LM",  # Explicitly set the task type
            target_modules=lora_config.get("target_modules", default_target_modules)
        )
        
        # Apply LoRA to model
        logger.info("Applying LoRA to model")
        model = get_peft_model(model, lora_config_obj)
        logger.info("Successfully applied LoRA")
        
        # Ensure model parameters that need gradients are properly set
        if is_pre_quantized:
            logger.info("Verifying gradient settings for pre-quantized model")
            for name, param in model.named_parameters():
                if 'lora' in name:  # Only LoRA parameters should be trained
                    if not param.requires_grad:
                        logger.warning(f"LoRA parameter {name} doesn't have requires_grad=True, fixing...")
                        param.requires_grad = True
        
        # Always use minimal batch size for HF Space CPU
        if is_running_in_space() and not can_use_4bit and not is_pre_quantized:
            per_device_train_batch_size = 1
            logger.warning("Using minimal batch size for CPU training in Hugging Face Space")
        else:
            # Determine batch size based on available hardware
            if torch.cuda.is_available():
                gpu_info = torch.cuda.get_device_properties(0)
                logger.info(f"GPU: {gpu_info.name}, VRAM: {gpu_info.total_memory / 1e9:.2f} GB")
                
                # Check if it's an A100/A10G or other high-memory GPU (>40 GB)
                if "A100" in gpu_info.name or "A10G" in gpu_info.name or gpu_info.total_memory > 40e9:
                    logger.info("Detected A100/A10G or high-memory GPU - using configured batch size")
                    per_device_train_batch_size = training_config.get("per_device_train_batch_size", 3)
                else:
                    # Use a smaller batch size for other GPUs
                    per_device_train_batch_size = 2
                    logger.info(f"Using conservative batch size for non-A100 GPU: {per_device_train_batch_size}")
            else:
                # Use minimal batch size for CPU
                per_device_train_batch_size = 1
                logger.warning("No GPU detected - using minimal batch size for CPU training")
        
        # Use full training parameters for pre-quantized models or GPU mode
        if is_pre_quantized or can_use_4bit or not is_running_in_space():
            num_train_epochs = training_config.get("num_train_epochs", 3)
            gradient_accumulation_steps = training_config.get("gradient_accumulation_steps", 2)
            bf16 = torch.cuda.is_available() and hardware_config.get("bf16", True)
            # fp16 and bf16 are mutually exclusive in TrainingArguments; prefer bf16 when both are enabled
            fp16 = torch.cuda.is_available() and hardware_config.get("fp16", False) and not bf16
            # Disable gradient checkpointing for pre-quantized models as it can cause gradient issues
            gradient_checkpointing = torch.cuda.is_available() and hardware_config.get("gradient_checkpointing", True) and not is_pre_quantized
            dataloader_workers = training_config.get("dataloader_num_workers", 4)
            eval_strategy = training_config.get("eval_strategy", "no")
            load_best_model_at_end = False  # Must be False when eval_strategy is "no"
            
            if is_pre_quantized:
                logger.info("Disabled gradient checkpointing for pre-quantized model to avoid gradient issues")
            
            logger.info("Using full training parameters for GPU mode")
        else:
            # For Space CPU training mode, use minimal parameters
            num_train_epochs = 1
            gradient_accumulation_steps = 1
            fp16 = False
            bf16 = False
            gradient_checkpointing = False
            dataloader_workers = 0
            eval_strategy = "no"
            load_best_model_at_end = False
            logger.warning("Using minimal parameters for CPU training in Space")
        
        # Configure reporting backends
        reports = training_config.get("report_to", ["tensorboard"])
        
        # Create training arguments
        logger.info("Creating training arguments")
        training_args = TrainingArguments(
            output_dir=output_dir,
            num_train_epochs=num_train_epochs,
            per_device_train_batch_size=per_device_train_batch_size,
            gradient_accumulation_steps=gradient_accumulation_steps,
            learning_rate=training_config.get("learning_rate", 2e-5),
            lr_scheduler_type=training_config.get("lr_scheduler_type", "cosine"),
            warmup_ratio=training_config.get("warmup_ratio", 0.03),
            weight_decay=training_config.get("weight_decay", 0.01),
            optim=training_config.get("optim", "adamw_torch"),
            fp16=fp16,
            bf16=bf16,
            max_grad_norm=training_config.get("max_grad_norm", 0.3),
            logging_steps=training_config.get("logging_steps", 10),
            save_steps=training_config.get("save_steps", 200),
            save_total_limit=training_config.get("save_total_limit", 3),
            eval_strategy=eval_strategy,
            load_best_model_at_end=load_best_model_at_end,
            report_to=reports,
            logging_first_step=training_config.get("logging_first_step", True),
            disable_tqdm=training_config.get("disable_tqdm", False),
            remove_unused_columns=False,
            gradient_checkpointing=gradient_checkpointing,
            dataloader_num_workers=dataloader_workers,
            group_by_length=training_config.get("group_by_length", True)
        )
        
        # Create trainer with pre-tokenized collator
        logger.info("Creating trainer with pre-tokenized collator")
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=dataset,
            data_collator=PreTokenizedCollator(
                pad_token_id=tokenizer.pad_token_id, 
                tokenizer=tokenizer
            ),
            compute_metrics=None,
            tokenizer=tokenizer  # Provide tokenizer so the Trainer can handle padding and save it with checkpoints
        )
        
        # Start training
        logger.info("Starting training - RESEARCH PHASE ONLY")
        trainer.train()
        
        # Save the model
        logger.info(f"Saving model to {output_dir}")
        trainer.save_model(output_dir)
        
        # Save LoRA adapter separately
        lora_output_dir = os.path.join(output_dir, "lora_adapter")
        model.save_pretrained(lora_output_dir)
        logger.info(f"Saved LoRA adapter to {lora_output_dir}")
        
        # Save tokenizer
        tokenizer_output_dir = os.path.join(output_dir, "tokenizer")
        tokenizer.save_pretrained(tokenizer_output_dir)
        logger.info(f"Saved tokenizer to {tokenizer_output_dir}")
        
        # Save config for reference
        with open(os.path.join(output_dir, "training_config.json"), "w") as f:
            json.dump(config, f, indent=2)
        
        logger.info("Training complete - RESEARCH PHASE ONLY")
        
        # Upload to Hugging Face Hub if requested
        if upload_to_hub:
            hub_url = upload_to_huggingface(
                output_dir=output_dir,
                repo_name=hub_repo_name,
                private=private_repo
            )
            logger.info(f"Model uploaded to Hugging Face Hub: {hub_url}")
        
        return output_dir
    
    finally:
        # Always remove the training marker when done
        remove_training_marker()
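
# Programmatic usage (sketch): the same entry point the CLI below wraps.
#   output_path = train("transformers_config.json", DEFAULT_DATASET, "fine_tuned_model", upload_to_hub=False)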

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Fine-tune DeepSeek model (Research Only)")
    parser.add_argument("--config", type=str, default="transformers_config.json", 
                      help="Path to the configuration file")
    parser.add_argument("--dataset", type=str, default=DEFAULT_DATASET, 
                      help="Dataset name or path")
    parser.add_argument("--output_dir", type=str, default="fine_tuned_model", 
                      help="Output directory for the fine-tuned model")
    parser.add_argument("--upload_to_hub", action="store_true",
                      help="Upload the model to Hugging Face Hub after training")
    parser.add_argument("--hub_repo_name", type=str, default=None,
                      help="Repository name for the model on Hugging Face Hub")
    parser.add_argument("--private_repo", action="store_true",
                      help="Make the Hugging Face Hub repository private")
    parser.add_argument("--force_cpu", action="store_true",
                      help="Force CPU mode even if CUDA is available")
    
    args = parser.parse_args()
    
    # Force CPU mode if requested
    if args.force_cpu:
        os.environ["FORCE_GPU"] = "0"
        logger.info("Forcing CPU mode as requested")
    
    try:
        output_path = train(
            args.config, 
            args.dataset, 
            args.output_dir,
            upload_to_hub=args.upload_to_hub,
            hub_repo_name=args.hub_repo_name,
            private_repo=args.private_repo
        )
        print(f"Research training completed. Model saved to: {output_path}")
        
        if args.upload_to_hub:
            print("Model was also uploaded to Hugging Face Hub.")
    except Exception as e:
        logging.error(f"Training failed: {str(e)}")
        remove_training_marker()  # Clean up marker if training fails
        raise