""" |
|
Fine-tuning script for DeepSeek-R1-Distill-Qwen-14B-bnb-4bit using unsloth |
|
RESEARCH TRAINING PHASE ONLY - No output generation |
|
WORKS WITH PRE-TOKENIZED DATASET - No re-tokenization |
|
""" |
|
|
|
import os

# Disable flash attention before transformers/unsloth are imported so the setting
# is already in the environment when those libraries load.
os.environ["TRANSFORMERS_NO_FLASH_ATTENTION"] = "1"

import json
import logging
import argparse

import numpy as np
import torch
from dotenv import load_dotenv
from datasets import load_dataset

# Import unsloth before transformers/peft so its runtime patches are applied first.
from unsloth import FastLanguageModel

import transformers
from transformers import AutoTokenizer, TrainingArguments, Trainer, AutoModelForCausalLM, AutoConfig
from transformers.data.data_collator import DataCollatorMixin
from peft import LoraConfig
|
|
|
|
|
try: |
|
import tensorboard |
|
TENSORBOARD_AVAILABLE = True |
|
except ImportError: |
|
TENSORBOARD_AVAILABLE = False |
|
print("Tensorboard not available. Will skip tensorboard logging.") |
|
|
|
|
|
logging.basicConfig( |
|
level=logging.INFO, |
|
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', |
|
handlers=[ |
|
logging.StreamHandler(), |
|
logging.FileHandler("training.log") |
|
] |
|
) |
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
DEFAULT_DATASET = "George-API/phi4-cognitive-dataset" |
|
|
|
def load_config(config_path): |
|
"""Load the transformers config from JSON file""" |
|
logger.info(f"Loading config from {config_path}") |
|
with open(config_path, 'r') as f: |
|
config = json.load(f) |
|
return config |
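
# Illustrative shape of the config JSON consumed above (keys mirror the .get() calls
# in train() below; the values shown are examples, not requirements):
# {
#   "model_config":    {"model_name_or_path": "unsloth/DeepSeek-R1-Distill-Qwen-14B-bnb-4bit"},
#   "training_config": {"num_train_epochs": 3, "per_device_train_batch_size": 2,
#                       "gradient_accumulation_steps": 4, "learning_rate": 2e-5,
#                       "max_seq_length": 2048, "output_dir": "fine_tuned_model"},
#   "hardware_config": {"fp16": true, "bf16": false},
#   "lora_config":     {"r": 16, "lora_alpha": 32, "lora_dropout": 0.05, "bias": "none"},
#   "dataset_config":  {"sort_by_field": "prompt_number", "sort_direction": "ascending"}
# }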
|
|
|
def load_and_prepare_dataset(dataset_name, config): |
|
""" |
|
Load and prepare the dataset for fine-tuning. |
|
Sort entries by prompt_number as required. |
|
NO TOKENIZATION - DATASET IS ALREADY TOKENIZED |
|
""" |
|
|
|
if dataset_name == "phi4-cognitive-dataset": |
|
dataset_name = DEFAULT_DATASET |
|
|
|
logger.info(f"Loading dataset: {dataset_name}") |
|
|
|
try: |
|
|
|
dataset = load_dataset(dataset_name) |
|
|
|
|
|
if 'train' in dataset: |
|
dataset = dataset['train'] |
|
|
|
|
|
dataset_config = config.get("dataset_config", {}) |
|
sort_field = dataset_config.get("sort_by_field", "prompt_number") |
|
sort_direction = dataset_config.get("sort_direction", "ascending") |
|
|
|
|
|
logger.info(f"Sorting dataset by {sort_field} in {sort_direction} order") |
|
if sort_direction == "ascending": |
|
dataset = dataset.sort(sort_field) |
|
else: |
|
dataset = dataset.sort(sort_field, reverse=True) |
|
|
|
|
|
if "shuffle_seed" in dataset_config: |
|
shuffle_seed = dataset_config.get("shuffle_seed") |
|
logger.info(f"Shuffling dataset with seed {shuffle_seed}") |
|
dataset = dataset.shuffle(seed=shuffle_seed) |
|
|
|
|
|
logger.info(f"Dataset loaded with {len(dataset)} entries") |
|
logger.info(f"Dataset columns: {dataset.column_names}") |
|
|
|
|
|
if len(dataset) > 0: |
|
sample = dataset[0] |
|
logger.info(f"Sample entry structure: {list(sample.keys())}") |
|
if 'conversations' in sample: |
|
logger.info(f"Sample conversations structure: {sample['conversations'][:1]}") |
|
|
|
return dataset |
|
|
|
    except Exception as e:
        logger.error(f"Error loading dataset: {str(e)}")
        print(f"Failed to load dataset: {dataset_name}")
        print("Make sure the dataset exists and is accessible.")
        print("If it's a private dataset, ensure your HF_TOKEN has access to it.")
        raise
|
|
|
def tokenize_string(text, tokenizer): |
|
"""Tokenize a string using the provided tokenizer""" |
|
if not text: |
|
return [] |
|
|
|
|
|
tokens = tokenizer.encode(text, add_special_tokens=False) |
|
return tokens |
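
# NOTE: tokenize_string is not called elsewhere in this script; PreTokenizedCollator
# below calls tokenizer.encode directly when it needs to encode fallback text.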
|
|
|
|
|
class PreTokenizedCollator(DataCollatorMixin): |
|
""" |
|
Data collator for pre-tokenized datasets. |
|
Expects input_ids and labels already tokenized. |
|
""" |
|
def __init__(self, pad_token_id=0, tokenizer=None): |
|
self.pad_token_id = pad_token_id |
|
self.tokenizer = tokenizer |
|
|
|
def __call__(self, features): |
|
|
|
        # Structure diagnostics run on every batch, so keep them at debug level.
        if len(features) > 0:
            logger.debug(f"Sample feature keys: {list(features[0].keys())}")
|
|
|
|
|
processed_features = [] |
|
for feature in features: |
|
|
|
if 'input_ids' not in feature and 'conversations' in feature: |
|
|
|
conversations = feature['conversations'] |
|
|
|
|
|
logger.info(f"Conversations type: {type(conversations)}") |
|
if isinstance(conversations, list) and len(conversations) > 0: |
|
logger.info(f"First conversation type: {type(conversations[0])}") |
|
logger.info(f"First conversation: {conversations[0]}") |
|
|
|
|
|
if isinstance(conversations, list) and len(conversations) > 0: |
|
|
|
if isinstance(conversations[0], dict) and 'content' in conversations[0]: |
|
content = conversations[0]['content'] |
|
logger.info(f"Found content field: {type(content)}") |
|
|
|
|
|
if isinstance(content, str) and self.tokenizer: |
|
logger.info(f"Tokenizing string content: {content[:50]}...") |
|
feature['input_ids'] = self.tokenizer.encode(content, add_special_tokens=False) |
|
|
|
elif isinstance(content, list) and all(isinstance(x, int) for x in content): |
|
feature['input_ids'] = content |
|
|
|
else: |
|
logger.warning(f"Unexpected content format: {type(content)}") |
|
|
|
|
|
elif isinstance(conversations[0], dict) and 'input_ids' in conversations[0]: |
|
feature['input_ids'] = conversations[0]['input_ids'] |
|
|
|
|
|
elif all(isinstance(x, int) for x in conversations): |
|
feature['input_ids'] = conversations |
|
|
|
|
|
elif all(isinstance(x, str) for x in conversations) and self.tokenizer: |
|
|
|
full_text = " ".join(conversations) |
|
feature['input_ids'] = self.tokenizer.encode(full_text, add_special_tokens=False) |
|
|
|
|
|
if 'input_ids' in feature: |
|
|
|
if isinstance(feature['input_ids'], str) and self.tokenizer: |
|
logger.info(f"Converting string input_ids to tokens: {feature['input_ids'][:50]}...") |
|
feature['input_ids'] = self.tokenizer.encode(feature['input_ids'], add_special_tokens=False) |
|
|
|
elif not isinstance(feature['input_ids'], list): |
|
                    try:
                        feature['input_ids'] = list(feature['input_ids'])
                    except (TypeError, ValueError):
                        logger.error(f"Could not convert input_ids to list: {type(feature['input_ids'])}")
|
|
|
processed_features.append(feature) |
|
|
|
|
|
if len(processed_features) > 0 and 'input_ids' not in processed_features[0]: |
|
logger.error(f"Could not find input_ids in features. Available keys: {list(processed_features[0].keys())}") |
|
if 'conversations' in processed_features[0]: |
|
logger.error(f"Conversations structure: {processed_features[0]['conversations'][:1]}") |
|
raise ValueError("Could not find input_ids in dataset. Please check dataset structure.") |
|
|
|
|
|
        # Pad every sequence to the longest in the batch: padded positions keep pad_token_id
        # for input_ids, 0 for attention_mask, and -100 for labels (ignored by the loss).
        batch_max_len = max(len(x["input_ids"]) for x in processed_features)
|
batch = { |
|
"input_ids": torch.ones((len(processed_features), batch_max_len), dtype=torch.long) * self.pad_token_id, |
|
"attention_mask": torch.zeros((len(processed_features), batch_max_len), dtype=torch.long), |
|
"labels": torch.ones((len(processed_features), batch_max_len), dtype=torch.long) * -100 |
|
} |
|
|
|
|
|
for i, feature in enumerate(processed_features): |
|
input_ids = feature["input_ids"] |
|
seq_len = len(input_ids) |
|
|
|
|
|
if isinstance(input_ids, list): |
|
input_ids = torch.tensor(input_ids, dtype=torch.long) |
|
|
|
|
|
batch["input_ids"][i, :seq_len] = input_ids |
|
batch["attention_mask"][i, :seq_len] = 1 |
|
|
|
|
|
if "labels" in feature: |
|
labels = feature["labels"] |
|
if isinstance(labels, list): |
|
labels = torch.tensor(labels, dtype=torch.long) |
|
batch["labels"][i, :len(labels)] = labels |
|
else: |
|
batch["labels"][i, :seq_len] = input_ids |
|
|
|
return batch |
|
|
|
def create_training_marker(output_dir): |
|
"""Create a marker file to indicate training is active""" |
|
|
|
with open("TRAINING_ACTIVE", "w") as f: |
|
f.write(f"Training active in {output_dir}") |
|
|
|
|
|
os.makedirs(output_dir, exist_ok=True) |
|
with open(os.path.join(output_dir, "RESEARCH_TRAINING_ONLY"), "w") as f: |
|
f.write("This model is for research training only. No interactive outputs.") |
|
|
|
def remove_training_marker(): |
|
"""Remove the training marker file""" |
|
if os.path.exists("TRAINING_ACTIVE"): |
|
os.remove("TRAINING_ACTIVE") |
|
logger.info("Removed training active marker") |
|
|
|
def load_model_safely(model_name, max_seq_length, dtype=None): |
|
""" |
|
Load the model in a safe way that works with Qwen models |
|
by trying different loading strategies. |
|
""" |
|
try: |
|
logger.info(f"Attempting to load model with unsloth optimizations: {model_name}") |
|
|
|
try: |
|
|
|
logger.info("Loading model with flash attention DISABLED") |
|
model, tokenizer = FastLanguageModel.from_pretrained( |
|
model_name=model_name, |
|
max_seq_length=max_seq_length, |
|
dtype=dtype, |
|
load_in_4bit=True, |
|
use_flash_attention=False |
|
) |
|
logger.info("Model loaded successfully with unsloth with 4-bit quantization and flash attention disabled") |
|
return model, tokenizer |
|
|
|
        except TypeError as e:
            # Some unsloth builds do not accept the use_flash_attention kwarg; retry
            # without it (still loading in 4-bit) instead of repeating the bad argument.
            if "unexpected keyword argument" in str(e):
                logger.warning(f"Unsloth loading error with 4-bit: {e}")
                logger.info("Trying alternative loading method for Qwen model...")

                model, tokenizer = FastLanguageModel.from_pretrained(
                    model_name=model_name,
                    max_seq_length=max_seq_length,
                    dtype=dtype,
                    load_in_4bit=True,
                )
                logger.info("Model loaded successfully with unsloth using alternative method")
                return model, tokenizer
            else:
                raise
|
|
|
except Exception as e: |
|
|
|
logger.warning(f"Unsloth loading failed: {e}") |
|
logger.info("Falling back to standard Hugging Face loading...") |
|
|
|
|
|
config = AutoConfig.from_pretrained(model_name, trust_remote_code=True) |
|
if hasattr(config, "use_flash_attention"): |
|
config.use_flash_attention = False |
|
logger.info("Disabled flash attention in model config") |
|
|
|
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) |
|
        # NOTE: newer transformers releases prefer quantization_config=BitsAndBytesConfig(load_in_4bit=True)
        # over the bare load_in_4bit flag; the flag is kept here to match the original setup.
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            config=config,
            device_map="auto",
            torch_dtype=dtype or torch.float16,
            load_in_4bit=True
        )
|
logger.info("Model loaded successfully with standard HF loading and flash attention disabled") |
|
return model, tokenizer |
|
|
|
def train(config_path, dataset_name, output_dir): |
|
"""Main training function - RESEARCH TRAINING PHASE ONLY""" |
|
|
|
load_dotenv() |
|
config = load_config(config_path) |
|
|
|
|
|
model_config = config.get("model_config", {}) |
|
training_config = config.get("training_config", {}) |
|
hardware_config = config.get("hardware_config", {}) |
|
lora_config = config.get("lora_config", {}) |
|
dataset_config = config.get("dataset_config", {}) |
|
|
|
|
|
hardware_config["use_flash_attention"] = False |
|
logger.info("Flash attention has been DISABLED due to GPU compatibility issues") |
|
|
|
|
|
training_phase_only = dataset_config.get("training_phase_only", True) |
|
    if not training_phase_only:
        logger.warning("This script is meant for the research training phase only")
        logger.warning("Overriding training_phase_only to True")
        training_phase_only = True
|
|
|
|
|
logger.info("IMPORTANT: Using pre-tokenized dataset - No tokenization will be performed") |
|
|
|
|
|
output_dir = output_dir or training_config.get("output_dir", "fine_tuned_model") |
|
os.makedirs(output_dir, exist_ok=True) |
|
|
|
|
|
create_training_marker(output_dir) |
|
|
|
try: |
|
|
|
logger.info("RESEARCH TRAINING PHASE ACTIVE - No output generation") |
|
logger.info("Configuration Summary:") |
|
model_name = model_config.get("model_name_or_path") |
|
logger.info(f"Model: {model_name}") |
|
logger.info(f"Dataset: {dataset_name if dataset_name != 'phi4-cognitive-dataset' else DEFAULT_DATASET}") |
|
logger.info(f"Output directory: {output_dir}") |
|
logger.info("IMPORTANT: Using already 4-bit quantized model - not re-quantizing") |
|
|
|
|
|
dataset = load_and_prepare_dataset(dataset_name, config) |
|
|
|
|
|
logger.info("Loading tokenizer (for model initialization only, not for tokenizing data)") |
|
tokenizer = AutoTokenizer.from_pretrained( |
|
model_name, |
|
trust_remote_code=True |
|
) |
|
tokenizer.pad_token = tokenizer.eos_token |
|
|
|
|
|
logger.info("Initializing model with unsloth (preserving 4-bit quantization)") |
|
max_seq_length = training_config.get("max_seq_length", 2048) |
|
|
|
|
|
logger.info("Creating LoRA configuration") |
|
lora_config_obj = LoraConfig( |
|
r=lora_config.get("r", 16), |
|
lora_alpha=lora_config.get("lora_alpha", 32), |
|
lora_dropout=lora_config.get("lora_dropout", 0.05), |
|
bias=lora_config.get("bias", "none"), |
|
target_modules=lora_config.get("target_modules", ["q_proj", "k_proj", "v_proj", "o_proj"]) |
|
) |
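
        # The default target_modules above cover only the attention projections; for
        # Qwen-style models the MLP projections ("gate_proj", "up_proj", "down_proj")
        # are also common LoRA targets and can be supplied via lora_config in the JSON.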
|
|
|
|
|
logger.info("Loading pre-quantized model safely") |
|
dtype = torch.float16 if hardware_config.get("fp16", True) else None |
|
model, tokenizer = load_model_safely(model_name, max_seq_length, dtype) |
|
|
|
|
|
logger.info("Applying LoRA to model") |
|
|
|
|
|
logger.info("Using standard PEFT method to apply LoRA") |
|
from peft import get_peft_model |
|
model = get_peft_model(model, lora_config_obj) |
|
logger.info("Successfully applied LoRA with standard PEFT") |
|
|
|
|
|
logger.info("Using pre-tokenized dataset - skipping tokenization step") |
|
training_dataset = dataset |
|
|
|
|
|
reports = [] |
|
if TENSORBOARD_AVAILABLE: |
|
reports.append("tensorboard") |
|
logger.info("Tensorboard available and enabled for reporting") |
|
else: |
|
logger.warning("Tensorboard not available - metrics won't be logged to tensorboard") |
|
|
|
if os.getenv("WANDB_API_KEY"): |
|
reports.append("wandb") |
|
logger.info("Wandb API key found, enabling wandb reporting") |
|
|
|
|
|
if not reports: |
|
reports = ["none"] |
|
logger.warning("No reporting backends available - training metrics won't be logged") |
|
|
|
|
|
training_args = TrainingArguments( |
|
output_dir=output_dir, |
|
num_train_epochs=training_config.get("num_train_epochs", 3), |
|
per_device_train_batch_size=training_config.get("per_device_train_batch_size", 2), |
|
gradient_accumulation_steps=training_config.get("gradient_accumulation_steps", 4), |
|
learning_rate=training_config.get("learning_rate", 2e-5), |
|
lr_scheduler_type=training_config.get("lr_scheduler_type", "cosine"), |
|
warmup_ratio=training_config.get("warmup_ratio", 0.03), |
|
weight_decay=training_config.get("weight_decay", 0.01), |
|
optim=training_config.get("optim", "adamw_torch"), |
|
logging_steps=training_config.get("logging_steps", 10), |
|
save_steps=training_config.get("save_steps", 200), |
|
save_total_limit=training_config.get("save_total_limit", 3), |
|
fp16=hardware_config.get("fp16", True), |
|
bf16=hardware_config.get("bf16", False), |
|
max_grad_norm=training_config.get("max_grad_norm", 0.3), |
|
report_to=reports, |
|
logging_first_step=training_config.get("logging_first_step", True), |
|
disable_tqdm=training_config.get("disable_tqdm", False), |
|
|
|
            # Keep non-model columns (e.g. 'conversations') so the custom collator receives them
            remove_unused_columns=False
|
) |
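
        # With the defaults above (per_device_train_batch_size=2, gradient_accumulation_steps=4),
        # each optimizer step sees an effective batch of 2 * 4 = 8 sequences per device.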
|
|
|
|
|
trainer = Trainer( |
|
model=model, |
|
args=training_args, |
|
train_dataset=training_dataset, |
|
data_collator=PreTokenizedCollator(pad_token_id=tokenizer.pad_token_id, tokenizer=tokenizer), |
|
) |
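
        # The custom collator builds batches directly from the pre-tokenized columns
        # (falling back to encoding only if a feature arrives as raw text).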
|
|
|
|
|
logger.info("Starting training - RESEARCH PHASE ONLY") |
|
trainer.train() |
|
|
|
|
|
logger.info(f"Saving model to {output_dir}") |
|
trainer.save_model(output_dir) |
|
|
|
|
|
lora_output_dir = os.path.join(output_dir, "lora_adapter") |
|
model.save_pretrained(lora_output_dir) |
|
logger.info(f"Saved LoRA adapter to {lora_output_dir}") |
|
|
|
|
|
tokenizer_output_dir = os.path.join(output_dir, "tokenizer") |
|
tokenizer.save_pretrained(tokenizer_output_dir) |
|
logger.info(f"Saved tokenizer to {tokenizer_output_dir}") |
|
|
|
|
|
with open(os.path.join(output_dir, "training_config.json"), "w") as f: |
|
json.dump(config, f, indent=2) |
|
|
|
logger.info("Training complete - RESEARCH PHASE ONLY") |
|
return output_dir |
|
|
|
finally: |
|
|
|
remove_training_marker() |
|
|
|
if __name__ == "__main__": |
|
parser = argparse.ArgumentParser(description="Fine-tune Unsloth/DeepSeek-R1-Distill-Qwen-14B-4bit model (RESEARCH ONLY)") |
|
parser.add_argument("--config", type=str, default="transformers_config.json", |
|
help="Path to the transformers config JSON file") |
|
parser.add_argument("--dataset", type=str, default="phi4-cognitive-dataset", |
|
help="Dataset name or path") |
|
parser.add_argument("--output_dir", type=str, default=None, |
|
help="Output directory for the fine-tuned model") |
|
|
|
args = parser.parse_args() |
|
|
|
|
|
try: |
|
output_path = train(args.config, args.dataset, args.output_dir) |
|
print(f"Research training completed. Model saved to: {output_path}") |
|
except Exception as e: |
|
logger.error(f"Training failed: {str(e)}") |
|
remove_training_marker() |
|
raise |