panikos committed on
Commit
7f92701
·
verified ·
1 Parent(s): dc53b08

Upload test_training_llama_small_batch.py with huggingface_hub

Files changed (1)
  1. test_training_llama_small_batch.py +84 -0
test_training_llama_small_batch.py ADDED
@@ -0,0 +1,84 @@
+ # /// script
+ # dependencies = ["trl>=0.12.0", "peft>=0.7.0", "trackio", "transformers>=4.40.0", "datasets>=2.18.0", "accelerate>=0.28.0"]
+ # ///
+
+ from datasets import load_dataset
+ from peft import LoraConfig
+ from trl import SFTTrainer, SFTConfig
+ import trackio
+
+ print("=" * 80)
+ print("TEST RUN: Biomedical Llama Fine-Tuning (100 examples)")
+ print("=" * 80)
+
+ print("\n[1/4] Loading dataset...")
+ dataset = load_dataset("panikos/biomedical-llama-training")
+
+ # Use first 100 examples for test
+ train_dataset = dataset["train"].select(range(100))
+ eval_dataset = dataset["validation"].select(range(20))
+
+ print(f"  Train: {len(train_dataset)} examples")
+ print(f"  Eval: {len(eval_dataset)} examples")
+
+ print("\n[2/4] Configuring LoRA...")
+ lora_config = LoraConfig(
+     r=16,
+     lora_alpha=32,
+     target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
+     lora_dropout=0.05,
+     bias="none",
+     task_type="CAUSAL_LM"
+ )
+ print("  LoRA rank: 16, alpha: 32")
+
+ print("\n[3/4] Initializing trainer...")
+ trainer = SFTTrainer(
+     model="meta-llama/Llama-3.1-8B-Instruct",
+     train_dataset=train_dataset,
+     eval_dataset=eval_dataset,
+     peft_config=lora_config,
+     args=SFTConfig(
+         output_dir="llama-biomedical-test",
+         num_train_epochs=1,
+         per_device_train_batch_size=1,  # REDUCED from 2 to 1
+         gradient_accumulation_steps=8,  # INCREASED from 4 to 8
+         learning_rate=2e-4,
+         lr_scheduler_type="cosine",
+         warmup_ratio=0.1,
+         logging_steps=5,
+         eval_strategy="steps",
+         eval_steps=20,
+         save_strategy="epoch",
+         push_to_hub=True,
+         hub_model_id="panikos/llama-biomedical-test",
+         hub_private_repo=True,
+         bf16=True,
+         gradient_checkpointing=False,  # DISABLED for stability
+         report_to="trackio",
+         project="biomedical-llama-training",
+         run_name="test-run-100-examples-v3"
+     )
+ )
+
+ print("\n[4/4] Starting training...")
+ print("  Model: meta-llama/Llama-3.1-8B-Instruct")
+ print("  Method: SFT with LoRA")
+ print("  Epochs: 1")
+ print("  Batch size: 1 x 8 = 8 (effective) - optimized for memory")
+ print("  Gradient checkpointing: DISABLED")
+ print()
+
+ trainer.train()
+
+ print("\n" + "=" * 80)
+ print("Pushing model to Hub...")
+ print("=" * 80)
+ trainer.push_to_hub()
+
+ print("\n" + "=" * 80)
+ print("TEST COMPLETE!")
+ print("=" * 80)
+ print("\nModel: https://huggingface.co/panikos/llama-biomedical-test")
+ print("Dashboard: https://panikos-trackio.hf.space/")
+ print("Ready for full production training!")