ericflo committed
Commit
86b5e8a
1 Parent(s): 5fa2176

Create sft.py

Files changed (1)
  1. sft.py +186 -0
sft.py ADDED
@@ -0,0 +1,186 @@
+ # flake8: noqa
+ """
+ pip install -U transformers accelerate trl wandb wheel packaging peft bitsandbytes liger-kernel flash_attn
+
+ python sft.py \
+     --run_name="llama3.1-8b-continued2" \
+     --model_name_or_path="meta-llama/Meta-Llama-3.1-8B" \
+     --dataset_name="mlfoundations/dclm-baseline-1.0-parquet,mlabonne/FineTome-100k" \
+     --report_to="wandb" \
+     --optim="adamw_torch_fused" \
+     --lr_scheduler_type="cosine" \
+     --max_steps=10000000 \
+     --max_seq_length=64000 \
+     --learning_rate=0.0001 \
+     --attn_implementation="flash_attention_2" \
+     --save_strategy="steps" \
+     --save_steps 50 \
+     --save_total_limit=10 \
+     --per_device_train_batch_size=1 \
+     --gradient_accumulation_steps=8 \
+     --logging_steps=1 \
+     --num_train_epochs=1 \
+     --load_in_4bit \
+     --push_to_hub \
+     --hub_model_id="ericflo/Llama-3.1-8B-ContinuedTraining2-LoRA" \
+     --hub_strategy="all_checkpoints" \
+     --gradient_checkpointing \
+     --use_peft \
+     --lora_r=128 \
+     --lora_alpha=256 \
+     --lora_dropout=0.05 \
+     --use_liger=true \
+     --packing=true \
+     --torch_dtype="bfloat16" \
+     --output_dir="continuedtraining2_output"
+ """
+
+ import logging
+ import os
+ import random
+ from contextlib import nullcontext
+
+ from trl.commands.cli_utils import init_zero_verbose, SFTScriptArguments, TrlParser
+ from trl.env_utils import strtobool
+
+ TRL_USE_RICH = strtobool(os.getenv("TRL_USE_RICH", "0"))
+
+ if TRL_USE_RICH:
+     init_zero_verbose()
+     FORMAT = "%(message)s"
+
+     from rich.console import Console
+     from rich.logging import RichHandler
+
+ import torch
+ from datasets import load_dataset, interleave_datasets
+
+ from tqdm.rich import tqdm
+ from transformers import AutoTokenizer
+
+ from trl import (
+     ModelConfig,
+     RichProgressCallback,
+     SFTConfig,
+     SFTTrainer,
+     get_peft_config,
+     get_quantization_config,
+     get_kbit_device_map,
+ )
+
+ tqdm.pandas()
+
+ if TRL_USE_RICH:
+     logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()], level=logging.INFO)
+
+ print("Loading tokenizers...")
+ METAML_TOK = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct")
+ CHATML_TOK = AutoTokenizer.from_pretrained("NousResearch/Hermes-3-Llama-3.1-8B")
+ print("Tokenizers loaded.")
+
+ def formatting_prompts_func(example):
+     try:
+         language = example.get('language')
+         url = example.get('url')
+         text = example.get('text')
+         title = example.get('title')
+         conversations = example.get('conversations')
+         source = example.get('source')
+         repo_name = example.get('max_stars_repo_name')
+         repo_path = example.get('max_stars_repo_path')
+         star_count = example.get('max_stars_count')
+         content = example.get('content')
+         # mlfoundations/dclm-baseline-1.0-parquet
+         if language and url and text:
+             return f'{language} {url} {text}'
+         elif title and url and text:  # wikimedia/wikipedia
+             return f'{title} {url} {text}'
+         elif conversations:  # mlabonne/FineTome-100k
+             rows = [{
+                 "role": {"system": "system", "gpt": "assistant", "human": "user"}[row["from"]],
+                 "content": row["value"],
+             } for row in conversations]
+             tok = random.choice([METAML_TOK, CHATML_TOK])
+             return f'{source} {tok.apply_chat_template(rows, tokenize=False)}'
+         elif "max_stars_repo_name" in example:  # bigcode/starcoderdata
+             return f'{example["max_stars_repo_name"]} {example["max_stars_repo_path"]} {example["max_stars_count"]} {example["content"]}'
+         print(f"Unknown example: {example}")
+         raise ValueError(f"Unknown example: {example}")
+     except Exception as e:
+         print(e)
+         raise e
+
+ if __name__ == "__main__":
+     parser = TrlParser((SFTScriptArguments, SFTConfig, ModelConfig))
+     args, training_args, model_config = parser.parse_args_and_config()
+
+     # Force use our print callback
+     if TRL_USE_RICH:
+         training_args.disable_tqdm = True
+         console = Console()
+
+     ################
+     # Model init kwargs & Tokenizer
+     ################
+     model_config.lora_target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
+     quantization_config = get_quantization_config(model_config)
+     model_kwargs = dict(
+         revision=model_config.model_revision,
+         trust_remote_code=model_config.trust_remote_code,
+         attn_implementation=model_config.attn_implementation,
+         torch_dtype=model_config.torch_dtype,
+         use_cache=False if training_args.gradient_checkpointing else True,
+         device_map=get_kbit_device_map() if quantization_config is not None else None,
+         quantization_config=quantization_config,
+     )
+     training_args.model_init_kwargs = model_kwargs
+     tokenizer = AutoTokenizer.from_pretrained(
+         model_config.model_name_or_path, trust_remote_code=model_config.trust_remote_code, use_fast=True
+     )
+     tokenizer.pad_token = tokenizer.eos_token
+
+     ################
+     # Dataset
+     ################
+     dataset_names = args.dataset_name.split(',')
+     train_datasets = [load_dataset(name, split="train", streaming=True) for name in dataset_names]
+     train_datasets.append(load_dataset("bigcode/starcoderdata", data_dir="python", split="train", streaming=True))
+     train_datasets.append(load_dataset("wikimedia/wikipedia", "20231101.en", split="train", streaming=True))
+     train_datasets.append(load_dataset("wikimedia/wikipedia", "20231101.es", split="train", streaming=True))
+     train_datasets.append(load_dataset("wikimedia/wikipedia", "20231101.fr", split="train", streaming=True))
+     interleaved_dataset = interleave_datasets(train_datasets)
+     eval_dataset = interleaved_dataset.take(100)
+     train_dataset = interleaved_dataset.skip(100)
+
+     print(train_dataset)
+     print(eval_dataset)
+
+     ################
+     # Optional rich context managers
+     ################
+     init_context = nullcontext() if not TRL_USE_RICH else console.status("[bold green]Initializing the SFTTrainer...")
+     save_context = (
+         nullcontext()
+         if not TRL_USE_RICH
+         else console.status(f"[bold green]Training completed! Saving the model to {training_args.output_dir}")
+     )
+
+     ################
+     # Training
+     ################
+     with init_context:
+         trainer = SFTTrainer(
+             model=model_config.model_name_or_path,
+             args=training_args,
+             train_dataset=train_dataset,
+             eval_dataset=eval_dataset,
+             tokenizer=tokenizer,
+             peft_config=get_peft_config(model_config),
+             callbacks=[RichProgressCallback] if TRL_USE_RICH else None,
+             formatting_func=formatting_prompts_func,
+         )
+
+     trainer.train()
+
+     with save_context:
+         trainer.save_model(training_args.output_dir)
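For a quick smoke test after training, the pushed adapter can be pulled back from the Hub. The snippet below is a minimal sketch and not part of this commit: it assumes the adapter repo matches --hub_model_id above, that you have access to the gated Llama 3.1 base weights, and it uses peft's AutoPeftModelForCausalLM to load the base model and adapter together; the prompt is a made-up example mirroring the "{language} {url} {text}" layout produced by formatting_prompts_func.

import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# Load base model + LoRA adapter in one call; repo id taken from --hub_model_id above.
model = AutoPeftModelForCausalLM.from_pretrained(
    "ericflo/Llama-3.1-8B-ContinuedTraining2-LoRA",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3.1-8B")

# Hypothetical prompt in the DCLM-style "{language} {url} {text}" format seen during training.
prompt = "en https://example.com/article Continued pretraining of language models"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))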