base_model: NousResearch/Meta-Llama-3-8B
model_type: LlamaForCausalLM
tokenizer_type: AutoTokenizer # PreTrainedTokenizerFast

load_in_8bit: false
load_in_4bit: true
strict: false

datasets:
  - path: alexandrainst/lexdk-open
    type: completion
  - path: syvai/wiki-da
    type: completion
dataset_prepared_path: last_run_prepared
val_set_size: 0.001
output_dir: ./out/qlora-llama3-8b

adapter: qlora
lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:

wandb_project: llama-3
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

sequence_len: 8192
sample_packing: true
pad_to_sequence_len: true
eval_sample_packing: false

gradient_accumulation_steps: 1
micro_batch_size: 1
num_epochs: 2
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.0002

train_on_inputs: false
group_by_length: false
bf16: true
fp16: false
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 10
evals_per_epoch: 10
eval_table_size:
saves_per_epoch: 2
debug:
deepspeed:
weight_decay: 0.1
fsdp:
fsdp_config:
special_tokens:
  pad_token: <|end_of_text|>
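
# Usage sketch for this config (assumptions: axolotl and accelerate are
# installed, and the file is saved as qlora-llama3-8b.yml; that filename is
# illustrative, not prescribed by the config itself):
#
#   # optionally tokenize and pack the datasets ahead of time
#   python -m axolotl.cli.preprocess qlora-llama3-8b.yml
#
#   # launch QLoRA fine-tuning
#   accelerate launch -m axolotl.cli.train qlora-llama3-8b.yml
#
#   # merge the trained adapter back into the base model
#   python -m axolotl.cli.merge_lora qlora-llama3-8b.yml --lora_model_dir="./out/qlora-llama3-8b"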