danielhanchen committed
Commit 4efb21a
1 Parent(s): 7921828

Upload config

Files changed (1): config.json +3 -1
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "meta-llama/Meta-Llama-3.1-405B",
+  "_name_or_path": "unsloth/Meta-Llama-3.1-405B",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -17,6 +17,7 @@
   "num_attention_heads": 128,
   "num_hidden_layers": 126,
   "num_key_value_heads": 8,
+  "pad_token_id": 128004,
   "pretraining_tp": 1,
   "quantization_config": {
     "_load_in_4bit": true,
@@ -45,6 +46,7 @@
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.44.2",
+  "unsloth_version": "2024.9",
   "use_cache": true,
   "vocab_size": 128256
 }
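
For reference, a minimal sketch (not part of this commit) of how the updated fields surface when the config is loaded with Hugging Face transformers. It assumes the file is hosted under the repo id "unsloth/Meta-Llama-3.1-405B", taken from the new "_name_or_path" value above.

# Minimal sketch, assuming the config lives at the repo id
# "unsloth/Meta-Llama-3.1-405B" (the new "_name_or_path" value above).
from transformers import AutoConfig

config = AutoConfig.from_pretrained("unsloth/Meta-Llama-3.1-405B")

# Fields touched by this commit:
print(config.pad_token_id)                        # 128004 (newly added padding token id)
print(getattr(config, "unsloth_version", None))   # "2024.9" if kept as an extra attribute

# Unchanged fields from the same file, for context:
print(config.torch_dtype)                         # torch.bfloat16
print(config.vocab_size)                          # 128256

Loading the weights with AutoModelForCausalLM.from_pretrained would likewise pick up the bundled 4-bit quantization_config from this file, so no separate BitsAndBytesConfig should be needed.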