thangvip committed
Commit 69ce9c4
1 parent: 076a778

push model

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. README.md +61 -0
  2. added_tokens.json +5 -0
  3. all_results.json +8 -0
  4. checkpoint-7568/added_tokens.json +5 -0
  5. checkpoint-7568/config.json +28 -0
  6. checkpoint-7568/generation_config.json +6 -0
  7. checkpoint-7568/merges.txt +0 -0
  8. checkpoint-7568/model.safetensors +3 -0
  9. checkpoint-7568/optimizer.pt +3 -0
  10. checkpoint-7568/rng_state.pth +3 -0
  11. checkpoint-7568/scheduler.pt +3 -0
  12. checkpoint-7568/special_tokens_map.json +20 -0
  13. checkpoint-7568/tokenizer.json +0 -0
  14. checkpoint-7568/tokenizer_config.json +43 -0
  15. checkpoint-7568/trainer_state.json +126 -0
  16. checkpoint-7568/training_args.bin +3 -0
  17. checkpoint-7568/vocab.json +0 -0
  18. checkpoint-7584/added_tokens.json +5 -0
  19. checkpoint-7584/config.json +28 -0
  20. checkpoint-7584/generation_config.json +6 -0
  21. checkpoint-7584/merges.txt +0 -0
  22. checkpoint-7584/model.safetensors +3 -0
  23. checkpoint-7584/optimizer.pt +3 -0
  24. checkpoint-7584/rng_state.pth +3 -0
  25. checkpoint-7584/scheduler.pt +3 -0
  26. checkpoint-7584/special_tokens_map.json +20 -0
  27. checkpoint-7584/tokenizer.json +0 -0
  28. checkpoint-7584/tokenizer_config.json +43 -0
  29. checkpoint-7584/trainer_state.json +126 -0
  30. checkpoint-7584/training_args.bin +3 -0
  31. checkpoint-7584/vocab.json +0 -0
  32. checkpoint-7600/added_tokens.json +5 -0
  33. checkpoint-7600/config.json +28 -0
  34. checkpoint-7600/generation_config.json +6 -0
  35. checkpoint-7600/merges.txt +0 -0
  36. checkpoint-7600/model.safetensors +3 -0
  37. checkpoint-7600/optimizer.pt +3 -0
  38. checkpoint-7600/rng_state.pth +3 -0
  39. checkpoint-7600/scheduler.pt +3 -0
  40. checkpoint-7600/special_tokens_map.json +20 -0
  41. checkpoint-7600/tokenizer.json +0 -0
  42. checkpoint-7600/tokenizer_config.json +43 -0
  43. checkpoint-7600/trainer_state.json +126 -0
  44. checkpoint-7600/training_args.bin +3 -0
  45. checkpoint-7600/vocab.json +0 -0
  46. checkpoint-7616/added_tokens.json +5 -0
  47. checkpoint-7616/config.json +28 -0
  48. checkpoint-7616/generation_config.json +6 -0
  49. checkpoint-7616/merges.txt +0 -0
  50. checkpoint-7616/model.safetensors +3 -0
README.md ADDED
@@ -0,0 +1,61 @@
+ ---
+ license: apache-2.0
+ base_model: sail/Sailor-0.5B
+ tags:
+ - trl
+ - sft
+ - generated_from_trainer
+ model-index:
+ - name: vwen-0.5
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # vwen-0.5
+
+ This model is a fine-tuned version of [sail/Sailor-0.5B](https://huggingface.co/sail/Sailor-0.5B) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.8915
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0001
+ - train_batch_size: 1
+ - eval_batch_size: 1
+ - seed: 42
+ - gradient_accumulation_steps: 16
+ - total_train_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 1
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 1.8915        | 0.11  | 7644 | 1.8915          |
+
+
+ ### Framework versions
+
+ - Transformers 4.39.0
+ - Pytorch 2.2.1+cu121
+ - Datasets 2.18.0
+ - Tokenizers 0.15.2
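
The model card above leaves usage unspecified ("More information needed"). For readers who want to try the checkpoint, here is a minimal loading sketch. It assumes the hypothetical repo id `thangvip/vwen-0.5` (inferred from the committer and model name, not confirmed by this commit) and uses only standard `transformers` APIs:

```python
# Minimal usage sketch for the model described in the card above.
# Assumption: the checkpoint lives at the hypothetical repo id
# "thangvip/vwen-0.5"; substitute the actual path before running.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "thangvip/vwen-0.5"  # hypothetical, inferred from the commit
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# tokenizer_config.json in this commit ships a ChatML-style chat_template,
# so apply_chat_template builds a prompt in the declared format.
messages = [{"role": "user", "content": "Xin chào! Bạn là ai?"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
)
output = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```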
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 0.11,
+   "train_loss": 1.9877741447859942,
+   "train_runtime": 107245.5846,
+   "train_samples": 1156437,
+   "train_samples_per_second": 10.783,
+   "train_steps_per_second": 0.674
+ }
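
A note on reading `all_results.json` above: the run stopped at epoch 0.11, yet runtime times samples-per-second reproduces the full `train_samples` count, which suggests the Trainer scales these throughput fields to the scheduled one-epoch run rather than to the samples actually seen. This is an inference from the numbers, not a documented fact; a quick check with values copied from the JSON:

```python
# Values copied from all_results.json and trainer_state.json in this commit.
# The arithmetic suggests throughput is computed against the full one-epoch
# schedule, not the 0.11 epochs actually trained (an inference, not a fact).
train_runtime = 107245.5846            # seconds
train_samples_per_second = 10.783
train_steps_per_second = 0.674
train_samples = 1_156_437
max_steps = 72_277                     # from trainer_state.json below

print(train_runtime * train_samples_per_second)  # ~1,156,429 ≈ train_samples
print(train_runtime * train_steps_per_second)    # ~72,283   ≈ max_steps
```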
checkpoint-7568/added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
checkpoint-7568/config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "sail/Sailor-0.5B",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151643,
+   "hidden_act": "silu",
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 2816,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 21,
+   "model_type": "qwen2",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 16,
+   "rms_norm_eps": 1e-06,
+   "rope_theta": 1000000.0,
+   "sliding_window": 32768,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.39.0",
+   "use_cache": false,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
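
The config above pins down the architecture, so a rough parameter count can be cross-checked against the 2,478,313,760-byte float32 `model.safetensors` pointer below. This is a back-of-the-envelope sketch assuming standard Qwen2 layer shapes (q/k/v biases, untied embeddings per `tie_word_embeddings: false`); it ignores the small safetensors header overhead:

```python
# Rough parameter count for the Qwen2 config above (hidden 1024, 24 layers,
# vocab 151936, intermediate 2816, untied embeddings).
h, layers, vocab, inter = 1024, 24, 151936, 2816

embed = 2 * vocab * h                 # token embeddings + untied lm_head
attn = 4 * h * h + 3 * h              # q/k/v/o projections; q/k/v biases (Qwen2-style)
mlp = 3 * h * inter                   # gate, up, down projections
norms = 2 * h                         # two RMSNorms per layer
total = embed + layers * (attn + mlp + norms) + h  # + final norm

print(f"~{total/1e6:.0f}M params, ~{total*4/1e9:.3f} GB in float32")
# -> ~620M params, ~2.478 GB: consistent with the safetensors size below
```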
checkpoint-7568/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token_id": 151643,
+   "eos_token_id": 151643,
+   "max_new_tokens": 2048,
+   "transformers_version": "4.39.0"
+ }
checkpoint-7568/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-7568/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79cb7261ef90180a443055a42335fb04910cbfffc2a1809d29714b212885a60f
+ size 2478313760
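
The three-line stubs like the one above are Git LFS pointer files: the commit stores only a sha256 oid and a byte size, while the actual 2.5 GB weights live in LFS storage. A minimal sketch of parsing such a pointer, assuming the exact three-field format shown:

```python
# Parse a Git LFS pointer file of the kind shown in this diff.
def parse_lfs_pointer(text: str) -> dict:
    # Each line is "<key> <value>".
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "spec": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:79cb7261ef90180a443055a42335fb04910cbfffc2a1809d29714b212885a60f
size 2478313760"""
print(parse_lfs_pointer(pointer))  # ~2.48 GB float32 checkpoint
```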
checkpoint-7568/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ebbfa38aec018143e0c1a8d7a237b9b60036764720fe274b572ee6104bb54485
+ size 4956808758
checkpoint-7568/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:755e14817720c41543c1a422a0b78e32e590221dc68b88ed0640eed88bd5f198
+ size 14180
checkpoint-7568/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a380d5509102fb4ef8afb72ca9d9ebb750c25f01d982a83babdee4dab74265b
+ size 1064
checkpoint-7568/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
checkpoint-7568/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-7568/tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "model_max_length": 2048,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
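
The `chat_template` above is a plain ChatML loop. Rendering it directly with jinja2 (the engine `transformers` uses under the hood) shows the exact prompt string it produces, with no model download required. A small sketch:

```python
# Render the ChatML chat_template from tokenizer_config.json above.
from jinja2 import Template  # assumes jinja2 is installed

chat_template = (
    "{% for message in messages %}"
    "{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
)
text = Template(chat_template).render(
    messages=[{"role": "user", "content": "Hello"}],
    add_generation_prompt=True,
)
print(text)
# <|im_start|>user
# Hello<|im_end|>
# <|im_start|>assistant
```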
checkpoint-7568/trainer_state.json ADDED
@@ -0,0 +1,126 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.1047078223889412,
+   "eval_steps": 500,
+   "global_step": 7568,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.01,
+       "grad_norm": 1.4067256450653076,
+       "learning_rate": 9.930821699849192e-05,
+       "loss": 2.2227,
+       "step": 500
+     },
+     {
+       "epoch": 0.01,
+       "grad_norm": 2.9092910289764404,
+       "learning_rate": 9.861643399698382e-05,
+       "loss": 2.1156,
+       "step": 1000
+     },
+     {
+       "epoch": 0.02,
+       "grad_norm": 1.1342206001281738,
+       "learning_rate": 9.792465099547574e-05,
+       "loss": 2.0774,
+       "step": 1500
+     },
+     {
+       "epoch": 0.03,
+       "grad_norm": 3.928356409072876,
+       "learning_rate": 9.723286799396766e-05,
+       "loss": 2.0468,
+       "step": 2000
+     },
+     {
+       "epoch": 0.03,
+       "grad_norm": 0.9985896944999695,
+       "learning_rate": 9.654108499245957e-05,
+       "loss": 2.0288,
+       "step": 2500
+     },
+     {
+       "epoch": 0.04,
+       "grad_norm": 1.6732710599899292,
+       "learning_rate": 9.584930199095148e-05,
+       "loss": 1.9954,
+       "step": 3000
+     },
+     {
+       "epoch": 0.05,
+       "grad_norm": 0.9810203313827515,
+       "learning_rate": 9.515751898944339e-05,
+       "loss": 1.9757,
+       "step": 3500
+     },
+     {
+       "epoch": 0.06,
+       "grad_norm": 0.8682870864868164,
+       "learning_rate": 9.446573598793531e-05,
+       "loss": 1.9651,
+       "step": 4000
+     },
+     {
+       "epoch": 0.06,
+       "grad_norm": 0.9405160546302795,
+       "learning_rate": 9.377395298642722e-05,
+       "loss": 1.9498,
+       "step": 4500
+     },
+     {
+       "epoch": 0.07,
+       "grad_norm": 0.8670147061347961,
+       "learning_rate": 9.308216998491913e-05,
+       "loss": 1.935,
+       "step": 5000
+     },
+     {
+       "epoch": 0.08,
+       "grad_norm": 0.9158061146736145,
+       "learning_rate": 9.239038698341104e-05,
+       "loss": 1.9252,
+       "step": 5500
+     },
+     {
+       "epoch": 0.08,
+       "grad_norm": 0.8484827280044556,
+       "learning_rate": 9.169860398190296e-05,
+       "loss": 1.9001,
+       "step": 6000
+     },
+     {
+       "epoch": 0.09,
+       "grad_norm": 0.8079173564910889,
+       "learning_rate": 9.100682098039488e-05,
+       "loss": 1.9174,
+       "step": 6500
+     },
+     {
+       "epoch": 0.1,
+       "grad_norm": 0.8451229929924011,
+       "learning_rate": 9.031503797888679e-05,
+       "loss": 1.8939,
+       "step": 7000
+     },
+     {
+       "epoch": 0.1,
+       "grad_norm": 0.7751355171203613,
+       "learning_rate": 8.962325497737871e-05,
+       "loss": 1.8915,
+       "step": 7500
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 72277,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 16,
+   "total_flos": 6.903809319402209e+17,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }
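
The `epoch` value in the trainer state above follows from the other numbers in this commit: global step times the effective batch size of 16 (train_batch_size 1 x gradient_accumulation_steps 16, per the README) divided by the 1,156,437 training samples from all_results.json. A quick check:

```python
# Epoch bookkeeping check for checkpoint-7568, using numbers from this commit.
global_step = 7568
total_train_batch_size = 16           # 1 per device x 16 accumulation steps
train_samples = 1_156_437             # from all_results.json

epoch = global_step * total_train_batch_size / train_samples
print(epoch)  # 0.10470782238894... matches "epoch" above
```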
checkpoint-7568/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:619d9e843dd7d8cff288742698e1be890ea7a8be9d70f15a760162e676357fa4
+ size 4920
checkpoint-7568/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-7584/added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
checkpoint-7584/config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "sail/Sailor-0.5B",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151643,
+   "hidden_act": "silu",
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 2816,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 21,
+   "model_type": "qwen2",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 16,
+   "rms_norm_eps": 1e-06,
+   "rope_theta": 1000000.0,
+   "sliding_window": 32768,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.39.0",
+   "use_cache": false,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
checkpoint-7584/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token_id": 151643,
+   "eos_token_id": 151643,
+   "max_new_tokens": 2048,
+   "transformers_version": "4.39.0"
+ }
checkpoint-7584/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-7584/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04de49ef60a0904c181f094cd6444f7392b179c603bb3925c6ce25c9c4774201
+ size 2478313760
checkpoint-7584/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:89ccc150c02002f22f424fe60728d31d250a93657ef0c319b190794d4035bb92
+ size 4956808758
checkpoint-7584/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:755e14817720c41543c1a422a0b78e32e590221dc68b88ed0640eed88bd5f198
+ size 14180
checkpoint-7584/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:886fd42484078d6089ab68e7e094b4076189120859f226490410821eaefbfe39
+ size 1064
checkpoint-7584/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
checkpoint-7584/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-7584/tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "model_max_length": 2048,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
checkpoint-7584/trainer_state.json ADDED
@@ -0,0 +1,126 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.10492919199230048,
+   "eval_steps": 500,
+   "global_step": 7584,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.01,
+       "grad_norm": 1.4067256450653076,
+       "learning_rate": 9.930821699849192e-05,
+       "loss": 2.2227,
+       "step": 500
+     },
+     {
+       "epoch": 0.01,
+       "grad_norm": 2.9092910289764404,
+       "learning_rate": 9.861643399698382e-05,
+       "loss": 2.1156,
+       "step": 1000
+     },
+     {
+       "epoch": 0.02,
+       "grad_norm": 1.1342206001281738,
+       "learning_rate": 9.792465099547574e-05,
+       "loss": 2.0774,
+       "step": 1500
+     },
+     {
+       "epoch": 0.03,
+       "grad_norm": 3.928356409072876,
+       "learning_rate": 9.723286799396766e-05,
+       "loss": 2.0468,
+       "step": 2000
+     },
+     {
+       "epoch": 0.03,
+       "grad_norm": 0.9985896944999695,
+       "learning_rate": 9.654108499245957e-05,
+       "loss": 2.0288,
+       "step": 2500
+     },
+     {
+       "epoch": 0.04,
+       "grad_norm": 1.6732710599899292,
+       "learning_rate": 9.584930199095148e-05,
+       "loss": 1.9954,
+       "step": 3000
+     },
+     {
+       "epoch": 0.05,
+       "grad_norm": 0.9810203313827515,
+       "learning_rate": 9.515751898944339e-05,
+       "loss": 1.9757,
+       "step": 3500
+     },
+     {
+       "epoch": 0.06,
+       "grad_norm": 0.8682870864868164,
+       "learning_rate": 9.446573598793531e-05,
+       "loss": 1.9651,
+       "step": 4000
+     },
+     {
+       "epoch": 0.06,
+       "grad_norm": 0.9405160546302795,
+       "learning_rate": 9.377395298642722e-05,
+       "loss": 1.9498,
+       "step": 4500
+     },
+     {
+       "epoch": 0.07,
+       "grad_norm": 0.8670147061347961,
+       "learning_rate": 9.308216998491913e-05,
+       "loss": 1.935,
+       "step": 5000
+     },
+     {
+       "epoch": 0.08,
+       "grad_norm": 0.9158061146736145,
+       "learning_rate": 9.239038698341104e-05,
+       "loss": 1.9252,
+       "step": 5500
+     },
+     {
+       "epoch": 0.08,
+       "grad_norm": 0.8484827280044556,
+       "learning_rate": 9.169860398190296e-05,
+       "loss": 1.9001,
+       "step": 6000
+     },
+     {
+       "epoch": 0.09,
+       "grad_norm": 0.8079173564910889,
+       "learning_rate": 9.100682098039488e-05,
+       "loss": 1.9174,
+       "step": 6500
+     },
+     {
+       "epoch": 0.1,
+       "grad_norm": 0.8451229929924011,
+       "learning_rate": 9.031503797888679e-05,
+       "loss": 1.8939,
+       "step": 7000
+     },
+     {
+       "epoch": 0.1,
+       "grad_norm": 0.7751355171203613,
+       "learning_rate": 8.962325497737871e-05,
+       "loss": 1.8915,
+       "step": 7500
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 72277,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 16,
+   "total_flos": 6.918405110775153e+17,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-7584/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:619d9e843dd7d8cff288742698e1be890ea7a8be9d70f15a760162e676357fa4
+ size 4920
checkpoint-7584/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-7600/added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
checkpoint-7600/config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "sail/Sailor-0.5B",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151643,
+   "hidden_act": "silu",
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 2816,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 21,
+   "model_type": "qwen2",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 16,
+   "rms_norm_eps": 1e-06,
+   "rope_theta": 1000000.0,
+   "sliding_window": 32768,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.39.0",
+   "use_cache": false,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
checkpoint-7600/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token_id": 151643,
+   "eos_token_id": 151643,
+   "max_new_tokens": 2048,
+   "transformers_version": "4.39.0"
+ }
checkpoint-7600/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-7600/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11146d20de778d3cd004f0fb0fd9a9b35b43eeda4d30329a8ec6859c867f380c
+ size 2478313760
checkpoint-7600/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c9bc20ea9a5f27c524b6c3aa458b2685618ce1dc14cc8eab8564ff0095e52fc
+ size 4956808758
checkpoint-7600/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a41ab52e23892da54d70091e14df8f1b6941c11a68eef43ba6478fda827b6f4
+ size 14244
checkpoint-7600/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4763ef45d4c6c771b85175f8a79b7d11b80434eb92b755c683851b559cf22273
+ size 1064
checkpoint-7600/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
checkpoint-7600/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-7600/tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "model_max_length": 2048,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
checkpoint-7600/trainer_state.json ADDED
@@ -0,0 +1,126 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.10515056159565978,
+   "eval_steps": 500,
+   "global_step": 7600,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.01,
+       "grad_norm": 1.4067256450653076,
+       "learning_rate": 9.930821699849192e-05,
+       "loss": 2.2227,
+       "step": 500
+     },
+     {
+       "epoch": 0.01,
+       "grad_norm": 2.9092910289764404,
+       "learning_rate": 9.861643399698382e-05,
+       "loss": 2.1156,
+       "step": 1000
+     },
+     {
+       "epoch": 0.02,
+       "grad_norm": 1.1342206001281738,
+       "learning_rate": 9.792465099547574e-05,
+       "loss": 2.0774,
+       "step": 1500
+     },
+     {
+       "epoch": 0.03,
+       "grad_norm": 3.928356409072876,
+       "learning_rate": 9.723286799396766e-05,
+       "loss": 2.0468,
+       "step": 2000
+     },
+     {
+       "epoch": 0.03,
+       "grad_norm": 0.9985896944999695,
+       "learning_rate": 9.654108499245957e-05,
+       "loss": 2.0288,
+       "step": 2500
+     },
+     {
+       "epoch": 0.04,
+       "grad_norm": 1.6732710599899292,
+       "learning_rate": 9.584930199095148e-05,
+       "loss": 1.9954,
+       "step": 3000
+     },
+     {
+       "epoch": 0.05,
+       "grad_norm": 0.9810203313827515,
+       "learning_rate": 9.515751898944339e-05,
+       "loss": 1.9757,
+       "step": 3500
+     },
+     {
+       "epoch": 0.06,
+       "grad_norm": 0.8682870864868164,
+       "learning_rate": 9.446573598793531e-05,
+       "loss": 1.9651,
+       "step": 4000
+     },
+     {
+       "epoch": 0.06,
+       "grad_norm": 0.9405160546302795,
+       "learning_rate": 9.377395298642722e-05,
+       "loss": 1.9498,
+       "step": 4500
+     },
+     {
+       "epoch": 0.07,
+       "grad_norm": 0.8670147061347961,
+       "learning_rate": 9.308216998491913e-05,
+       "loss": 1.935,
+       "step": 5000
+     },
+     {
+       "epoch": 0.08,
+       "grad_norm": 0.9158061146736145,
+       "learning_rate": 9.239038698341104e-05,
+       "loss": 1.9252,
+       "step": 5500
+     },
+     {
+       "epoch": 0.08,
+       "grad_norm": 0.8484827280044556,
+       "learning_rate": 9.169860398190296e-05,
+       "loss": 1.9001,
+       "step": 6000
+     },
+     {
+       "epoch": 0.09,
+       "grad_norm": 0.8079173564910889,
+       "learning_rate": 9.100682098039488e-05,
+       "loss": 1.9174,
+       "step": 6500
+     },
+     {
+       "epoch": 0.1,
+       "grad_norm": 0.8451229929924011,
+       "learning_rate": 9.031503797888679e-05,
+       "loss": 1.8939,
+       "step": 7000
+     },
+     {
+       "epoch": 0.1,
+       "grad_norm": 0.7751355171203613,
+       "learning_rate": 8.962325497737871e-05,
+       "loss": 1.8915,
+       "step": 7500
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 72277,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 16,
+   "total_flos": 6.933000902148096e+17,
+   "train_batch_size": 1,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-7600/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:619d9e843dd7d8cff288742698e1be890ea7a8be9d70f15a760162e676357fa4
+ size 4920
checkpoint-7600/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-7616/added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
checkpoint-7616/config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "sail/Sailor-0.5B",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151643,
+   "hidden_act": "silu",
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 2816,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 21,
+   "model_type": "qwen2",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 16,
+   "rms_norm_eps": 1e-06,
+   "rope_theta": 1000000.0,
+   "sliding_window": 32768,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.39.0",
+   "use_cache": false,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
checkpoint-7616/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token_id": 151643,
+   "eos_token_id": 151643,
+   "max_new_tokens": 2048,
+   "transformers_version": "4.39.0"
+ }
checkpoint-7616/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-7616/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:937917280f9f64041d30fe11a7882da25ed0ecdee59fb7b1843952ded485414b
+ size 2478313760