yongtaek-lim committed on
Commit 98f9a4c
1 Parent(s): 279e4dd

End of training

README.md CHANGED
@@ -6,6 +6,8 @@ tags:
 - llama-factory
 - full
 - generated_from_trainer
+metrics:
+- accuracy
 model-index:
 - name: pogny
   results: []
@@ -16,7 +18,10 @@ should probably proofread and complete it, then remove this comment. -->

 # pogny

-This model is a fine-tuned version of [MLP-KTLim/llama-3-Korean-Bllossom-8B](https://huggingface.co/MLP-KTLim/llama-3-Korean-Bllossom-8B) on an unknown dataset.
+This model is a fine-tuned version of [MLP-KTLim/llama-3-Korean-Bllossom-8B](https://huggingface.co/MLP-KTLim/llama-3-Korean-Bllossom-8B) on the alpaca_en_demo dataset.
+It achieves the following results on the evaluation set:
+- Loss: 1.2285
+- Accuracy: 0.6567

 ## Model description

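For readers landing on this commit, a minimal loading sketch is below. It assumes the checkpoint is published under a repo id such as `yongtaek-lim/pogny` (hypothetical, inferred from the committer and model name; substitute the actual repository path) and uses the standard `transformers` loading API.

```python
# Minimal loading sketch. The repo id "yongtaek-lim/pogny" is an assumption;
# replace it with the real path of this checkpoint.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "yongtaek-lim/pogny"  # hypothetical repo id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

prompt = "Explain what fine-tuning is in one sentence."
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```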
all_results.json CHANGED
@@ -2,12 +2,12 @@
     "epoch": 0.8,
     "eval_accuracy": 0.6567137677288996,
     "eval_loss": 1.2285137176513672,
-    "eval_runtime": 0.9711,
-    "eval_samples_per_second": 7.208,
-    "eval_steps_per_second": 3.089,
+    "eval_runtime": 0.9782,
+    "eval_samples_per_second": 7.156,
+    "eval_steps_per_second": 3.067,
     "total_flos": 18070241280.0,
     "train_loss": 1.165506362915039,
-    "train_runtime": 221.1267,
-    "train_samples_per_second": 0.249,
-    "train_steps_per_second": 0.005
+    "train_runtime": 223.0942,
+    "train_samples_per_second": 0.247,
+    "train_steps_per_second": 0.004
 }
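As a sanity check on the updated numbers, the throughput fields are simply counts divided by runtime. A quick back-of-the-envelope in Python; the counts used below (about 7 eval samples, 3 eval steps, 1 train step) are inferred from the reported rates, not stated in the diff:

```python
# Back-of-the-envelope check: *_per_second ≈ count / runtime.
# Counts (7 eval samples, 3 eval steps, 1 train step) are inferred, not stated.
eval_runtime = 0.9782
train_runtime = 223.0942

print(round(7 / eval_runtime, 3))   # ≈ 7.156 -> matches eval_samples_per_second
print(round(3 / eval_runtime, 3))   # ≈ 3.067 -> matches eval_steps_per_second
print(round(1 / train_runtime, 3))  # ≈ 0.004 -> matches train_steps_per_second
```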
eval_results.json CHANGED
@@ -2,7 +2,7 @@
     "epoch": 0.8,
     "eval_accuracy": 0.6567137677288996,
     "eval_loss": 1.2285137176513672,
-    "eval_runtime": 0.9711,
-    "eval_samples_per_second": 7.208,
-    "eval_steps_per_second": 3.089
+    "eval_runtime": 0.9782,
+    "eval_samples_per_second": 7.156,
+    "eval_steps_per_second": 3.067
 }
runs/Aug28_04-25-09_main1/events.out.tfevents.1724819518.main1.48549.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5df400a27f228b157dd7be6374711cad06bacbe8562c109d351de673d77a888
+size 357
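The new file is a Git LFS pointer to a TensorBoard event log. A hedged sketch for inspecting it locally with TensorBoard's event-processing reader; the scalar tag name is an assumption, so list `Tags()` first to see what was actually logged:

```python
# Sketch: read the downloaded tfevents file with TensorBoard's EventAccumulator.
# The tag "train/loss" is an assumption; check ea.Tags() for the real names.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Aug28_04-25-09_main1")  # directory holding the tfevents file
ea.Reload()

print(ea.Tags()["scalars"])             # scalar tags actually recorded
for event in ea.Scalars("train/loss"):  # hypothetical tag; adjust to one listed above
    print(event.step, event.value)
```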
train_results.json CHANGED
@@ -2,7 +2,7 @@
     "epoch": 0.8,
     "total_flos": 18070241280.0,
     "train_loss": 1.165506362915039,
-    "train_runtime": 221.1267,
-    "train_samples_per_second": 0.249,
-    "train_steps_per_second": 0.005
+    "train_runtime": 223.0942,
+    "train_samples_per_second": 0.247,
+    "train_steps_per_second": 0.004
 }
trainer_state.json CHANGED
@@ -13,16 +13,16 @@
       "step": 1,
       "total_flos": 18070241280.0,
       "train_loss": 1.165506362915039,
-      "train_runtime": 221.1267,
-      "train_samples_per_second": 0.249,
-      "train_steps_per_second": 0.005
+      "train_runtime": 223.0942,
+      "train_samples_per_second": 0.247,
+      "train_steps_per_second": 0.004
     }
   ],
   "logging_steps": 10,
   "max_steps": 1,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
-  "save_steps": 50000,
+  "save_steps": 1,
   "stateful_callbacks": {
     "TrainerControl": {
       "args": {