Files changed (1)
  1. README.md +117 -1
README.md CHANGED
@@ -2,6 +2,109 @@
  license: apache-2.0
  tags:
  - merge
+ model-index:
+ - name: GML-Mistral-merged-v1
+   results:
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: AI2 Reasoning Challenge (25-Shot)
+       type: ai2_arc
+       config: ARC-Challenge
+       split: test
+       args:
+         num_few_shot: 25
+     metrics:
+     - type: acc_norm
+       value: 71.25
+       name: normalized accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=zyh3826/GML-Mistral-merged-v1
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: HellaSwag (10-Shot)
+       type: hellaswag
+       split: validation
+       args:
+         num_few_shot: 10
+     metrics:
+     - type: acc_norm
+       value: 87.88
+       name: normalized accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=zyh3826/GML-Mistral-merged-v1
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: MMLU (5-Shot)
+       type: cais/mmlu
+       config: all
+       split: test
+       args:
+         num_few_shot: 5
+     metrics:
+     - type: acc
+       value: 65.42
+       name: accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=zyh3826/GML-Mistral-merged-v1
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: TruthfulQA (0-shot)
+       type: truthful_qa
+       config: multiple_choice
+       split: validation
+       args:
+         num_few_shot: 0
+     metrics:
+     - type: mc2
+       value: 69.28
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=zyh3826/GML-Mistral-merged-v1
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: Winogrande (5-shot)
+       type: winogrande
+       config: winogrande_xl
+       split: validation
+       args:
+         num_few_shot: 5
+     metrics:
+     - type: acc
+       value: 80.98
+       name: accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=zyh3826/GML-Mistral-merged-v1
+       name: Open LLM Leaderboard
+   - task:
+       type: text-generation
+       name: Text Generation
+     dataset:
+       name: GSM8k (5-shot)
+       type: gsm8k
+       config: main
+       split: test
+       args:
+         num_few_shot: 5
+     metrics:
+     - type: acc
+       value: 64.97
+       name: accuracy
+     source:
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=zyh3826/GML-Mistral-merged-v1
+       name: Open LLM Leaderboard
  ---
  merge from quantum-v0.01 and mistral-7b-dpo-v5
 
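The `model-index` block added above is the machine-readable counterpart of the results table added at the end of the README: the Hub parses it into structured evaluation results. A minimal sketch of reading it back with `huggingface_hub` (the repo id `zyh3826/GML-Mistral-merged-v1` is taken from the leaderboard query URLs above; field names follow `huggingface_hub`'s `EvalResult` dataclass):

```python
from huggingface_hub import ModelCard

# Load the model card and inspect the parsed model-index metadata.
card = ModelCard.load("zyh3826/GML-Mistral-merged-v1")
for result in card.data.eval_results:
    # One EvalResult per (task, dataset, metric) entry in the YAML above.
    print(f"{result.dataset_name:<35} {result.metric_type:<10} {result.metric_value}")
```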
 
@@ -19,4 +122,17 @@ dtype: bfloat16
  ```
 
  # Acknowledgement
- [mergekit](https://github.com/cg123/mergekit)
+ [mergekit](https://github.com/cg123/mergekit)
+ # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
+ Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_zyh3826__GML-Mistral-merged-v1)
+
+ | Metric                            | Value |
+ |-----------------------------------|------:|
+ | Avg.                              | 73.30 |
+ | AI2 Reasoning Challenge (25-Shot) | 71.25 |
+ | HellaSwag (10-Shot)               | 87.88 |
+ | MMLU (5-Shot)                     | 65.42 |
+ | TruthfulQA (0-shot)               | 69.28 |
+ | Winogrande (5-shot)               | 80.98 |
+ | GSM8k (5-shot)                    | 64.97 |
+
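As a sanity check on the added table, the reported Avg. is the unweighted mean of the six per-task scores; a quick arithmetic sketch (the labels are just the table rows):

```python
# Recompute the leaderboard average from the six per-task scores above.
scores = {
    "AI2 Reasoning Challenge (25-Shot)": 71.25,
    "HellaSwag (10-Shot)": 87.88,
    "MMLU (5-Shot)": 65.42,
    "TruthfulQA (0-shot)": 69.28,
    "Winogrande (5-shot)": 80.98,
    "GSM8k (5-shot)": 64.97,
}
average = sum(scores.values()) / len(scores)
print(f"Avg. = {average:.2f}")  # Avg. = 73.30
```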