from functools import reduce
import json
import os
import pickle
import re

from datasets import load_dataset
from huggingface_hub import hf_hub_download
from huggingface_hub.repocard import metadata_load
import pandas as pd
from tqdm.autonotebook import tqdm

from utils.model_size import get_model_parameters_memory
from envs import LEADERBOARD_CONFIG, MODEL_META, REPO_ID, RESULTS_REPO, API


MODEL_CACHE = {}
TASKS_CONFIG = LEADERBOARD_CONFIG["tasks"]
BOARDS_CONFIG = LEADERBOARD_CONFIG["boards"]

TASKS = list(TASKS_CONFIG.keys())
PRETTY_NAMES = {
    "InstructionRetrieval": "Retrieval w/Instructions",
    "PairClassification": "Pair Classification",
    "BitextMining": "Bitext Mining",
}

TASK_TO_METRIC = {k: [v["metric"]] for k, v in TASKS_CONFIG.items()}
# Add legacy metric names
TASK_TO_METRIC["STS"].append("cos_sim_spearman")
TASK_TO_METRIC["STS"].append("cosine_spearman")
TASK_TO_METRIC["Summarization"].append("cos_sim_spearman")
TASK_TO_METRIC["Summarization"].append("cosine_spearman")
TASK_TO_METRIC["PairClassification"].append("cos_sim_ap")
TASK_TO_METRIC["PairClassification"].append("cosine_ap")


EXTERNAL_MODELS = {k for k,v in MODEL_META["model_meta"].items() if v.get("is_external", False)}
EXTERNAL_MODEL_TO_LINK = {k: v["link"] for k,v in MODEL_META["model_meta"].items() if v.get("link", False)}
EXTERNAL_MODEL_TO_DIM = {k: v["dim"] for k,v in MODEL_META["model_meta"].items() if v.get("dim", False)}
EXTERNAL_MODEL_TO_SEQLEN = {k: v["seq_len"] for k,v in MODEL_META["model_meta"].items() if v.get("seq_len", False)}
EXTERNAL_MODEL_TO_SIZE = {k: v["size"] for k,v in MODEL_META["model_meta"].items() if v.get("size", False)}
PROPRIETARY_MODELS = {k for k,v in MODEL_META["model_meta"].items() if v.get("is_proprietary", False)}
TASK_DESCRIPTIONS = {k: v["task_description"] for k,v in TASKS_CONFIG.items()}
TASK_DESCRIPTIONS["Overall"] = "Overall performance across MTEB tasks."
SENTENCE_TRANSFORMERS_COMPATIBLE_MODELS = {k for k,v in MODEL_META["model_meta"].items() if v.get("is_sentence_transformers_compatible", False)}
MODELS_TO_SKIP = MODEL_META["models_to_skip"]
CROSS_ENCODERS = MODEL_META["cross_encoders"]
BI_ENCODERS = [k for k, _ in MODEL_META["model_meta"].items() if k not in CROSS_ENCODERS + ["bm25"]]



TASK_TO_TASK_TYPE = {task_category: [] for task_category in TASKS}
for board_config in BOARDS_CONFIG.values():
    for task_category, task_list in board_config["tasks"].items():
        TASK_TO_TASK_TYPE[task_category].extend(task_list)
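
# Illustrative shape of TASK_TO_TASK_TYPE after the loop above (dataset names are examples only;
# the real lists come from BOARDS_CONFIG):
#   {"Classification": ["AmazonMassiveIntentClassification (en)", ...],
#    "PairClassification": ["SprintDuplicateQuestions", ...], ...}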


## Don't cache this because we want to re-compute every time
# model_infos_path = "model_infos.json"
MODEL_INFOS = {}
# if os.path.exists(model_infos_path):
#     with open(model_infos_path) as f:
#         MODEL_INFOS = json.load(f)

def add_rank(df):
    cols_to_rank = [col for col in df.columns if col not in ["Model", "Model Size (Million Parameters)", "Memory Usage (GB, fp32)", "Embedding Dimensions", "Max Tokens"]]
    if len(cols_to_rank) == 1:
        df.sort_values(cols_to_rank[0], ascending=False, inplace=True)
    else:
        df.insert(len(df.columns) - len(cols_to_rank), "Average", df[cols_to_rank].mean(axis=1, skipna=False))
        df.sort_values("Average", ascending=False, inplace=True)
    df.insert(0, "Rank", list(range(1, len(df) + 1)))
    df = df.round(2)
    # Fill NaN after averaging
    df.fillna("", inplace=True)
    return df
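
# Minimal usage sketch for add_rank (hypothetical model names and scores, for illustration only):
#   df = pd.DataFrame({"Model": ["a", "b"], "TaskA": [55.0, 60.0], "TaskB": [45.0, 50.0]})
#   add_rank(df)
#   # -> columns Rank, Model, Average, TaskA, TaskB; rows sorted by "Average" descending, ranked from 1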


def make_clickable_model(model_name, link=None):
    if link is None:
        link = "https://huggingface.co/" + model_name
    # Remove user from model name
    return (
        f'<a target="_blank" style="text-decoration: underline" href="{link}">{model_name.split("/")[-1]}</a>'
    )
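
# Example output ("intfloat/e5-large" is just an illustrative model id; the org prefix is stripped
# from the display text):
#   make_clickable_model("intfloat/e5-large")
#   # -> '<a target="_blank" style="text-decoration: underline" href="https://huggingface.co/intfloat/e5-large">e5-large</a>'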


def add_lang(examples):
    if not examples["eval_language"]:
        examples["mteb_dataset_name_with_lang"] = examples["mteb_dataset_name"]
    else:
        examples["mteb_dataset_name_with_lang"] = examples["mteb_dataset_name"] + f' ({examples["eval_language"]})'
    return examples

def norm(names):
    return {name.split(" ")[0] for name in names}

def add_task(examples):
    # Could be added to the dataset loading script instead
    task_name = examples["mteb_dataset_name"]
    task_type = None
    for task_category, task_list in TASK_TO_TASK_TYPE.items():
        if task_name in norm(task_list):
            task_type = task_category
            break
    if task_type is not None:
        examples["mteb_task"] = task_type
    else:
        print("WARNING: Task not found for dataset", examples["mteb_dataset_name"])
        examples["mteb_task"] = "Unknown"
    return examples

def filter_metric_external(x, task, metrics):
    # This is a hack for the passkey and needle retrieval test, which reports ndcg_at_1 (i.e. accuracy), rather than the ndcg_at_10 that is commonly used for retrieval tasks. 
    if x['mteb_dataset_name'] in ['LEMBNeedleRetrieval', 'LEMBPasskeyRetrieval']:
        return x["mteb_task"] == task and x['metric'] == 'ndcg_at_1'
    else:
        return x["mteb_task"] == task and x["metric"] in metrics

def filter_metric_fetched(name, metric, expected_metrics):
    # This is a hack for the passkey and needle retrieval test, which reports ndcg_at_1 (i.e. accuracy), rather than the ndcg_at_10 that is commonly used for retrieval tasks. 
    return metric == 'ndcg_at_1' if name in ['LEMBNeedleRetrieval', 'LEMBPasskeyRetrieval'] else metric in expected_metrics


def get_dim_seq_size(model):
    filenames = [sib.rfilename for sib in model.siblings]
    dim, seq = "", ""
    for filename in filenames:
        if re.match(r"\d+_Pooling/config\.json", filename):
            st_config_path = hf_hub_download(model.modelId, filename=filename)
            with open(st_config_path) as f:
                dim = json.load(f).get("word_embedding_dimension", "")
            break
    for filename in filenames:
        if re.match(r"\d+_Dense/config\.json", filename):
            st_config_path = hf_hub_download(model.modelId, filename=filename)
            with open(st_config_path) as f:
                dim = json.load(f).get("out_features", dim)
    if "config.json" in filenames:
        config_path = hf_hub_download(model.modelId, filename="config.json")
        with open(config_path) as f:
            config = json.load(f)
        if not dim:
            dim = config.get("hidden_dim", config.get("hidden_size", config.get("d_model", "")))
        seq = config.get("n_positions", config.get("max_position_embeddings", config.get("n_ctx", config.get("seq_length", ""))))
    
    if dim == "" or seq == "":
        raise Exception(f"Could not find dim or seq for model {model.modelId}")
    
    # Get model file size without downloading. Parameters in million parameters and memory in GB
    parameters, memory = get_model_parameters_memory(model)
    return dim, seq, parameters, memory


def get_external_model_results():
    if os.path.exists("EXTERNAL_MODEL_RESULTS.json"):
        with open("EXTERNAL_MODEL_RESULTS.json") as f:
            EXTERNAL_MODEL_RESULTS = json.load(f)
        # Add any external models that are not yet in the cached results
        models_to_run = []
        for model in EXTERNAL_MODELS:
            if model not in EXTERNAL_MODEL_RESULTS:
                models_to_run.append(model)
                EXTERNAL_MODEL_RESULTS[model] = {k: {v[0]: []} for k, v in TASK_TO_METRIC.items()}

    ## Only recompute everything (instead of using the cache) when the cache file is missing; results rarely change.
    ## If your model's results have changed, delete its entry from the "EXTERNAL_MODEL_RESULTS.json" file.
    else:
        EXTERNAL_MODEL_RESULTS = {model: {k: {v[0]: []} for k, v in TASK_TO_METRIC.items()} for model in EXTERNAL_MODELS}
        models_to_run = EXTERNAL_MODELS

    pbar = tqdm(models_to_run, desc="Fetching external model results")
    for model in pbar:
        pbar.set_description(f"Fetching external model results for {model!r}")
        ds = load_dataset(RESULTS_REPO, model, trust_remote_code=True, download_mode='force_redownload', verification_mode="no_checks")
        ds = ds.map(add_lang)
        ds = ds.map(add_task)
        base_dict = {"Model": make_clickable_model(model, link=EXTERNAL_MODEL_TO_LINK.get(model, f"https://huggingface.co/spaces/{REPO_ID}"))}

        for task, metrics in TASK_TO_METRIC.items():
            ds_dict = ds.filter(lambda x: filter_metric_external(x, task, metrics))["test"].to_dict()
            ds_dict = {k: round(v, 2) for k, v in zip(ds_dict["mteb_dataset_name_with_lang"], ds_dict["score"])}
            # metrics[0] is the main name for this metric; other names in the list are legacy for backward-compat
            EXTERNAL_MODEL_RESULTS[model][task][metrics[0]].append({**base_dict, **ds_dict})

    # Save & cache EXTERNAL_MODEL_RESULTS
    with open("EXTERNAL_MODEL_RESULTS.json", "w") as f:
        json.dump(EXTERNAL_MODEL_RESULTS, f, indent=4)

    return EXTERNAL_MODEL_RESULTS
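
# Rough shape of the cached EXTERNAL_MODEL_RESULTS.json (model, dataset and score values below are
# hypothetical; only the nesting model -> task -> main metric -> list of row dicts is documented):
#   {"some-external-model": {"STS": {"<metric-from-config>": [{"Model": "<clickable html>", "STS22 (en)": 61.5}]}, ...}}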


def download_or_use_cache(modelId):
    global MODEL_CACHE
    if modelId in MODEL_CACHE:
        return MODEL_CACHE[modelId]
    try:
        readme_path = hf_hub_download(modelId, filename="README.md", etag_timeout=30)
    except Exception:
        print(f"ERROR: Could not fetch metadata for {modelId}, trying again")
        readme_path = hf_hub_download(modelId, filename="README.md", etag_timeout=30)
    meta = metadata_load(readme_path)
    MODEL_CACHE[modelId] = meta
    return meta


def get_mteb_data(tasks=["Clustering"], langs=[], datasets=[], fillna=True, add_emb_dim=True, task_to_metric=TASK_TO_METRIC, rank=True):
    global MODEL_INFOS

    with open("EXTERNAL_MODEL_RESULTS.json", "r") as f:
        external_model_results = json.load(f)

    api = API
    models = list(api.list_models(filter="mteb"))
    # Legacy name changes; also fetch the old results & merge them later
    if ('MLSUMClusteringP2P (fr)' in datasets):
        datasets.append('MLSUMClusteringP2P')
    if ('MLSUMClusteringS2S (fr)' in datasets):
        datasets.append('MLSUMClusteringS2S')
    if ('PawsXPairClassification (fr)' in datasets):
        datasets.append('PawsX (fr)')
    # Initialize the results list, starting with external models (whose metadata cannot be fetched from the Hub)
    df_list = []
    for model in external_model_results:
        results_list = []
        for task in tasks:
            # Not all models have InstructionRetrieval or other new tasks
            if task not in external_model_results[model]: continue
            results_list += external_model_results[model][task][task_to_metric[task][0]]
        
        if len(datasets) > 0:
            res = {k: v for d in results_list for k, v in d.items() if (k == "Model") or any([x in k for x in datasets])}
        elif langs:
            # Would be cleaner to rely on an extra language column instead
            langs_format = [f"({lang})" for lang in langs]
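            # Keep the "Model" key and dataset names without a language suffix (last token equals the
            # whole key), plus datasets whose "(lang)" suffix matches one of the requested languages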
            res = {k: v for d in results_list for k, v in d.items() if any([k.split(" ")[-1] in (k, x) for x in langs_format])}
        else:
            res = {k: v for d in results_list for k, v in d.items()}
        # Model & at least one result
        if len(res) > 1:
            if add_emb_dim:
                res["Model Size (Million Parameters)"] = EXTERNAL_MODEL_TO_SIZE.get(model, "")
                res["Memory Usage (GB, fp32)"] = round(res["Model Size (Million Parameters)"] * 1e6 * 4 / 1024**3, 2) if res["Model Size (Million Parameters)"] != "" else ""
                res["Embedding Dimensions"] = EXTERNAL_MODEL_TO_DIM.get(model, "")
                res["Max Tokens"] = EXTERNAL_MODEL_TO_SEQLEN.get(model, "")
            df_list.append(res)

    pbar = tqdm(models, desc="Fetching model metadata")
    for model in pbar:
        if model.modelId in MODELS_TO_SKIP: continue
        pbar.set_description(f"Fetching {model.modelId!r} metadata")
        meta = download_or_use_cache(model.modelId)
        MODEL_INFOS[model.modelId] = {
            "metadata": meta
        }
        if "model-index" not in meta:
            continue
        # meta['model-index'][0]["results"] is list of elements like:
        # {
        #    "task": {"type": "Classification"},
        #    "dataset": {
        #        "type": "mteb/amazon_massive_intent",
        #        "name": "MTEB MassiveIntentClassification (nb)",
        #        "config": "nb",
        #        "split": "test",
        #    },
        #    "metrics": [
        #        {"type": "accuracy", "value": 39.81506388702084},
        #        {"type": "f1", "value": 38.809586587791664},
        #    ],
        # },
        # Use "get" instead of dict indexing to skip incompat metadata instead of erroring out
        if len(datasets) > 0:
            task_results = [sub_res for sub_res in meta["model-index"][0]["results"] if (sub_res.get("task", {}).get("type", "") in tasks) and any([x in sub_res.get("dataset", {}).get("name", "") for x in datasets])]
        elif langs:
            task_results = [sub_res for sub_res in meta["model-index"][0]["results"] if (sub_res.get("task", {}).get("type", "") in tasks) and (sub_res.get("dataset", {}).get("config", "default") in ("default", *langs))]
        else:
            task_results = [sub_res for sub_res in meta["model-index"][0]["results"] if (sub_res.get("task", {}).get("type", "") in tasks)]
        try:
            out = [{res["dataset"]["name"].replace("MTEB ", ""): [round(score["value"], 2) for score in res["metrics"] if filter_metric_fetched(res["dataset"]["name"].replace("MTEB ", ""), score["type"], task_to_metric.get(res["task"]["type"]))][0]} for res in task_results]
        except Exception as e:
            print("ERROR", model.modelId, e)
            continue
        out = {k: v for d in out for k, v in d.items()}
        out["Model"] = make_clickable_model(model.modelId)
        # Model & at least one result
        if len(out) > 1:
            if add_emb_dim:
                # The except clause triggers on gated repos, we can use external metadata for those
                try:
                    MODEL_INFOS[model.modelId]["dim_seq_size"] = list(get_dim_seq_size(model))
                except Exception:
                    name_without_org = model.modelId.split("/")[-1]
                    # EXTERNAL_MODEL_TO_SIZE[name_without_org] refers to millions of parameters, so for memory usage
                    # we multiply by 1e6 to get just the number of parameters, then by 4 to get the number of bytes
                    # given fp32 precision (4 bytes per float), then divide by 1024**3 to get the number of GB
                    MODEL_INFOS[model.modelId]["dim_seq_size"] = (
                        EXTERNAL_MODEL_TO_DIM.get(name_without_org, ""),
                        EXTERNAL_MODEL_TO_SEQLEN.get(name_without_org, ""),
                        EXTERNAL_MODEL_TO_SIZE.get(name_without_org, ""),
                        round(EXTERNAL_MODEL_TO_SIZE[name_without_org] * 1e6 * 4 / 1024**3, 2) if name_without_org in EXTERNAL_MODEL_TO_SIZE else "",
                    )
                out["Embedding Dimensions"], out["Max Tokens"], out["Model Size (Million Parameters)"], out["Memory Usage (GB, fp32)"] = tuple(MODEL_INFOS[model.modelId]["dim_seq_size"])
            df_list.append(out)
        if model.library_name == "sentence-transformers" or "sentence-transformers" in model.tags or "modules.json" in {file.rfilename for file in model.siblings}:
            SENTENCE_TRANSFORMERS_COMPATIBLE_MODELS.add(out["Model"])

    # # Save & cache MODEL_INFOS
    # with open("model_infos.json", "w") as f:
    #     json.dump(MODEL_INFOS, f)

    df = pd.DataFrame(df_list)
    # If there are any models that are the same, merge them
    # E.g. if out["Model"] has the same value in two places, merge & take whichever one is not NaN else just take the first one
    df = df.groupby("Model", as_index=False).first()
    # Put 'Model' column first
    cols = sorted(list(df.columns))
    base_columns = ["Model", "Model Size (Million Parameters)", "Memory Usage (GB, fp32)", "Embedding Dimensions", "Max Tokens"]
    if len(datasets) > 0:
        # Update legacy column names to be merged with newer ones
        # Update 'MLSUMClusteringP2P (fr)' with values from 'MLSUMClusteringP2P'
        if ('MLSUMClusteringP2P (fr)' in datasets) and ('MLSUMClusteringP2P' in cols):
            df['MLSUMClusteringP2P (fr)'] = df['MLSUMClusteringP2P (fr)'].fillna(df['MLSUMClusteringP2P'])
            datasets.remove('MLSUMClusteringP2P')
        if ('MLSUMClusteringS2S (fr)' in datasets) and ('MLSUMClusteringS2S' in cols):
            df['MLSUMClusteringS2S (fr)'] = df['MLSUMClusteringS2S (fr)'].fillna(df['MLSUMClusteringS2S'])
            datasets.remove('MLSUMClusteringS2S')
        if ('PawsXPairClassification (fr)' in datasets) and ('PawsX (fr)' in cols):
            if 'PawsXPairClassification (fr)' in cols:
                df['PawsXPairClassification (fr)'] = df['PawsXPairClassification (fr)'].fillna(df['PawsX (fr)'])
            else:
                df['PawsXPairClassification (fr)'] = df['PawsX (fr)']
            datasets.remove('PawsX (fr)')
        # Filter invalid columns
        cols = [col for col in cols if col in base_columns + datasets]
    i = 0
    for column in base_columns:
        if column in cols:
            cols.insert(i, cols.pop(cols.index(column)))
            i += 1
    df = df[cols]
    if rank:
        df = add_rank(df)       
    if fillna:
        df.fillna("", inplace=True)
    return df
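
# Usage sketch (illustrative; the real task and dataset names come from BOARDS_CONFIG, as in
# refresh_leaderboard below):
#   df = get_mteb_data(tasks=["Clustering"], datasets=["SomeClusteringDataset (en)"])
#   # -> one row per model: Rank, Model, size/memory/dims/max-tokens columns, then per-dataset scores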


# Get dict with a task list for each task category
# E.g. {"Classification": ["AmazonMassiveIntentClassification (en)", ...], "PairClassification": ["SprintDuplicateQuestions", ...]}
def get_mteb_average(task_dict: dict):
    all_tasks = reduce(lambda x, y: x + y, task_dict.values())
    DATA_OVERALL = get_mteb_data(
        tasks=list(task_dict.keys()),
        datasets=all_tasks,
        fillna=False,
        add_emb_dim=True,
        rank=False,
    )
    # Debugging:
    # DATA_OVERALL.to_csv("overall.csv")
    try:
        DATA_OVERALL.insert(1, f"Average ({len(all_tasks)} datasets)", DATA_OVERALL[all_tasks].mean(axis=1, skipna=False))
    except Exception as e:
        breakpoint()
    for i, (task_category, task_category_list) in enumerate(task_dict.items()):
        DATA_OVERALL.insert(i+2, f"{task_category} Average ({len(task_category_list)} datasets)", DATA_OVERALL[task_category_list].mean(axis=1, skipna=False))
    DATA_OVERALL.sort_values(f"Average ({len(all_tasks)} datasets)", ascending=False, inplace=True)
    # Start ranking from 1
    DATA_OVERALL.insert(0, "Rank", list(range(1, len(DATA_OVERALL) + 1)))

    DATA_OVERALL = DATA_OVERALL.round(2)

    DATA_TASKS = {}
    for task_category, task_category_list in task_dict.items():
        DATA_TASKS[task_category] = add_rank(DATA_OVERALL[["Model", "Model Size (Million Parameters)", "Memory Usage (GB, fp32)"] + task_category_list])
        DATA_TASKS[task_category] = DATA_TASKS[task_category][DATA_TASKS[task_category].iloc[:, 4:].ne("").any(axis=1)]

    # Fill NaN after averaging
    DATA_OVERALL.fillna("", inplace=True)

    data_overall_rows = ["Rank", "Model", "Model Size (Million Parameters)", "Memory Usage (GB, fp32)", "Embedding Dimensions", "Max Tokens", f"Average ({len(all_tasks)} datasets)"]
    for task_category, task_category_list in task_dict.items():
        data_overall_rows.append(f"{task_category} Average ({len(task_category_list)} datasets)")

    DATA_OVERALL = DATA_OVERALL[data_overall_rows]
    DATA_OVERALL = DATA_OVERALL[DATA_OVERALL.iloc[:, 5:].ne("").any(axis=1)]

    return DATA_OVERALL, DATA_TASKS
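
# Usage sketch (hypothetical task dict; the real dicts are the "tasks" entries of BOARDS_CONFIG):
#   overall, per_task = get_mteb_average({
#       "Classification": ["AmazonMassiveIntentClassification (en)"],
#       "PairClassification": ["SprintDuplicateQuestions"],
#   })
#   # `overall` is the ranked summary table with per-category averages;
#   # `per_task` maps each task category to its own ranked DataFrame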


def refresh_leaderboard():
    """
    The main code to refresh and calculate results for the leaderboard. It does this by fetching the results from the
        external models and the models in the leaderboard, then calculating the average scores for each task category.

    Returns:
        tuple: (all_data_tasks, boards_data) — a list of per-task-category DataFrames and a dict
            mapping each board to its overall and per-task-category leaderboards.
    """

    # get external model results and cache them
    # NOTE: if your model results have changed, use this function to refresh them (see inside for details)
    get_external_model_results()

    boards_data = {}
    all_data_tasks = []
    pbar_tasks = tqdm(BOARDS_CONFIG.items(), desc="Fetching leaderboard results for ???", total=len(BOARDS_CONFIG), leave=True)
    for board, board_config in pbar_tasks:
        boards_data[board] = {
            "data_overall": None,
            "data_tasks": {}
        }
        pbar_tasks.set_description(f"Fetching leaderboard results for {board!r}")
        pbar_tasks.refresh()
        if board_config["has_overall"]:
            data_overall, data_tasks = get_mteb_average(board_config["tasks"])
            boards_data[board]["data_overall"] = data_overall
            boards_data[board]["data_tasks"] = data_tasks
            all_data_tasks.extend(data_tasks.values())
        else:
            for task_category, task_category_list in board_config["tasks"].items():
                data_task_category = get_mteb_data(tasks=[task_category], datasets=task_category_list)
                data_task_category.drop(columns=["Embedding Dimensions", "Max Tokens"], inplace=True)
                boards_data[board]["data_tasks"][task_category] = data_task_category
                all_data_tasks.append(data_task_category)

    return all_data_tasks, boards_data



def write_out_results(item, item_name: str):
    """
    Because the items are nested, recursively create subfolders until a leaf is reached,
        then save the DataFrames as JSONL files.

    Args:
        item (dict): The item to save
        item_name (str): The name of the item
    
    Returns:
        None
    """
    main_folder = item_name

    if isinstance(item, list): 
        for i, v in enumerate(item):
            write_out_results(v, os.path.join(main_folder, str(i)))

    elif isinstance(item, dict):
        for key, value in item.items():
            if isinstance(value, dict):
                write_out_results(value, os.path.join(main_folder, key))
            elif isinstance(value, list):
                for i, v in enumerate(value):
                    write_out_results(v, os.path.join(main_folder, key + str(i)))
            else:
                write_out_results(value, os.path.join(main_folder, key))

    elif isinstance(item, pd.DataFrame):
        print(f"Saving {main_folder} to {main_folder}/default.jsonl")
        os.makedirs(main_folder, exist_ok=True)
        
        item.reset_index().to_json(f"{main_folder}/default.jsonl", orient="records", lines=True)

    elif isinstance(item, str):
        print(f"Saving {main_folder} to {main_folder}/default.txt")
        os.makedirs(main_folder, exist_ok=True)
        with open(f"{main_folder}/default.txt", "w") as f:
            f.write(item)

    elif item is None:
        # write out an empty file
        print(f"Saving {main_folder} to {main_folder}/default.txt")
        os.makedirs(main_folder, exist_ok=True)
        with open(f"{main_folder}/default.txt", "w") as f:
            f.write("")

    else:
        raise Exception(f"Unknown type {type(item)}")


def load_results(data_path):
    """
    Do the reverse of `write_out_results` to reconstruct the item

    Args:
        data_path (str): The path to the data to load

    Returns:
        dict: The loaded data
    """
    if os.path.isdir(data_path):
        # if the folder just has numbers from 0 to N, load as a list
        all_files_in_dir = list(os.listdir(data_path))
        if set(all_files_in_dir) == set([str(i) for i in range(len(all_files_in_dir))]):
            ### the list case
            return [load_results(os.path.join(data_path, str(i))) for i in range(len(os.listdir(data_path)))]
        else:
            if len(all_files_in_dir) == 1:
                file_name = all_files_in_dir[0]
                if file_name == "default.jsonl": 
                    return load_results(os.path.join(data_path, file_name))
                else: ### the dict case
                    return {file_name: load_results(os.path.join(data_path, file_name))}
            else:
                return {file_name: load_results(os.path.join(data_path, file_name)) for file_name in all_files_in_dir}
        
    elif data_path.endswith(".jsonl"):
        df = pd.read_json(data_path, orient="records", lines=True)
        if "index" in df.columns:
            df = df.set_index("index")
        return df
    
    else:
        with open(data_path, "r") as f:
            data = f.read()
        if data == "":
            return None
        else:
            return data



if __name__ == "__main__":
    print(f"Refreshing leaderboard statistics...")
    all_data_tasks, boards_data = refresh_leaderboard()
    print(f"Done calculating, saving...")
    # Save them so that the leaderboard can use them. They're quite complex,
    #   but we can't use pickle files because of git-lfs.
    write_out_results(all_data_tasks, "all_data_tasks")
    write_out_results(boards_data, "boards_data")

    # to load them use
    # all_data_tasks = load_results("all_data_tasks")
    # boards_data = load_results("boards_data")
    print("Done saving results!")