versae committed on
Commit 1cf25f8
1 Parent(s): 619f557

Create new file

Files changed (1)
  1. translator.py +41 -0
translator.py ADDED
@@ -0,0 +1,41 @@
+ from functools import partial
+
+ import torch
+
+ from datasets import load_dataset
+ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+
+
+ model_name = "facebook/nllb-200-3.3B"  # "facebook/nllb-200-distilled-600M"
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name, use_auth_token=True, torch_dtype=torch.float32)
+ model.to(device, torch.float32, True)
+ tokenizer = AutoTokenizer.from_pretrained(
+     model_name, use_auth_token=True, src_lang="eng_Latn"
+ )
+
+
+ def to_lang_code(text, lang_code):
+     inputs = tokenizer(text, return_tensors="pt").to(device)
+     translated_tokens = model.generate(
+         **inputs,
+         forced_bos_token_id=tokenizer.lang_code_to_id[lang_code],
+         max_length=int(len(inputs.tokens()) * 1.5)  # 50% more tokens for the translation just in case
+     )
+     return tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
+
+
+ if __name__ == "__main__":
+     languages = (("nb", "nob_Latn"), ("nn", "nno_Latn"))
+     ds = load_dataset("paws-x", "en")
+     dss = {}
+     for lang, translate_code in languages:
+         translate = partial(to_lang_code, lang_code=translate_code)
+         dss[lang] = ds.map(lambda example: {
+             "sentence1": translate(example["sentence1"]),
+             "sentence2": translate(example["sentence2"]),
+         }, desc=f"Translating to {lang}")
+         for split in ("test", "validation", "train"):
+             json_lines = dss[lang][split].to_pandas().to_json(orient='records', lines=True)
+             with open(f"{lang}_{split}.json", "w") as json_file:
+                 json_file.write(json_lines)
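
A minimal smoke-test sketch for the to_lang_code helper added above, not part of the commit itself: it assumes translator.py is importable from the working directory and that the NLLB checkpoint can be downloaded (importing the module runs its top-level code, which loads the model). The test sentence and language loop are illustrative only.

# Hypothetical smoke test; not included in this commit.
# Importing translator executes its top-level code, so the NLLB model
# is downloaded and loaded before to_lang_code can be called.
from translator import to_lang_code

# Translate one English sentence into Norwegian Bokmål and Nynorsk.
for code in ("nob_Latn", "nno_Latn"):
    print(code, to_lang_code("This is a simple test sentence.", code))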