ORI-Muchim committed
Commit 106c4c3
Parent: cbd5e09

Upload cleaners.py

Files changed (1):
  1. text/cleaners.py +128 -0
text/cleaners.py ADDED
@@ -0,0 +1,128 @@
+ import re
+ from text.japanese import japanese_to_romaji_with_accent, japanese_to_ipa, japanese_to_ipa2, japanese_to_ipa3
+ from text.korean import latin_to_hangul, number_to_hangul, divide_hangul, korean_to_lazy_ipa, korean_to_ipa
+ #from text.mandarin import number_to_chinese, chinese_to_bopomofo, latin_to_bopomofo, chinese_to_romaji, chinese_to_lazy_ipa, chinese_to_ipa, chinese_to_ipa2
+ #from text.sanskrit import devanagari_to_ipa
+ from text.english import english_to_lazy_ipa, english_to_ipa2, english_to_lazy_ipa2
+ #from text.thai import num_to_thai, latin_to_thai
+ #from text.shanghainese import shanghainese_to_ipa
+ #from text.cantonese import cantonese_to_ipa
+ #from text.ngu_dialect import ngu_dialect_to_ipa
+
+
+ def japanese_cleaners(text):
+     text = japanese_to_romaji_with_accent(text)
+     text = re.sub(r'([A-Za-z])$', r'\1.', text)
+     return text
+
+
+ def japanese_cleaners2(text):
+     return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…')
+
+
+ def korean_cleaners(text):
+     '''Pipeline for Korean text'''
+     text = latin_to_hangul(text)
+     text = number_to_hangul(text)
+     text = divide_hangul(text)
+     text = re.sub(r'([\u3131-\u3163])$', r'\1.', text)
+     return text
+
+
+ def chinese_cleaners(text):
+     '''Pipeline for Chinese text'''
+     text = number_to_chinese(text)
+     text = chinese_to_bopomofo(text)
+     text = latin_to_bopomofo(text)
+     text = re.sub(r'([ˉˊˇˋ˙])$', r'\1。', text)
+     return text
+
+
+ def zh_ja_mixture_cleaners(text):
+     text = re.sub(r'\[ZH\](.*?)\[ZH\]',
+                   lambda x: chinese_to_romaji(x.group(1))+' ', text)
+     text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_romaji_with_accent(
+         x.group(1)).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…')+' ', text)
+     text = re.sub(r'\s+$', '', text)
+     text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
+     return text
+
+
+ def sanskrit_cleaners(text):
+     text = text.replace('॥', '।').replace('ॐ', 'ओम्')
+     text = re.sub(r'([^।])$', r'\1।', text)
+     return text
+
+
+ def cjks_cleaners(text):
+     text = re.sub(r'\[ZH\](.*?)\[ZH\]',
+                   lambda x: chinese_to_lazy_ipa(x.group(1))+' ', text)
+     text = re.sub(r'\[JA\](.*?)\[JA\]',
+                   lambda x: japanese_to_ipa(x.group(1))+' ', text)
+     text = re.sub(r'\[KO\](.*?)\[KO\]',
+                   lambda x: korean_to_lazy_ipa(x.group(1))+' ', text)
+     text = re.sub(r'\[SA\](.*?)\[SA\]',
+                   lambda x: devanagari_to_ipa(x.group(1))+' ', text)
+     text = re.sub(r'\[EN\](.*?)\[EN\]',
+                   lambda x: english_to_lazy_ipa(x.group(1))+' ', text)
+     text = re.sub(r'\s+$', '', text)
+     text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
+     return text
+
+
+ def cjke_cleaners(text):
+     text = re.sub(r'\[ZH\](.*?)\[ZH\]', lambda x: chinese_to_lazy_ipa(x.group(1)).replace(
+         'ʧ', 'tʃ').replace('ʦ', 'ts').replace('ɥan', 'ɥæn')+' ', text)
+     text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_ipa(x.group(1)).replace('ʧ', 'tʃ').replace(
+         'ʦ', 'ts').replace('ɥan', 'ɥæn').replace('ʥ', 'dz')+' ', text)
+     text = re.sub(r'\[KO\](.*?)\[KO\]',
+                   lambda x: korean_to_ipa(x.group(1))+' ', text)
+     text = re.sub(r'\[EN\](.*?)\[EN\]', lambda x: english_to_ipa2(x.group(1)).replace('ɑ', 'a').replace(
+         'ɔ', 'o').replace('ɛ', 'e').replace('ɪ', 'i').replace('ʊ', 'u')+' ', text)
+     text = re.sub(r'\s+$', '', text)
+     text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
+     return text
+
+
+ def cjke_cleaners2(text):
+     text = re.sub(r'\[ZH\](.*?)\[ZH\]',
+                   lambda x: chinese_to_ipa(x.group(1))+' ', text)
+     text = re.sub(r'\[JA\](.*?)\[JA\]',
+                   lambda x: japanese_to_ipa2(x.group(1))+' ', text)
+     text = re.sub(r'\[KO\](.*?)\[KO\]',
+                   lambda x: korean_to_ipa(x.group(1))+' ', text)
+     text = re.sub(r'\[EN\](.*?)\[EN\]',
+                   lambda x: english_to_ipa2(x.group(1))+' ', text)
+     text = re.sub(r'\s+$', '', text)
+     text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
+     return text
+
+
+ def thai_cleaners(text):
+     text = num_to_thai(text)
+     text = latin_to_thai(text)
+     return text
+
+
+ def shanghainese_cleaners(text):
+     text = shanghainese_to_ipa(text)
+     text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
+     return text
+
+
+ def chinese_dialect_cleaners(text):
+     text = re.sub(r'\[ZH\](.*?)\[ZH\]',
+                   lambda x: chinese_to_ipa2(x.group(1))+' ', text)
+     text = re.sub(r'\[JA\](.*?)\[JA\]',
+                   lambda x: japanese_to_ipa3(x.group(1)).replace('Q', 'ʔ')+' ', text)
+     text = re.sub(r'\[SH\](.*?)\[SH\]', lambda x: shanghainese_to_ipa(x.group(1)).replace('1', '˥˧').replace('5',
+                   '˧˧˦').replace('6', '˩˩˧').replace('7', '˥').replace('8', '˩˨').replace('ᴀ', 'ɐ').replace('ᴇ', 'e')+' ', text)
+     text = re.sub(r'\[GD\](.*?)\[GD\]',
+                   lambda x: cantonese_to_ipa(x.group(1))+' ', text)
+     text = re.sub(r'\[EN\](.*?)\[EN\]',
+                   lambda x: english_to_lazy_ipa2(x.group(1))+' ', text)
+     text = re.sub(r'\[([A-Z]{2})\](.*?)\[\1\]', lambda x: ngu_dialect_to_ipa(x.group(2), x.group(
+         1)).replace('ʣ', 'dz').replace('ʥ', 'dʑ').replace('ʦ', 'ts').replace('ʨ', 'tɕ')+' ', text)
+     text = re.sub(r'\s+$', '', text)
+     text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
+     return text
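
For reference, below is a minimal usage sketch of the cleaners added above; it is not part of the commit. The import path follows the text.* package layout implied by the file's own imports, and the sample strings are illustrative assumptions. Note that the cleaners relying on the commented-out imports (Mandarin, Sanskrit, Thai, Shanghainese, Cantonese, ngu_dialect) will raise NameError once those code paths are reached, while korean_cleaners, the Japanese cleaners, and the [JA]/[KO]/[EN] branches of the multilingual cleaners work with the imports that remain active.

# Illustrative only: assumes this file is importable as text.cleaners and that
# the text.japanese, text.korean and text.english modules are present
# (their imports are active in the file above).
from text import cleaners

# Korean pipeline: Latin and number normalization, decomposition into jamo,
# and a trailing period if the text ends on a jamo character.
print(cleaners.korean_cleaners('안녕하세요'))

# Japanese pipeline: romaji with accent marks, then 'ts' -> 'ʦ' and '...' -> '…'.
print(cleaners.japanese_cleaners2('こんにちは'))

# Multilingual IPA pipeline: each segment is wrapped in language tags.
# The [ZH] branch would need the commented-out text.mandarin import, but its
# lambda is only invoked when a [ZH] segment is actually present.
print(cleaners.cjke_cleaners2('[KO]안녕하세요[KO] [EN]Hello[EN]'))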