File size: 1,098 Bytes
bc737db
 
35f3b0c
 
 
 
3060464
 
33aec36
0bd1a97
33aec36
0bd1a97
33aec36
0bd1a97
3060464
0bd1a97
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33aec36
0bd1a97
33aec36
0bd1a97
33aec36
0bd1a97
33aec36
0bd1a97
33aec36
0bd1a97
 
 
 
 
 
 
 
 
 
 
 
 
33aec36
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
---
library_name: transformers
license: mit
language:
- fa
pipeline_tag: token-classification
---
Named Entity Recognition on a Persian dataset

train dataset = 20,484 Persian sentences

validation dataset = 2,561 Persian sentences

AutoTokenizer=HooshvareLab/bert-fa-base-uncased

ner_tags=
['O', 'B-pro',
'I-pro',
'B-pers', 
'I-pers', 
'B-org', 
'I-org',
'B-loc',
'I-loc', 
'B-fac',
'I-fac', 
'B-event',
'I-event']

training_args=
    learning_rate=2e-5,
    
    per_device_train_batch_size=16,
    
    per_device_eval_batch_size=16,
    
    num_train_epochs=4,
    
    weight_decay=0.01
    

Training Loss=0.001000

sample1:
  'entity': 'B-loc',
  'score': 0.9998902,
  'index': 2,
  'word': 'تهران',

sample2:
  'entity': 'B-pers',
  'score': 0.99988234,
  'index': 2,
  'word': 'عباس',


To use this model:

    from transformers import pipeline
    
    pipe = pipeline("token-classification", model="NLPclass/Named_entity_recognition_persian")

    sentence = ""

    predicted_ner = pipe(sentence)
    
    for entity in predicted_ner:
    
        print(f"Entity: {entity['word']}, Label: {entity['entity']}")