Upload v2 ONNX model for token classification
model.onnx             +1 -1
tokenizer.json         +1 -6
tokenizer_config.json  +0 -4
model.onnx
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:aca8cd70251c19d42a6ff6609da681bb343edb766c046f88f376ab1411b0333e
 size 435877656
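The pointer now carries the v2 graph. A quick way to smoke-test it locally, assuming the export is optimum-compatible (the repo id below is a placeholder, not this repository's actual id):

```python
# Minimal sketch: load the uploaded ONNX graph and run it through a
# token-classification pipeline. Assumes an optimum-compatible export;
# the repo id is a placeholder.
from optimum.onnxruntime import ORTModelForTokenClassification
from transformers import AutoTokenizer, pipeline

repo_id = "your-namespace/your-repo"  # placeholder

model = ORTModelForTokenClassification.from_pretrained(repo_id, file_name="model.onnx")
tokenizer = AutoTokenizer.from_pretrained(repo_id)

ner = pipeline("token-classification", model=model, tokenizer=tokenizer)
print(ner("Hugging Face is based in New York City."))
```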
tokenizer.json
CHANGED
@@ -1,11 +1,6 @@
 {
   "version": "1.0",
-  "truncation": {
-    "direction": "Right",
-    "max_length": 512,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
+  "truncation": null,
   "padding": null,
   "added_tokens": [
     {
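Note that with `"truncation": null` the tokenizer no longer truncates on its own; callers have to opt in at encode time. A minimal sketch with the tokenizers library, using the 512 limit that `model_max_length` still records:

```python
# Sketch: truncation is now opt-in at call time, since tokenizer.json
# sets "truncation": null. The 512 cap mirrors model_max_length.
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")
tok.enable_truncation(max_length=512)  # re-enable explicitly when needed
encoding = tok.encode("a long input " * 300)
print(len(encoding.ids))  # stays at or under 512
```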
tokenizer_config.json
CHANGED
@@ -46,15 +46,11 @@
   "do_lower_case": true,
   "extra_special_tokens": {},
   "mask_token": "[MASK]",
-  "max_length": 512,
   "model_max_length": 512,
   "pad_token": "[PAD]",
   "sep_token": "[SEP]",
-  "stride": 0,
   "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "BertTokenizer",
-  "truncation_side": "right",
-  "truncation_strategy": "longest_first",
   "unk_token": "[UNK]"
 }
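The dropped keys (`max_length`, `stride`, `truncation_side`, `truncation_strategy`) are per-call encode arguments rather than persistent tokenizer config, so they are better passed at call time. A sketch with transformers (placeholder repo id):

```python
# Sketch: pass the formerly-persisted options per call instead.
# The repo id is a placeholder.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("your-namespace/your-repo")
tokenizer.truncation_side = "right"  # was truncation_side; "right" is the default
batch = tokenizer(
    "an example sentence",
    truncation="longest_first",  # was truncation_strategy
    max_length=512,              # was max_length
    stride=0,                    # was stride
)
print(len(batch["input_ids"]))
```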