Upload Pipeline
- config.json +235 -235
- model.safetensors +2 -2
config.json
CHANGED
@@ -1,235 +1,235 @@
-{
-  "inputs": [
-    "images"
-  ],
-  "modules": {
-    "avg_pool": {
-      "config": {
-        "args": {
-          "output_size": [
-            null,
-            1
-          ]
-        }
-      },
-      "type": "
-    },
-    "feature_extraction": {
-      "config": {
-        "args": {
-          "input_channel": 1,
-          "output_channel": 512,
-          "variant": "DTRB"
-        }
-      },
-      "type": "DeepTextRecognition.ResNetModel"
-    },
-    "permute": {
-      "config": {
-        "args": {
-          "dims": [
-            0,
-            3,
-            1,
-            2
-          ]
-        }
-      },
-      "type": "
-    },
-    "prediction": {
-      "config": {
-        "args": {
-          "in_features": 256,
-          "out_features": 37
-        }
-      },
-      "type": "torch.nn.Linear"
-    },
-    "processing": {
-      "config": {
-        "args": {
-          "channels_size": 1,
-          "image_size": [
-            32,
-            100
-          ],
-          "padding": "left"
-        }
-      },
-      "type": "DeepTextRecognition.ImageProcessor"
-    },
-    "sequence_modeling": {
-      "config": {
-        "args": {
-          "hidden_sizes": [
-            256,
-            256
-          ],
-          "input_size": 512,
-          "output_size": 256
-        }
-      },
-      "type": "DeepTextRecognition.BiLSTMModel"
-    },
-    "squeeze": {
-      "config": {
-        "args": {
-          "dim": 3
-        }
-      },
-      "type": "
-    },
-    "tokenizer": {
-      "config": {
-        "args": {
-          "characters": [
-            "0",
-            "1",
-            "2",
-            "3",
-            "4",
-            "5",
-            "6",
-            "7",
-            "8",
-            "9",
-            "a",
-            "b",
-            "c",
-            "d",
-            "e",
-            "f",
-            "g",
-            "h",
-            "i",
-            "j",
-            "k",
-            "l",
-            "m",
-            "n",
-            "o",
-            "p",
-            "q",
-            "r",
-            "s",
-            "t",
-            "u",
-            "v",
-            "w",
-            "x",
-            "y",
-            "z"
-          ],
-          "max_length": 25
-        }
-      },
-      "type": "DeepTextRecognition.CTCTokenizer"
-    },
-    "transformation": {
-      "config": {
-        "args": {
-          "F": 20,
-          "I_channel_num": 1,
-          "I_r_size": [
-            32,
-            100
-          ],
-          "I_size": [
-            32,
-            100
-          ]
-        }
-      },
-      "type": "DeepTextRecognition.TPSModel"
-    }
-  },
-  "order": [
-    "processing",
-    "transformation",
-    "feature_extraction",
-    "permute",
-    "avg_pool",
-    "squeeze",
-    "sequence_modeling",
-    "prediction",
-    "tokenizer"
-  ],
-  "outputs": [
-    "tokenizer:labels"
-  ],
-  "routing": {
-    "avg_pool": {
-      "inputs": [
-        "permute:permuted_features"
-      ],
-      "outputs": [
-        "avg_pool:pooled_features"
-      ]
-    },
-    "feature_extraction": {
-      "inputs": [
-        "transformation:transformed_images"
-      ],
-      "outputs": [
-        "feature_extraction:extracted_features"
-      ]
-    },
-    "permute": {
-      "inputs": [
-        "feature_extraction:extracted_features"
-      ],
-      "outputs": [
-        "permute:permuted_features"
-      ]
-    },
-    "prediction": {
-      "inputs": [
-        "sequence_modeling:modeled_features"
-      ],
-      "outputs": [
-        "prediction:predictions"
-      ]
-    },
-    "processing": {
-      "inputs": [
-        "images"
-      ],
-      "outputs": [
-        "processing:processed_images"
-      ]
-    },
-    "sequence_modeling": {
-      "inputs": [
-        "squeeze:squeezed_features"
-      ],
-      "outputs": [
-        "sequence_modeling:modeled_features"
-      ]
-    },
-    "squeeze": {
-      "inputs": [
-        "avg_pool:pooled_features"
-      ],
-      "outputs": [
-        "squeeze:squeezed_features"
-      ]
-    },
-    "tokenizer": {
-      "inputs": [
-        "prediction:predictions"
-      ],
-      "outputs": [
-        "tokenizer:labels"
-      ]
-    },
-    "transformation": {
-      "inputs": [
-        "processing:processed_images"
-      ],
-      "outputs": [
-        "transformation:transformed_images"
-      ]
-    }
-  }
-}
+{
+  "inputs": [
+    "images"
+  ],
+  "modules": {
+    "avg_pool": {
+      "config": {
+        "args": {
+          "output_size": [
+            null,
+            1
+          ]
+        }
+      },
+      "type": "DeepTextRecognition.AdaptiveAvgPoolModule"
+    },
+    "feature_extraction": {
+      "config": {
+        "args": {
+          "input_channel": 1,
+          "output_channel": 512,
+          "variant": "DTRB"
+        }
+      },
+      "type": "DeepTextRecognition.ResNetModel"
+    },
+    "permute": {
+      "config": {
+        "args": {
+          "dims": [
+            0,
+            3,
+            1,
+            2
+          ]
+        }
+      },
+      "type": "DeepTextRecognition.PermuteModule"
+    },
+    "prediction": {
+      "config": {
+        "args": {
+          "in_features": 256,
+          "out_features": 37
+        }
+      },
+      "type": "torch.nn.Linear"
+    },
+    "processing": {
+      "config": {
+        "args": {
+          "channels_size": 1,
+          "image_size": [
+            32,
+            100
+          ],
+          "padding": "left"
+        }
+      },
+      "type": "DeepTextRecognition.ImageProcessor"
+    },
+    "sequence_modeling": {
+      "config": {
+        "args": {
+          "hidden_sizes": [
+            256,
+            256
+          ],
+          "input_size": 512,
+          "output_size": 256
+        }
+      },
+      "type": "DeepTextRecognition.BiLSTMModel"
+    },
+    "squeeze": {
+      "config": {
+        "args": {
+          "dim": 3
+        }
+      },
+      "type": "DeepTextRecognition.SqueezeModule"
+    },
+    "tokenizer": {
+      "config": {
+        "args": {
+          "characters": [
+            "0",
+            "1",
+            "2",
+            "3",
+            "4",
+            "5",
+            "6",
+            "7",
+            "8",
+            "9",
+            "a",
+            "b",
+            "c",
+            "d",
+            "e",
+            "f",
+            "g",
+            "h",
+            "i",
+            "j",
+            "k",
+            "l",
+            "m",
+            "n",
+            "o",
+            "p",
+            "q",
+            "r",
+            "s",
+            "t",
+            "u",
+            "v",
+            "w",
+            "x",
+            "y",
+            "z"
+          ],
+          "max_length": 25
+        }
+      },
+      "type": "DeepTextRecognition.CTCTokenizer"
+    },
+    "transformation": {
+      "config": {
+        "args": {
+          "F": 20,
+          "I_channel_num": 1,
+          "I_r_size": [
+            32,
+            100
+          ],
+          "I_size": [
+            32,
+            100
+          ]
+        }
+      },
+      "type": "DeepTextRecognition.TPSModel"
+    }
+  },
+  "order": [
+    "processing",
+    "transformation",
+    "feature_extraction",
+    "permute",
+    "avg_pool",
+    "squeeze",
+    "sequence_modeling",
+    "prediction",
+    "tokenizer"
+  ],
+  "outputs": [
+    "tokenizer:labels"
+  ],
+  "routing": {
+    "avg_pool": {
+      "inputs": [
+        "permute:permuted_features"
+      ],
+      "outputs": [
+        "avg_pool:pooled_features"
+      ]
+    },
+    "feature_extraction": {
+      "inputs": [
+        "transformation:transformed_images"
+      ],
+      "outputs": [
+        "feature_extraction:extracted_features"
+      ]
+    },
+    "permute": {
+      "inputs": [
+        "feature_extraction:extracted_features"
+      ],
+      "outputs": [
+        "permute:permuted_features"
+      ]
+    },
+    "prediction": {
+      "inputs": [
+        "sequence_modeling:modeled_features"
+      ],
+      "outputs": [
+        "prediction:predictions"
+      ]
+    },
+    "processing": {
+      "inputs": [
+        "images"
+      ],
+      "outputs": [
+        "processing:processed_images"
+      ]
+    },
+    "sequence_modeling": {
+      "inputs": [
+        "squeeze:squeezed_features"
+      ],
+      "outputs": [
+        "sequence_modeling:modeled_features"
+      ]
+    },
+    "squeeze": {
+      "inputs": [
+        "avg_pool:pooled_features"
+      ],
+      "outputs": [
+        "squeeze:squeezed_features"
+      ]
+    },
+    "tokenizer": {
+      "inputs": [
+        "prediction:predictions"
+      ],
+      "outputs": [
+        "tokenizer:labels"
+      ]
+    },
+    "transformation": {
+      "inputs": [
+        "processing:processed_images"
+      ],
+      "outputs": [
+        "transformation:transformed_images"
+      ]
+    }
+  }
+}
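The new config.json describes the text-recognition pipeline declaratively: "inputs" names the pipeline-level inputs, "modules" maps each module name to a "type" (a dotted class path) and its constructor "args", "order" fixes the execution sequence, "routing" wires each module's named inputs and outputs (keys such as "permute:permuted_features" name a producing module and its output tensor), and "outputs" selects what the pipeline returns. The prediction head's 37 out_features line up with the tokenizer's 36 characters plus, presumably, a CTC blank. Below is a minimal sketch of how such a config could be executed; the build_module factory and the module call convention are assumptions for illustration, since the DeepTextRecognition runtime itself is not part of this commit.

# Minimal sketch of driving the pipeline from config.json.
# build_module() and the call convention are assumptions, not the library's API.
import json

def build_module(type_name, args):
    # Hypothetical factory: resolve a dotted class path and instantiate it.
    module_path, _, class_name = type_name.rpartition(".")
    cls = getattr(__import__(module_path, fromlist=[class_name]), class_name)
    return cls(**args)

def run_pipeline(config_path, images):
    with open(config_path) as f:
        cfg = json.load(f)

    modules = {
        name: build_module(spec["type"], spec["config"]["args"])
        for name, spec in cfg["modules"].items()
    }

    values = {"images": images}           # named tensors flowing through the graph
    for name in cfg["order"]:             # processing -> transformation -> ... -> tokenizer
        route = cfg["routing"][name]
        inputs = [values[key] for key in route["inputs"]]
        outputs = modules[name](*inputs)   # assumed call convention
        if not isinstance(outputs, tuple):
            outputs = (outputs,)
        values.update(zip(route["outputs"], outputs))

    return [values[key] for key in cfg["outputs"]]   # ["tokenizer:labels"]

Keeping intermediate tensors under "module:tensor" keys lets the routing table wire any module's output to any later module's input without relying on positional order alone.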
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f03a7e38a48e115d7de35b1dfddf330d0ee8476541175f6a11800094c290210b
+size 195868384
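model.safetensors is tracked with Git LFS, so the repository stores only a pointer file (the object's sha256 and byte size) while the 195868384-byte weight file lives in LFS storage; this commit points it at a new object. A short sketch of verifying a downloaded copy against the pointer and loading the tensors, assuming the file sits next to the script and contains PyTorch-format tensors:

# Check a downloaded model.safetensors against the LFS pointer, then load it.
import hashlib
import os

from safetensors.torch import load_file

EXPECTED_OID = "f03a7e38a48e115d7de35b1dfddf330d0ee8476541175f6a11800094c290210b"
EXPECTED_SIZE = 195868384
path = "model.safetensors"   # assumed local path, e.g. after `git lfs pull`

assert os.path.getsize(path) == EXPECTED_SIZE, "size does not match LFS pointer"

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):   # hash in 1 MiB chunks
        sha.update(chunk)
assert sha.hexdigest() == EXPECTED_OID, "sha256 does not match LFS pointer"

state_dict = load_file(path)   # dict of tensor name -> torch.Tensor
print(f"loaded {len(state_dict)} tensors")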