Add/update the quantized ONNX model files and README.md for Transformers.js v3
#1
by whitphx (HF Staff) · opened
- README.md +3 -3
- onnx/model.onnx +3 -0
- onnx/model_bnb4.onnx +3 -0
- onnx/model_fp16.onnx +3 -0
- onnx/model_int8.onnx +3 -0
- onnx/model_q4.onnx +3 -0
- onnx/model_q4f16.onnx +3 -0
- onnx/model_uint8.onnx +3 -0
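Together these files give Transformers.js v3 one ONNX graph per quantization level. Under v3 the variant is typically selected with the pipeline's `dtype` option; a minimal sketch, assuming the dtype names map onto the file suffixes added here:

```js
import { pipeline } from '@huggingface/transformers';

// Assumed mapping (not stated in this PR): 'fp32' → model.onnx,
// 'fp16' → model_fp16.onnx, 'int8' → model_int8.onnx,
// 'uint8' → model_uint8.onnx, 'q4' → model_q4.onnx,
// 'q4f16' → model_q4f16.onnx, 'bnb4' → model_bnb4.onnx.
const generator = await pipeline('text-generation', 'Xenova/gpt2', {
  dtype: 'q4', // illustrative choice of variant
});
```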
README.md
CHANGED
@@ -7,15 +7,15 @@ https://huggingface.co/gpt2 with ONNX weights to be compatible with Transformers
 
 ## Usage (Transformers.js)
 
-If you haven't already, you can install the [Transformers.js](https://huggingface.co/docs/transformers.js) JavaScript library from [NPM](https://www.npmjs.com/package/@
+If you haven't already, you can install the [Transformers.js](https://huggingface.co/docs/transformers.js) JavaScript library from [NPM](https://www.npmjs.com/package/@huggingface/transformers) using:
 ```bash
-npm i @
+npm i @huggingface/transformers
 ```
 
 You can then use the model to generate text as follows:
 
 ```js
-import { pipeline } from '@
+import { pipeline } from '@huggingface/transformers';
 
 // Create a text-generation pipeline
 const generator = await pipeline('text-generation', 'Xenova/gpt2');
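The README snippet in this diff stops after the pipeline is created; a minimal sketch of the remaining usage, with the prompt and generation options as illustrative assumptions rather than part of the diff:

```js
// Generate text with the pipeline created above.
const output = await generator('Once upon a time,', {
  max_new_tokens: 30, // illustrative value, not taken from this PR
});
console.log(output[0].generated_text);
```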
onnx/model.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6aa1de057398705e99e1088f7ff2ef1a04edb19f7a4f2a0ae36fdd76c8d0349
+size 500799139
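Each of the added .onnx files is committed as a Git LFS pointer like the one above: the repository stores only the spec version, the sha256 oid, and the byte size, while the weights themselves live in LFS storage. A hedged Node.js sketch of checking a downloaded file against its pointer (the `verifyLfsPointer` helper is illustrative, not part of this PR):

```js
import { createHash } from 'node:crypto';
import { createReadStream, statSync } from 'node:fs';

// Compare a downloaded file against the oid/size recorded in its LFS pointer.
async function verifyLfsPointer(path, expectedOid, expectedSize) {
  if (statSync(path).size !== expectedSize) return false;
  const hash = createHash('sha256');
  for await (const chunk of createReadStream(path)) hash.update(chunk);
  return hash.digest('hex') === expectedOid;
}

console.log(
  await verifyLfsPointer(
    'onnx/model.onnx',
    'd6aa1de057398705e99e1088f7ff2ef1a04edb19f7a4f2a0ae36fdd76c8d0349',
    500799139,
  ),
);
```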
onnx/model_bnb4.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eca5fb461d8a6881d975e1db7324d57a007a295e644f470407741e9637d2a23f
+size 499625644
onnx/model_fp16.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85e1b7732e8b9a89ddf7e4c113d903393905db8a5e83fb3f8110a9958dccfd9f
+size 250753380
onnx/model_int8.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d64699534bb055dcfe1403831bc96a68e53193a530bc9730e52f76793b3cb680
+size 281199403
onnx/model_q4.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eca5fb461d8a6881d975e1db7324d57a007a295e644f470407741e9637d2a23f
+size 499625644
onnx/model_q4f16.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe3cfcdbb15819e9f674c0d803197a59f6ed48df23bf7d98924d440a607e4365
+size 250753399
onnx/model_uint8.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b16e94c9f40d654a5d40a32ed9838f401e0b28b9f8b2aeda03643e30d666c680
+size 281199428