Initial commit
- .gitignore +1 -0
- LICENSE +0 -0
- LICENSE_NOTICE.txt +7 -0
- README.md +27 -1
- nvidia_parakeet-v2/AudioEncoder.mlmodelc/analytics/coremldata.bin +3 -0
- nvidia_parakeet-v2/AudioEncoder.mlmodelc/coremldata.bin +3 -0
- nvidia_parakeet-v2/AudioEncoder.mlmodelc/metadata.json +84 -0
- nvidia_parakeet-v2/AudioEncoder.mlmodelc/model.mil +0 -0
- nvidia_parakeet-v2/AudioEncoder.mlmodelc/weights/weight.bin +3 -0
- nvidia_parakeet-v2/LICENSE_NOTICE.txt +7 -0
- nvidia_parakeet-v2/MelSpectrogram.mlmodelc/analytics/coremldata.bin +3 -0
- nvidia_parakeet-v2/MelSpectrogram.mlmodelc/coremldata.bin +3 -0
- nvidia_parakeet-v2/MelSpectrogram.mlmodelc/metadata.json +75 -0
- nvidia_parakeet-v2/MelSpectrogram.mlmodelc/model.mil +76 -0
- nvidia_parakeet-v2/MelSpectrogram.mlmodelc/weights/weight.bin +3 -0
- nvidia_parakeet-v2/MultimodalLogits.mlmodelc/analytics/coremldata.bin +3 -0
- nvidia_parakeet-v2/MultimodalLogits.mlmodelc/coremldata.bin +3 -0
- nvidia_parakeet-v2/MultimodalLogits.mlmodelc/metadata.json +74 -0
- nvidia_parakeet-v2/MultimodalLogits.mlmodelc/model.mil +15 -0
- nvidia_parakeet-v2/MultimodalLogits.mlmodelc/weights/weight.bin +3 -0
- nvidia_parakeet-v2/TextDecoder.mlmodelc/analytics/coremldata.bin +3 -0
- nvidia_parakeet-v2/TextDecoder.mlmodelc/coremldata.bin +3 -0
- nvidia_parakeet-v2/TextDecoder.mlmodelc/metadata.json +107 -0
- nvidia_parakeet-v2/TextDecoder.mlmodelc/model.mil +64 -0
- nvidia_parakeet-v2/TextDecoder.mlmodelc/weights/weight.bin +3 -0
.gitignore
ADDED
@@ -0,0 +1 @@
+*.DS_Store
LICENSE
DELETED
File without changes
LICENSE_NOTICE.txt
ADDED
@@ -0,0 +1,7 @@
+Argmax proprietary and confidential. Under NDA.
+
+Copyright 2024 Argmax, Inc. All rights reserved.
+
+Unauthorized access, copying, use, distribution, and/or commercialization of this file, via any medium or means, is strictly prohibited.
+
+Please contact Argmax for licensing information at [email protected].
README.md
CHANGED
@@ -1,5 +1,31 @@
 ---
 license: other
 license_name: argmax-fmod-license
-license_link:
+license_link: https://huggingface.co/argmaxinc/parakeetkit-pro/blob/main/LICENSE_NOTICE.txt
+pretty_name: "ParakeetKit"
+viewer: false
+library_name: whisperkit
+tags:
+- whisper
+- whisperkit
+- parakeet
+- nvidia
+- openai
+- coreml
+- asr
+- transcription
+- local
+- on-device
+- quantized
+- compressed
+- automatic-speech-recognition
+extra_gated_heading: "ParakeetKit Pro (Part of Argmax SDK)"
+extra_gated_description: "ParakeetKit Pro deploys Nvidia Parakeet models on Apple Silicon. Request access to [Argmax SDK](https://www.argmaxinc.com/#request-access)"
+extra_gated_fields:
+  Company: text
+  Work email: text
+  I acknowledge the license notice: checkbox
+extra_gated_button_content: "Submit"
 ---
+
+# ParakeetKit Pro
nvidia_parakeet-v2/AudioEncoder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:692d0a743521402679dbfab76b054448ddb2fe9e8f26ed87252ab73a17080ac3
+size 243
nvidia_parakeet-v2/AudioEncoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:49b5a4d69d29bd453b790affeb44eef4155ecba0cc19cc6dfb37515eacc66b7f
+size 405
nvidia_parakeet-v2/AudioEncoder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,84 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Float16",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1024 × 1 × 188)",
+        "shortDescription" : "",
+        "shape" : "[1, 1024, 1, 188]",
+        "name" : "encoder_output_embeds",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 640 × 1 × 188)",
+        "shortDescription" : "",
+        "shape" : "[1, 640, 1, 188]",
+        "name" : "joint_projected_encoder_output_embeds",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 8,
+    "mlProgramOperationTypeHistogram" : {
+      "Ios16.silu" : 72,
+      "Ios17.mul" : 72,
+      "Split" : 24,
+      "Ios17.transpose" : 1,
+      "Ios17.matmul" : 72,
+      "Ios17.conv" : 295,
+      "Ios16.sigmoid" : 24,
+      "Ios17.add" : 168,
+      "Ios17.sliceByIndex" : 48,
+      "Ios17.batchNorm" : 120,
+      "Ios16.relu" : 3,
+      "Ios16.softmax" : 24,
+      "Ios17.reshape" : 193,
+      "Ios17.layerNorm" : 120,
+      "Pad" : 24
+    },
+    "computePrecision" : "Mixed (Float16, Int32)",
+    "isUpdatable" : "0",
+    "stateSchema" : [
+
+    ],
+    "availability" : {
+      "macOS" : "14.0",
+      "tvOS" : "17.0",
+      "visionOS" : "1.0",
+      "watchOS" : "10.0",
+      "iOS" : "17.0",
+      "macCatalyst" : "17.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.6.0",
+      "com.github.apple.coremltools.version" : "8.2"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 1501 × 128)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 1501, 128]",
+        "name" : "melspectrogram_features",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "AudioEncoder",
+    "method" : "predict"
+  }
+]
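The schema above is the encoder's full calling contract: one Float16 input named `melspectrogram_features` of shape [1, 1, 1501, 128] (about 15 s of audio at a 10 ms hop) and two Float16 outputs, including a 640-dim projection consumed by the joint network. A minimal sketch, not part of this commit, of driving the compiled bundle with coremltools (assuming a coremltools version that exposes `CompiledMLModel` for `.mlmodelc` directories):

```python
# Hypothetical usage sketch based on AudioEncoder.mlmodelc/metadata.json.
import numpy as np
import coremltools as ct  # assumes CompiledMLModel is available

encoder = ct.models.CompiledMLModel("nvidia_parakeet-v2/AudioEncoder.mlmodelc")

# Float16 mel features of shape [1, 1, 1501, 128], e.g. produced by MelSpectrogram below.
mel = np.zeros((1, 1, 1501, 128), dtype=np.float16)

out = encoder.predict({"melspectrogram_features": mel})
print(out["encoder_output_embeds"].shape)                  # (1, 1024, 1, 188)
print(out["joint_projected_encoder_output_embeds"].shape)  # (1, 640, 1, 188)
```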
nvidia_parakeet-v2/AudioEncoder.mlmodelc/model.mil
ADDED
The diff for this file is too large to render. See raw diff.
nvidia_parakeet-v2/AudioEncoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:847b7f622f378e62feae92ac279191bdf7f10c1b0eba8aa772eacbb8308a13ac
+size 1219841984
nvidia_parakeet-v2/LICENSE_NOTICE.txt
ADDED
@@ -0,0 +1,7 @@
+Argmax proprietary and confidential. Under NDA.
+
+Copyright 2024 Argmax, Inc. All rights reserved.
+
+Unauthorized access, copying, use, distribution, and/or commercialization of this file, via any medium or means, is strictly prohibited.
+
+Please contact Argmax for licensing information at [email protected].
nvidia_parakeet-v2/MelSpectrogram.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f9e0ad4bff30b21d884c2cdc5bf1551b59209d9314c93a11f6bc4a21e4d26b5
+size 243
nvidia_parakeet-v2/MelSpectrogram.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ff0c946a5a04b1f3d754001c0951bea58f79e57e04124d6da0f03a09c535acd
+size 327
nvidia_parakeet-v2/MelSpectrogram.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,75 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Float16",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 128 × 1501)",
+        "shortDescription" : "",
+        "shape" : "[128, 1501]",
+        "name" : "melspectrogram_features",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 8,
+    "mlProgramOperationTypeHistogram" : {
+      "Ios17.mul" : 2,
+      "Ios17.sqrt" : 1,
+      "Ios17.square" : 3,
+      "Ios17.sub" : 2,
+      "Ios17.matmul" : 1,
+      "Ios17.conv" : 2,
+      "Ios17.log" : 1,
+      "Ios17.sliceByIndex" : 2,
+      "Ios17.add" : 3,
+      "Ios16.reduceMean" : 2,
+      "Ios17.realDiv" : 1,
+      "Ios17.expandDims" : 2,
+      "Ios17.squeeze" : 2,
+      "Ios17.reshape" : 2,
+      "Identity" : 1,
+      "Pad" : 2
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32)",
+    "isUpdatable" : "0",
+    "stateSchema" : [
+
+    ],
+    "availability" : {
+      "macOS" : "14.0",
+      "tvOS" : "17.0",
+      "visionOS" : "1.0",
+      "watchOS" : "10.0",
+      "iOS" : "17.0",
+      "macCatalyst" : "17.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.version" : "8.2",
+      "com.github.apple.coremltools.source" : "torch==2.6.0"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 240000)",
+        "shortDescription" : "",
+        "shape" : "[240000]",
+        "name" : "audio",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "MelSpectrogram",
+    "method" : "predict"
+  }
+]
nvidia_parakeet-v2/MelSpectrogram.mlmodelc/model.mil
ADDED
@@ -0,0 +1,76 @@
+program(1.0)
+[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3401.3.1"}, {"coremlc-version", "3401.4.1"}, {"coremltools-component-torch", "2.6.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.2"}})]
+{
+    func main<ios17>(tensor<fp16, [240000]> audio) {
+        tensor<int32, [1]> var_8_begin_0 = const()[name = tensor<string, []>("op_8_begin_0"), val = tensor<int32, [1]>([1])];
+        tensor<int32, [1]> var_8_end_0 = const()[name = tensor<string, []>("op_8_end_0"), val = tensor<int32, [1]>([240000])];
+        tensor<bool, [1]> var_8_end_mask_0 = const()[name = tensor<string, []>("op_8_end_mask_0"), val = tensor<bool, [1]>([true])];
+        tensor<fp16, [239999]> var_8_cast_fp16 = slice_by_index(begin = var_8_begin_0, end = var_8_end_0, end_mask = var_8_end_mask_0, x = audio)[name = tensor<string, []>("op_8_cast_fp16")];
+        tensor<int32, [1]> var_13_begin_0 = const()[name = tensor<string, []>("op_13_begin_0"), val = tensor<int32, [1]>([0])];
+        tensor<int32, [1]> var_13_end_0 = const()[name = tensor<string, []>("op_13_end_0"), val = tensor<int32, [1]>([239999])];
+        tensor<bool, [1]> var_13_end_mask_0 = const()[name = tensor<string, []>("op_13_end_mask_0"), val = tensor<bool, [1]>([false])];
+        tensor<fp16, [239999]> var_13_cast_fp16 = slice_by_index(begin = var_13_begin_0, end = var_13_end_0, end_mask = var_13_end_mask_0, x = audio)[name = tensor<string, []>("op_13_cast_fp16")];
+        tensor<fp16, []> var_14_to_fp16 = const()[name = tensor<string, []>("op_14_to_fp16"), val = tensor<fp16, []>(0x1.f0cp-1)];
+        tensor<fp16, [239999]> var_15_cast_fp16 = mul(x = var_13_cast_fp16, y = var_14_to_fp16)[name = tensor<string, []>("op_15_cast_fp16")];
+        tensor<fp16, [239999]> input_1_cast_fp16 = sub(x = var_8_cast_fp16, y = var_15_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")];
+        tensor<int32, [2]> input_3_pad_0 = const()[name = tensor<string, []>("input_3_pad_0"), val = tensor<int32, [2]>([1, 0])];
+        tensor<string, []> input_3_mode_0 = const()[name = tensor<string, []>("input_3_mode_0"), val = tensor<string, []>("constant")];
+        tensor<fp16, []> const_0_to_fp16 = const()[name = tensor<string, []>("const_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+        tensor<fp16, [240000]> input_3_cast_fp16 = pad(constant_val = const_0_to_fp16, mode = input_3_mode_0, pad = input_3_pad_0, x = input_1_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
+        tensor<int32, [3]> var_30 = const()[name = tensor<string, []>("op_30"), val = tensor<int32, [3]>([1, 1, 240000])];
+        tensor<fp16, [1, 1, 240000]> input_5_cast_fp16 = reshape(shape = var_30, x = input_3_cast_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
+        tensor<int32, [6]> input_7_pad_0 = const()[name = tensor<string, []>("input_7_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 256, 256])];
+        tensor<string, []> input_7_mode_0 = const()[name = tensor<string, []>("input_7_mode_0"), val = tensor<string, []>("reflect")];
+        tensor<fp16, []> const_2_to_fp16 = const()[name = tensor<string, []>("const_2_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+        tensor<fp16, [1, 1, 240512]> input_7_cast_fp16 = pad(constant_val = const_2_to_fp16, mode = input_7_mode_0, pad = input_7_pad_0, x = input_5_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
+        tensor<int32, [1]> var_42 = const()[name = tensor<string, []>("op_42"), val = tensor<int32, [1]>([240512])];
+        tensor<fp16, [240512]> input_cast_fp16 = reshape(shape = var_42, x = input_7_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
+        tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = tensor<string, []>("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [1, 240512]> expand_dims_0_cast_fp16 = expand_dims(axes = expand_dims_0_axes_0, x = input_cast_fp16)[name = tensor<string, []>("expand_dims_0_cast_fp16")];
+        tensor<int32, [1]> expand_dims_3 = const()[name = tensor<string, []>("expand_dims_3"), val = tensor<int32, [1]>([160])];
+        tensor<int32, [1]> expand_dims_4_axes_0 = const()[name = tensor<string, []>("expand_dims_4_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<fp16, [1, 1, 240512]> expand_dims_4_cast_fp16 = expand_dims(axes = expand_dims_4_axes_0, x = expand_dims_0_cast_fp16)[name = tensor<string, []>("expand_dims_4_cast_fp16")];
+        tensor<string, []> conv_0_pad_type_0 = const()[name = tensor<string, []>("conv_0_pad_type_0"), val = tensor<string, []>("valid")];
+        tensor<int32, [2]> conv_0_pad_0 = const()[name = tensor<string, []>("conv_0_pad_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [1]> conv_0_dilations_0 = const()[name = tensor<string, []>("conv_0_dilations_0"), val = tensor<int32, [1]>([1])];
+        tensor<int32, []> conv_0_groups_0 = const()[name = tensor<string, []>("conv_0_groups_0"), val = tensor<int32, []>(1)];
+        tensor<fp16, [257, 1, 512]> expand_dims_1_to_fp16 = const()[name = tensor<string, []>("expand_dims_1_to_fp16"), val = tensor<fp16, [257, 1, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
+        tensor<fp16, [1, 257, 1501]> conv_0_cast_fp16 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_3, weight = expand_dims_1_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_0_cast_fp16")];
+        tensor<string, []> conv_1_pad_type_0 = const()[name = tensor<string, []>("conv_1_pad_type_0"), val = tensor<string, []>("valid")];
+        tensor<int32, [2]> conv_1_pad_0 = const()[name = tensor<string, []>("conv_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
+        tensor<int32, [1]> conv_1_dilations_0 = const()[name = tensor<string, []>("conv_1_dilations_0"), val = tensor<int32, [1]>([1])];
+        tensor<int32, []> conv_1_groups_0 = const()[name = tensor<string, []>("conv_1_groups_0"), val = tensor<int32, []>(1)];
+        tensor<fp16, [257, 1, 512]> expand_dims_2_to_fp16 = const()[name = tensor<string, []>("expand_dims_2_to_fp16"), val = tensor<fp16, [257, 1, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(263296)))];
+        tensor<fp16, [1, 257, 1501]> conv_1_cast_fp16 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_3, weight = expand_dims_2_to_fp16, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_1_cast_fp16")];
+        tensor<int32, [1]> squeeze_0_axes_0 = const()[name = tensor<string, []>("squeeze_0_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [257, 1501]> squeeze_0_cast_fp16 = squeeze(axes = squeeze_0_axes_0, x = conv_0_cast_fp16)[name = tensor<string, []>("squeeze_0_cast_fp16")];
+        tensor<int32, [1]> squeeze_1_axes_0 = const()[name = tensor<string, []>("squeeze_1_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [257, 1501]> squeeze_1_cast_fp16 = squeeze(axes = squeeze_1_axes_0, x = conv_1_cast_fp16)[name = tensor<string, []>("squeeze_1_cast_fp16")];
+        tensor<fp16, [257, 1501]> square_1_cast_fp16 = square(x = squeeze_0_cast_fp16)[name = tensor<string, []>("square_1_cast_fp16")];
+        tensor<fp16, [257, 1501]> square_2_cast_fp16 = square(x = squeeze_1_cast_fp16)[name = tensor<string, []>("square_2_cast_fp16")];
+        tensor<fp16, [257, 1501]> add_1_cast_fp16 = add(x = square_1_cast_fp16, y = square_2_cast_fp16)[name = tensor<string, []>("add_1_cast_fp16")];
+        tensor<fp16, [257, 1501]> magnitudes_cast_fp16 = identity(x = add_1_cast_fp16)[name = tensor<string, []>("magnitudes_cast_fp16")];
+        tensor<bool, []> mel_spec_1_transpose_x_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_x_0"), val = tensor<bool, []>(false)];
+        tensor<bool, []> mel_spec_1_transpose_y_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_y_0"), val = tensor<bool, []>(false)];
+        tensor<fp16, [128, 257]> mel_filters_to_fp16 = const()[name = tensor<string, []>("mel_filters_to_fp16"), val = tensor<fp16, [128, 257]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(526528)))];
+        tensor<fp16, [128, 1501]> mel_spec_1_cast_fp16 = matmul(transpose_x = mel_spec_1_transpose_x_0, transpose_y = mel_spec_1_transpose_y_0, x = mel_filters_to_fp16, y = magnitudes_cast_fp16)[name = tensor<string, []>("mel_spec_1_cast_fp16")];
+        tensor<fp16, []> var_56_to_fp16 = const()[name = tensor<string, []>("op_56_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
+        tensor<fp16, [128, 1501]> mel_spec_3_cast_fp16 = add(x = mel_spec_1_cast_fp16, y = var_56_to_fp16)[name = tensor<string, []>("mel_spec_3_cast_fp16")];
+        tensor<fp32, []> mel_spec_epsilon_0 = const()[name = tensor<string, []>("mel_spec_epsilon_0"), val = tensor<fp32, []>(0x1p-149)];
+        tensor<fp16, [128, 1501]> mel_spec_cast_fp16 = log(epsilon = mel_spec_epsilon_0, x = mel_spec_3_cast_fp16)[name = tensor<string, []>("mel_spec_cast_fp16")];
+        tensor<int32, [1]> per_feature_mean_axes_0 = const()[name = tensor<string, []>("per_feature_mean_axes_0"), val = tensor<int32, [1]>([-1])];
+        tensor<bool, []> per_feature_mean_keep_dims_0 = const()[name = tensor<string, []>("per_feature_mean_keep_dims_0"), val = tensor<bool, []>(true)];
+        tensor<fp16, [128, 1]> per_feature_mean_cast_fp16 = reduce_mean(axes = per_feature_mean_axes_0, keep_dims = per_feature_mean_keep_dims_0, x = mel_spec_cast_fp16)[name = tensor<string, []>("per_feature_mean_cast_fp16")];
+        tensor<fp16, [128, 1501]> sub_0_cast_fp16 = sub(x = mel_spec_cast_fp16, y = per_feature_mean_cast_fp16)[name = tensor<string, []>("sub_0_cast_fp16")];
+        tensor<fp16, [128, 1501]> square_0_cast_fp16 = square(x = sub_0_cast_fp16)[name = tensor<string, []>("square_0_cast_fp16")];
+        tensor<int32, [1]> reduce_mean_1_axes_0 = const()[name = tensor<string, []>("reduce_mean_1_axes_0"), val = tensor<int32, [1]>([-1])];
+        tensor<bool, []> reduce_mean_1_keep_dims_0 = const()[name = tensor<string, []>("reduce_mean_1_keep_dims_0"), val = tensor<bool, []>(true)];
+        tensor<fp16, [128, 1]> reduce_mean_1_cast_fp16 = reduce_mean(axes = reduce_mean_1_axes_0, keep_dims = reduce_mean_1_keep_dims_0, x = square_0_cast_fp16)[name = tensor<string, []>("reduce_mean_1_cast_fp16")];
+        tensor<fp16, []> real_div_0_to_fp16 = const()[name = tensor<string, []>("real_div_0_to_fp16"), val = tensor<fp16, []>(0x1.004p+0)];
+        tensor<fp16, [128, 1]> mul_0_cast_fp16 = mul(x = reduce_mean_1_cast_fp16, y = real_div_0_to_fp16)[name = tensor<string, []>("mul_0_cast_fp16")];
+        tensor<fp16, [128, 1]> sqrt_0_cast_fp16 = sqrt(x = mul_0_cast_fp16)[name = tensor<string, []>("sqrt_0_cast_fp16")];
+        tensor<fp16, []> var_70_to_fp16 = const()[name = tensor<string, []>("op_70_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
+        tensor<fp16, [128, 1]> per_feature_std_cast_fp16 = add(x = sqrt_0_cast_fp16, y = var_70_to_fp16)[name = tensor<string, []>("per_feature_std_cast_fp16")];
+        tensor<fp16, [128, 1501]> melspectrogram_features = real_div(x = sub_0_cast_fp16, y = per_feature_std_cast_fp16)[name = tensor<string, []>("op_74_cast_fp16")];
+    } -> (melspectrogram_features);
+}
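The MIL program above is readable end to end: drop the first sample and apply pre-emphasis (coefficient ≈ 0.97), zero-pad one sample on the left, reflect-pad by 256, run two strided convolutions whose 512-tap weights hold the real and imaginary parts of a 512-point STFT at hop 160, square and sum them into a power spectrum, project through 128 mel filters stored in weight.bin, take a log, and normalize each mel band by its mean and standard deviation. A rough NumPy restatement of that pipeline, offered as a sketch rather than the shipped implementation (the exact STFT window and mel filterbank live in weight.bin, so the window below is an assumption):

```python
# Hypothetical NumPy restatement of MelSpectrogram.mlmodelc/model.mil.
import numpy as np

def mel_features(audio, mel_filters, n_fft=512, hop=160, preemph=0.97, eps=1e-5):
    """audio: 1-D array of 240000 samples (15 s @ 16 kHz); mel_filters: [128, 257]."""
    # Pre-emphasis, then one zero on the left to keep 240000 samples (ops 8-19).
    x = np.concatenate([[0.0], audio[1:] - preemph * audio[:-1]])
    # Reflect-pad by n_fft // 2 on each side (the [256, 256] pad op).
    x = np.pad(x, n_fft // 2, mode="reflect")
    # Framed DFT; the two conv ops carry the windowed cos/sin basis as weights.
    window = np.hanning(n_fft)  # assumption: the exact window is baked into weight.bin
    frames = np.stack([x[i:i + n_fft] * window
                       for i in range(0, len(x) - n_fft + 1, hop)])  # [1501, 512]
    spec = np.fft.rfft(frames, n=n_fft)                              # [1501, 257]
    power = (spec.real ** 2 + spec.imag ** 2).T                      # [257, 1501]
    mel = mel_filters @ power                                        # [128, 1501]
    logmel = np.log(mel + 2.0 ** -24)
    # Per-feature normalization with an unbiased-variance style factor (ops 61-74).
    mean = logmel.mean(axis=1, keepdims=True)
    var = ((logmel - mean) ** 2).mean(axis=1, keepdims=True) * (1501 / 1500)
    return (logmel - mean) / (np.sqrt(var) + eps)                    # [128, 1501]
```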
nvidia_parakeet-v2/MelSpectrogram.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:023c2303b7c3a1fafed92fc6ec46c1d43a48c0bbcdf33d6441d383a61747734c
+size 592384
nvidia_parakeet-v2/MultimodalLogits.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15e92aaa4d64eb8c9ac6347b0a71bd2d054a78ab589f1a86e19d0fca384e1024
+size 243
nvidia_parakeet-v2/MultimodalLogits.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:077f39d87111ac670e39938177f145f3f28b5a4845e95d1af2e716de3e23dd6f
+size 369
nvidia_parakeet-v2/MultimodalLogits.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,74 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Float16",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1030)",
+        "shortDescription" : "",
+        "shape" : "[1, 1030]",
+        "name" : "logits",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 8,
+    "mlProgramOperationTypeHistogram" : {
+      "Ios16.softmax" : 1,
+      "Ios17.log" : 1,
+      "Ios17.linear" : 1,
+      "Ios17.add" : 1,
+      "Ios16.relu" : 1
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32)",
+    "isUpdatable" : "0",
+    "stateSchema" : [
+
+    ],
+    "availability" : {
+      "macOS" : "14.0",
+      "tvOS" : "17.0",
+      "visionOS" : "1.0",
+      "watchOS" : "10.0",
+      "iOS" : "17.0",
+      "macCatalyst" : "17.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.6.0",
+      "com.github.apple.coremltools.version" : "8.2"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 640)",
+        "shortDescription" : "",
+        "shape" : "[1, 640]",
+        "name" : "encoder_output_projected",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 640)",
+        "shortDescription" : "",
+        "shape" : "[1, 640]",
+        "name" : "decoder_output_projected",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "MultimodalLogits",
+    "method" : "predict"
+  }
+]
nvidia_parakeet-v2/MultimodalLogits.mlmodelc/model.mil
ADDED
@@ -0,0 +1,15 @@
+program(1.0)
+[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3401.3.1"}, {"coremlc-version", "3401.4.1"}, {"coremltools-component-torch", "2.6.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.2"}})]
+{
+    func main<ios17>(tensor<fp16, [1, 640]> decoder_output_projected, tensor<fp16, [1, 640]> encoder_output_projected) {
+        tensor<fp16, [1, 640]> input_1_cast_fp16 = add(x = decoder_output_projected, y = encoder_output_projected)[name = tensor<string, []>("input_1_cast_fp16")];
+        tensor<fp16, [1, 640]> input_3_cast_fp16 = relu(x = input_1_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
+        tensor<fp16, [1030, 640]> joint_net_1_weight_to_fp16 = const()[name = tensor<string, []>("joint_net_1_weight_to_fp16"), val = tensor<fp16, [1030, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
+        tensor<fp16, [1030]> joint_net_1_bias_to_fp16 = const()[name = tensor<string, []>("joint_net_1_bias_to_fp16"), val = tensor<fp16, [1030]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1318528)))];
+        tensor<fp16, [1, 1030]> linear_0_cast_fp16 = linear(bias = joint_net_1_bias_to_fp16, weight = joint_net_1_weight_to_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("linear_0_cast_fp16")];
+        tensor<int32, []> var_11 = const()[name = tensor<string, []>("op_11"), val = tensor<int32, []>(-1)];
+        tensor<fp16, [1, 1030]> var_13_softmax_cast_fp16 = softmax(axis = var_11, x = linear_0_cast_fp16)[name = tensor<string, []>("op_13_softmax_cast_fp16")];
+        tensor<fp32, []> var_13_epsilon_0 = const()[name = tensor<string, []>("op_13_epsilon_0"), val = tensor<fp32, []>(0x1p-149)];
+        tensor<fp16, [1, 1030]> logits = log(epsilon = var_13_epsilon_0, x = var_13_softmax_cast_fp16)[name = tensor<string, []>("op_13_cast_fp16")];
+    } -> (logits);
+}
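This is the transducer joint network in five ops: add the two 640-dim projections, apply ReLU, run a 640 → 1030 linear layer whose weights sit in weight.bin, then softmax and log to produce log-probabilities over the 1030-entry output vocabulary. A hypothetical NumPy equivalent, for orientation only:

```python
# Hypothetical sketch of the joint network computed by MultimodalLogits.mlmodelc.
import numpy as np

def joint_log_probs(enc_proj, dec_proj, weight, bias):
    """enc_proj, dec_proj: [1, 640]; weight: [1030, 640]; bias: [1030]."""
    h = np.maximum(enc_proj + dec_proj, 0.0)                   # add + relu
    z = h @ weight.T + bias                                    # linear -> [1, 1030]
    z = z - z.max(axis=-1, keepdims=True)                      # numerically stable
    return z - np.log(np.exp(z).sum(axis=-1, keepdims=True))   # log-softmax
```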
nvidia_parakeet-v2/MultimodalLogits.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90d1edb3c533791da821c091c4a37e948b556aca9637da03f4024a0c7c4f02f2
+size 1320652
nvidia_parakeet-v2/TextDecoder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:170a169a6cc4087fc6a62541a66bfdf1dfd7613ad6e32e94a3d9e161436b581a
+size 243
nvidia_parakeet-v2/TextDecoder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcb5824d67862c34f3e63afdefd4aa6024a8bc5531a283ff538b0354309c03f3
+size 441
nvidia_parakeet-v2/TextDecoder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,107 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Float16",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 640)",
+        "shortDescription" : "",
+        "shape" : "[1, 640]",
+        "name" : "decoder_output_projected",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 2 × 640)",
+        "shortDescription" : "",
+        "shape" : "[2, 640]",
+        "name" : "new_state_1",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 2 × 640)",
+        "shortDescription" : "",
+        "shape" : "[2, 640]",
+        "name" : "new_state_2",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 8,
+    "mlProgramOperationTypeHistogram" : {
+      "Ios17.squeeze" : 7,
+      "Ios17.gather" : 1,
+      "Ios17.cast" : 1,
+      "Ios17.lstm" : 2,
+      "Split" : 2,
+      "Ios17.linear" : 1,
+      "Stack" : 2,
+      "Ios17.expandDims" : 3
+    },
+    "computePrecision" : "Mixed (Float16, Int16, Int32)",
+    "isUpdatable" : "0",
+    "stateSchema" : [
+
+    ],
+    "availability" : {
+      "macOS" : "14.0",
+      "tvOS" : "17.0",
+      "visionOS" : "1.0",
+      "watchOS" : "10.0",
+      "iOS" : "17.0",
+      "macCatalyst" : "17.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.6.0",
+      "com.github.apple.coremltools.version" : "8.2"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Int32",
+        "formattedType" : "MultiArray (Int32 1)",
+        "shortDescription" : "",
+        "shape" : "[1]",
+        "name" : "decoder_input_ids",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 2 × 640)",
+        "shortDescription" : "",
+        "shape" : "[2, 640]",
+        "name" : "state_1",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 2 × 640)",
+        "shortDescription" : "",
+        "shape" : "[2, 640]",
+        "name" : "state_2",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "TextDecoder",
+    "method" : "predict"
+  }
+]
nvidia_parakeet-v2/TextDecoder.mlmodelc/model.mil
ADDED
@@ -0,0 +1,64 @@
+program(1.0)
+[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3401.3.1"}, {"coremlc-version", "3401.4.1"}, {"coremltools-component-torch", "2.6.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.2"}})]
+{
+    func main<ios17>(tensor<int32, [1]> decoder_input_ids, tensor<fp16, [2, 640]> state_1, tensor<fp16, [2, 640]> state_2) {
+        tensor<int32, []> input_1_axis_0 = const()[name = tensor<string, []>("input_1_axis_0"), val = tensor<int32, []>(0)];
+        tensor<int32, []> input_1_batch_dims_0 = const()[name = tensor<string, []>("input_1_batch_dims_0"), val = tensor<int32, []>(0)];
+        tensor<bool, []> input_1_validate_indices_0 = const()[name = tensor<string, []>("input_1_validate_indices_0"), val = tensor<bool, []>(false)];
+        tensor<fp16, [1025, 640]> prediction_embed_weight_to_fp16 = const()[name = tensor<string, []>("prediction_embed_weight_to_fp16"), val = tensor<fp16, [1025, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
+        tensor<string, []> decoder_input_ids_to_int16_dtype_0 = const()[name = tensor<string, []>("decoder_input_ids_to_int16_dtype_0"), val = tensor<string, []>("int16")];
+        tensor<int16, [1]> decoder_input_ids_to_int16 = cast(dtype = decoder_input_ids_to_int16_dtype_0, x = decoder_input_ids)[name = tensor<string, []>("cast_6")];
+        tensor<fp16, [1, 640]> input_1_cast_fp16_cast_uint16 = gather(axis = input_1_axis_0, batch_dims = input_1_batch_dims_0, indices = decoder_input_ids_to_int16, validate_indices = input_1_validate_indices_0, x = prediction_embed_weight_to_fp16)[name = tensor<string, []>("input_1_cast_fp16_cast_uint16")];
+        tensor<int32, [1]> input_3_axes_0 = const()[name = tensor<string, []>("input_3_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<fp16, [1, 1, 640]> input_3_cast_fp16 = expand_dims(axes = input_3_axes_0, x = input_1_cast_fp16_cast_uint16)[name = tensor<string, []>("input_3_cast_fp16")];
+        tensor<int32, [1]> hx_1_axes_0 = const()[name = tensor<string, []>("hx_1_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<fp16, [2, 1, 640]> hx_1_cast_fp16 = expand_dims(axes = hx_1_axes_0, x = state_1)[name = tensor<string, []>("hx_1_cast_fp16")];
+        tensor<int32, [1]> hx_axes_0 = const()[name = tensor<string, []>("hx_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<fp16, [2, 1, 640]> hx_cast_fp16 = expand_dims(axes = hx_axes_0, x = state_2)[name = tensor<string, []>("hx_cast_fp16")];
+        tensor<int32, []> split_0_num_splits_0 = const()[name = tensor<string, []>("split_0_num_splits_0"), val = tensor<int32, []>(2)];
+        tensor<int32, []> split_0_axis_0 = const()[name = tensor<string, []>("split_0_axis_0"), val = tensor<int32, []>(0)];
+        tensor<fp16, [1, 1, 640]> split_0_cast_fp16_0, tensor<fp16, [1, 1, 640]> split_0_cast_fp16_1 = split(axis = split_0_axis_0, num_splits = split_0_num_splits_0, x = hx_1_cast_fp16)[name = tensor<string, []>("split_0_cast_fp16")];
+        tensor<int32, []> split_1_num_splits_0 = const()[name = tensor<string, []>("split_1_num_splits_0"), val = tensor<int32, []>(2)];
+        tensor<int32, []> split_1_axis_0 = const()[name = tensor<string, []>("split_1_axis_0"), val = tensor<int32, []>(0)];
+        tensor<fp16, [1, 1, 640]> split_1_cast_fp16_0, tensor<fp16, [1, 1, 640]> split_1_cast_fp16_1 = split(axis = split_1_axis_0, num_splits = split_1_num_splits_0, x = hx_cast_fp16)[name = tensor<string, []>("split_1_cast_fp16")];
+        tensor<int32, [1]> output_lstm_layer_0_lstm_h0_squeeze_axes_0 = const()[name = tensor<string, []>("output_lstm_layer_0_lstm_h0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [1, 640]> output_lstm_layer_0_lstm_h0_squeeze_cast_fp16 = squeeze(axes = output_lstm_layer_0_lstm_h0_squeeze_axes_0, x = split_0_cast_fp16_0)[name = tensor<string, []>("output_lstm_layer_0_lstm_h0_squeeze_cast_fp16")];
+        tensor<int32, [1]> output_lstm_layer_0_lstm_c0_squeeze_axes_0 = const()[name = tensor<string, []>("output_lstm_layer_0_lstm_c0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [1, 640]> output_lstm_layer_0_lstm_c0_squeeze_cast_fp16 = squeeze(axes = output_lstm_layer_0_lstm_c0_squeeze_axes_0, x = split_1_cast_fp16_0)[name = tensor<string, []>("output_lstm_layer_0_lstm_c0_squeeze_cast_fp16")];
+        tensor<string, []> output_lstm_layer_0_direction_0 = const()[name = tensor<string, []>("output_lstm_layer_0_direction_0"), val = tensor<string, []>("forward")];
+        tensor<bool, []> output_lstm_layer_0_output_sequence_0 = const()[name = tensor<string, []>("output_lstm_layer_0_output_sequence_0"), val = tensor<bool, []>(true)];
+        tensor<string, []> output_lstm_layer_0_recurrent_activation_0 = const()[name = tensor<string, []>("output_lstm_layer_0_recurrent_activation_0"), val = tensor<string, []>("sigmoid")];
+        tensor<string, []> output_lstm_layer_0_cell_activation_0 = const()[name = tensor<string, []>("output_lstm_layer_0_cell_activation_0"), val = tensor<string, []>("tanh")];
+        tensor<string, []> output_lstm_layer_0_activation_0 = const()[name = tensor<string, []>("output_lstm_layer_0_activation_0"), val = tensor<string, []>("tanh")];
+        tensor<fp16, [2560, 640]> concat_1_to_fp16 = const()[name = tensor<string, []>("concat_1_to_fp16"), val = tensor<fp16, [2560, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1312128)))];
+        tensor<fp16, [2560, 640]> concat_2_to_fp16 = const()[name = tensor<string, []>("concat_2_to_fp16"), val = tensor<fp16, [2560, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4588992)))];
+        tensor<fp16, [2560]> concat_0_to_fp16 = const()[name = tensor<string, []>("concat_0_to_fp16"), val = tensor<fp16, [2560]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(7865856)))];
+        tensor<fp16, [1, 1, 640]> output_lstm_layer_0_cast_fp16_0, tensor<fp16, [1, 640]> output_lstm_layer_0_cast_fp16_1, tensor<fp16, [1, 640]> output_lstm_layer_0_cast_fp16_2 = lstm(activation = output_lstm_layer_0_activation_0, bias = concat_0_to_fp16, cell_activation = output_lstm_layer_0_cell_activation_0, direction = output_lstm_layer_0_direction_0, initial_c = output_lstm_layer_0_lstm_c0_squeeze_cast_fp16, initial_h = output_lstm_layer_0_lstm_h0_squeeze_cast_fp16, output_sequence = output_lstm_layer_0_output_sequence_0, recurrent_activation = output_lstm_layer_0_recurrent_activation_0, weight_hh = concat_2_to_fp16, weight_ih = concat_1_to_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("output_lstm_layer_0_cast_fp16")];
+        tensor<int32, [1]> output_lstm_h0_squeeze_axes_0 = const()[name = tensor<string, []>("output_lstm_h0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [1, 640]> output_lstm_h0_squeeze_cast_fp16 = squeeze(axes = output_lstm_h0_squeeze_axes_0, x = split_0_cast_fp16_1)[name = tensor<string, []>("output_lstm_h0_squeeze_cast_fp16")];
+        tensor<int32, [1]> output_lstm_c0_squeeze_axes_0 = const()[name = tensor<string, []>("output_lstm_c0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
+        tensor<fp16, [1, 640]> output_lstm_c0_squeeze_cast_fp16 = squeeze(axes = output_lstm_c0_squeeze_axes_0, x = split_1_cast_fp16_1)[name = tensor<string, []>("output_lstm_c0_squeeze_cast_fp16")];
+        tensor<string, []> output_direction_0 = const()[name = tensor<string, []>("output_direction_0"), val = tensor<string, []>("forward")];
+        tensor<bool, []> output_output_sequence_0 = const()[name = tensor<string, []>("output_output_sequence_0"), val = tensor<bool, []>(true)];
+        tensor<string, []> output_recurrent_activation_0 = const()[name = tensor<string, []>("output_recurrent_activation_0"), val = tensor<string, []>("sigmoid")];
+        tensor<string, []> output_cell_activation_0 = const()[name = tensor<string, []>("output_cell_activation_0"), val = tensor<string, []>("tanh")];
+        tensor<string, []> output_activation_0 = const()[name = tensor<string, []>("output_activation_0"), val = tensor<string, []>("tanh")];
+        tensor<fp16, [2560, 640]> concat_4_to_fp16 = const()[name = tensor<string, []>("concat_4_to_fp16"), val = tensor<fp16, [2560, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(7871040)))];
+        tensor<fp16, [2560, 640]> concat_5_to_fp16 = const()[name = tensor<string, []>("concat_5_to_fp16"), val = tensor<fp16, [2560, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11147904)))];
+        tensor<fp16, [2560]> concat_3_to_fp16 = const()[name = tensor<string, []>("concat_3_to_fp16"), val = tensor<fp16, [2560]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(14424768)))];
+        tensor<fp16, [1, 1, 640]> output_cast_fp16_0, tensor<fp16, [1, 640]> output_cast_fp16_1, tensor<fp16, [1, 640]> output_cast_fp16_2 = lstm(activation = output_activation_0, bias = concat_3_to_fp16, cell_activation = output_cell_activation_0, direction = output_direction_0, initial_c = output_lstm_c0_squeeze_cast_fp16, initial_h = output_lstm_h0_squeeze_cast_fp16, output_sequence = output_output_sequence_0, recurrent_activation = output_recurrent_activation_0, weight_hh = concat_5_to_fp16, weight_ih = concat_4_to_fp16, x = output_lstm_layer_0_cast_fp16_0)[name = tensor<string, []>("output_cast_fp16")];
+        tensor<int32, []> var_32_axis_0 = const()[name = tensor<string, []>("op_32_axis_0"), val = tensor<int32, []>(0)];
+        tensor<fp16, [2, 1, 640]> var_32_cast_fp16 = stack(axis = var_32_axis_0, values = (output_lstm_layer_0_cast_fp16_1, output_cast_fp16_1))[name = tensor<string, []>("op_32_cast_fp16")];
+        tensor<int32, []> var_33_axis_0 = const()[name = tensor<string, []>("op_33_axis_0"), val = tensor<int32, []>(0)];
+        tensor<fp16, [2, 1, 640]> var_33_cast_fp16 = stack(axis = var_33_axis_0, values = (output_lstm_layer_0_cast_fp16_2, output_cast_fp16_2))[name = tensor<string, []>("op_33_cast_fp16")];
+        tensor<int32, [1]> input_axes_0 = const()[name = tensor<string, []>("input_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<fp16, [1, 640]> input_cast_fp16 = squeeze(axes = input_axes_0, x = output_cast_fp16_0)[name = tensor<string, []>("input_cast_fp16")];
+        tensor<int32, [1]> var_35_axes_0 = const()[name = tensor<string, []>("op_35_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<fp16, [2, 640]> new_state_1 = squeeze(axes = var_35_axes_0, x = var_32_cast_fp16)[name = tensor<string, []>("op_35_cast_fp16")];
+        tensor<int32, [1]> var_36_axes_0 = const()[name = tensor<string, []>("op_36_axes_0"), val = tensor<int32, [1]>([1])];
+        tensor<fp16, [2, 640]> new_state_2 = squeeze(axes = var_36_axes_0, x = var_33_cast_fp16)[name = tensor<string, []>("op_36_cast_fp16")];
+        tensor<fp16, [640, 640]> joint_projection_weight_to_fp16 = const()[name = tensor<string, []>("joint_projection_weight_to_fp16"), val = tensor<fp16, [640, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(14429952)))];
+        tensor<fp16, [640]> joint_projection_bias_to_fp16 = const()[name = tensor<string, []>("joint_projection_bias_to_fp16"), val = tensor<fp16, [640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15249216)))];
+        tensor<fp16, [1, 640]> decoder_output_projected = linear(bias = joint_projection_bias_to_fp16, weight = joint_projection_weight_to_fp16, x = input_cast_fp16)[name = tensor<string, []>("linear_0_cast_fp16")];
+    } -> (decoder_output_projected, new_state_1, new_state_2);
+}
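TextDecoder is the stateful prediction network: an embedding lookup over a 1025-row table, two LSTM layers whose hidden and cell states are threaded through `state_1`/`state_2` and returned as `new_state_1`/`new_state_2`, and a 640 → 640 joint projection. Together with MelSpectrogram, AudioEncoder, and MultimodalLogits, the four `.mlmodelc` bundles form a standard transducer pipeline. The sketch below is not shipped in this commit; it wires the bundles together for greedy decoding with coremltools, and the blank/start token id (1024) and per-frame symbol cap are assumptions about the tokenizer layout:

```python
# Hypothetical greedy transducer decode using the four compiled models above.
import numpy as np
import coremltools as ct  # assumes CompiledMLModel is available

base = "nvidia_parakeet-v2"
melspec = ct.models.CompiledMLModel(f"{base}/MelSpectrogram.mlmodelc")
encoder = ct.models.CompiledMLModel(f"{base}/AudioEncoder.mlmodelc")
decoder = ct.models.CompiledMLModel(f"{base}/TextDecoder.mlmodelc")
joint   = ct.models.CompiledMLModel(f"{base}/MultimodalLogits.mlmodelc")

def transcribe_ids(audio_15s, blank_id=1024, max_symbols_per_step=5):
    """audio_15s: float16 [240000] (15 s @ 16 kHz). Returns greedy token ids."""
    # Acoustic side: waveform -> [128, 1501] mel -> [1, 1, 1501, 128] -> encoder.
    mel = melspec.predict({"audio": audio_15s})["melspectrogram_features"]
    enc = encoder.predict({"melspectrogram_features": mel.T[None, None].astype(np.float16)})
    enc_proj = enc["joint_projected_encoder_output_embeds"][0, :, 0, :]  # [640, 188]

    # Prediction network starts from zero LSTM states and the blank/start token.
    state_1 = np.zeros((2, 640), dtype=np.float16)
    state_2 = np.zeros((2, 640), dtype=np.float16)
    dec = decoder.predict({"decoder_input_ids": np.array([blank_id], dtype=np.int32),
                           "state_1": state_1, "state_2": state_2})
    tokens = []
    for t in range(enc_proj.shape[1]):                    # 188 encoder frames
        enc_t = enc_proj[:, t][None].astype(np.float16)   # [1, 640]
        for _ in range(max_symbols_per_step):
            logp = joint.predict({
                "encoder_output_projected": enc_t,
                "decoder_output_projected": dec["decoder_output_projected"].astype(np.float16),
            })["logits"]
            tok = int(logp[0].argmax())
            if tok == blank_id:
                break                                     # advance to the next frame
            tokens.append(tok)
            dec = decoder.predict({"decoder_input_ids": np.array([tok], dtype=np.int32),
                                   "state_1": dec["new_state_1"].astype(np.float16),
                                   "state_2": dec["new_state_2"].astype(np.float16)})
    return tokens
```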
nvidia_parakeet-v2/TextDecoder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f39f483dcf06a76b7806a987b8c9309258aca550242e6b94c28b07df3122253c
+size 15250560