Commit d04e1f8 committed by aotrih
1 Parent(s): b2a0acc

Add nvidia_parakeet-v3

Files changed (25)
  1. nvidia_parakeet-v3/AudioEncoder.mlmodelc/LICENSE_NOTICE.txt +7 -0
  2. nvidia_parakeet-v3/AudioEncoder.mlmodelc/analytics/coremldata.bin +3 -0
  3. nvidia_parakeet-v3/AudioEncoder.mlmodelc/coremldata.bin +3 -0
  4. nvidia_parakeet-v3/AudioEncoder.mlmodelc/metadata.json +96 -0
  5. nvidia_parakeet-v3/AudioEncoder.mlmodelc/model.mil +0 -0
  6. nvidia_parakeet-v3/AudioEncoder.mlmodelc/weights/weight.bin +3 -0
  7. nvidia_parakeet-v3/LICENSE_NOTICE.txt +7 -0
  8. nvidia_parakeet-v3/MelSpectrogram.mlmodelc/LICENSE_NOTICE.txt +7 -0
  9. nvidia_parakeet-v3/MelSpectrogram.mlmodelc/analytics/coremldata.bin +3 -0
  10. nvidia_parakeet-v3/MelSpectrogram.mlmodelc/coremldata.bin +3 -0
  11. nvidia_parakeet-v3/MelSpectrogram.mlmodelc/metadata.json +77 -0
  12. nvidia_parakeet-v3/MelSpectrogram.mlmodelc/model.mil +81 -0
  13. nvidia_parakeet-v3/MelSpectrogram.mlmodelc/weights/weight.bin +3 -0
  14. nvidia_parakeet-v3/MultimodalLogits.mlmodelc/LICENSE_NOTICE.txt +7 -0
  15. nvidia_parakeet-v3/MultimodalLogits.mlmodelc/analytics/coremldata.bin +3 -0
  16. nvidia_parakeet-v3/MultimodalLogits.mlmodelc/coremldata.bin +3 -0
  17. nvidia_parakeet-v3/MultimodalLogits.mlmodelc/metadata.json +75 -0
  18. nvidia_parakeet-v3/MultimodalLogits.mlmodelc/model.mil +15 -0
  19. nvidia_parakeet-v3/MultimodalLogits.mlmodelc/weights/weight.bin +3 -0
  20. nvidia_parakeet-v3/TextDecoder.mlmodelc/LICENSE_NOTICE.txt +7 -0
  21. nvidia_parakeet-v3/TextDecoder.mlmodelc/analytics/coremldata.bin +3 -0
  22. nvidia_parakeet-v3/TextDecoder.mlmodelc/coremldata.bin +3 -0
  23. nvidia_parakeet-v3/TextDecoder.mlmodelc/metadata.json +111 -0
  24. nvidia_parakeet-v3/TextDecoder.mlmodelc/model.mil +73 -0
  25. nvidia_parakeet-v3/TextDecoder.mlmodelc/weights/weight.bin +3 -0
nvidia_parakeet-v3/AudioEncoder.mlmodelc/LICENSE_NOTICE.txt ADDED
@@ -0,0 +1,7 @@
+ Argmax proprietary and confidential. Under NDA.
+
+ Copyright 2024 Argmax, Inc. All rights reserved.
+
+ Unauthorized access, copying, use, distribution, and or commercialization of this file, via any medium or means is strictly prohibited.
+
+ Please contact Argmax for licensing information at [email protected].
nvidia_parakeet-v3/AudioEncoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d7fb2cdf39ba37f092cbf481ff457b8ff84746012e4402542cd712d69a1326a
+ size 243
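The coremldata.bin and weight.bin entries in this commit are Git LFS pointer files (a version line, a sha256 oid, and a byte size), not the binary payloads themselves. A minimal sketch of reading such a pointer in Python, assuming a local checkout where LFS objects have not been fetched; the `parse_lfs_pointer` helper is illustrative and not part of this repository:

```python
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file (version / oid / size lines) into a dict."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        if not line.strip():
            continue
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

# Hypothetical local path from this commit:
# info = parse_lfs_pointer("nvidia_parakeet-v3/AudioEncoder.mlmodelc/analytics/coremldata.bin")
# print(info["oid"], info["size"])  # e.g. sha256:6d7fb2..., 243
```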
nvidia_parakeet-v3/AudioEncoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c3a319786d1956fe24eba520a941c7e5ab8486f14558dd895d6bd9c95f45bb1
+ size 493
nvidia_parakeet-v3/AudioEncoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,96 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Float16",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1024 × 1 × 188)",
+ "shortDescription" : "",
+ "shape" : "[1, 1024, 1, 188]",
+ "name" : "encoder_output_embeds",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 640 × 1 × 188)",
+ "shortDescription" : "",
+ "shape" : "[1, 640, 1, 188]",
+ "name" : "joint_projected_encoder_output_embeds",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 8,
+ "mlProgramOperationTypeHistogram" : {
+ "Ios16.silu" : 72,
+ "Ios17.mul" : 73,
+ "Split" : 24,
+ "Ios17.transpose" : 1,
+ "Ios17.sub" : 1,
+ "Ios17.matmul" : 72,
+ "Ios17.conv" : 295,
+ "Ios16.sigmoid" : 24,
+ "Ios17.add" : 168,
+ "Ios17.sliceByIndex" : 48,
+ "Ios17.batchNorm" : 120,
+ "Ios16.relu" : 3,
+ "Ios16.softmax" : 24,
+ "Ios17.reshape" : 193,
+ "Ios17.layerNorm" : 120,
+ "Pad" : 24
+ },
+ "computePrecision" : "Mixed (Float16, Int32)",
+ "isUpdatable" : "0",
+ "stateSchema" : [
+
+ ],
+ "availability" : {
+ "macOS" : "14.0",
+ "tvOS" : "17.0",
+ "visionOS" : "1.0",
+ "watchOS" : "10.0",
+ "iOS" : "17.0",
+ "macCatalyst" : "17.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.conversion_date" : "2025-08-14",
+ "com.github.apple.coremltools.source" : "torch==2.5.0",
+ "com.github.apple.coremltools.version" : "9.0b1",
+ "com.github.apple.coremltools.source_dialect" : "TorchScript"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1 × 1501 × 128)",
+ "shortDescription" : "",
+ "shape" : "[1, 1, 1501, 128]",
+ "name" : "melspectrogram_features",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1 × 1 × 1)",
+ "shortDescription" : "",
+ "shape" : "[1, 1, 1, 1]",
+ "name" : "input_1",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "AudioEncoder",
+ "method" : "predict"
+ }
+ ]
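The metadata above fully describes the compiled encoder's interface: a 1501 × 128 log-mel feature map in, and 188 encoder frames out (both the raw 1024-dim embeddings and their 640-dim joint projection). A minimal sketch of driving the compiled bundle with coremltools on macOS, assuming the `.mlmodelc` directory has been fetched locally and that recent coremltools (which provides `coremltools.models.CompiledMLModel` for compiled bundles) is installed; paths are hypothetical:

```python
import numpy as np
import coremltools as ct

# Load the compiled AudioEncoder bundle from this commit (path is illustrative).
encoder = ct.models.CompiledMLModel("nvidia_parakeet-v3/AudioEncoder.mlmodelc")

# One 1501-frame x 128-bin mel window (240 000 samples, i.e. 15 s at an assumed
# 16 kHz, per the MelSpectrogram schema further down), plus the auxiliary
# "input_1" scalar the schema lists.
features = np.zeros((1, 1, 1501, 128), dtype=np.float16)
aux = np.zeros((1, 1, 1, 1), dtype=np.float16)

out = encoder.predict({
    "melspectrogram_features": features,
    "input_1": aux,
})
print(out["encoder_output_embeds"].shape)                  # (1, 1024, 1, 188)
print(out["joint_projected_encoder_output_embeds"].shape)  # (1, 640, 1, 188)
```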
nvidia_parakeet-v3/AudioEncoder.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
nvidia_parakeet-v3/AudioEncoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f95d33a2f1582d171ad8dacdb724d1d96a8566c958044c61419f1b90d307074b
+ size 1219841984
nvidia_parakeet-v3/LICENSE_NOTICE.txt ADDED
@@ -0,0 +1,7 @@
+ Argmax proprietary and confidential. Under NDA.
+
+ Copyright 2024 Argmax, Inc. All rights reserved.
+
+ Unauthorized access, copying, use, distribution, and or commercialization of this file, via any medium or means is strictly prohibited.
+
+ Please contact Argmax for licensing information at [email protected].
nvidia_parakeet-v3/MelSpectrogram.mlmodelc/LICENSE_NOTICE.txt ADDED
@@ -0,0 +1,7 @@
+ Argmax proprietary and confidential. Under NDA.
+
+ Copyright 2024 Argmax, Inc. All rights reserved.
+
+ Unauthorized access, copying, use, distribution, and or commercialization of this file, via any medium or means is strictly prohibited.
+
+ Please contact Argmax for licensing information at [email protected].
nvidia_parakeet-v3/MelSpectrogram.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e7d1205a095f5ebc8372aec82b911979c3a010c37bd70fd2f546a21dbdce15c
+ size 243
nvidia_parakeet-v3/MelSpectrogram.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f2a333c9134ecf776a755d626cb782de9fd65a4520bc18668432c21da75a625
+ size 392
nvidia_parakeet-v3/MelSpectrogram.mlmodelc/metadata.json ADDED
@@ -0,0 +1,77 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Mixed (Float16, Palettized (6 bits))",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 1 × 1501 × 128)",
+ "shortDescription" : "",
+ "shape" : "[1, 1, 1501, 128]",
+ "name" : "melspectrogram_features",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 8,
+ "mlProgramOperationTypeHistogram" : {
+ "Ios17.mul" : 2,
+ "Ios17.sqrt" : 1,
+ "Ios17.square" : 3,
+ "Ios17.transpose" : 1,
+ "Ios17.sub" : 2,
+ "Ios16.constexprLutToDense" : 3,
+ "Ios17.conv" : 2,
+ "Ios17.matmul" : 1,
+ "Ios17.log" : 1,
+ "Ios17.sliceByIndex" : 2,
+ "Ios17.add" : 3,
+ "Ios16.reduceMean" : 2,
+ "Ios17.realDiv" : 1,
+ "Ios17.expandDims" : 4,
+ "Ios17.squeeze" : 2,
+ "Ios17.reshape" : 2,
+ "Pad" : 2
+ },
+ "computePrecision" : "Mixed (Float16, Float32, Int32)",
+ "isUpdatable" : "0",
+ "stateSchema" : [
+
+ ],
+ "availability" : {
+ "macOS" : "14.0",
+ "tvOS" : "17.0",
+ "visionOS" : "1.0",
+ "watchOS" : "10.0",
+ "iOS" : "17.0",
+ "macCatalyst" : "17.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.conversion_date" : "2025-08-14",
+ "com.github.apple.coremltools.source" : "torch==2.5.0",
+ "com.github.apple.coremltools.version" : "9.0b1",
+ "com.github.apple.coremltools.source_dialect" : "TorchScript"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 240000)",
+ "shortDescription" : "",
+ "shape" : "[240000]",
+ "name" : "audio",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "MelSpectrogram_6_bit",
+ "method" : "predict"
+ }
+ ]
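The "Palettized (6 bits)" storage precision means the STFT and mel-filterbank weights in weight.bin are stored as 6-bit indices into 64-entry float16 look-up tables and expanded at load time by the `constexpr_lut_to_dense` ops visible in model.mil below. A minimal NumPy sketch of that decompression, under the assumption that indices are bit-packed in little-endian bit order (the exact packing used by Core ML is not shown in this diff):

```python
import numpy as np

def lut_to_dense(packed: np.ndarray, lut: np.ndarray, n_elems: int) -> np.ndarray:
    """Expand 6-bit palettized weights: packed bytes -> LUT indices -> fp16 values.

    packed : uint8 bytes holding n_elems 6-bit indices (little-endian bit order assumed)
    lut    : 64-entry float16 palette
    """
    bits = np.unpackbits(packed[:, None], axis=1, bitorder="little")   # (n_bytes, 8)
    bits = bits.reshape(-1)[: n_elems * 6].reshape(n_elems, 6)
    idx = (bits * (1 << np.arange(6))).sum(axis=1)
    return lut[idx].astype(np.float16)

# Shapes taken from the first constexpr_lut_to_dense in model.mil below:
# 257 * 512 = 131584 coefficients packed into 98688 bytes, with a 64-entry LUT.
rng = np.random.default_rng(0)
packed = rng.integers(0, 256, size=98688, dtype=np.uint8)   # stand-in for the blob
lut = rng.standard_normal(64).astype(np.float16)
dense = lut_to_dense(packed, lut, 257 * 512).reshape(257, 1, 512)
print(dense.shape)  # (257, 1, 512)
```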
nvidia_parakeet-v3/MelSpectrogram.mlmodelc/model.mil ADDED
@@ -0,0 +1,81 @@
+ program(1.0)
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3401.3.1"}, {"coremlc-version", "3401.4.1"}})]
+ {
+ func main<ios17>(tensor<fp16, [240000]> audio) {
+ tensor<int32, [1]> var_8_begin_0 = const()[name = tensor<string, []>("op_8_begin_0"), val = tensor<int32, [1]>([1])];
+ tensor<int32, [1]> var_8_end_0 = const()[name = tensor<string, []>("op_8_end_0"), val = tensor<int32, [1]>([240000])];
+ tensor<bool, [1]> var_8_end_mask_0 = const()[name = tensor<string, []>("op_8_end_mask_0"), val = tensor<bool, [1]>([true])];
+ tensor<fp16, [239999]> var_8_cast_fp16 = slice_by_index(begin = var_8_begin_0, end = var_8_end_0, end_mask = var_8_end_mask_0, x = audio)[name = tensor<string, []>("op_8_cast_fp16")];
+ tensor<int32, [1]> var_13_begin_0 = const()[name = tensor<string, []>("op_13_begin_0"), val = tensor<int32, [1]>([0])];
+ tensor<int32, [1]> var_13_end_0 = const()[name = tensor<string, []>("op_13_end_0"), val = tensor<int32, [1]>([239999])];
+ tensor<bool, [1]> var_13_end_mask_0 = const()[name = tensor<string, []>("op_13_end_mask_0"), val = tensor<bool, [1]>([false])];
+ tensor<fp16, [239999]> var_13_cast_fp16 = slice_by_index(begin = var_13_begin_0, end = var_13_end_0, end_mask = var_13_end_mask_0, x = audio)[name = tensor<string, []>("op_13_cast_fp16")];
+ tensor<fp16, []> var_14_to_fp16 = const()[name = tensor<string, []>("op_14_to_fp16"), val = tensor<fp16, []>(0x1.f0cp-1)];
+ tensor<fp16, [239999]> var_15_cast_fp16 = mul(x = var_13_cast_fp16, y = var_14_to_fp16)[name = tensor<string, []>("op_15_cast_fp16")];
+ tensor<fp16, [239999]> input_1_cast_fp16 = sub(x = var_8_cast_fp16, y = var_15_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")];
+ tensor<int32, [2]> input_3_pad_0 = const()[name = tensor<string, []>("input_3_pad_0"), val = tensor<int32, [2]>([1, 0])];
+ tensor<string, []> input_3_mode_0 = const()[name = tensor<string, []>("input_3_mode_0"), val = tensor<string, []>("constant")];
+ tensor<fp16, []> const_0_to_fp16 = const()[name = tensor<string, []>("const_0_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+ tensor<fp16, [240000]> input_3_cast_fp16 = pad(constant_val = const_0_to_fp16, mode = input_3_mode_0, pad = input_3_pad_0, x = input_1_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
+ tensor<int32, [3]> var_30 = const()[name = tensor<string, []>("op_30"), val = tensor<int32, [3]>([1, 1, 240000])];
+ tensor<fp16, [1, 1, 240000]> input_5_cast_fp16 = reshape(shape = var_30, x = input_3_cast_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
+ tensor<int32, [6]> input_7_pad_0 = const()[name = tensor<string, []>("input_7_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 256, 256])];
+ tensor<string, []> input_7_mode_0 = const()[name = tensor<string, []>("input_7_mode_0"), val = tensor<string, []>("reflect")];
+ tensor<fp16, []> const_2_to_fp16 = const()[name = tensor<string, []>("const_2_to_fp16"), val = tensor<fp16, []>(0x0p+0)];
+ tensor<fp16, [1, 1, 240512]> input_7_cast_fp16 = pad(constant_val = const_2_to_fp16, mode = input_7_mode_0, pad = input_7_pad_0, x = input_5_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
+ tensor<int32, [1]> var_42 = const()[name = tensor<string, []>("op_42"), val = tensor<int32, [1]>([240512])];
+ tensor<fp16, [240512]> input_cast_fp16 = reshape(shape = var_42, x = input_7_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
+ tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = tensor<string, []>("expand_dims_0_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [1, 240512]> expand_dims_0_cast_fp16 = expand_dims(axes = expand_dims_0_axes_0, x = input_cast_fp16)[name = tensor<string, []>("expand_dims_0_cast_fp16")];
+ tensor<int32, [1]> expand_dims_3 = const()[name = tensor<string, []>("expand_dims_3"), val = tensor<int32, [1]>([160])];
+ tensor<int32, [1]> expand_dims_4_axes_0 = const()[name = tensor<string, []>("expand_dims_4_axes_0"), val = tensor<int32, [1]>([1])];
+ tensor<fp16, [1, 1, 240512]> expand_dims_4_cast_fp16 = expand_dims(axes = expand_dims_4_axes_0, x = expand_dims_0_cast_fp16)[name = tensor<string, []>("expand_dims_4_cast_fp16")];
+ tensor<string, []> conv_0_pad_type_0 = const()[name = tensor<string, []>("conv_0_pad_type_0"), val = tensor<string, []>("valid")];
+ tensor<int32, [2]> conv_0_pad_0 = const()[name = tensor<string, []>("conv_0_pad_0"), val = tensor<int32, [2]>([0, 0])];
+ tensor<int32, [1]> conv_0_dilations_0 = const()[name = tensor<string, []>("conv_0_dilations_0"), val = tensor<int32, [1]>([1])];
+ tensor<int32, []> conv_0_groups_0 = const()[name = tensor<string, []>("conv_0_groups_0"), val = tensor<int32, []>(1)];
+ tensor<fp16, [257, 1, 512]> expand_dims_1_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [98688]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(98816))), name = tensor<string, []>("expand_dims_1_to_fp16_palettized"), shape = tensor<uint32, [3]>([257, 1, 512])];
+ tensor<fp16, [1, 257, 1501]> conv_0_cast_fp16 = conv(dilations = conv_0_dilations_0, groups = conv_0_groups_0, pad = conv_0_pad_0, pad_type = conv_0_pad_type_0, strides = expand_dims_3, weight = expand_dims_1_to_fp16_palettized, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_0_cast_fp16")];
+ tensor<string, []> conv_1_pad_type_0 = const()[name = tensor<string, []>("conv_1_pad_type_0"), val = tensor<string, []>("valid")];
+ tensor<int32, [2]> conv_1_pad_0 = const()[name = tensor<string, []>("conv_1_pad_0"), val = tensor<int32, [2]>([0, 0])];
+ tensor<int32, [1]> conv_1_dilations_0 = const()[name = tensor<string, []>("conv_1_dilations_0"), val = tensor<int32, [1]>([1])];
+ tensor<int32, []> conv_1_groups_0 = const()[name = tensor<string, []>("conv_1_groups_0"), val = tensor<int32, []>(1)];
+ tensor<fp16, [257, 1, 512]> expand_dims_2_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [98688]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(99008))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(197760))), name = tensor<string, []>("expand_dims_2_to_fp16_palettized"), shape = tensor<uint32, [3]>([257, 1, 512])];
+ tensor<fp16, [1, 257, 1501]> conv_1_cast_fp16 = conv(dilations = conv_1_dilations_0, groups = conv_1_groups_0, pad = conv_1_pad_0, pad_type = conv_1_pad_type_0, strides = expand_dims_3, weight = expand_dims_2_to_fp16_palettized, x = expand_dims_4_cast_fp16)[name = tensor<string, []>("conv_1_cast_fp16")];
+ tensor<int32, [1]> squeeze_0_axes_0 = const()[name = tensor<string, []>("squeeze_0_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [257, 1501]> squeeze_0_cast_fp16 = squeeze(axes = squeeze_0_axes_0, x = conv_0_cast_fp16)[name = tensor<string, []>("squeeze_0_cast_fp16")];
+ tensor<int32, [1]> squeeze_1_axes_0 = const()[name = tensor<string, []>("squeeze_1_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [257, 1501]> squeeze_1_cast_fp16 = squeeze(axes = squeeze_1_axes_0, x = conv_1_cast_fp16)[name = tensor<string, []>("squeeze_1_cast_fp16")];
+ tensor<fp16, [257, 1501]> square_1_cast_fp16 = square(x = squeeze_0_cast_fp16)[name = tensor<string, []>("square_1_cast_fp16")];
+ tensor<fp16, [257, 1501]> square_2_cast_fp16 = square(x = squeeze_1_cast_fp16)[name = tensor<string, []>("square_2_cast_fp16")];
+ tensor<fp16, [257, 1501]> add_1_cast_fp16 = add(x = square_1_cast_fp16, y = square_2_cast_fp16)[name = tensor<string, []>("add_1_cast_fp16")];
+ tensor<bool, []> mel_spec_1_transpose_x_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_x_0"), val = tensor<bool, []>(false)];
+ tensor<bool, []> mel_spec_1_transpose_y_0 = const()[name = tensor<string, []>("mel_spec_1_transpose_y_0"), val = tensor<bool, []>(false)];
+ tensor<fp16, [128, 257]> mel_filters_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [24672]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(197952))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(222720))), name = tensor<string, []>("mel_filters_to_fp16_palettized"), shape = tensor<uint32, [2]>([128, 257])];
+ tensor<fp16, [128, 1501]> mel_spec_1_cast_fp16 = matmul(transpose_x = mel_spec_1_transpose_x_0, transpose_y = mel_spec_1_transpose_y_0, x = mel_filters_to_fp16_palettized, y = add_1_cast_fp16)[name = tensor<string, []>("mel_spec_1_cast_fp16")];
+ tensor<fp16, []> var_56_to_fp16 = const()[name = tensor<string, []>("op_56_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
+ tensor<fp16, [128, 1501]> mel_spec_3_cast_fp16 = add(x = mel_spec_1_cast_fp16, y = var_56_to_fp16)[name = tensor<string, []>("mel_spec_3_cast_fp16")];
+ tensor<fp32, []> mel_spec_5_epsilon_0 = const()[name = tensor<string, []>("mel_spec_5_epsilon_0"), val = tensor<fp32, []>(0x1p-149)];
+ tensor<fp16, [128, 1501]> mel_spec_5_cast_fp16 = log(epsilon = mel_spec_5_epsilon_0, x = mel_spec_3_cast_fp16)[name = tensor<string, []>("mel_spec_5_cast_fp16")];
+ tensor<int32, [1]> per_feature_mean_axes_0 = const()[name = tensor<string, []>("per_feature_mean_axes_0"), val = tensor<int32, [1]>([-1])];
+ tensor<bool, []> per_feature_mean_keep_dims_0 = const()[name = tensor<string, []>("per_feature_mean_keep_dims_0"), val = tensor<bool, []>(true)];
+ tensor<fp16, [128, 1]> per_feature_mean_cast_fp16 = reduce_mean(axes = per_feature_mean_axes_0, keep_dims = per_feature_mean_keep_dims_0, x = mel_spec_5_cast_fp16)[name = tensor<string, []>("per_feature_mean_cast_fp16")];
+ tensor<fp16, [128, 1501]> sub_0_cast_fp16 = sub(x = mel_spec_5_cast_fp16, y = per_feature_mean_cast_fp16)[name = tensor<string, []>("sub_0_cast_fp16")];
+ tensor<fp16, [128, 1501]> square_0_cast_fp16 = square(x = sub_0_cast_fp16)[name = tensor<string, []>("square_0_cast_fp16")];
+ tensor<int32, [1]> reduce_mean_1_axes_0 = const()[name = tensor<string, []>("reduce_mean_1_axes_0"), val = tensor<int32, [1]>([-1])];
+ tensor<bool, []> reduce_mean_1_keep_dims_0 = const()[name = tensor<string, []>("reduce_mean_1_keep_dims_0"), val = tensor<bool, []>(true)];
+ tensor<fp16, [128, 1]> reduce_mean_1_cast_fp16 = reduce_mean(axes = reduce_mean_1_axes_0, keep_dims = reduce_mean_1_keep_dims_0, x = square_0_cast_fp16)[name = tensor<string, []>("reduce_mean_1_cast_fp16")];
+ tensor<fp16, []> real_div_0_to_fp16 = const()[name = tensor<string, []>("real_div_0_to_fp16"), val = tensor<fp16, []>(0x1.004p+0)];
+ tensor<fp16, [128, 1]> mul_0_cast_fp16 = mul(x = reduce_mean_1_cast_fp16, y = real_div_0_to_fp16)[name = tensor<string, []>("mul_0_cast_fp16")];
+ tensor<fp16, [128, 1]> sqrt_0_cast_fp16 = sqrt(x = mul_0_cast_fp16)[name = tensor<string, []>("sqrt_0_cast_fp16")];
+ tensor<fp16, []> var_70_to_fp16 = const()[name = tensor<string, []>("op_70_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
+ tensor<fp16, [128, 1]> per_feature_std_cast_fp16 = add(x = sqrt_0_cast_fp16, y = var_70_to_fp16)[name = tensor<string, []>("per_feature_std_cast_fp16")];
+ tensor<fp16, [128, 1501]> mel_spec_cast_fp16 = real_div(x = sub_0_cast_fp16, y = per_feature_std_cast_fp16)[name = tensor<string, []>("mel_spec_cast_fp16")];
+ tensor<int32, [2]> var_75_perm_0 = const()[name = tensor<string, []>("op_75_perm_0"), val = tensor<int32, [2]>([1, 0])];
+ tensor<int32, [1]> var_77_axes_0 = const()[name = tensor<string, []>("op_77_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [1501, 128]> var_75_cast_fp16 = transpose(perm = var_75_perm_0, x = mel_spec_cast_fp16)[name = tensor<string, []>("transpose_0")];
+ tensor<fp16, [1, 1501, 128]> var_77_cast_fp16 = expand_dims(axes = var_77_axes_0, x = var_75_cast_fp16)[name = tensor<string, []>("op_77_cast_fp16")];
+ tensor<int32, [1]> var_79_axes_0 = const()[name = tensor<string, []>("op_79_axes_0"), val = tensor<int32, [1]>([1])];
+ tensor<fp16, [1, 1, 1501, 128]> melspectrogram_features = expand_dims(axes = var_79_axes_0, x = var_77_cast_fp16)[name = tensor<string, []>("op_79_cast_fp16")];
+ } -> (melspectrogram_features);
+ }
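The MIL graph above is a conventional log-mel front end: pre-emphasis (coefficient ≈ 0.97), reflect padding of 256 samples, a 512-point STFT realized as two strided convolutions (cosine and sine banks, hop 160), power spectrum, a 128-band mel filterbank, log, and per-feature mean/variance normalization. A rough NumPy equivalent, assuming 16 kHz input and a Hann-like window (the actual window and filterbank values live in weight.bin and are not reproduced here):

```python
import numpy as np

def log_mel_features(audio, mel_filters, n_fft=512, hop=160, preemph=0.97):
    """Approximate the MelSpectrogram graph above in NumPy.

    audio       : float array of 240_000 samples (15 s at an assumed 16 kHz)
    mel_filters : (128, 257) mel filterbank (stored palettized in weight.bin)
    """
    # Pre-emphasis: x[t] - 0.97 * x[t-1], then pad one leading zero (pad [1, 0]).
    x = np.concatenate([[0.0], audio[1:] - preemph * audio[:-1]])

    # Center frames with reflect padding of n_fft // 2 = 256 on both sides.
    x = np.pad(x, n_fft // 2, mode="reflect")

    # Power spectrum via framing + rFFT (the model uses two conv banks instead).
    frames = np.lib.stride_tricks.sliding_window_view(x, n_fft)[::hop]  # (1501, 512)
    window = np.hanning(n_fft)                       # assumed window shape
    spec = np.fft.rfft(frames * window, n=n_fft)     # (1501, 257)
    power = spec.real**2 + spec.imag**2

    # Mel projection, log (with the 2**-24 floor from the graph), then
    # per-feature (per mel bin) mean/std normalization over the 1501 frames.
    mel = mel_filters @ power.T + 2.0**-24           # (128, 1501)
    logmel = np.log(mel)
    mean = logmel.mean(axis=1, keepdims=True)
    std = np.sqrt(logmel.var(axis=1, keepdims=True, ddof=1)) + 1e-5
    normed = (logmel - mean) / std
    return normed.T[None, None]                      # (1, 1, 1501, 128)
```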
nvidia_parakeet-v3/MelSpectrogram.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fdb3127d090c4856df950d5e6f059d1936ddd121a4f9da7362ed7c5ef7b189b3
+ size 222912
nvidia_parakeet-v3/MultimodalLogits.mlmodelc/LICENSE_NOTICE.txt ADDED
@@ -0,0 +1,7 @@
+ Argmax proprietary and confidential. Under NDA.
+
+ Copyright 2024 Argmax, Inc. All rights reserved.
+
+ Unauthorized access, copying, use, distribution, and or commercialization of this file, via any medium or means is strictly prohibited.
+
+ Please contact Argmax for licensing information at [email protected].
nvidia_parakeet-v3/MultimodalLogits.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbb1fc22c1a4b16fff489b9424efcf0112cd0223ca427621aca45ce744f3ff8d
+ size 243
nvidia_parakeet-v3/MultimodalLogits.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a0441fb4458b0ddb433752446048f5ff3fe681af731833b96a8fe8f80d0521b
+ size 432
nvidia_parakeet-v3/MultimodalLogits.mlmodelc/metadata.json ADDED
@@ -0,0 +1,75 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Float16",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 8198)",
+ "shortDescription" : "",
+ "shape" : "[1, 8198]",
+ "name" : "logits",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 8,
+ "mlProgramOperationTypeHistogram" : {
+ "Ios16.softmax" : 1,
+ "Ios17.log" : 1,
+ "Ios17.linear" : 1,
+ "Ios17.add" : 1,
+ "Ios16.relu" : 1
+ },
+ "computePrecision" : "Mixed (Float16, Float32, Int32)",
+ "isUpdatable" : "0",
+ "stateSchema" : [
+
+ ],
+ "availability" : {
+ "macOS" : "14.0",
+ "tvOS" : "17.0",
+ "visionOS" : "1.0",
+ "watchOS" : "10.0",
+ "iOS" : "17.0",
+ "macCatalyst" : "17.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.conversion_date" : "2025-08-14",
+ "com.github.apple.coremltools.source" : "torch==2.5.0",
+ "com.github.apple.coremltools.version" : "9.0b1",
+ "com.github.apple.coremltools.source_dialect" : "TorchScript"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 640)",
+ "shortDescription" : "",
+ "shape" : "[1, 640]",
+ "name" : "encoder_output_projected",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 640)",
+ "shortDescription" : "",
+ "shape" : "[1, 640]",
+ "name" : "decoder_output_projected",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "MultimodalLogits",
+ "method" : "predict"
+ }
+ ]
nvidia_parakeet-v3/MultimodalLogits.mlmodelc/model.mil ADDED
@@ -0,0 +1,15 @@
+ program(1.0)
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3401.3.1"}, {"coremlc-version", "3401.4.1"}, {"coremltools-component-torch", "2.5.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "9.0b1"}})]
+ {
+ func main<ios17>(tensor<fp16, [1, 640]> decoder_output_projected, tensor<fp16, [1, 640]> encoder_output_projected) {
+ tensor<fp16, [1, 640]> input_1_cast_fp16 = add(x = decoder_output_projected, y = encoder_output_projected)[name = tensor<string, []>("input_1_cast_fp16")];
+ tensor<fp16, [1, 640]> input_3_cast_fp16 = relu(x = input_1_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
+ tensor<fp16, [8198, 640]> joint_net_1_weight_to_fp16 = const()[name = tensor<string, []>("joint_net_1_weight_to_fp16"), val = tensor<fp16, [8198, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
+ tensor<fp16, [8198]> joint_net_1_bias_to_fp16 = const()[name = tensor<string, []>("joint_net_1_bias_to_fp16"), val = tensor<fp16, [8198]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10493568)))];
+ tensor<fp16, [1, 8198]> linear_0_cast_fp16 = linear(bias = joint_net_1_bias_to_fp16, weight = joint_net_1_weight_to_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("linear_0_cast_fp16")];
+ tensor<int32, []> var_11 = const()[name = tensor<string, []>("op_11"), val = tensor<int32, []>(-1)];
+ tensor<fp16, [1, 8198]> var_13_softmax_cast_fp16 = softmax(axis = var_11, x = linear_0_cast_fp16)[name = tensor<string, []>("op_13_softmax_cast_fp16")];
+ tensor<fp32, []> var_13_epsilon_0 = const()[name = tensor<string, []>("op_13_epsilon_0"), val = tensor<fp32, []>(0x1p-149)];
+ tensor<fp16, [1, 8198]> logits = log(epsilon = var_13_epsilon_0, x = var_13_softmax_cast_fp16)[name = tensor<string, []>("op_13_cast_fp16")];
+ } -> (logits);
+ }
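This graph is small enough to read directly: it sums the 640-dim projected encoder and decoder embeddings, applies ReLU, a single 640 → 8198 linear layer, and returns log-softmax scores, i.e. a transducer-style joint network. A NumPy sketch of the same computation, with placeholder weights standing in for the tensors in weight.bin:

```python
import numpy as np

def multimodal_logits(enc_proj, dec_proj, W, b):
    """Joint network as in model.mil above: add -> ReLU -> linear -> log-softmax.

    enc_proj, dec_proj : (1, 640) projected encoder / decoder outputs
    W : (8198, 640) joint weight, b : (8198,) joint bias
    """
    h = np.maximum(enc_proj + dec_proj, 0.0)          # add + relu
    z = h @ W.T + b                                   # linear_0
    z = z - z.max(axis=-1, keepdims=True)             # numerically stable log-softmax
    return z - np.log(np.exp(z).sum(axis=-1, keepdims=True))

# Shapes match the metadata above.
enc = np.zeros((1, 640), dtype=np.float32)
dec = np.zeros((1, 640), dtype=np.float32)
W = np.zeros((8198, 640), dtype=np.float32)
b = np.zeros(8198, dtype=np.float32)
print(multimodal_logits(enc, dec, W, b).shape)  # (1, 8198)
```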
nvidia_parakeet-v3/MultimodalLogits.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d83381f8a19ac296033554a7cfec355063b8f3225129cb8bed013c92f6ab141f
+ size 10510028
nvidia_parakeet-v3/TextDecoder.mlmodelc/LICENSE_NOTICE.txt ADDED
@@ -0,0 +1,7 @@
+ Argmax proprietary and confidential. Under NDA.
+
+ Copyright 2024 Argmax, Inc. All rights reserved.
+
+ Unauthorized access, copying, use, distribution, and or commercialization of this file, via any medium or means is strictly prohibited.
+
+ Please contact Argmax for licensing information at [email protected].
nvidia_parakeet-v3/TextDecoder.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91faa7cfe003e448303bbbec3fe4ae0cf18a80ec8d903f760867e905b09cc125
+ size 243
nvidia_parakeet-v3/TextDecoder.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d337a032cea76091dc96a57d0c7a904c95190e54442a2fe336057ca0714378ce
+ size 504
nvidia_parakeet-v3/TextDecoder.mlmodelc/metadata.json ADDED
@@ -0,0 +1,111 @@
+ [
+ {
+ "metadataOutputVersion" : "3.0",
+ "storagePrecision" : "Float16",
+ "outputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 1 × 640)",
+ "shortDescription" : "",
+ "shape" : "[1, 640]",
+ "name" : "decoder_output_projected",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 2 × 640)",
+ "shortDescription" : "",
+ "shape" : "[2, 640]",
+ "name" : "new_state_1",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 2 × 640)",
+ "shortDescription" : "",
+ "shape" : "[2, 640]",
+ "name" : "new_state_2",
+ "type" : "MultiArray"
+ }
+ ],
+ "modelParameters" : [
+
+ ],
+ "specificationVersion" : 8,
+ "mlProgramOperationTypeHistogram" : {
+ "Select" : 1,
+ "Ios17.squeeze" : 7,
+ "Ios17.gather" : 1,
+ "Ios17.cast" : 3,
+ "Ios17.lstm" : 2,
+ "Split" : 2,
+ "Ios17.add" : 1,
+ "Ios17.linear" : 1,
+ "Ios17.greaterEqual" : 1,
+ "Stack" : 2,
+ "Ios17.expandDims" : 3
+ },
+ "computePrecision" : "Mixed (Float16, Int16, Int32)",
+ "isUpdatable" : "0",
+ "stateSchema" : [
+
+ ],
+ "availability" : {
+ "macOS" : "14.0",
+ "tvOS" : "17.0",
+ "visionOS" : "1.0",
+ "watchOS" : "10.0",
+ "iOS" : "17.0",
+ "macCatalyst" : "17.0"
+ },
+ "modelType" : {
+ "name" : "MLModelType_mlProgram"
+ },
+ "userDefinedMetadata" : {
+ "com.github.apple.coremltools.conversion_date" : "2025-08-14",
+ "com.github.apple.coremltools.source" : "torch==2.5.0",
+ "com.github.apple.coremltools.version" : "9.0b1",
+ "com.github.apple.coremltools.source_dialect" : "TorchScript"
+ },
+ "inputSchema" : [
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Int32",
+ "formattedType" : "MultiArray (Int32 1)",
+ "shortDescription" : "",
+ "shape" : "[1]",
+ "name" : "decoder_input_ids",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 2 × 640)",
+ "shortDescription" : "",
+ "shape" : "[2, 640]",
+ "name" : "state_1",
+ "type" : "MultiArray"
+ },
+ {
+ "hasShapeFlexibility" : "0",
+ "isOptional" : "0",
+ "dataType" : "Float16",
+ "formattedType" : "MultiArray (Float16 2 × 640)",
+ "shortDescription" : "",
+ "shape" : "[2, 640]",
+ "name" : "state_2",
+ "type" : "MultiArray"
+ }
+ ],
+ "generatedClassName" : "TextDecoder",
+ "method" : "predict"
+ }
+ ]
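As the schema shows, the TextDecoder is stateless at the Core ML level: its recurrent state is threaded explicitly through state_1/state_2 and returned as new_state_1/new_state_2, one (2, 640) array per LSTM state kind (see model.mil below, where each is split into one row per layer). A minimal sketch of one decode step chained into the MultimodalLogits model with coremltools; the paths, the use of -1 as a start id, and the blank handling are assumptions for illustration, not taken from this diff:

```python
import numpy as np
import coremltools as ct

decoder = ct.models.CompiledMLModel("nvidia_parakeet-v3/TextDecoder.mlmodelc")
joint = ct.models.CompiledMLModel("nvidia_parakeet-v3/MultimodalLogits.mlmodelc")

# Explicit recurrent state, shape (2, 640): one row per LSTM layer.
state_1 = np.zeros((2, 640), dtype=np.float16)
state_2 = np.zeros((2, 640), dtype=np.float16)
last_token = np.array([-1], dtype=np.int32)   # assumed start id (wrapped in the MIL)

# One encoder frame, e.g. joint_projected_encoder_output_embeds[:, :, 0, t] -> (1, 640).
enc_t = np.zeros((1, 640), dtype=np.float16)

dec_out = decoder.predict({
    "decoder_input_ids": last_token,
    "state_1": state_1,
    "state_2": state_2,
})
logits = joint.predict({
    "encoder_output_projected": enc_t,
    "decoder_output_projected": dec_out["decoder_output_projected"],
})["logits"]                                   # (1, 8198) log-probabilities

token = int(np.argmax(logits))
# A full greedy transducer loop would adopt dec_out["new_state_1"] / ["new_state_2"]
# only when a non-blank token is emitted, then advance to the next encoder frame.
```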
nvidia_parakeet-v3/TextDecoder.mlmodelc/model.mil ADDED
@@ -0,0 +1,73 @@
+ program(1.0)
+ [buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3401.3.1"}, {"coremlc-version", "3401.4.1"}, {"coremltools-component-torch", "2.5.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "9.0b1"}})]
+ {
+ func main<ios17>(tensor<int32, [1]> decoder_input_ids, tensor<fp16, [2, 640]> state_1, tensor<fp16, [2, 640]> state_2) {
+ tensor<int32, []> input_1_batch_dims_0 = const()[name = tensor<string, []>("input_1_batch_dims_0"), val = tensor<int32, []>(0)];
+ tensor<bool, []> input_1_validate_indices_0 = const()[name = tensor<string, []>("input_1_validate_indices_0"), val = tensor<bool, []>(false)];
+ tensor<fp16, [8193, 640]> prediction_embed_weight_to_fp16 = const()[name = tensor<string, []>("prediction_embed_weight_to_fp16"), val = tensor<fp16, [8193, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
+ tensor<string, []> decoder_input_ids_to_int16_dtype_0 = const()[name = tensor<string, []>("decoder_input_ids_to_int16_dtype_0"), val = tensor<string, []>("int16")];
+ tensor<string, []> cast_6_dtype_0 = const()[name = tensor<string, []>("cast_6_dtype_0"), val = tensor<string, []>("int32")];
+ tensor<int32, []> greater_equal_0_y_0 = const()[name = tensor<string, []>("greater_equal_0_y_0"), val = tensor<int32, []>(0)];
+ tensor<int16, [1]> decoder_input_ids_to_int16 = cast(dtype = decoder_input_ids_to_int16_dtype_0, x = decoder_input_ids)[name = tensor<string, []>("cast_9")];
+ tensor<int32, [1]> cast_6 = cast(dtype = cast_6_dtype_0, x = decoder_input_ids_to_int16)[name = tensor<string, []>("cast_8")];
+ tensor<bool, [1]> greater_equal_0 = greater_equal(x = cast_6, y = greater_equal_0_y_0)[name = tensor<string, []>("greater_equal_0")];
+ tensor<int32, []> slice_by_index_0 = const()[name = tensor<string, []>("slice_by_index_0"), val = tensor<int32, []>(8193)];
+ tensor<int32, [1]> add_2 = add(x = cast_6, y = slice_by_index_0)[name = tensor<string, []>("add_2")];
+ tensor<int32, [1]> select_0 = select(a = cast_6, b = add_2, cond = greater_equal_0)[name = tensor<string, []>("select_0")];
+ tensor<int32, []> input_1_cast_fp16_cast_uint16_axis_0 = const()[name = tensor<string, []>("input_1_cast_fp16_cast_uint16_axis_0"), val = tensor<int32, []>(0)];
+ tensor<string, []> select_0_to_int16_dtype_0 = const()[name = tensor<string, []>("select_0_to_int16_dtype_0"), val = tensor<string, []>("int16")];
+ tensor<int16, [1]> select_0_to_int16 = cast(dtype = select_0_to_int16_dtype_0, x = select_0)[name = tensor<string, []>("cast_7")];
+ tensor<fp16, [1, 640]> input_1_cast_fp16_cast_uint16_cast_uint16 = gather(axis = input_1_cast_fp16_cast_uint16_axis_0, batch_dims = input_1_batch_dims_0, indices = select_0_to_int16, validate_indices = input_1_validate_indices_0, x = prediction_embed_weight_to_fp16)[name = tensor<string, []>("input_1_cast_fp16_cast_uint16_cast_uint16")];
+ tensor<int32, [1]> input_3_axes_0 = const()[name = tensor<string, []>("input_3_axes_0"), val = tensor<int32, [1]>([1])];
+ tensor<fp16, [1, 1, 640]> input_3_cast_fp16 = expand_dims(axes = input_3_axes_0, x = input_1_cast_fp16_cast_uint16_cast_uint16)[name = tensor<string, []>("input_3_cast_fp16")];
+ tensor<int32, [1]> hx_1_axes_0 = const()[name = tensor<string, []>("hx_1_axes_0"), val = tensor<int32, [1]>([1])];
+ tensor<fp16, [2, 1, 640]> hx_1_cast_fp16 = expand_dims(axes = hx_1_axes_0, x = state_1)[name = tensor<string, []>("hx_1_cast_fp16")];
+ tensor<int32, [1]> hx_axes_0 = const()[name = tensor<string, []>("hx_axes_0"), val = tensor<int32, [1]>([1])];
+ tensor<fp16, [2, 1, 640]> hx_cast_fp16 = expand_dims(axes = hx_axes_0, x = state_2)[name = tensor<string, []>("hx_cast_fp16")];
+ tensor<int32, []> split_0_num_splits_0 = const()[name = tensor<string, []>("split_0_num_splits_0"), val = tensor<int32, []>(2)];
+ tensor<int32, []> split_0_axis_0 = const()[name = tensor<string, []>("split_0_axis_0"), val = tensor<int32, []>(0)];
+ tensor<fp16, [1, 1, 640]> split_0_cast_fp16_0, tensor<fp16, [1, 1, 640]> split_0_cast_fp16_1 = split(axis = split_0_axis_0, num_splits = split_0_num_splits_0, x = hx_1_cast_fp16)[name = tensor<string, []>("split_0_cast_fp16")];
+ tensor<int32, []> split_1_num_splits_0 = const()[name = tensor<string, []>("split_1_num_splits_0"), val = tensor<int32, []>(2)];
+ tensor<int32, []> split_1_axis_0 = const()[name = tensor<string, []>("split_1_axis_0"), val = tensor<int32, []>(0)];
+ tensor<fp16, [1, 1, 640]> split_1_cast_fp16_0, tensor<fp16, [1, 1, 640]> split_1_cast_fp16_1 = split(axis = split_1_axis_0, num_splits = split_1_num_splits_0, x = hx_cast_fp16)[name = tensor<string, []>("split_1_cast_fp16")];
+ tensor<int32, [1]> output_lstm_layer_0_lstm_h0_squeeze_axes_0 = const()[name = tensor<string, []>("output_lstm_layer_0_lstm_h0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [1, 640]> output_lstm_layer_0_lstm_h0_squeeze_cast_fp16 = squeeze(axes = output_lstm_layer_0_lstm_h0_squeeze_axes_0, x = split_0_cast_fp16_0)[name = tensor<string, []>("output_lstm_layer_0_lstm_h0_squeeze_cast_fp16")];
+ tensor<int32, [1]> output_lstm_layer_0_lstm_c0_squeeze_axes_0 = const()[name = tensor<string, []>("output_lstm_layer_0_lstm_c0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [1, 640]> output_lstm_layer_0_lstm_c0_squeeze_cast_fp16 = squeeze(axes = output_lstm_layer_0_lstm_c0_squeeze_axes_0, x = split_1_cast_fp16_0)[name = tensor<string, []>("output_lstm_layer_0_lstm_c0_squeeze_cast_fp16")];
+ tensor<string, []> output_lstm_layer_0_direction_0 = const()[name = tensor<string, []>("output_lstm_layer_0_direction_0"), val = tensor<string, []>("forward")];
+ tensor<bool, []> output_lstm_layer_0_output_sequence_0 = const()[name = tensor<string, []>("output_lstm_layer_0_output_sequence_0"), val = tensor<bool, []>(true)];
+ tensor<string, []> output_lstm_layer_0_recurrent_activation_0 = const()[name = tensor<string, []>("output_lstm_layer_0_recurrent_activation_0"), val = tensor<string, []>("sigmoid")];
+ tensor<string, []> output_lstm_layer_0_cell_activation_0 = const()[name = tensor<string, []>("output_lstm_layer_0_cell_activation_0"), val = tensor<string, []>("tanh")];
+ tensor<string, []> output_lstm_layer_0_activation_0 = const()[name = tensor<string, []>("output_lstm_layer_0_activation_0"), val = tensor<string, []>("tanh")];
+ tensor<fp16, [2560, 640]> concat_1_to_fp16 = const()[name = tensor<string, []>("concat_1_to_fp16"), val = tensor<fp16, [2560, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10487168)))];
+ tensor<fp16, [2560, 640]> concat_2_to_fp16 = const()[name = tensor<string, []>("concat_2_to_fp16"), val = tensor<fp16, [2560, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13764032)))];
+ tensor<fp16, [2560]> concat_0_to_fp16 = const()[name = tensor<string, []>("concat_0_to_fp16"), val = tensor<fp16, [2560]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(17040896)))];
+ tensor<fp16, [1, 1, 640]> output_lstm_layer_0_cast_fp16_0, tensor<fp16, [1, 640]> output_lstm_layer_0_cast_fp16_1, tensor<fp16, [1, 640]> output_lstm_layer_0_cast_fp16_2 = lstm(activation = output_lstm_layer_0_activation_0, bias = concat_0_to_fp16, cell_activation = output_lstm_layer_0_cell_activation_0, direction = output_lstm_layer_0_direction_0, initial_c = output_lstm_layer_0_lstm_c0_squeeze_cast_fp16, initial_h = output_lstm_layer_0_lstm_h0_squeeze_cast_fp16, output_sequence = output_lstm_layer_0_output_sequence_0, recurrent_activation = output_lstm_layer_0_recurrent_activation_0, weight_hh = concat_2_to_fp16, weight_ih = concat_1_to_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("output_lstm_layer_0_cast_fp16")];
+ tensor<int32, [1]> output_lstm_h0_squeeze_axes_0 = const()[name = tensor<string, []>("output_lstm_h0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [1, 640]> output_lstm_h0_squeeze_cast_fp16 = squeeze(axes = output_lstm_h0_squeeze_axes_0, x = split_0_cast_fp16_1)[name = tensor<string, []>("output_lstm_h0_squeeze_cast_fp16")];
+ tensor<int32, [1]> output_lstm_c0_squeeze_axes_0 = const()[name = tensor<string, []>("output_lstm_c0_squeeze_axes_0"), val = tensor<int32, [1]>([0])];
+ tensor<fp16, [1, 640]> output_lstm_c0_squeeze_cast_fp16 = squeeze(axes = output_lstm_c0_squeeze_axes_0, x = split_1_cast_fp16_1)[name = tensor<string, []>("output_lstm_c0_squeeze_cast_fp16")];
+ tensor<string, []> output_direction_0 = const()[name = tensor<string, []>("output_direction_0"), val = tensor<string, []>("forward")];
+ tensor<bool, []> output_output_sequence_0 = const()[name = tensor<string, []>("output_output_sequence_0"), val = tensor<bool, []>(true)];
+ tensor<string, []> output_recurrent_activation_0 = const()[name = tensor<string, []>("output_recurrent_activation_0"), val = tensor<string, []>("sigmoid")];
+ tensor<string, []> output_cell_activation_0 = const()[name = tensor<string, []>("output_cell_activation_0"), val = tensor<string, []>("tanh")];
+ tensor<string, []> output_activation_0 = const()[name = tensor<string, []>("output_activation_0"), val = tensor<string, []>("tanh")];
+ tensor<fp16, [2560, 640]> concat_4_to_fp16 = const()[name = tensor<string, []>("concat_4_to_fp16"), val = tensor<fp16, [2560, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(17046080)))];
+ tensor<fp16, [2560, 640]> concat_5_to_fp16 = const()[name = tensor<string, []>("concat_5_to_fp16"), val = tensor<fp16, [2560, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(20322944)))];
+ tensor<fp16, [2560]> concat_3_to_fp16 = const()[name = tensor<string, []>("concat_3_to_fp16"), val = tensor<fp16, [2560]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(23599808)))];
+ tensor<fp16, [1, 1, 640]> output_cast_fp16_0, tensor<fp16, [1, 640]> output_cast_fp16_1, tensor<fp16, [1, 640]> output_cast_fp16_2 = lstm(activation = output_activation_0, bias = concat_3_to_fp16, cell_activation = output_cell_activation_0, direction = output_direction_0, initial_c = output_lstm_c0_squeeze_cast_fp16, initial_h = output_lstm_h0_squeeze_cast_fp16, output_sequence = output_output_sequence_0, recurrent_activation = output_recurrent_activation_0, weight_hh = concat_5_to_fp16, weight_ih = concat_4_to_fp16, x = output_lstm_layer_0_cast_fp16_0)[name = tensor<string, []>("output_cast_fp16")];
+ tensor<int32, []> var_32_axis_0 = const()[name = tensor<string, []>("op_32_axis_0"), val = tensor<int32, []>(0)];
+ tensor<fp16, [2, 1, 640]> var_32_cast_fp16 = stack(axis = var_32_axis_0, values = (output_lstm_layer_0_cast_fp16_1, output_cast_fp16_1))[name = tensor<string, []>("op_32_cast_fp16")];
+ tensor<int32, []> var_33_axis_0 = const()[name = tensor<string, []>("op_33_axis_0"), val = tensor<int32, []>(0)];
+ tensor<fp16, [2, 1, 640]> var_33_cast_fp16 = stack(axis = var_33_axis_0, values = (output_lstm_layer_0_cast_fp16_2, output_cast_fp16_2))[name = tensor<string, []>("op_33_cast_fp16")];
+ tensor<int32, [1]> input_axes_0 = const()[name = tensor<string, []>("input_axes_0"), val = tensor<int32, [1]>([1])];
+ tensor<fp16, [1, 640]> input_cast_fp16 = squeeze(axes = input_axes_0, x = output_cast_fp16_0)[name = tensor<string, []>("input_cast_fp16")];
+ tensor<int32, [1]> var_35_axes_0 = const()[name = tensor<string, []>("op_35_axes_0"), val = tensor<int32, [1]>([1])];
+ tensor<fp16, [2, 640]> new_state_1 = squeeze(axes = var_35_axes_0, x = var_32_cast_fp16)[name = tensor<string, []>("op_35_cast_fp16")];
+ tensor<int32, [1]> var_36_axes_0 = const()[name = tensor<string, []>("op_36_axes_0"), val = tensor<int32, [1]>([1])];
+ tensor<fp16, [2, 640]> new_state_2 = squeeze(axes = var_36_axes_0, x = var_33_cast_fp16)[name = tensor<string, []>("op_36_cast_fp16")];
+ tensor<fp16, [640, 640]> joint_projection_weight_to_fp16 = const()[name = tensor<string, []>("joint_projection_weight_to_fp16"), val = tensor<fp16, [640, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(23604992)))];
+ tensor<fp16, [640]> joint_projection_bias_to_fp16 = const()[name = tensor<string, []>("joint_projection_bias_to_fp16"), val = tensor<fp16, [640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(24424256)))];
+ tensor<fp16, [1, 640]> decoder_output_projected = linear(bias = joint_projection_bias_to_fp16, weight = joint_projection_weight_to_fp16, x = input_cast_fp16)[name = tensor<string, []>("linear_0_cast_fp16")];
+ } -> (decoder_output_projected, new_state_1, new_state_2);
+ }
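The graph above is the transducer's prediction network: it embeds the previous token via an 8193 × 640 table (negative ids are wrapped by adding 8193), runs two stacked 640-unit LSTM cells whose hidden and cell states arrive as state_1/state_2 and leave as new_state_1/new_state_2, and applies a final 640 × 640 joint projection. A compact NumPy sketch of one step, with placeholder weights standing in for the blobs in weight.bin and an illustrative gate ordering (Core ML's internal LSTM weight layout is not spelled out in this diff):

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def lstm_step(x, h, c, W_ih, W_hh, b):
    """One LSTM cell step with 640 hidden units (gate order illustrative)."""
    gates = x @ W_ih.T + h @ W_hh.T + b              # (2560,) = 4 gates x 640
    i, f, g, o = np.split(gates, 4)
    c = sigmoid(f) * c + sigmoid(i) * np.tanh(g)
    h = sigmoid(o) * np.tanh(c)
    return h, c

def decoder_step(token_id, state_h, state_c, params):
    """Prediction network: embedding -> 2 stacked LSTMs -> joint projection."""
    emb = params["embedding"][token_id % 8193]       # negative ids wrap, as in the MIL
    h, c = state_h.copy(), state_c.copy()
    x = emb
    for layer in range(2):
        h[layer], c[layer] = lstm_step(
            x, h[layer], c[layer],
            params["W_ih"][layer], params["W_hh"][layer], params["b"][layer])
        x = h[layer]
    projected = x @ params["W_proj"].T + params["b_proj"]   # (640,)
    return projected, h, c

# Placeholder parameters with the shapes used by the compiled model.
params = {
    "embedding": np.zeros((8193, 640), dtype=np.float32),
    "W_ih": np.zeros((2, 2560, 640), dtype=np.float32),
    "W_hh": np.zeros((2, 2560, 640), dtype=np.float32),
    "b": np.zeros((2, 2560), dtype=np.float32),
    "W_proj": np.zeros((640, 640), dtype=np.float32),
    "b_proj": np.zeros(640, dtype=np.float32),
}
out, h, c = decoder_step(-1, np.zeros((2, 640)), np.zeros((2, 640)), params)
print(out.shape, h.shape, c.shape)  # (640,) (2, 640) (2, 640)
```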
nvidia_parakeet-v3/TextDecoder.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36cd656ebf1105ee5aa85d1e55c00f25669327aef8b39346333a19260ca78018
+ size 24425600