
program(1.0)

[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3401.3.1"}, {"coremlc-version", "3401.4.1"}, {"coremltools-component-torch", "2.5.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "9.0b1"}})]

{

func main<ios17>(tensor<fp16, [1, 640]> decoder_output_projected, tensor<fp16, [1, 640]> encoder_output_projected) {

tensor<fp16, [1, 640]> input_1_cast_fp16 = add(x = decoder_output_projected, y = encoder_output_projected)[name = tensor<string, []>("input_1_cast_fp16")];

tensor<fp16, [1, 640]> input_3_cast_fp16 = relu(x = input_1_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];

tensor<fp16, [8198, 640]> joint_net_1_weight_to_fp16 = const()[name = tensor<string, []>("joint_net_1_weight_to_fp16"), val = tensor<fp16, [8198, 640]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];

tensor<fp16, [8198]> joint_net_1_bias_to_fp16 = const()[name = tensor<string, []>("joint_net_1_bias_to_fp16"), val = tensor<fp16, [8198]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10493568)))];

tensor<fp16, [1, 8198]> linear_0_cast_fp16 = linear(bias = joint_net_1_bias_to_fp16, weight = joint_net_1_weight_to_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("linear_0_cast_fp16")];

tensor<int32, []> var_11 = const()[name = tensor<string, []>("op_11"), val = tensor<int32, []>(-1)];

tensor<fp16, [1, 8198]> var_13_softmax_cast_fp16 = softmax(axis = var_11, x = linear_0_cast_fp16)[name = tensor<string, []>("op_13_softmax_cast_fp16")];

tensor<fp32, []> var_13_epsilon_0 = const()[name = tensor<string, []>("op_13_epsilon_0"), val = tensor<fp32, []>(0x1p-149)];

tensor<fp16, [1, 8198]> logits = log(epsilon = var_13_epsilon_0, x = var_13_softmax_cast_fp16)[name = tensor<string, []>("op_13_cast_fp16")];

} -> (logits);

}