SpeakerKit Pro v1 compressed variants
This view is limited to 50 files because it contains too many changes.
- {speaker_segmenter/pyannote-v3/SpeakerSegmenter.mlmodelc → speaker_embedder/pyannote-v3/W16A16}/LICENSE_NOTICE.txt +0 -0
- speaker_embedder/pyannote-v3/W16A16/README.txt +6 -0
- speaker_embedder/pyannote-v3/{SpeakerEmbedder.mlmodelc → W16A16/SpeakerEmbedder.mlmodelc}/analytics/coremldata.bin +0 -0
- speaker_embedder/pyannote-v3/{SpeakerEmbedder.mlmodelc → W16A16/SpeakerEmbedder.mlmodelc}/coremldata.bin +0 -0
- speaker_embedder/pyannote-v3/{SpeakerEmbedder.mlmodelc → W16A16/SpeakerEmbedder.mlmodelc}/metadata.json +0 -0
- speaker_embedder/pyannote-v3/{SpeakerEmbedder.mlmodelc → W16A16/SpeakerEmbedder.mlmodelc}/model.mil +0 -0
- speaker_embedder/pyannote-v3/{SpeakerEmbedder.mlmodelc → W16A16/SpeakerEmbedder.mlmodelc}/weights/weight.bin +0 -0
- speaker_embedder/pyannote-v3/{SpeakerEmbedderPreprocessor.mlmodelc → W16A16/SpeakerEmbedderPreprocessor.mlmodelc}/analytics/coremldata.bin +0 -0
- speaker_embedder/pyannote-v3/{SpeakerEmbedderPreprocessor.mlmodelc → W16A16/SpeakerEmbedderPreprocessor.mlmodelc}/coremldata.bin +0 -0
- speaker_embedder/pyannote-v3/{SpeakerEmbedderPreprocessor.mlmodelc → W16A16/SpeakerEmbedderPreprocessor.mlmodelc}/metadata.json +0 -0
- speaker_embedder/pyannote-v3/{SpeakerEmbedderPreprocessor.mlmodelc → W16A16/SpeakerEmbedderPreprocessor.mlmodelc}/model.mil +0 -0
- speaker_embedder/pyannote-v3/{SpeakerEmbedderPreprocessor.mlmodelc → W16A16/SpeakerEmbedderPreprocessor.mlmodelc}/weights/weight.bin +0 -0
- speaker_embedder/pyannote-v3/W6A16/LICENSE_NOTICE.txt +7 -0
- speaker_embedder/pyannote-v3/W6A16/README.txt +6 -0
- speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedder.mlmodelc/analytics/coremldata.bin +3 -0
- speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedder.mlmodelc/coremldata.bin +3 -0
- speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedder.mlmodelc/metadata.json +87 -0
- speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedder.mlmodelc/model.mil +473 -0
- speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedder.mlmodelc/weights/weight.bin +3 -0
- speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedderPreprocessor.mlmodelc/analytics/coremldata.bin +3 -0
- speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedderPreprocessor.mlmodelc/coremldata.bin +3 -0
- speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedderPreprocessor.mlmodelc/metadata.json +77 -0
- speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedderPreprocessor.mlmodelc/model.mil +90 -0
- speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedderPreprocessor.mlmodelc/weights/weight.bin +3 -0
- speaker_embedder/pyannote-v3/W8A16/LICENSE_NOTICE.txt +7 -0
- speaker_embedder/pyannote-v3/W8A16/README.txt +6 -0
- speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedder.mlmodelc/analytics/coremldata.bin +3 -0
- speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedder.mlmodelc/coremldata.bin +3 -0
- speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedder.mlmodelc/metadata.json +87 -0
- speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedder.mlmodelc/model.mil +473 -0
- speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedder.mlmodelc/weights/weight.bin +3 -0
- speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedderPreprocessor.mlmodelc/analytics/coremldata.bin +3 -0
- speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedderPreprocessor.mlmodelc/coremldata.bin +3 -0
- speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedderPreprocessor.mlmodelc/metadata.json +77 -0
- speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedderPreprocessor.mlmodelc/model.mil +90 -0
- speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedderPreprocessor.mlmodelc/weights/weight.bin +3 -0
- speaker_segmenter/pyannote-v3/W32A32/LICENSE_NOTICE.txt +7 -0
- speaker_segmenter/pyannote-v3/W32A32/README.txt +6 -0
- speaker_segmenter/pyannote-v3/W32A32/SpeakerSegmenter.mlmodelc/LICENSE_NOTICE.txt +7 -0
- speaker_segmenter/pyannote-v3/{SpeakerSegmenter.mlmodelc → W32A32/SpeakerSegmenter.mlmodelc}/analytics/coremldata.bin +0 -0
- speaker_segmenter/pyannote-v3/{SpeakerSegmenter.mlmodelc → W32A32/SpeakerSegmenter.mlmodelc}/coremldata.bin +0 -0
- speaker_segmenter/pyannote-v3/{SpeakerSegmenter.mlmodelc → W32A32/SpeakerSegmenter.mlmodelc}/metadata.json +0 -0
- speaker_segmenter/pyannote-v3/{SpeakerSegmenter.mlmodelc → W32A32/SpeakerSegmenter.mlmodelc}/model.mil +0 -0
- speaker_segmenter/pyannote-v3/{SpeakerSegmenter.mlmodelc → W32A32/SpeakerSegmenter.mlmodelc}/weights/weight.bin +0 -0
- speaker_segmenter/pyannote-v3/W8A16/LICENSE_NOTICE.txt +7 -0
- speaker_segmenter/pyannote-v3/W8A16/README.txt +6 -0
- speaker_segmenter/pyannote-v3/W8A16/SpeakerSegmenter.mlmodelc/analytics/coremldata.bin +3 -0
- speaker_segmenter/pyannote-v3/W8A16/SpeakerSegmenter.mlmodelc/coremldata.bin +3 -0
- speaker_segmenter/pyannote-v3/W8A16/SpeakerSegmenter.mlmodelc/metadata.json +133 -0
- speaker_segmenter/pyannote-v3/W8A16/SpeakerSegmenter.mlmodelc/model.mil +0 -0
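The list above moves the existing embedder and segmenter assets under W16A16 and W32A32 directories and adds new palettized W6A16/W8A16 embedder and W8A16 segmenter variants, each as a self-contained .mlmodelc directory. A single variant can be pulled on its own; the sketch below is illustrative only, assuming the argmaxinc/speakerkit-pro repo id taken from the README URLs in this commit and that access has already been granted under the LICENSE_NOTICE terms.

# Sketch: fetch only the 6-bit embedder variant with huggingface_hub.
# Assumes `pip install huggingface_hub` and granted access to the repo.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="argmaxinc/speakerkit-pro",
    allow_patterns=["speaker_embedder/pyannote-v3/W6A16/*"],  # restrict to one variant
)
print(local_dir)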
{speaker_segmenter/pyannote-v3/SpeakerSegmenter.mlmodelc → speaker_embedder/pyannote-v3/W16A16}/LICENSE_NOTICE.txt
RENAMED
File without changes
speaker_embedder/pyannote-v3/W16A16/README.txt
ADDED
@@ -0,0 +1,6 @@
+# License
+
+Original model weights: https://github.com/wenet-e2e/wespeaker/blob/master/docs/pretrained.md#model-license
+Argmax-optimized model asset (Assets with `.mlmodelc` extension): https://huggingface.co/argmaxinc/speakerkit-pro/blob/main/LICENSE_NOTICE.txt
+
+Please contact [email protected] for licensing SpeakerKit Pro assets
speaker_embedder/pyannote-v3/{SpeakerEmbedder.mlmodelc → W16A16/SpeakerEmbedder.mlmodelc}/analytics/coremldata.bin
RENAMED
File without changes
speaker_embedder/pyannote-v3/{SpeakerEmbedder.mlmodelc → W16A16/SpeakerEmbedder.mlmodelc}/coremldata.bin
RENAMED
File without changes
speaker_embedder/pyannote-v3/{SpeakerEmbedder.mlmodelc → W16A16/SpeakerEmbedder.mlmodelc}/metadata.json
RENAMED
File without changes
speaker_embedder/pyannote-v3/{SpeakerEmbedder.mlmodelc → W16A16/SpeakerEmbedder.mlmodelc}/model.mil
RENAMED
File without changes
speaker_embedder/pyannote-v3/{SpeakerEmbedder.mlmodelc → W16A16/SpeakerEmbedder.mlmodelc}/weights/weight.bin
RENAMED
File without changes
speaker_embedder/pyannote-v3/{SpeakerEmbedderPreprocessor.mlmodelc → W16A16/SpeakerEmbedderPreprocessor.mlmodelc}/analytics/coremldata.bin
RENAMED
File without changes
speaker_embedder/pyannote-v3/{SpeakerEmbedderPreprocessor.mlmodelc → W16A16/SpeakerEmbedderPreprocessor.mlmodelc}/coremldata.bin
RENAMED
File without changes
speaker_embedder/pyannote-v3/{SpeakerEmbedderPreprocessor.mlmodelc → W16A16/SpeakerEmbedderPreprocessor.mlmodelc}/metadata.json
RENAMED
File without changes
speaker_embedder/pyannote-v3/{SpeakerEmbedderPreprocessor.mlmodelc → W16A16/SpeakerEmbedderPreprocessor.mlmodelc}/model.mil
RENAMED
File without changes
speaker_embedder/pyannote-v3/{SpeakerEmbedderPreprocessor.mlmodelc → W16A16/SpeakerEmbedderPreprocessor.mlmodelc}/weights/weight.bin
RENAMED
File without changes
speaker_embedder/pyannote-v3/W6A16/LICENSE_NOTICE.txt
ADDED
@@ -0,0 +1,7 @@
+Argmax proprietary and confidential. Under NDA.
+
+Copyright 2024 Argmax, Inc. All rights reserved.
+
+Unauthorized access, copying, use, distribution, and or commercialization of this file, via any medium or means is strictly prohibited.
+
+Please contact Argmax for licensing information at [email protected].
speaker_embedder/pyannote-v3/W6A16/README.txt
ADDED
@@ -0,0 +1,6 @@
+# License
+
+Original model weights: https://github.com/wenet-e2e/wespeaker/blob/master/docs/pretrained.md#model-license
+Argmax-optimized model asset (Assets with `.mlmodelc` extension): https://huggingface.co/argmaxinc/speakerkit-pro/blob/main/LICENSE_NOTICE.txt
+
+Please contact [email protected] for licensing SpeakerKit Pro assets
speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e63f6f24c2db3be6678d35a0dc87f34db5d488bd71969cb5fdb816efd6f7f2a5
+size 243
speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c354879615fe262bc5f8c92b69df8c58111d06331c5d20ddb2e0efe99ea4441c
+size 370
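The coremldata.bin and weight.bin entries in this commit are Git LFS pointers: the oid sha256 line records the digest of the real blob and size records its byte count, so a downloaded file can be checked against its pointer. A short sketch, assuming a hypothetical local copy of the W6A16 embedder:

import hashlib

# Hypothetical local path; adjust to wherever the variant was downloaded.
path = "speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedder.mlmodelc/coremldata.bin"
expected_oid = "c354879615fe262bc5f8c92b69df8c58111d06331c5d20ddb2e0efe99ea4441c"  # oid from the pointer above

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
assert digest.hexdigest() == expected_oid, "blob does not match its LFS pointer"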
speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,87 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Mixed (Float16, Palettized (6 bits))",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 3 × 256)",
+        "shortDescription" : "",
+        "shape" : "[1, 3, 256]",
+        "name" : "speaker_embeddings",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 7,
+    "mlProgramOperationTypeHistogram" : {
+      "Concat" : 3,
+      "Ios16.mul" : 12,
+      "SliceByIndex" : 3,
+      "Ios16.constexprLutToDense" : 35,
+      "Transpose" : 1,
+      "Ios16.sub" : 6,
+      "Ios16.sqrt" : 3,
+      "Stack" : 1,
+      "UpsampleNearestNeighbor" : 1,
+      "Ios16.conv" : 36,
+      "Ios16.add" : 22,
+      "Squeeze" : 1,
+      "Ios16.relu" : 33,
+      "Ios16.realDiv" : 9,
+      "Ios16.reduceSum" : 12,
+      "ExpandDims" : 8,
+      "Ios16.linear" : 1,
+      "Ios16.reshape" : 1
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32)",
+    "isUpdatable" : "0",
+    "stateSchema" : [
+
+    ],
+    "availability" : {
+      "macOS" : "13.0",
+      "tvOS" : "16.0",
+      "visionOS" : "1.0",
+      "watchOS" : "9.0",
+      "iOS" : "16.0",
+      "macCatalyst" : "16.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.version" : "8.1",
+      "com.github.apple.coremltools.source" : "torch==2.5.1"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 998 × 80)",
+        "shortDescription" : "",
+        "shape" : "[1, 998, 80]",
+        "name" : "preprocessor_output_1",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 3 × 589)",
+        "shortDescription" : "",
+        "shape" : "[1, 3, 589]",
+        "name" : "speaker_masks",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "SpeakerEmbedding_6_bit",
+    "method" : "predict"
+  }
+]
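Per the metadata above, the 6-bit embedder takes Float16 multi-arrays preprocessor_output_1 [1, 998, 80] and speaker_masks [1, 3, 589] and returns speaker_embeddings [1, 3, 256], with weights stored as 6-bit palettized lookup tables. A minimal sketch of loading the compiled asset follows; it assumes macOS with a recent coremltools, a hypothetical local path to the downloaded variant, and dummy zero inputs rather than a real preprocessing pipeline.

import numpy as np
import coremltools as ct

# Hypothetical local path to the downloaded W6A16 variant.
model = ct.models.CompiledMLModel(
    "speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedder.mlmodelc",
    compute_units=ct.ComputeUnit.ALL,
)

# Dummy inputs shaped per the metadata.json input schema above.
outputs = model.predict({
    "preprocessor_output_1": np.zeros((1, 998, 80), dtype=np.float32),
    "speaker_masks": np.zeros((1, 3, 589), dtype=np.float32),
})
print(outputs["speaker_embeddings"].shape)  # (1, 3, 256) per the output schema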
speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedder.mlmodelc/model.mil
ADDED
@@ -0,0 +1,473 @@
1 |
+
program(1.0)
|
2 |
+
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3401.3.1"}, {"coremlc-version", "3401.4.1"}})]
|
3 |
+
{
|
4 |
+
func main<ios16>(tensor<fp16, [1, 998, 80]> preprocessor_output_1, tensor<fp16, [1, 3, 589]> speaker_masks) {
|
5 |
+
tensor<int32, []> var_12 = const()[name = tensor<string, []>("op_12"), val = tensor<int32, []>(1)];
|
6 |
+
tensor<int32, [3]> var_22 = const()[name = tensor<string, []>("op_22"), val = tensor<int32, [3]>([0, 2, 1])];
|
7 |
+
tensor<int32, [1]> input_1_axes_0 = const()[name = tensor<string, []>("input_1_axes_0"), val = tensor<int32, [1]>([1])];
|
8 |
+
tensor<fp16, [1, 80, 998]> fbank_cast_fp16 = transpose(perm = var_22, x = preprocessor_output_1)[name = tensor<string, []>("transpose_0")];
|
9 |
+
tensor<fp16, [1, 1, 80, 998]> input_1_cast_fp16 = expand_dims(axes = input_1_axes_0, x = fbank_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")];
|
10 |
+
tensor<string, []> input_3_pad_type_0 = const()[name = tensor<string, []>("input_3_pad_type_0"), val = tensor<string, []>("custom")];
|
11 |
+
tensor<int32, [4]> input_3_pad_0 = const()[name = tensor<string, []>("input_3_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
12 |
+
tensor<int32, [2]> input_3_strides_0 = const()[name = tensor<string, []>("input_3_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
13 |
+
tensor<int32, [2]> input_3_dilations_0 = const()[name = tensor<string, []>("input_3_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
14 |
+
tensor<int32, []> input_3_groups_0 = const()[name = tensor<string, []>("input_3_groups_0"), val = tensor<int32, []>(1)];
|
15 |
+
tensor<fp16, [32, 1, 3, 3]> const_5_to_fp16 = const()[name = tensor<string, []>("const_5_to_fp16"), val = tensor<fp16, [32, 1, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
|
16 |
+
tensor<fp16, [32]> const_6_to_fp16 = const()[name = tensor<string, []>("const_6_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(704)))];
|
17 |
+
tensor<fp16, [1, 32, 80, 998]> input_5_cast_fp16 = conv(bias = const_6_to_fp16, dilations = input_3_dilations_0, groups = input_3_groups_0, pad = input_3_pad_0, pad_type = input_3_pad_type_0, strides = input_3_strides_0, weight = const_5_to_fp16, x = input_1_cast_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
|
18 |
+
tensor<fp16, [1, 32, 80, 998]> input_7_cast_fp16 = relu(x = input_5_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
|
19 |
+
tensor<string, []> input_9_pad_type_0 = const()[name = tensor<string, []>("input_9_pad_type_0"), val = tensor<string, []>("custom")];
|
20 |
+
tensor<int32, [4]> input_9_pad_0 = const()[name = tensor<string, []>("input_9_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
21 |
+
tensor<int32, [2]> input_9_strides_0 = const()[name = tensor<string, []>("input_9_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
22 |
+
tensor<int32, [2]> input_9_dilations_0 = const()[name = tensor<string, []>("input_9_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
23 |
+
tensor<int32, []> input_9_groups_0 = const()[name = tensor<string, []>("input_9_groups_0"), val = tensor<int32, []>(1)];
|
24 |
+
tensor<fp16, [32, 32, 3, 3]> const_7_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [6912]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(832))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(7808))), name = tensor<string, []>("const_7_to_fp16_palettized"), shape = tensor<uint32, [4]>([32, 32, 3, 3])];
|
25 |
+
tensor<fp16, [32]> const_8_to_fp16 = const()[name = tensor<string, []>("const_8_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8000)))];
|
26 |
+
tensor<fp16, [1, 32, 80, 998]> input_11_cast_fp16 = conv(bias = const_8_to_fp16, dilations = input_9_dilations_0, groups = input_9_groups_0, pad = input_9_pad_0, pad_type = input_9_pad_type_0, strides = input_9_strides_0, weight = const_7_to_fp16_palettized, x = input_7_cast_fp16)[name = tensor<string, []>("input_11_cast_fp16")];
|
27 |
+
tensor<fp16, [1, 32, 80, 998]> input_13_cast_fp16 = relu(x = input_11_cast_fp16)[name = tensor<string, []>("input_13_cast_fp16")];
|
28 |
+
tensor<string, []> input_15_pad_type_0 = const()[name = tensor<string, []>("input_15_pad_type_0"), val = tensor<string, []>("custom")];
|
29 |
+
tensor<int32, [4]> input_15_pad_0 = const()[name = tensor<string, []>("input_15_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
30 |
+
tensor<int32, [2]> input_15_strides_0 = const()[name = tensor<string, []>("input_15_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
31 |
+
tensor<int32, [2]> input_15_dilations_0 = const()[name = tensor<string, []>("input_15_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
32 |
+
tensor<int32, []> input_15_groups_0 = const()[name = tensor<string, []>("input_15_groups_0"), val = tensor<int32, []>(1)];
|
33 |
+
tensor<fp16, [32, 32, 3, 3]> const_9_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [6912]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8128))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15104))), name = tensor<string, []>("const_9_to_fp16_palettized"), shape = tensor<uint32, [4]>([32, 32, 3, 3])];
|
34 |
+
tensor<fp16, [32]> const_10_to_fp16 = const()[name = tensor<string, []>("const_10_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15296)))];
|
35 |
+
tensor<fp16, [1, 32, 80, 998]> out_1_cast_fp16 = conv(bias = const_10_to_fp16, dilations = input_15_dilations_0, groups = input_15_groups_0, pad = input_15_pad_0, pad_type = input_15_pad_type_0, strides = input_15_strides_0, weight = const_9_to_fp16_palettized, x = input_13_cast_fp16)[name = tensor<string, []>("out_1_cast_fp16")];
|
36 |
+
tensor<fp16, [1, 32, 80, 998]> input_17_cast_fp16 = add(x = out_1_cast_fp16, y = input_7_cast_fp16)[name = tensor<string, []>("input_17_cast_fp16")];
|
37 |
+
tensor<fp16, [1, 32, 80, 998]> input_19_cast_fp16 = relu(x = input_17_cast_fp16)[name = tensor<string, []>("input_19_cast_fp16")];
|
38 |
+
tensor<string, []> input_21_pad_type_0 = const()[name = tensor<string, []>("input_21_pad_type_0"), val = tensor<string, []>("custom")];
|
39 |
+
tensor<int32, [4]> input_21_pad_0 = const()[name = tensor<string, []>("input_21_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
40 |
+
tensor<int32, [2]> input_21_strides_0 = const()[name = tensor<string, []>("input_21_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
41 |
+
tensor<int32, [2]> input_21_dilations_0 = const()[name = tensor<string, []>("input_21_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
42 |
+
tensor<int32, []> input_21_groups_0 = const()[name = tensor<string, []>("input_21_groups_0"), val = tensor<int32, []>(1)];
|
43 |
+
tensor<fp16, [32, 32, 3, 3]> const_11_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [6912]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15424))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22400))), name = tensor<string, []>("const_11_to_fp16_palettized"), shape = tensor<uint32, [4]>([32, 32, 3, 3])];
|
44 |
+
tensor<fp16, [32]> const_12_to_fp16 = const()[name = tensor<string, []>("const_12_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22592)))];
|
45 |
+
tensor<fp16, [1, 32, 80, 998]> input_23_cast_fp16 = conv(bias = const_12_to_fp16, dilations = input_21_dilations_0, groups = input_21_groups_0, pad = input_21_pad_0, pad_type = input_21_pad_type_0, strides = input_21_strides_0, weight = const_11_to_fp16_palettized, x = input_19_cast_fp16)[name = tensor<string, []>("input_23_cast_fp16")];
|
46 |
+
tensor<fp16, [1, 32, 80, 998]> input_25_cast_fp16 = relu(x = input_23_cast_fp16)[name = tensor<string, []>("input_25_cast_fp16")];
|
47 |
+
tensor<string, []> input_27_pad_type_0 = const()[name = tensor<string, []>("input_27_pad_type_0"), val = tensor<string, []>("custom")];
|
48 |
+
tensor<int32, [4]> input_27_pad_0 = const()[name = tensor<string, []>("input_27_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
49 |
+
tensor<int32, [2]> input_27_strides_0 = const()[name = tensor<string, []>("input_27_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
50 |
+
tensor<int32, [2]> input_27_dilations_0 = const()[name = tensor<string, []>("input_27_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
51 |
+
tensor<int32, []> input_27_groups_0 = const()[name = tensor<string, []>("input_27_groups_0"), val = tensor<int32, []>(1)];
|
52 |
+
tensor<fp16, [32, 32, 3, 3]> const_13_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [6912]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(22720))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(29696))), name = tensor<string, []>("const_13_to_fp16_palettized"), shape = tensor<uint32, [4]>([32, 32, 3, 3])];
|
53 |
+
tensor<fp16, [32]> const_14_to_fp16 = const()[name = tensor<string, []>("const_14_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(29888)))];
|
54 |
+
tensor<fp16, [1, 32, 80, 998]> out_3_cast_fp16 = conv(bias = const_14_to_fp16, dilations = input_27_dilations_0, groups = input_27_groups_0, pad = input_27_pad_0, pad_type = input_27_pad_type_0, strides = input_27_strides_0, weight = const_13_to_fp16_palettized, x = input_25_cast_fp16)[name = tensor<string, []>("out_3_cast_fp16")];
|
55 |
+
tensor<fp16, [1, 32, 80, 998]> input_29_cast_fp16 = add(x = out_3_cast_fp16, y = input_19_cast_fp16)[name = tensor<string, []>("input_29_cast_fp16")];
|
56 |
+
tensor<fp16, [1, 32, 80, 998]> input_31_cast_fp16 = relu(x = input_29_cast_fp16)[name = tensor<string, []>("input_31_cast_fp16")];
|
57 |
+
tensor<string, []> input_33_pad_type_0 = const()[name = tensor<string, []>("input_33_pad_type_0"), val = tensor<string, []>("custom")];
|
58 |
+
tensor<int32, [4]> input_33_pad_0 = const()[name = tensor<string, []>("input_33_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
59 |
+
tensor<int32, [2]> input_33_strides_0 = const()[name = tensor<string, []>("input_33_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
60 |
+
tensor<int32, [2]> input_33_dilations_0 = const()[name = tensor<string, []>("input_33_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
61 |
+
tensor<int32, []> input_33_groups_0 = const()[name = tensor<string, []>("input_33_groups_0"), val = tensor<int32, []>(1)];
|
62 |
+
tensor<fp16, [32, 32, 3, 3]> const_15_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [6912]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30016))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(36992))), name = tensor<string, []>("const_15_to_fp16_palettized"), shape = tensor<uint32, [4]>([32, 32, 3, 3])];
|
63 |
+
tensor<fp16, [32]> const_16_to_fp16 = const()[name = tensor<string, []>("const_16_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(37184)))];
|
64 |
+
tensor<fp16, [1, 32, 80, 998]> input_35_cast_fp16 = conv(bias = const_16_to_fp16, dilations = input_33_dilations_0, groups = input_33_groups_0, pad = input_33_pad_0, pad_type = input_33_pad_type_0, strides = input_33_strides_0, weight = const_15_to_fp16_palettized, x = input_31_cast_fp16)[name = tensor<string, []>("input_35_cast_fp16")];
|
65 |
+
tensor<fp16, [1, 32, 80, 998]> input_37_cast_fp16 = relu(x = input_35_cast_fp16)[name = tensor<string, []>("input_37_cast_fp16")];
|
66 |
+
tensor<string, []> input_39_pad_type_0 = const()[name = tensor<string, []>("input_39_pad_type_0"), val = tensor<string, []>("custom")];
|
67 |
+
tensor<int32, [4]> input_39_pad_0 = const()[name = tensor<string, []>("input_39_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
68 |
+
tensor<int32, [2]> input_39_strides_0 = const()[name = tensor<string, []>("input_39_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
69 |
+
tensor<int32, [2]> input_39_dilations_0 = const()[name = tensor<string, []>("input_39_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
70 |
+
tensor<int32, []> input_39_groups_0 = const()[name = tensor<string, []>("input_39_groups_0"), val = tensor<int32, []>(1)];
|
71 |
+
tensor<fp16, [32, 32, 3, 3]> const_17_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [6912]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(37312))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(44288))), name = tensor<string, []>("const_17_to_fp16_palettized"), shape = tensor<uint32, [4]>([32, 32, 3, 3])];
|
72 |
+
tensor<fp16, [32]> const_18_to_fp16 = const()[name = tensor<string, []>("const_18_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(44480)))];
|
73 |
+
tensor<fp16, [1, 32, 80, 998]> out_5_cast_fp16 = conv(bias = const_18_to_fp16, dilations = input_39_dilations_0, groups = input_39_groups_0, pad = input_39_pad_0, pad_type = input_39_pad_type_0, strides = input_39_strides_0, weight = const_17_to_fp16_palettized, x = input_37_cast_fp16)[name = tensor<string, []>("out_5_cast_fp16")];
|
74 |
+
tensor<fp16, [1, 32, 80, 998]> input_41_cast_fp16 = add(x = out_5_cast_fp16, y = input_31_cast_fp16)[name = tensor<string, []>("input_41_cast_fp16")];
|
75 |
+
tensor<fp16, [1, 32, 80, 998]> input_43_cast_fp16 = relu(x = input_41_cast_fp16)[name = tensor<string, []>("input_43_cast_fp16")];
|
76 |
+
tensor<string, []> input_45_pad_type_0 = const()[name = tensor<string, []>("input_45_pad_type_0"), val = tensor<string, []>("custom")];
|
77 |
+
tensor<int32, [4]> input_45_pad_0 = const()[name = tensor<string, []>("input_45_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
78 |
+
tensor<int32, [2]> input_45_strides_0 = const()[name = tensor<string, []>("input_45_strides_0"), val = tensor<int32, [2]>([2, 2])];
|
79 |
+
tensor<int32, [2]> input_45_dilations_0 = const()[name = tensor<string, []>("input_45_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
80 |
+
tensor<int32, []> input_45_groups_0 = const()[name = tensor<string, []>("input_45_groups_0"), val = tensor<int32, []>(1)];
|
81 |
+
tensor<fp16, [64, 32, 3, 3]> const_19_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [13824]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(44608))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(58496))), name = tensor<string, []>("const_19_to_fp16_palettized"), shape = tensor<uint32, [4]>([64, 32, 3, 3])];
|
82 |
+
tensor<fp16, [64]> const_20_to_fp16 = const()[name = tensor<string, []>("const_20_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(58688)))];
|
83 |
+
tensor<fp16, [1, 64, 40, 499]> input_47_cast_fp16 = conv(bias = const_20_to_fp16, dilations = input_45_dilations_0, groups = input_45_groups_0, pad = input_45_pad_0, pad_type = input_45_pad_type_0, strides = input_45_strides_0, weight = const_19_to_fp16_palettized, x = input_43_cast_fp16)[name = tensor<string, []>("input_47_cast_fp16")];
|
84 |
+
tensor<fp16, [1, 64, 40, 499]> input_49_cast_fp16 = relu(x = input_47_cast_fp16)[name = tensor<string, []>("input_49_cast_fp16")];
|
85 |
+
tensor<string, []> input_51_pad_type_0 = const()[name = tensor<string, []>("input_51_pad_type_0"), val = tensor<string, []>("custom")];
|
86 |
+
tensor<int32, [4]> input_51_pad_0 = const()[name = tensor<string, []>("input_51_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
87 |
+
tensor<int32, [2]> input_51_strides_0 = const()[name = tensor<string, []>("input_51_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
88 |
+
tensor<int32, [2]> input_51_dilations_0 = const()[name = tensor<string, []>("input_51_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
89 |
+
tensor<int32, []> input_51_groups_0 = const()[name = tensor<string, []>("input_51_groups_0"), val = tensor<int32, []>(1)];
|
90 |
+
tensor<fp16, [64, 64, 3, 3]> const_21_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [27648]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(58880))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(86592))), name = tensor<string, []>("const_21_to_fp16_palettized"), shape = tensor<uint32, [4]>([64, 64, 3, 3])];
|
91 |
+
tensor<fp16, [64]> const_22_to_fp16 = const()[name = tensor<string, []>("const_22_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(86784)))];
|
92 |
+
tensor<fp16, [1, 64, 40, 499]> out_7_cast_fp16 = conv(bias = const_22_to_fp16, dilations = input_51_dilations_0, groups = input_51_groups_0, pad = input_51_pad_0, pad_type = input_51_pad_type_0, strides = input_51_strides_0, weight = const_21_to_fp16_palettized, x = input_49_cast_fp16)[name = tensor<string, []>("out_7_cast_fp16")];
|
93 |
+
tensor<string, []> input_53_pad_type_0 = const()[name = tensor<string, []>("input_53_pad_type_0"), val = tensor<string, []>("valid")];
|
94 |
+
tensor<int32, [2]> input_53_strides_0 = const()[name = tensor<string, []>("input_53_strides_0"), val = tensor<int32, [2]>([2, 2])];
|
95 |
+
tensor<int32, [4]> input_53_pad_0 = const()[name = tensor<string, []>("input_53_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
96 |
+
tensor<int32, [2]> input_53_dilations_0 = const()[name = tensor<string, []>("input_53_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
97 |
+
tensor<int32, []> input_53_groups_0 = const()[name = tensor<string, []>("input_53_groups_0"), val = tensor<int32, []>(1)];
|
98 |
+
tensor<fp16, [64, 32, 1, 1]> const_23_to_fp16 = const()[name = tensor<string, []>("const_23_to_fp16"), val = tensor<fp16, [64, 32, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(86976)))];
|
99 |
+
tensor<fp16, [64]> const_24_to_fp16 = const()[name = tensor<string, []>("const_24_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(91136)))];
|
100 |
+
tensor<fp16, [1, 64, 40, 499]> var_171_cast_fp16 = conv(bias = const_24_to_fp16, dilations = input_53_dilations_0, groups = input_53_groups_0, pad = input_53_pad_0, pad_type = input_53_pad_type_0, strides = input_53_strides_0, weight = const_23_to_fp16, x = input_43_cast_fp16)[name = tensor<string, []>("op_171_cast_fp16")];
|
101 |
+
tensor<fp16, [1, 64, 40, 499]> input_55_cast_fp16 = add(x = out_7_cast_fp16, y = var_171_cast_fp16)[name = tensor<string, []>("input_55_cast_fp16")];
|
102 |
+
tensor<fp16, [1, 64, 40, 499]> input_57_cast_fp16 = relu(x = input_55_cast_fp16)[name = tensor<string, []>("input_57_cast_fp16")];
|
103 |
+
tensor<string, []> input_59_pad_type_0 = const()[name = tensor<string, []>("input_59_pad_type_0"), val = tensor<string, []>("custom")];
|
104 |
+
tensor<int32, [4]> input_59_pad_0 = const()[name = tensor<string, []>("input_59_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
105 |
+
tensor<int32, [2]> input_59_strides_0 = const()[name = tensor<string, []>("input_59_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
106 |
+
tensor<int32, [2]> input_59_dilations_0 = const()[name = tensor<string, []>("input_59_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
107 |
+
tensor<int32, []> input_59_groups_0 = const()[name = tensor<string, []>("input_59_groups_0"), val = tensor<int32, []>(1)];
|
108 |
+
tensor<fp16, [64, 64, 3, 3]> const_25_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [27648]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(91328))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(119040))), name = tensor<string, []>("const_25_to_fp16_palettized"), shape = tensor<uint32, [4]>([64, 64, 3, 3])];
|
109 |
+
tensor<fp16, [64]> const_26_to_fp16 = const()[name = tensor<string, []>("const_26_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(119232)))];
|
110 |
+
tensor<fp16, [1, 64, 40, 499]> input_61_cast_fp16 = conv(bias = const_26_to_fp16, dilations = input_59_dilations_0, groups = input_59_groups_0, pad = input_59_pad_0, pad_type = input_59_pad_type_0, strides = input_59_strides_0, weight = const_25_to_fp16_palettized, x = input_57_cast_fp16)[name = tensor<string, []>("input_61_cast_fp16")];
|
111 |
+
tensor<fp16, [1, 64, 40, 499]> input_63_cast_fp16 = relu(x = input_61_cast_fp16)[name = tensor<string, []>("input_63_cast_fp16")];
|
112 |
+
tensor<string, []> input_65_pad_type_0 = const()[name = tensor<string, []>("input_65_pad_type_0"), val = tensor<string, []>("custom")];
|
113 |
+
tensor<int32, [4]> input_65_pad_0 = const()[name = tensor<string, []>("input_65_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
114 |
+
tensor<int32, [2]> input_65_strides_0 = const()[name = tensor<string, []>("input_65_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
115 |
+
tensor<int32, [2]> input_65_dilations_0 = const()[name = tensor<string, []>("input_65_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
116 |
+
tensor<int32, []> input_65_groups_0 = const()[name = tensor<string, []>("input_65_groups_0"), val = tensor<int32, []>(1)];
|
117 |
+
tensor<fp16, [64, 64, 3, 3]> const_27_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [27648]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(119424))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(147136))), name = tensor<string, []>("const_27_to_fp16_palettized"), shape = tensor<uint32, [4]>([64, 64, 3, 3])];
|
118 |
+
tensor<fp16, [64]> const_28_to_fp16 = const()[name = tensor<string, []>("const_28_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(147328)))];
|
119 |
+
tensor<fp16, [1, 64, 40, 499]> out_9_cast_fp16 = conv(bias = const_28_to_fp16, dilations = input_65_dilations_0, groups = input_65_groups_0, pad = input_65_pad_0, pad_type = input_65_pad_type_0, strides = input_65_strides_0, weight = const_27_to_fp16_palettized, x = input_63_cast_fp16)[name = tensor<string, []>("out_9_cast_fp16")];
|
120 |
+
tensor<fp16, [1, 64, 40, 499]> input_67_cast_fp16 = add(x = out_9_cast_fp16, y = input_57_cast_fp16)[name = tensor<string, []>("input_67_cast_fp16")];
|
121 |
+
tensor<fp16, [1, 64, 40, 499]> input_69_cast_fp16 = relu(x = input_67_cast_fp16)[name = tensor<string, []>("input_69_cast_fp16")];
|
122 |
+
tensor<string, []> input_71_pad_type_0 = const()[name = tensor<string, []>("input_71_pad_type_0"), val = tensor<string, []>("custom")];
|
123 |
+
tensor<int32, [4]> input_71_pad_0 = const()[name = tensor<string, []>("input_71_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
124 |
+
tensor<int32, [2]> input_71_strides_0 = const()[name = tensor<string, []>("input_71_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
125 |
+
tensor<int32, [2]> input_71_dilations_0 = const()[name = tensor<string, []>("input_71_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
126 |
+
tensor<int32, []> input_71_groups_0 = const()[name = tensor<string, []>("input_71_groups_0"), val = tensor<int32, []>(1)];
|
127 |
+
tensor<fp16, [64, 64, 3, 3]> const_29_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [27648]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(147520))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(175232))), name = tensor<string, []>("const_29_to_fp16_palettized"), shape = tensor<uint32, [4]>([64, 64, 3, 3])];
|
128 |
+
tensor<fp16, [64]> const_30_to_fp16 = const()[name = tensor<string, []>("const_30_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(175424)))];
|
129 |
+
tensor<fp16, [1, 64, 40, 499]> input_73_cast_fp16 = conv(bias = const_30_to_fp16, dilations = input_71_dilations_0, groups = input_71_groups_0, pad = input_71_pad_0, pad_type = input_71_pad_type_0, strides = input_71_strides_0, weight = const_29_to_fp16_palettized, x = input_69_cast_fp16)[name = tensor<string, []>("input_73_cast_fp16")];
|
130 |
+
tensor<fp16, [1, 64, 40, 499]> input_75_cast_fp16 = relu(x = input_73_cast_fp16)[name = tensor<string, []>("input_75_cast_fp16")];
|
131 |
+
tensor<string, []> input_77_pad_type_0 = const()[name = tensor<string, []>("input_77_pad_type_0"), val = tensor<string, []>("custom")];
|
132 |
+
tensor<int32, [4]> input_77_pad_0 = const()[name = tensor<string, []>("input_77_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
133 |
+
tensor<int32, [2]> input_77_strides_0 = const()[name = tensor<string, []>("input_77_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
134 |
+
tensor<int32, [2]> input_77_dilations_0 = const()[name = tensor<string, []>("input_77_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
135 |
+
tensor<int32, []> input_77_groups_0 = const()[name = tensor<string, []>("input_77_groups_0"), val = tensor<int32, []>(1)];
|
136 |
+
tensor<fp16, [64, 64, 3, 3]> const_31_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [27648]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(175616))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(203328))), name = tensor<string, []>("const_31_to_fp16_palettized"), shape = tensor<uint32, [4]>([64, 64, 3, 3])];
|
137 |
+
tensor<fp16, [64]> const_32_to_fp16 = const()[name = tensor<string, []>("const_32_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(203520)))];
|
138 |
+
tensor<fp16, [1, 64, 40, 499]> out_11_cast_fp16 = conv(bias = const_32_to_fp16, dilations = input_77_dilations_0, groups = input_77_groups_0, pad = input_77_pad_0, pad_type = input_77_pad_type_0, strides = input_77_strides_0, weight = const_31_to_fp16_palettized, x = input_75_cast_fp16)[name = tensor<string, []>("out_11_cast_fp16")];
|
139 |
+
tensor<fp16, [1, 64, 40, 499]> input_79_cast_fp16 = add(x = out_11_cast_fp16, y = input_69_cast_fp16)[name = tensor<string, []>("input_79_cast_fp16")];
|
140 |
+
tensor<fp16, [1, 64, 40, 499]> input_81_cast_fp16 = relu(x = input_79_cast_fp16)[name = tensor<string, []>("input_81_cast_fp16")];
|
141 |
+
tensor<string, []> input_83_pad_type_0 = const()[name = tensor<string, []>("input_83_pad_type_0"), val = tensor<string, []>("custom")];
|
142 |
+
tensor<int32, [4]> input_83_pad_0 = const()[name = tensor<string, []>("input_83_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
143 |
+
tensor<int32, [2]> input_83_strides_0 = const()[name = tensor<string, []>("input_83_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
144 |
+
tensor<int32, [2]> input_83_dilations_0 = const()[name = tensor<string, []>("input_83_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
145 |
+
tensor<int32, []> input_83_groups_0 = const()[name = tensor<string, []>("input_83_groups_0"), val = tensor<int32, []>(1)];
|
146 |
+
tensor<fp16, [64, 64, 3, 3]> const_33_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [27648]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(203712))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(231424))), name = tensor<string, []>("const_33_to_fp16_palettized"), shape = tensor<uint32, [4]>([64, 64, 3, 3])];
|
147 |
+
tensor<fp16, [64]> const_34_to_fp16 = const()[name = tensor<string, []>("const_34_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(231616)))];
|
148 |
+
tensor<fp16, [1, 64, 40, 499]> input_85_cast_fp16 = conv(bias = const_34_to_fp16, dilations = input_83_dilations_0, groups = input_83_groups_0, pad = input_83_pad_0, pad_type = input_83_pad_type_0, strides = input_83_strides_0, weight = const_33_to_fp16_palettized, x = input_81_cast_fp16)[name = tensor<string, []>("input_85_cast_fp16")];
|
149 |
+
tensor<fp16, [1, 64, 40, 499]> input_87_cast_fp16 = relu(x = input_85_cast_fp16)[name = tensor<string, []>("input_87_cast_fp16")];
|
150 |
+
tensor<string, []> input_89_pad_type_0 = const()[name = tensor<string, []>("input_89_pad_type_0"), val = tensor<string, []>("custom")];
|
151 |
+
tensor<int32, [4]> input_89_pad_0 = const()[name = tensor<string, []>("input_89_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
152 |
+
tensor<int32, [2]> input_89_strides_0 = const()[name = tensor<string, []>("input_89_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
153 |
+
tensor<int32, [2]> input_89_dilations_0 = const()[name = tensor<string, []>("input_89_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
154 |
+
tensor<int32, []> input_89_groups_0 = const()[name = tensor<string, []>("input_89_groups_0"), val = tensor<int32, []>(1)];
|
155 |
+
tensor<fp16, [64, 64, 3, 3]> const_35_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [27648]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(231808))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(259520))), name = tensor<string, []>("const_35_to_fp16_palettized"), shape = tensor<uint32, [4]>([64, 64, 3, 3])];
|
156 |
+
tensor<fp16, [64]> const_36_to_fp16 = const()[name = tensor<string, []>("const_36_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(259712)))];
|
157 |
+
tensor<fp16, [1, 64, 40, 499]> out_13_cast_fp16 = conv(bias = const_36_to_fp16, dilations = input_89_dilations_0, groups = input_89_groups_0, pad = input_89_pad_0, pad_type = input_89_pad_type_0, strides = input_89_strides_0, weight = const_35_to_fp16_palettized, x = input_87_cast_fp16)[name = tensor<string, []>("out_13_cast_fp16")];
|
158 |
+
tensor<fp16, [1, 64, 40, 499]> input_91_cast_fp16 = add(x = out_13_cast_fp16, y = input_81_cast_fp16)[name = tensor<string, []>("input_91_cast_fp16")];
|
159 |
+
tensor<fp16, [1, 64, 40, 499]> input_93_cast_fp16 = relu(x = input_91_cast_fp16)[name = tensor<string, []>("input_93_cast_fp16")];
|
160 |
+
tensor<string, []> input_95_pad_type_0 = const()[name = tensor<string, []>("input_95_pad_type_0"), val = tensor<string, []>("custom")];
|
161 |
+
tensor<int32, [4]> input_95_pad_0 = const()[name = tensor<string, []>("input_95_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
162 |
+
tensor<int32, [2]> input_95_strides_0 = const()[name = tensor<string, []>("input_95_strides_0"), val = tensor<int32, [2]>([2, 2])];
|
163 |
+
tensor<int32, [2]> input_95_dilations_0 = const()[name = tensor<string, []>("input_95_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
164 |
+
tensor<int32, []> input_95_groups_0 = const()[name = tensor<string, []>("input_95_groups_0"), val = tensor<int32, []>(1)];
|
165 |
+
tensor<fp16, [128, 64, 3, 3]> const_37_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [55296]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(259904))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(315264))), name = tensor<string, []>("const_37_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 64, 3, 3])];
|
166 |
+
tensor<fp16, [128]> const_38_to_fp16 = const()[name = tensor<string, []>("const_38_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(315456)))];
|
167 |
+
tensor<fp16, [1, 128, 20, 250]> input_97_cast_fp16 = conv(bias = const_38_to_fp16, dilations = input_95_dilations_0, groups = input_95_groups_0, pad = input_95_pad_0, pad_type = input_95_pad_type_0, strides = input_95_strides_0, weight = const_37_to_fp16_palettized, x = input_93_cast_fp16)[name = tensor<string, []>("input_97_cast_fp16")];
|
168 |
+
tensor<fp16, [1, 128, 20, 250]> input_99_cast_fp16 = relu(x = input_97_cast_fp16)[name = tensor<string, []>("input_99_cast_fp16")];
|
169 |
+
tensor<string, []> input_101_pad_type_0 = const()[name = tensor<string, []>("input_101_pad_type_0"), val = tensor<string, []>("custom")];
|
170 |
+
tensor<int32, [4]> input_101_pad_0 = const()[name = tensor<string, []>("input_101_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
171 |
+
tensor<int32, [2]> input_101_strides_0 = const()[name = tensor<string, []>("input_101_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
172 |
+
tensor<int32, [2]> input_101_dilations_0 = const()[name = tensor<string, []>("input_101_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
173 |
+
tensor<int32, []> input_101_groups_0 = const()[name = tensor<string, []>("input_101_groups_0"), val = tensor<int32, []>(1)];
|
174 |
+
tensor<fp16, [128, 128, 3, 3]> const_39_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [110592]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(315776))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(426432))), name = tensor<string, []>("const_39_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
175 |
+
tensor<fp16, [128]> const_40_to_fp16 = const()[name = tensor<string, []>("const_40_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(426624)))];
|
176 |
+
tensor<fp16, [1, 128, 20, 250]> out_15_cast_fp16 = conv(bias = const_40_to_fp16, dilations = input_101_dilations_0, groups = input_101_groups_0, pad = input_101_pad_0, pad_type = input_101_pad_type_0, strides = input_101_strides_0, weight = const_39_to_fp16_palettized, x = input_99_cast_fp16)[name = tensor<string, []>("out_15_cast_fp16")];
|
177 |
+
tensor<string, []> input_103_pad_type_0 = const()[name = tensor<string, []>("input_103_pad_type_0"), val = tensor<string, []>("valid")];
|
178 |
+
tensor<int32, [2]> input_103_strides_0 = const()[name = tensor<string, []>("input_103_strides_0"), val = tensor<int32, [2]>([2, 2])];
|
179 |
+
tensor<int32, [4]> input_103_pad_0 = const()[name = tensor<string, []>("input_103_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
180 |
+
tensor<int32, [2]> input_103_dilations_0 = const()[name = tensor<string, []>("input_103_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
181 |
+
tensor<int32, []> input_103_groups_0 = const()[name = tensor<string, []>("input_103_groups_0"), val = tensor<int32, []>(1)];
|
182 |
+
tensor<fp16, [128, 64, 1, 1]> const_41_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [6144]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(426944))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(433152))), name = tensor<string, []>("const_41_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 64, 1, 1])];
|
183 |
+
tensor<fp16, [128]> const_42_to_fp16 = const()[name = tensor<string, []>("const_42_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(433344)))];
|
184 |
+
tensor<fp16, [1, 128, 20, 250]> var_307_cast_fp16 = conv(bias = const_42_to_fp16, dilations = input_103_dilations_0, groups = input_103_groups_0, pad = input_103_pad_0, pad_type = input_103_pad_type_0, strides = input_103_strides_0, weight = const_41_to_fp16_palettized, x = input_93_cast_fp16)[name = tensor<string, []>("op_307_cast_fp16")];
|
185 |
+
tensor<fp16, [1, 128, 20, 250]> input_105_cast_fp16 = add(x = out_15_cast_fp16, y = var_307_cast_fp16)[name = tensor<string, []>("input_105_cast_fp16")];
|
186 |
+
tensor<fp16, [1, 128, 20, 250]> input_107_cast_fp16 = relu(x = input_105_cast_fp16)[name = tensor<string, []>("input_107_cast_fp16")];
|
187 |
+
tensor<string, []> input_109_pad_type_0 = const()[name = tensor<string, []>("input_109_pad_type_0"), val = tensor<string, []>("custom")];
|
188 |
+
tensor<int32, [4]> input_109_pad_0 = const()[name = tensor<string, []>("input_109_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
189 |
+
tensor<int32, [2]> input_109_strides_0 = const()[name = tensor<string, []>("input_109_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
190 |
+
tensor<int32, [2]> input_109_dilations_0 = const()[name = tensor<string, []>("input_109_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
191 |
+
tensor<int32, []> input_109_groups_0 = const()[name = tensor<string, []>("input_109_groups_0"), val = tensor<int32, []>(1)];
|
192 |
+
tensor<fp16, [128, 128, 3, 3]> const_43_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [110592]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(433664))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(544320))), name = tensor<string, []>("const_43_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
193 |
+
tensor<fp16, [128]> const_44_to_fp16 = const()[name = tensor<string, []>("const_44_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(544512)))];
|
194 |
+
tensor<fp16, [1, 128, 20, 250]> input_111_cast_fp16 = conv(bias = const_44_to_fp16, dilations = input_109_dilations_0, groups = input_109_groups_0, pad = input_109_pad_0, pad_type = input_109_pad_type_0, strides = input_109_strides_0, weight = const_43_to_fp16_palettized, x = input_107_cast_fp16)[name = tensor<string, []>("input_111_cast_fp16")];
|
195 |
+
tensor<fp16, [1, 128, 20, 250]> input_113_cast_fp16 = relu(x = input_111_cast_fp16)[name = tensor<string, []>("input_113_cast_fp16")];
|
196 |
+
tensor<string, []> input_115_pad_type_0 = const()[name = tensor<string, []>("input_115_pad_type_0"), val = tensor<string, []>("custom")];
|
197 |
+
tensor<int32, [4]> input_115_pad_0 = const()[name = tensor<string, []>("input_115_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
198 |
+
tensor<int32, [2]> input_115_strides_0 = const()[name = tensor<string, []>("input_115_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
199 |
+
tensor<int32, [2]> input_115_dilations_0 = const()[name = tensor<string, []>("input_115_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
200 |
+
tensor<int32, []> input_115_groups_0 = const()[name = tensor<string, []>("input_115_groups_0"), val = tensor<int32, []>(1)];
|
201 |
+
tensor<fp16, [128, 128, 3, 3]> const_45_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [110592]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(544832))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(655488))), name = tensor<string, []>("const_45_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
202 |
+
tensor<fp16, [128]> const_46_to_fp16 = const()[name = tensor<string, []>("const_46_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(655680)))];
|
203 |
+
tensor<fp16, [1, 128, 20, 250]> out_17_cast_fp16 = conv(bias = const_46_to_fp16, dilations = input_115_dilations_0, groups = input_115_groups_0, pad = input_115_pad_0, pad_type = input_115_pad_type_0, strides = input_115_strides_0, weight = const_45_to_fp16_palettized, x = input_113_cast_fp16)[name = tensor<string, []>("out_17_cast_fp16")];
|
204 |
+
tensor<fp16, [1, 128, 20, 250]> input_117_cast_fp16 = add(x = out_17_cast_fp16, y = input_107_cast_fp16)[name = tensor<string, []>("input_117_cast_fp16")];
|
205 |
+
tensor<fp16, [1, 128, 20, 250]> input_119_cast_fp16 = relu(x = input_117_cast_fp16)[name = tensor<string, []>("input_119_cast_fp16")];
|
206 |
+
tensor<string, []> input_121_pad_type_0 = const()[name = tensor<string, []>("input_121_pad_type_0"), val = tensor<string, []>("custom")];
|
207 |
+
tensor<int32, [4]> input_121_pad_0 = const()[name = tensor<string, []>("input_121_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
208 |
+
tensor<int32, [2]> input_121_strides_0 = const()[name = tensor<string, []>("input_121_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
209 |
+
tensor<int32, [2]> input_121_dilations_0 = const()[name = tensor<string, []>("input_121_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
210 |
+
tensor<int32, []> input_121_groups_0 = const()[name = tensor<string, []>("input_121_groups_0"), val = tensor<int32, []>(1)];
|
211 |
+
tensor<fp16, [128, 128, 3, 3]> const_47_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [110592]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(656000))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(766656))), name = tensor<string, []>("const_47_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
212 |
+
tensor<fp16, [128]> const_48_to_fp16 = const()[name = tensor<string, []>("const_48_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(766848)))];
|
213 |
+
tensor<fp16, [1, 128, 20, 250]> input_123_cast_fp16 = conv(bias = const_48_to_fp16, dilations = input_121_dilations_0, groups = input_121_groups_0, pad = input_121_pad_0, pad_type = input_121_pad_type_0, strides = input_121_strides_0, weight = const_47_to_fp16_palettized, x = input_119_cast_fp16)[name = tensor<string, []>("input_123_cast_fp16")];
|
214 |
+
tensor<fp16, [1, 128, 20, 250]> input_125_cast_fp16 = relu(x = input_123_cast_fp16)[name = tensor<string, []>("input_125_cast_fp16")];
|
215 |
+
tensor<string, []> input_127_pad_type_0 = const()[name = tensor<string, []>("input_127_pad_type_0"), val = tensor<string, []>("custom")];
|
216 |
+
tensor<int32, [4]> input_127_pad_0 = const()[name = tensor<string, []>("input_127_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
217 |
+
tensor<int32, [2]> input_127_strides_0 = const()[name = tensor<string, []>("input_127_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
218 |
+
tensor<int32, [2]> input_127_dilations_0 = const()[name = tensor<string, []>("input_127_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
219 |
+
tensor<int32, []> input_127_groups_0 = const()[name = tensor<string, []>("input_127_groups_0"), val = tensor<int32, []>(1)];
|
220 |
+
tensor<fp16, [128, 128, 3, 3]> const_49_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [110592]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(767168))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(877824))), name = tensor<string, []>("const_49_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
221 |
+
tensor<fp16, [128]> const_50_to_fp16 = const()[name = tensor<string, []>("const_50_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(878016)))];
|
222 |
+
tensor<fp16, [1, 128, 20, 250]> out_19_cast_fp16 = conv(bias = const_50_to_fp16, dilations = input_127_dilations_0, groups = input_127_groups_0, pad = input_127_pad_0, pad_type = input_127_pad_type_0, strides = input_127_strides_0, weight = const_49_to_fp16_palettized, x = input_125_cast_fp16)[name = tensor<string, []>("out_19_cast_fp16")];
|
223 |
+
tensor<fp16, [1, 128, 20, 250]> input_129_cast_fp16 = add(x = out_19_cast_fp16, y = input_119_cast_fp16)[name = tensor<string, []>("input_129_cast_fp16")];
|
224 |
+
tensor<fp16, [1, 128, 20, 250]> input_131_cast_fp16 = relu(x = input_129_cast_fp16)[name = tensor<string, []>("input_131_cast_fp16")];
|
225 |
+
tensor<string, []> input_133_pad_type_0 = const()[name = tensor<string, []>("input_133_pad_type_0"), val = tensor<string, []>("custom")];
|
226 |
+
tensor<int32, [4]> input_133_pad_0 = const()[name = tensor<string, []>("input_133_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
227 |
+
tensor<int32, [2]> input_133_strides_0 = const()[name = tensor<string, []>("input_133_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
228 |
+
tensor<int32, [2]> input_133_dilations_0 = const()[name = tensor<string, []>("input_133_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
229 |
+
tensor<int32, []> input_133_groups_0 = const()[name = tensor<string, []>("input_133_groups_0"), val = tensor<int32, []>(1)];
|
230 |
+
tensor<fp16, [128, 128, 3, 3]> const_51_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [110592]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(878336))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(988992))), name = tensor<string, []>("const_51_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
231 |
+
tensor<fp16, [128]> const_52_to_fp16 = const()[name = tensor<string, []>("const_52_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(989184)))];
|
232 |
+
tensor<fp16, [1, 128, 20, 250]> input_135_cast_fp16 = conv(bias = const_52_to_fp16, dilations = input_133_dilations_0, groups = input_133_groups_0, pad = input_133_pad_0, pad_type = input_133_pad_type_0, strides = input_133_strides_0, weight = const_51_to_fp16_palettized, x = input_131_cast_fp16)[name = tensor<string, []>("input_135_cast_fp16")];
|
233 |
+
tensor<fp16, [1, 128, 20, 250]> input_137_cast_fp16 = relu(x = input_135_cast_fp16)[name = tensor<string, []>("input_137_cast_fp16")];
|
234 |
+
tensor<string, []> input_139_pad_type_0 = const()[name = tensor<string, []>("input_139_pad_type_0"), val = tensor<string, []>("custom")];
|
235 |
+
tensor<int32, [4]> input_139_pad_0 = const()[name = tensor<string, []>("input_139_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
236 |
+
tensor<int32, [2]> input_139_strides_0 = const()[name = tensor<string, []>("input_139_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
237 |
+
tensor<int32, [2]> input_139_dilations_0 = const()[name = tensor<string, []>("input_139_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
238 |
+
tensor<int32, []> input_139_groups_0 = const()[name = tensor<string, []>("input_139_groups_0"), val = tensor<int32, []>(1)];
|
239 |
+
tensor<fp16, [128, 128, 3, 3]> const_53_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [110592]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(989504))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1100160))), name = tensor<string, []>("const_53_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
240 |
+
tensor<fp16, [128]> const_54_to_fp16 = const()[name = tensor<string, []>("const_54_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1100352)))];
|
241 |
+
tensor<fp16, [1, 128, 20, 250]> out_21_cast_fp16 = conv(bias = const_54_to_fp16, dilations = input_139_dilations_0, groups = input_139_groups_0, pad = input_139_pad_0, pad_type = input_139_pad_type_0, strides = input_139_strides_0, weight = const_53_to_fp16_palettized, x = input_137_cast_fp16)[name = tensor<string, []>("out_21_cast_fp16")];
|
242 |
+
tensor<fp16, [1, 128, 20, 250]> input_141_cast_fp16 = add(x = out_21_cast_fp16, y = input_131_cast_fp16)[name = tensor<string, []>("input_141_cast_fp16")];
|
243 |
+
tensor<fp16, [1, 128, 20, 250]> input_143_cast_fp16 = relu(x = input_141_cast_fp16)[name = tensor<string, []>("input_143_cast_fp16")];
|
244 |
+
tensor<string, []> input_145_pad_type_0 = const()[name = tensor<string, []>("input_145_pad_type_0"), val = tensor<string, []>("custom")];
|
245 |
+
tensor<int32, [4]> input_145_pad_0 = const()[name = tensor<string, []>("input_145_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
246 |
+
tensor<int32, [2]> input_145_strides_0 = const()[name = tensor<string, []>("input_145_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
247 |
+
tensor<int32, [2]> input_145_dilations_0 = const()[name = tensor<string, []>("input_145_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
248 |
+
tensor<int32, []> input_145_groups_0 = const()[name = tensor<string, []>("input_145_groups_0"), val = tensor<int32, []>(1)];
|
249 |
+
tensor<fp16, [128, 128, 3, 3]> const_55_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [110592]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1100672))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1211328))), name = tensor<string, []>("const_55_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
250 |
+
tensor<fp16, [128]> const_56_to_fp16 = const()[name = tensor<string, []>("const_56_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1211520)))];
|
251 |
+
tensor<fp16, [1, 128, 20, 250]> input_147_cast_fp16 = conv(bias = const_56_to_fp16, dilations = input_145_dilations_0, groups = input_145_groups_0, pad = input_145_pad_0, pad_type = input_145_pad_type_0, strides = input_145_strides_0, weight = const_55_to_fp16_palettized, x = input_143_cast_fp16)[name = tensor<string, []>("input_147_cast_fp16")];
|
252 |
+
tensor<fp16, [1, 128, 20, 250]> input_149_cast_fp16 = relu(x = input_147_cast_fp16)[name = tensor<string, []>("input_149_cast_fp16")];
|
253 |
+
tensor<string, []> input_151_pad_type_0 = const()[name = tensor<string, []>("input_151_pad_type_0"), val = tensor<string, []>("custom")];
|
254 |
+
tensor<int32, [4]> input_151_pad_0 = const()[name = tensor<string, []>("input_151_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
255 |
+
tensor<int32, [2]> input_151_strides_0 = const()[name = tensor<string, []>("input_151_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
256 |
+
tensor<int32, [2]> input_151_dilations_0 = const()[name = tensor<string, []>("input_151_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
257 |
+
tensor<int32, []> input_151_groups_0 = const()[name = tensor<string, []>("input_151_groups_0"), val = tensor<int32, []>(1)];
|
258 |
+
tensor<fp16, [128, 128, 3, 3]> const_57_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [110592]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1211840))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1322496))), name = tensor<string, []>("const_57_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
259 |
+
tensor<fp16, [128]> const_58_to_fp16 = const()[name = tensor<string, []>("const_58_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1322688)))];
|
260 |
+
tensor<fp16, [1, 128, 20, 250]> out_23_cast_fp16 = conv(bias = const_58_to_fp16, dilations = input_151_dilations_0, groups = input_151_groups_0, pad = input_151_pad_0, pad_type = input_151_pad_type_0, strides = input_151_strides_0, weight = const_57_to_fp16_palettized, x = input_149_cast_fp16)[name = tensor<string, []>("out_23_cast_fp16")];
|
261 |
+
tensor<fp16, [1, 128, 20, 250]> input_153_cast_fp16 = add(x = out_23_cast_fp16, y = input_143_cast_fp16)[name = tensor<string, []>("input_153_cast_fp16")];
|
262 |
+
tensor<fp16, [1, 128, 20, 250]> input_155_cast_fp16 = relu(x = input_153_cast_fp16)[name = tensor<string, []>("input_155_cast_fp16")];
|
263 |
+
tensor<string, []> input_157_pad_type_0 = const()[name = tensor<string, []>("input_157_pad_type_0"), val = tensor<string, []>("custom")];
|
264 |
+
tensor<int32, [4]> input_157_pad_0 = const()[name = tensor<string, []>("input_157_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
265 |
+
tensor<int32, [2]> input_157_strides_0 = const()[name = tensor<string, []>("input_157_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
266 |
+
tensor<int32, [2]> input_157_dilations_0 = const()[name = tensor<string, []>("input_157_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
267 |
+
tensor<int32, []> input_157_groups_0 = const()[name = tensor<string, []>("input_157_groups_0"), val = tensor<int32, []>(1)];
|
268 |
+
tensor<fp16, [128, 128, 3, 3]> const_59_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [110592]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1323008))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1433664))), name = tensor<string, []>("const_59_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
269 |
+
tensor<fp16, [128]> const_60_to_fp16 = const()[name = tensor<string, []>("const_60_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1433856)))];
|
270 |
+
tensor<fp16, [1, 128, 20, 250]> input_159_cast_fp16 = conv(bias = const_60_to_fp16, dilations = input_157_dilations_0, groups = input_157_groups_0, pad = input_157_pad_0, pad_type = input_157_pad_type_0, strides = input_157_strides_0, weight = const_59_to_fp16_palettized, x = input_155_cast_fp16)[name = tensor<string, []>("input_159_cast_fp16")];
|
271 |
+
tensor<fp16, [1, 128, 20, 250]> input_161_cast_fp16 = relu(x = input_159_cast_fp16)[name = tensor<string, []>("input_161_cast_fp16")];
|
272 |
+
tensor<string, []> input_163_pad_type_0 = const()[name = tensor<string, []>("input_163_pad_type_0"), val = tensor<string, []>("custom")];
|
273 |
+
tensor<int32, [4]> input_163_pad_0 = const()[name = tensor<string, []>("input_163_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
274 |
+
tensor<int32, [2]> input_163_strides_0 = const()[name = tensor<string, []>("input_163_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
275 |
+
tensor<int32, [2]> input_163_dilations_0 = const()[name = tensor<string, []>("input_163_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
276 |
+
tensor<int32, []> input_163_groups_0 = const()[name = tensor<string, []>("input_163_groups_0"), val = tensor<int32, []>(1)];
|
277 |
+
tensor<fp16, [128, 128, 3, 3]> const_61_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [110592]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1434176))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1544832))), name = tensor<string, []>("const_61_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
278 |
+
tensor<fp16, [128]> const_62_to_fp16 = const()[name = tensor<string, []>("const_62_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1545024)))];
|
279 |
+
tensor<fp16, [1, 128, 20, 250]> out_25_cast_fp16 = conv(bias = const_62_to_fp16, dilations = input_163_dilations_0, groups = input_163_groups_0, pad = input_163_pad_0, pad_type = input_163_pad_type_0, strides = input_163_strides_0, weight = const_61_to_fp16_palettized, x = input_161_cast_fp16)[name = tensor<string, []>("out_25_cast_fp16")];
|
280 |
+
tensor<fp16, [1, 128, 20, 250]> input_165_cast_fp16 = add(x = out_25_cast_fp16, y = input_155_cast_fp16)[name = tensor<string, []>("input_165_cast_fp16")];
|
281 |
+
tensor<fp16, [1, 128, 20, 250]> input_167_cast_fp16 = relu(x = input_165_cast_fp16)[name = tensor<string, []>("input_167_cast_fp16")];
|
282 |
+
tensor<string, []> input_169_pad_type_0 = const()[name = tensor<string, []>("input_169_pad_type_0"), val = tensor<string, []>("custom")];
|
283 |
+
tensor<int32, [4]> input_169_pad_0 = const()[name = tensor<string, []>("input_169_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
284 |
+
tensor<int32, [2]> input_169_strides_0 = const()[name = tensor<string, []>("input_169_strides_0"), val = tensor<int32, [2]>([2, 2])];
|
285 |
+
tensor<int32, [2]> input_169_dilations_0 = const()[name = tensor<string, []>("input_169_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
286 |
+
tensor<int32, []> input_169_groups_0 = const()[name = tensor<string, []>("input_169_groups_0"), val = tensor<int32, []>(1)];
|
287 |
+
tensor<fp16, [256, 128, 3, 3]> const_63_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [221184]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1545344))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1766592))), name = tensor<string, []>("const_63_to_fp16_palettized"), shape = tensor<uint32, [4]>([256, 128, 3, 3])];
|
288 |
+
tensor<fp16, [256]> const_64_to_fp16 = const()[name = tensor<string, []>("const_64_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1766784)))];
|
289 |
+
tensor<fp16, [1, 256, 10, 125]> input_171_cast_fp16 = conv(bias = const_64_to_fp16, dilations = input_169_dilations_0, groups = input_169_groups_0, pad = input_169_pad_0, pad_type = input_169_pad_type_0, strides = input_169_strides_0, weight = const_63_to_fp16_palettized, x = input_167_cast_fp16)[name = tensor<string, []>("input_171_cast_fp16")];
|
290 |
+
tensor<fp16, [1, 256, 10, 125]> input_173_cast_fp16 = relu(x = input_171_cast_fp16)[name = tensor<string, []>("input_173_cast_fp16")];
|
291 |
+
tensor<string, []> input_175_pad_type_0 = const()[name = tensor<string, []>("input_175_pad_type_0"), val = tensor<string, []>("custom")];
|
292 |
+
tensor<int32, [4]> input_175_pad_0 = const()[name = tensor<string, []>("input_175_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
293 |
+
tensor<int32, [2]> input_175_strides_0 = const()[name = tensor<string, []>("input_175_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
294 |
+
tensor<int32, [2]> input_175_dilations_0 = const()[name = tensor<string, []>("input_175_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
295 |
+
tensor<int32, []> input_175_groups_0 = const()[name = tensor<string, []>("input_175_groups_0"), val = tensor<int32, []>(1)];
|
296 |
+
tensor<fp16, [256, 256, 3, 3]> const_65_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1767360))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2209792))), name = tensor<string, []>("const_65_to_fp16_palettized"), shape = tensor<uint32, [4]>([256, 256, 3, 3])];
|
297 |
+
tensor<fp16, [256]> const_66_to_fp16 = const()[name = tensor<string, []>("const_66_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2209984)))];
|
298 |
+
tensor<fp16, [1, 256, 10, 125]> out_27_cast_fp16 = conv(bias = const_66_to_fp16, dilations = input_175_dilations_0, groups = input_175_groups_0, pad = input_175_pad_0, pad_type = input_175_pad_type_0, strides = input_175_strides_0, weight = const_65_to_fp16_palettized, x = input_173_cast_fp16)[name = tensor<string, []>("out_27_cast_fp16")];
|
299 |
+
tensor<string, []> input_177_pad_type_0 = const()[name = tensor<string, []>("input_177_pad_type_0"), val = tensor<string, []>("valid")];
|
300 |
+
tensor<int32, [2]> input_177_strides_0 = const()[name = tensor<string, []>("input_177_strides_0"), val = tensor<int32, [2]>([2, 2])];
|
301 |
+
tensor<int32, [4]> input_177_pad_0 = const()[name = tensor<string, []>("input_177_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
302 |
+
tensor<int32, [2]> input_177_dilations_0 = const()[name = tensor<string, []>("input_177_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
303 |
+
tensor<int32, []> input_177_groups_0 = const()[name = tensor<string, []>("input_177_groups_0"), val = tensor<int32, []>(1)];
|
304 |
+
tensor<fp16, [256, 128, 1, 1]> const_67_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [24576]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2210560))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2235200))), name = tensor<string, []>("const_67_to_fp16_palettized"), shape = tensor<uint32, [4]>([256, 128, 1, 1])];
|
305 |
+
tensor<fp16, [256]> const_68_to_fp16 = const()[name = tensor<string, []>("const_68_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2235392)))];
|
306 |
+
tensor<fp16, [1, 256, 10, 125]> var_498_cast_fp16 = conv(bias = const_68_to_fp16, dilations = input_177_dilations_0, groups = input_177_groups_0, pad = input_177_pad_0, pad_type = input_177_pad_type_0, strides = input_177_strides_0, weight = const_67_to_fp16_palettized, x = input_167_cast_fp16)[name = tensor<string, []>("op_498_cast_fp16")];
|
307 |
+
tensor<fp16, [1, 256, 10, 125]> input_179_cast_fp16 = add(x = out_27_cast_fp16, y = var_498_cast_fp16)[name = tensor<string, []>("input_179_cast_fp16")];
|
308 |
+
tensor<fp16, [1, 256, 10, 125]> input_181_cast_fp16 = relu(x = input_179_cast_fp16)[name = tensor<string, []>("input_181_cast_fp16")];
|
309 |
+
tensor<string, []> input_183_pad_type_0 = const()[name = tensor<string, []>("input_183_pad_type_0"), val = tensor<string, []>("custom")];
|
310 |
+
tensor<int32, [4]> input_183_pad_0 = const()[name = tensor<string, []>("input_183_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
311 |
+
tensor<int32, [2]> input_183_strides_0 = const()[name = tensor<string, []>("input_183_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
312 |
+
tensor<int32, [2]> input_183_dilations_0 = const()[name = tensor<string, []>("input_183_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
313 |
+
tensor<int32, []> input_183_groups_0 = const()[name = tensor<string, []>("input_183_groups_0"), val = tensor<int32, []>(1)];
|
314 |
+
tensor<fp16, [256, 256, 3, 3]> const_69_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2235968))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2678400))), name = tensor<string, []>("const_69_to_fp16_palettized"), shape = tensor<uint32, [4]>([256, 256, 3, 3])];
|
315 |
+
tensor<fp16, [256]> const_70_to_fp16 = const()[name = tensor<string, []>("const_70_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2678592)))];
|
316 |
+
tensor<fp16, [1, 256, 10, 125]> input_185_cast_fp16 = conv(bias = const_70_to_fp16, dilations = input_183_dilations_0, groups = input_183_groups_0, pad = input_183_pad_0, pad_type = input_183_pad_type_0, strides = input_183_strides_0, weight = const_69_to_fp16_palettized, x = input_181_cast_fp16)[name = tensor<string, []>("input_185_cast_fp16")];
|
317 |
+
tensor<fp16, [1, 256, 10, 125]> input_187_cast_fp16 = relu(x = input_185_cast_fp16)[name = tensor<string, []>("input_187_cast_fp16")];
|
318 |
+
tensor<string, []> input_189_pad_type_0 = const()[name = tensor<string, []>("input_189_pad_type_0"), val = tensor<string, []>("custom")];
|
319 |
+
tensor<int32, [4]> input_189_pad_0 = const()[name = tensor<string, []>("input_189_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
320 |
+
tensor<int32, [2]> input_189_strides_0 = const()[name = tensor<string, []>("input_189_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
321 |
+
tensor<int32, [2]> input_189_dilations_0 = const()[name = tensor<string, []>("input_189_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
322 |
+
tensor<int32, []> input_189_groups_0 = const()[name = tensor<string, []>("input_189_groups_0"), val = tensor<int32, []>(1)];
|
323 |
+
tensor<fp16, [256, 256, 3, 3]> const_71_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2679168))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3121600))), name = tensor<string, []>("const_71_to_fp16_palettized"), shape = tensor<uint32, [4]>([256, 256, 3, 3])];
|
324 |
+
tensor<fp16, [256]> const_72_to_fp16 = const()[name = tensor<string, []>("const_72_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3121792)))];
|
325 |
+
tensor<fp16, [1, 256, 10, 125]> out_29_cast_fp16 = conv(bias = const_72_to_fp16, dilations = input_189_dilations_0, groups = input_189_groups_0, pad = input_189_pad_0, pad_type = input_189_pad_type_0, strides = input_189_strides_0, weight = const_71_to_fp16_palettized, x = input_187_cast_fp16)[name = tensor<string, []>("out_29_cast_fp16")];
|
326 |
+
tensor<fp16, [1, 256, 10, 125]> input_191_cast_fp16 = add(x = out_29_cast_fp16, y = input_181_cast_fp16)[name = tensor<string, []>("input_191_cast_fp16")];
|
327 |
+
tensor<fp16, [1, 256, 10, 125]> input_193_cast_fp16 = relu(x = input_191_cast_fp16)[name = tensor<string, []>("input_193_cast_fp16")];
|
328 |
+
tensor<string, []> input_195_pad_type_0 = const()[name = tensor<string, []>("input_195_pad_type_0"), val = tensor<string, []>("custom")];
|
329 |
+
tensor<int32, [4]> input_195_pad_0 = const()[name = tensor<string, []>("input_195_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
330 |
+
tensor<int32, [2]> input_195_strides_0 = const()[name = tensor<string, []>("input_195_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
331 |
+
tensor<int32, [2]> input_195_dilations_0 = const()[name = tensor<string, []>("input_195_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
332 |
+
tensor<int32, []> input_195_groups_0 = const()[name = tensor<string, []>("input_195_groups_0"), val = tensor<int32, []>(1)];
|
333 |
+
tensor<fp16, [256, 256, 3, 3]> const_73_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3122368))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3564800))), name = tensor<string, []>("const_73_to_fp16_palettized"), shape = tensor<uint32, [4]>([256, 256, 3, 3])];
|
334 |
+
tensor<fp16, [256]> const_74_to_fp16 = const()[name = tensor<string, []>("const_74_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3564992)))];
|
335 |
+
tensor<fp16, [1, 256, 10, 125]> input_197_cast_fp16 = conv(bias = const_74_to_fp16, dilations = input_195_dilations_0, groups = input_195_groups_0, pad = input_195_pad_0, pad_type = input_195_pad_type_0, strides = input_195_strides_0, weight = const_73_to_fp16_palettized, x = input_193_cast_fp16)[name = tensor<string, []>("input_197_cast_fp16")];
|
336 |
+
tensor<fp16, [1, 256, 10, 125]> input_199_cast_fp16 = relu(x = input_197_cast_fp16)[name = tensor<string, []>("input_199_cast_fp16")];
|
337 |
+
tensor<string, []> input_201_pad_type_0 = const()[name = tensor<string, []>("input_201_pad_type_0"), val = tensor<string, []>("custom")];
|
338 |
+
tensor<int32, [4]> input_201_pad_0 = const()[name = tensor<string, []>("input_201_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
339 |
+
tensor<int32, [2]> input_201_strides_0 = const()[name = tensor<string, []>("input_201_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
340 |
+
tensor<int32, [2]> input_201_dilations_0 = const()[name = tensor<string, []>("input_201_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
341 |
+
tensor<int32, []> input_201_groups_0 = const()[name = tensor<string, []>("input_201_groups_0"), val = tensor<int32, []>(1)];
|
342 |
+
tensor<fp16, [256, 256, 3, 3]> const_75_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [442368]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3565568))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4008000))), name = tensor<string, []>("const_75_to_fp16_palettized"), shape = tensor<uint32, [4]>([256, 256, 3, 3])];
|
343 |
+
tensor<fp16, [256]> const_76_to_fp16 = const()[name = tensor<string, []>("const_76_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4008192)))];
|
344 |
+
tensor<fp16, [1, 256, 10, 125]> out_cast_fp16 = conv(bias = const_76_to_fp16, dilations = input_201_dilations_0, groups = input_201_groups_0, pad = input_201_pad_0, pad_type = input_201_pad_type_0, strides = input_201_strides_0, weight = const_75_to_fp16_palettized, x = input_199_cast_fp16)[name = tensor<string, []>("out_cast_fp16")];
|
345 |
+
tensor<fp16, [1, 256, 10, 125]> input_203_cast_fp16 = add(x = out_cast_fp16, y = input_193_cast_fp16)[name = tensor<string, []>("input_203_cast_fp16")];
|
346 |
+
tensor<fp16, [1, 256, 10, 125]> x_cast_fp16 = relu(x = input_203_cast_fp16)[name = tensor<string, []>("x_cast_fp16")];
|
347 |
+
tensor<int32, [3]> var_577 = const()[name = tensor<string, []>("op_577"), val = tensor<int32, [3]>([1, 2560, 125])];
|
348 |
+
tensor<fp16, [1, 2560, 125]> sequences_cast_fp16 = reshape(shape = var_577, x = x_cast_fp16)[name = tensor<string, []>("sequences_cast_fp16")];
|
349 |
+
tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = tensor<string, []>("expand_dims_0_axes_0"), val = tensor<int32, [1]>([3])];
|
350 |
+
tensor<fp16, [1, 3, 589, 1]> expand_dims_0_cast_fp16 = expand_dims(axes = expand_dims_0_axes_0, x = speaker_masks)[name = tensor<string, []>("expand_dims_0_cast_fp16")];
|
351 |
+
tensor<fp32, []> upsample_nearest_neighbor_0_scale_factor_height_0 = const()[name = tensor<string, []>("upsample_nearest_neighbor_0_scale_factor_height_0"), val = tensor<fp32, []>(0x1.b2a2a4p-3)];
|
352 |
+
tensor<fp32, []> upsample_nearest_neighbor_0_scale_factor_width_0 = const()[name = tensor<string, []>("upsample_nearest_neighbor_0_scale_factor_width_0"), val = tensor<fp32, []>(0x1p+0)];
|
353 |
+
tensor<fp16, [1, 3, 125, 1]> upsample_nearest_neighbor_0_cast_fp16 = upsample_nearest_neighbor(scale_factor_height = upsample_nearest_neighbor_0_scale_factor_height_0, scale_factor_width = upsample_nearest_neighbor_0_scale_factor_width_0, x = expand_dims_0_cast_fp16)[name = tensor<string, []>("upsample_nearest_neighbor_0_cast_fp16")];
|
354 |
+
tensor<int32, [1]> weights_1_axes_0 = const()[name = tensor<string, []>("weights_1_axes_0"), val = tensor<int32, [1]>([3])];
|
355 |
+
tensor<fp16, [1, 3, 125]> weights_1_cast_fp16 = squeeze(axes = weights_1_axes_0, x = upsample_nearest_neighbor_0_cast_fp16)[name = tensor<string, []>("weights_1_cast_fp16")];
|
356 |
+
tensor<int32, [3]> var_583_begin_0 = const()[name = tensor<string, []>("op_583_begin_0"), val = tensor<int32, [3]>([0, 0, 0])];
|
357 |
+
tensor<int32, [3]> var_583_end_0 = const()[name = tensor<string, []>("op_583_end_0"), val = tensor<int32, [3]>([1, 1, 125])];
|
358 |
+
tensor<bool, [3]> var_583_end_mask_0 = const()[name = tensor<string, []>("op_583_end_mask_0"), val = tensor<bool, [3]>([true, false, true])];
|
359 |
+
tensor<bool, [3]> var_583_squeeze_mask_0 = const()[name = tensor<string, []>("op_583_squeeze_mask_0"), val = tensor<bool, [3]>([false, true, false])];
|
360 |
+
tensor<fp16, [1, 125]> var_583_cast_fp16 = slice_by_index(begin = var_583_begin_0, end = var_583_end_0, end_mask = var_583_end_mask_0, squeeze_mask = var_583_squeeze_mask_0, x = weights_1_cast_fp16)[name = tensor<string, []>("op_583_cast_fp16")];
|
361 |
+
tensor<int32, [1]> weights_5_axes_0 = const()[name = tensor<string, []>("weights_5_axes_0"), val = tensor<int32, [1]>([1])];
|
362 |
+
tensor<fp16, [1, 1, 125]> weights_5_cast_fp16 = expand_dims(axes = weights_5_axes_0, x = var_583_cast_fp16)[name = tensor<string, []>("weights_5_cast_fp16")];
|
363 |
+
tensor<int32, [1]> var_587_axes_0 = const()[name = tensor<string, []>("op_587_axes_0"), val = tensor<int32, [1]>([2])];
|
364 |
+
tensor<bool, []> var_587_keep_dims_0 = const()[name = tensor<string, []>("op_587_keep_dims_0"), val = tensor<bool, []>(false)];
|
365 |
+
tensor<fp16, [1, 1]> var_587_cast_fp16 = reduce_sum(axes = var_587_axes_0, keep_dims = var_587_keep_dims_0, x = weights_5_cast_fp16)[name = tensor<string, []>("op_587_cast_fp16")];
|
366 |
+
tensor<fp16, []> var_588_to_fp16 = const()[name = tensor<string, []>("op_588_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
367 |
+
tensor<fp16, [1, 1]> v1_1_cast_fp16 = add(x = var_587_cast_fp16, y = var_588_to_fp16)[name = tensor<string, []>("v1_1_cast_fp16")];
|
368 |
+
tensor<fp16, [1, 2560, 125]> var_590_cast_fp16 = mul(x = sequences_cast_fp16, y = weights_5_cast_fp16)[name = tensor<string, []>("op_590_cast_fp16")];
|
369 |
+
tensor<int32, [1]> var_592_axes_0 = const()[name = tensor<string, []>("op_592_axes_0"), val = tensor<int32, [1]>([2])];
|
370 |
+
tensor<bool, []> var_592_keep_dims_0 = const()[name = tensor<string, []>("op_592_keep_dims_0"), val = tensor<bool, []>(false)];
|
371 |
+
tensor<fp16, [1, 2560]> var_592_cast_fp16 = reduce_sum(axes = var_592_axes_0, keep_dims = var_592_keep_dims_0, x = var_590_cast_fp16)[name = tensor<string, []>("op_592_cast_fp16")];
|
372 |
+
tensor<fp16, [1, 2560]> mean_1_cast_fp16 = real_div(x = var_592_cast_fp16, y = v1_1_cast_fp16)[name = tensor<string, []>("mean_1_cast_fp16")];
|
373 |
+
tensor<int32, [1]> var_594_axes_0 = const()[name = tensor<string, []>("op_594_axes_0"), val = tensor<int32, [1]>([2])];
|
374 |
+
tensor<fp16, [1, 2560, 1]> var_594_cast_fp16 = expand_dims(axes = var_594_axes_0, x = mean_1_cast_fp16)[name = tensor<string, []>("op_594_cast_fp16")];
|
375 |
+
tensor<fp16, [1, 2560, 125]> var_595_cast_fp16 = sub(x = sequences_cast_fp16, y = var_594_cast_fp16)[name = tensor<string, []>("op_595_cast_fp16")];
|
376 |
+
tensor<fp16, [1, 2560, 125]> dx2_1_cast_fp16 = mul(x = var_595_cast_fp16, y = var_595_cast_fp16)[name = tensor<string, []>("dx2_1_cast_fp16")];
|
377 |
+
tensor<fp16, [1, 1, 125]> var_597_cast_fp16 = mul(x = weights_5_cast_fp16, y = weights_5_cast_fp16)[name = tensor<string, []>("op_597_cast_fp16")];
|
378 |
+
tensor<int32, [1]> v2_1_axes_0 = const()[name = tensor<string, []>("v2_1_axes_0"), val = tensor<int32, [1]>([2])];
|
379 |
+
tensor<bool, []> v2_1_keep_dims_0 = const()[name = tensor<string, []>("v2_1_keep_dims_0"), val = tensor<bool, []>(false)];
|
380 |
+
tensor<fp16, [1, 1]> v2_1_cast_fp16 = reduce_sum(axes = v2_1_axes_0, keep_dims = v2_1_keep_dims_0, x = var_597_cast_fp16)[name = tensor<string, []>("v2_1_cast_fp16")];
|
381 |
+
tensor<fp16, [1, 2560, 125]> var_600_cast_fp16 = mul(x = dx2_1_cast_fp16, y = weights_5_cast_fp16)[name = tensor<string, []>("op_600_cast_fp16")];
|
382 |
+
tensor<int32, [1]> var_602_axes_0 = const()[name = tensor<string, []>("op_602_axes_0"), val = tensor<int32, [1]>([2])];
|
383 |
+
tensor<bool, []> var_602_keep_dims_0 = const()[name = tensor<string, []>("op_602_keep_dims_0"), val = tensor<bool, []>(false)];
|
384 |
+
tensor<fp16, [1, 2560]> var_602_cast_fp16 = reduce_sum(axes = var_602_axes_0, keep_dims = var_602_keep_dims_0, x = var_600_cast_fp16)[name = tensor<string, []>("op_602_cast_fp16")];
|
385 |
+
tensor<fp16, [1, 1]> var_603_cast_fp16 = real_div(x = v2_1_cast_fp16, y = v1_1_cast_fp16)[name = tensor<string, []>("op_603_cast_fp16")];
|
386 |
+
tensor<fp16, [1, 1]> var_604_cast_fp16 = sub(x = v1_1_cast_fp16, y = var_603_cast_fp16)[name = tensor<string, []>("op_604_cast_fp16")];
|
387 |
+
tensor<fp16, []> var_605_to_fp16 = const()[name = tensor<string, []>("op_605_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
388 |
+
tensor<fp16, [1, 1]> var_606_cast_fp16 = add(x = var_604_cast_fp16, y = var_605_to_fp16)[name = tensor<string, []>("op_606_cast_fp16")];
|
389 |
+
tensor<fp16, [1, 2560]> var_1_cast_fp16 = real_div(x = var_602_cast_fp16, y = var_606_cast_fp16)[name = tensor<string, []>("var_1_cast_fp16")];
|
390 |
+
tensor<fp16, [1, 2560]> std_1_cast_fp16 = sqrt(x = var_1_cast_fp16)[name = tensor<string, []>("std_1_cast_fp16")];
|
391 |
+
tensor<bool, []> var_610_interleave_0 = const()[name = tensor<string, []>("op_610_interleave_0"), val = tensor<bool, []>(false)];
|
392 |
+
tensor<fp16, [1, 5120]> var_610_cast_fp16 = concat(axis = var_12, interleave = var_610_interleave_0, values = (mean_1_cast_fp16, std_1_cast_fp16))[name = tensor<string, []>("op_610_cast_fp16")];
|
393 |
+
tensor<int32, [3]> var_612_begin_0 = const()[name = tensor<string, []>("op_612_begin_0"), val = tensor<int32, [3]>([0, 1, 0])];
|
394 |
+
tensor<int32, [3]> var_612_end_0 = const()[name = tensor<string, []>("op_612_end_0"), val = tensor<int32, [3]>([1, 2, 125])];
|
395 |
+
tensor<bool, [3]> var_612_end_mask_0 = const()[name = tensor<string, []>("op_612_end_mask_0"), val = tensor<bool, [3]>([true, false, true])];
|
396 |
+
tensor<bool, [3]> var_612_squeeze_mask_0 = const()[name = tensor<string, []>("op_612_squeeze_mask_0"), val = tensor<bool, [3]>([false, true, false])];
|
397 |
+
tensor<fp16, [1, 125]> var_612_cast_fp16 = slice_by_index(begin = var_612_begin_0, end = var_612_end_0, end_mask = var_612_end_mask_0, squeeze_mask = var_612_squeeze_mask_0, x = weights_1_cast_fp16)[name = tensor<string, []>("op_612_cast_fp16")];
|
398 |
+
tensor<int32, [1]> weights_9_axes_0 = const()[name = tensor<string, []>("weights_9_axes_0"), val = tensor<int32, [1]>([1])];
|
399 |
+
tensor<fp16, [1, 1, 125]> weights_9_cast_fp16 = expand_dims(axes = weights_9_axes_0, x = var_612_cast_fp16)[name = tensor<string, []>("weights_9_cast_fp16")];
|
400 |
+
tensor<int32, [1]> var_616_axes_0 = const()[name = tensor<string, []>("op_616_axes_0"), val = tensor<int32, [1]>([2])];
|
401 |
+
tensor<bool, []> var_616_keep_dims_0 = const()[name = tensor<string, []>("op_616_keep_dims_0"), val = tensor<bool, []>(false)];
|
402 |
+
tensor<fp16, [1, 1]> var_616_cast_fp16 = reduce_sum(axes = var_616_axes_0, keep_dims = var_616_keep_dims_0, x = weights_9_cast_fp16)[name = tensor<string, []>("op_616_cast_fp16")];
|
403 |
+
tensor<fp16, []> var_617_to_fp16 = const()[name = tensor<string, []>("op_617_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
404 |
+
tensor<fp16, [1, 1]> v1_3_cast_fp16 = add(x = var_616_cast_fp16, y = var_617_to_fp16)[name = tensor<string, []>("v1_3_cast_fp16")];
|
405 |
+
tensor<fp16, [1, 2560, 125]> var_619_cast_fp16 = mul(x = sequences_cast_fp16, y = weights_9_cast_fp16)[name = tensor<string, []>("op_619_cast_fp16")];
|
406 |
+
tensor<int32, [1]> var_621_axes_0 = const()[name = tensor<string, []>("op_621_axes_0"), val = tensor<int32, [1]>([2])];
|
407 |
+
tensor<bool, []> var_621_keep_dims_0 = const()[name = tensor<string, []>("op_621_keep_dims_0"), val = tensor<bool, []>(false)];
|
408 |
+
tensor<fp16, [1, 2560]> var_621_cast_fp16 = reduce_sum(axes = var_621_axes_0, keep_dims = var_621_keep_dims_0, x = var_619_cast_fp16)[name = tensor<string, []>("op_621_cast_fp16")];
|
409 |
+
tensor<fp16, [1, 2560]> mean_3_cast_fp16 = real_div(x = var_621_cast_fp16, y = v1_3_cast_fp16)[name = tensor<string, []>("mean_3_cast_fp16")];
|
410 |
+
tensor<int32, [1]> var_623_axes_0 = const()[name = tensor<string, []>("op_623_axes_0"), val = tensor<int32, [1]>([2])];
|
411 |
+
tensor<fp16, [1, 2560, 1]> var_623_cast_fp16 = expand_dims(axes = var_623_axes_0, x = mean_3_cast_fp16)[name = tensor<string, []>("op_623_cast_fp16")];
|
412 |
+
tensor<fp16, [1, 2560, 125]> var_624_cast_fp16 = sub(x = sequences_cast_fp16, y = var_623_cast_fp16)[name = tensor<string, []>("op_624_cast_fp16")];
|
413 |
+
tensor<fp16, [1, 2560, 125]> dx2_3_cast_fp16 = mul(x = var_624_cast_fp16, y = var_624_cast_fp16)[name = tensor<string, []>("dx2_3_cast_fp16")];
|
414 |
+
tensor<fp16, [1, 1, 125]> var_626_cast_fp16 = mul(x = weights_9_cast_fp16, y = weights_9_cast_fp16)[name = tensor<string, []>("op_626_cast_fp16")];
|
415 |
+
tensor<int32, [1]> v2_3_axes_0 = const()[name = tensor<string, []>("v2_3_axes_0"), val = tensor<int32, [1]>([2])];
|
416 |
+
tensor<bool, []> v2_3_keep_dims_0 = const()[name = tensor<string, []>("v2_3_keep_dims_0"), val = tensor<bool, []>(false)];
|
417 |
+
tensor<fp16, [1, 1]> v2_3_cast_fp16 = reduce_sum(axes = v2_3_axes_0, keep_dims = v2_3_keep_dims_0, x = var_626_cast_fp16)[name = tensor<string, []>("v2_3_cast_fp16")];
|
418 |
+
tensor<fp16, [1, 2560, 125]> var_629_cast_fp16 = mul(x = dx2_3_cast_fp16, y = weights_9_cast_fp16)[name = tensor<string, []>("op_629_cast_fp16")];
|
419 |
+
tensor<int32, [1]> var_631_axes_0 = const()[name = tensor<string, []>("op_631_axes_0"), val = tensor<int32, [1]>([2])];
|
420 |
+
tensor<bool, []> var_631_keep_dims_0 = const()[name = tensor<string, []>("op_631_keep_dims_0"), val = tensor<bool, []>(false)];
|
421 |
+
tensor<fp16, [1, 2560]> var_631_cast_fp16 = reduce_sum(axes = var_631_axes_0, keep_dims = var_631_keep_dims_0, x = var_629_cast_fp16)[name = tensor<string, []>("op_631_cast_fp16")];
|
422 |
+
tensor<fp16, [1, 1]> var_632_cast_fp16 = real_div(x = v2_3_cast_fp16, y = v1_3_cast_fp16)[name = tensor<string, []>("op_632_cast_fp16")];
|
423 |
+
tensor<fp16, [1, 1]> var_633_cast_fp16 = sub(x = v1_3_cast_fp16, y = var_632_cast_fp16)[name = tensor<string, []>("op_633_cast_fp16")];
|
424 |
+
tensor<fp16, []> var_634_to_fp16 = const()[name = tensor<string, []>("op_634_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
425 |
+
tensor<fp16, [1, 1]> var_635_cast_fp16 = add(x = var_633_cast_fp16, y = var_634_to_fp16)[name = tensor<string, []>("op_635_cast_fp16")];
|
426 |
+
tensor<fp16, [1, 2560]> var_3_cast_fp16 = real_div(x = var_631_cast_fp16, y = var_635_cast_fp16)[name = tensor<string, []>("var_3_cast_fp16")];
|
427 |
+
tensor<fp16, [1, 2560]> std_3_cast_fp16 = sqrt(x = var_3_cast_fp16)[name = tensor<string, []>("std_3_cast_fp16")];
|
428 |
+
tensor<bool, []> var_639_interleave_0 = const()[name = tensor<string, []>("op_639_interleave_0"), val = tensor<bool, []>(false)];
|
429 |
+
tensor<fp16, [1, 5120]> var_639_cast_fp16 = concat(axis = var_12, interleave = var_639_interleave_0, values = (mean_3_cast_fp16, std_3_cast_fp16))[name = tensor<string, []>("op_639_cast_fp16")];
|
430 |
+
tensor<int32, [3]> var_641_begin_0 = const()[name = tensor<string, []>("op_641_begin_0"), val = tensor<int32, [3]>([0, 2, 0])];
|
431 |
+
tensor<int32, [3]> var_641_end_0 = const()[name = tensor<string, []>("op_641_end_0"), val = tensor<int32, [3]>([1, 3, 125])];
|
432 |
+
tensor<bool, [3]> var_641_end_mask_0 = const()[name = tensor<string, []>("op_641_end_mask_0"), val = tensor<bool, [3]>([true, false, true])];
|
433 |
+
tensor<bool, [3]> var_641_squeeze_mask_0 = const()[name = tensor<string, []>("op_641_squeeze_mask_0"), val = tensor<bool, [3]>([false, true, false])];
|
434 |
+
tensor<fp16, [1, 125]> var_641_cast_fp16 = slice_by_index(begin = var_641_begin_0, end = var_641_end_0, end_mask = var_641_end_mask_0, squeeze_mask = var_641_squeeze_mask_0, x = weights_1_cast_fp16)[name = tensor<string, []>("op_641_cast_fp16")];
|
435 |
+
tensor<int32, [1]> weights_axes_0 = const()[name = tensor<string, []>("weights_axes_0"), val = tensor<int32, [1]>([1])];
|
436 |
+
tensor<fp16, [1, 1, 125]> weights_cast_fp16 = expand_dims(axes = weights_axes_0, x = var_641_cast_fp16)[name = tensor<string, []>("weights_cast_fp16")];
|
437 |
+
tensor<int32, [1]> var_645_axes_0 = const()[name = tensor<string, []>("op_645_axes_0"), val = tensor<int32, [1]>([2])];
|
438 |
+
tensor<bool, []> var_645_keep_dims_0 = const()[name = tensor<string, []>("op_645_keep_dims_0"), val = tensor<bool, []>(false)];
|
439 |
+
tensor<fp16, [1, 1]> var_645_cast_fp16 = reduce_sum(axes = var_645_axes_0, keep_dims = var_645_keep_dims_0, x = weights_cast_fp16)[name = tensor<string, []>("op_645_cast_fp16")];
|
440 |
+
tensor<fp16, []> var_646_to_fp16 = const()[name = tensor<string, []>("op_646_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
441 |
+
tensor<fp16, [1, 1]> v1_cast_fp16 = add(x = var_645_cast_fp16, y = var_646_to_fp16)[name = tensor<string, []>("v1_cast_fp16")];
|
442 |
+
tensor<fp16, [1, 2560, 125]> var_648_cast_fp16 = mul(x = sequences_cast_fp16, y = weights_cast_fp16)[name = tensor<string, []>("op_648_cast_fp16")];
|
443 |
+
tensor<int32, [1]> var_650_axes_0 = const()[name = tensor<string, []>("op_650_axes_0"), val = tensor<int32, [1]>([2])];
|
444 |
+
tensor<bool, []> var_650_keep_dims_0 = const()[name = tensor<string, []>("op_650_keep_dims_0"), val = tensor<bool, []>(false)];
|
445 |
+
tensor<fp16, [1, 2560]> var_650_cast_fp16 = reduce_sum(axes = var_650_axes_0, keep_dims = var_650_keep_dims_0, x = var_648_cast_fp16)[name = tensor<string, []>("op_650_cast_fp16")];
|
446 |
+
tensor<fp16, [1, 2560]> mean_cast_fp16 = real_div(x = var_650_cast_fp16, y = v1_cast_fp16)[name = tensor<string, []>("mean_cast_fp16")];
|
447 |
+
tensor<int32, [1]> var_652_axes_0 = const()[name = tensor<string, []>("op_652_axes_0"), val = tensor<int32, [1]>([2])];
|
448 |
+
tensor<fp16, [1, 2560, 1]> var_652_cast_fp16 = expand_dims(axes = var_652_axes_0, x = mean_cast_fp16)[name = tensor<string, []>("op_652_cast_fp16")];
|
449 |
+
tensor<fp16, [1, 2560, 125]> var_653_cast_fp16 = sub(x = sequences_cast_fp16, y = var_652_cast_fp16)[name = tensor<string, []>("op_653_cast_fp16")];
|
450 |
+
tensor<fp16, [1, 2560, 125]> dx2_cast_fp16 = mul(x = var_653_cast_fp16, y = var_653_cast_fp16)[name = tensor<string, []>("dx2_cast_fp16")];
|
451 |
+
tensor<fp16, [1, 1, 125]> var_655_cast_fp16 = mul(x = weights_cast_fp16, y = weights_cast_fp16)[name = tensor<string, []>("op_655_cast_fp16")];
|
452 |
+
tensor<int32, [1]> v2_axes_0 = const()[name = tensor<string, []>("v2_axes_0"), val = tensor<int32, [1]>([2])];
|
453 |
+
tensor<bool, []> v2_keep_dims_0 = const()[name = tensor<string, []>("v2_keep_dims_0"), val = tensor<bool, []>(false)];
|
454 |
+
tensor<fp16, [1, 1]> v2_cast_fp16 = reduce_sum(axes = v2_axes_0, keep_dims = v2_keep_dims_0, x = var_655_cast_fp16)[name = tensor<string, []>("v2_cast_fp16")];
|
455 |
+
tensor<fp16, [1, 2560, 125]> var_658_cast_fp16 = mul(x = dx2_cast_fp16, y = weights_cast_fp16)[name = tensor<string, []>("op_658_cast_fp16")];
|
456 |
+
tensor<int32, [1]> var_660_axes_0 = const()[name = tensor<string, []>("op_660_axes_0"), val = tensor<int32, [1]>([2])];
|
457 |
+
tensor<bool, []> var_660_keep_dims_0 = const()[name = tensor<string, []>("op_660_keep_dims_0"), val = tensor<bool, []>(false)];
|
458 |
+
tensor<fp16, [1, 2560]> var_660_cast_fp16 = reduce_sum(axes = var_660_axes_0, keep_dims = var_660_keep_dims_0, x = var_658_cast_fp16)[name = tensor<string, []>("op_660_cast_fp16")];
|
459 |
+
tensor<fp16, [1, 1]> var_661_cast_fp16 = real_div(x = v2_cast_fp16, y = v1_cast_fp16)[name = tensor<string, []>("op_661_cast_fp16")];
|
460 |
+
tensor<fp16, [1, 1]> var_662_cast_fp16 = sub(x = v1_cast_fp16, y = var_661_cast_fp16)[name = tensor<string, []>("op_662_cast_fp16")];
|
461 |
+
tensor<fp16, []> var_663_to_fp16 = const()[name = tensor<string, []>("op_663_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
462 |
+
tensor<fp16, [1, 1]> var_664_cast_fp16 = add(x = var_662_cast_fp16, y = var_663_to_fp16)[name = tensor<string, []>("op_664_cast_fp16")];
|
463 |
+
tensor<fp16, [1, 2560]> var_cast_fp16 = real_div(x = var_660_cast_fp16, y = var_664_cast_fp16)[name = tensor<string, []>("var_cast_fp16")];
|
464 |
+
tensor<fp16, [1, 2560]> std_cast_fp16 = sqrt(x = var_cast_fp16)[name = tensor<string, []>("std_cast_fp16")];
|
465 |
+
tensor<bool, []> var_668_interleave_0 = const()[name = tensor<string, []>("op_668_interleave_0"), val = tensor<bool, []>(false)];
|
466 |
+
tensor<fp16, [1, 5120]> var_668_cast_fp16 = concat(axis = var_12, interleave = var_668_interleave_0, values = (mean_cast_fp16, std_cast_fp16))[name = tensor<string, []>("op_668_cast_fp16")];
|
467 |
+
tensor<int32, []> input_axis_0 = const()[name = tensor<string, []>("input_axis_0"), val = tensor<int32, []>(1)];
|
468 |
+
tensor<fp16, [1, 3, 5120]> input_cast_fp16 = stack(axis = input_axis_0, values = (var_610_cast_fp16, var_639_cast_fp16, var_668_cast_fp16))[name = tensor<string, []>("input_cast_fp16")];
|
469 |
+
tensor<fp16, [256, 5120]> model_resnet_seg_1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [983040]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4008768))), lut = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4991872))), name = tensor<string, []>("model_resnet_seg_1_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([256, 5120])];
|
470 |
+
tensor<fp16, [256]> model_resnet_seg_1_bias_to_fp16 = const()[name = tensor<string, []>("model_resnet_seg_1_bias_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4992064)))];
|
471 |
+
tensor<fp16, [1, 3, 256]> speaker_embeddings = linear(bias = model_resnet_seg_1_bias_to_fp16, weight = model_resnet_seg_1_weight_to_fp16_palettized, x = input_cast_fp16)[name = tensor<string, []>("linear_0_cast_fp16")];
|
472 |
+
} -> (speaker_embeddings);
|
473 |
+
}
speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:60cbb0421b3878825a27359253869a44dcc0aa238d8a62a937d9365f169ed6cb
size 4992640
speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedderPreprocessor.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5dd1e6ea694479da669d42d9752db8ebffdc7582b80c90f06452e2ed1f72cf8f
size 243
speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedderPreprocessor.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7b2c40c7ba306c196e738d8d4efce7e59234875e854553a072cc4f964f6cb91e
size 330
speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedderPreprocessor.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,77 @@
[
  {
    "metadataOutputVersion" : "3.0",
    "storagePrecision" : "Float32",
    "outputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 998 × 80)",
        "shortDescription" : "",
        "shape" : "[1, 998, 80]",
        "name" : "preprocessor_output_1",
        "type" : "MultiArray"
      }
    ],
    "modelParameters" : [

    ],
    "specificationVersion" : 7,
    "mlProgramOperationTypeHistogram" : {
      "Ios16.cast" : 2,
      "Ios16.mul" : 4,
      "SliceByIndex" : 2,
      "Transpose" : 2,
      "SlidingWindows" : 1,
      "Ios16.sub" : 3,
      "Ios16.log" : 1,
      "Ios16.reduceMean" : 2,
      "Ios16.square" : 2,
      "Squeeze" : 2,
      "Ios16.matmul" : 2,
      "Ios16.add" : 1,
      "Ios16.linear" : 1,
      "ExpandDims" : 4,
      "Ios16.gather" : 2,
      "Ios16.maximum" : 1,
      "Identity" : 1,
      "Pad" : 2
    },
    "computePrecision" : "Mixed (Float16, Float32, Int32)",
    "isUpdatable" : "0",
    "stateSchema" : [

    ],
    "availability" : {
      "macOS" : "13.0",
      "tvOS" : "16.0",
      "visionOS" : "1.0",
      "watchOS" : "9.0",
      "iOS" : "16.0",
      "macCatalyst" : "16.0"
    },
    "modelType" : {
      "name" : "MLModelType_mlProgram"
    },
    "userDefinedMetadata" : {
      "com.github.apple.coremltools.source_dialect" : "TorchScript",
      "com.github.apple.coremltools.source" : "torch==2.5.1",
      "com.github.apple.coremltools.version" : "8.1"
    },
    "inputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 160000)",
        "shortDescription" : "",
        "shape" : "[1, 160000]",
        "name" : "waveforms",
        "type" : "MultiArray"
      }
    ],
    "generatedClassName" : "SpeakerEmbeddingPreprocessor",
    "method" : "predict"
  }
]
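For reference, the schema above (Float16 "waveforms" of shape [1, 160000] in, Float16 "preprocessor_output_1" of shape [1, 998, 80] out) can be exercised directly against the compiled preprocessor. The sketch below is illustrative only, not part of the shipped files: it assumes coremltools >= 7 (for CompiledMLModel) on macOS, uses the repository path shown above, and feeds random samples in place of real audio (presumably 10 s at 16 kHz).

import numpy as np
import coremltools as ct

# Load the compiled preprocessor from this variant of the repository.
model = ct.models.CompiledMLModel(
    "speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedderPreprocessor.mlmodelc"
)

# "waveforms": Float16, shape [1, 160000] -- placeholder for 10 s of 16 kHz audio.
waveforms = np.random.uniform(-1.0, 1.0, size=(1, 160000)).astype(np.float16)

out = model.predict({"waveforms": waveforms})
features = out["preprocessor_output_1"]  # Float16, shape [1, 998, 80]
print(features.shape)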
speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedderPreprocessor.mlmodelc/model.mil
ADDED
@@ -0,0 +1,90 @@
program(1.0)
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3401.3.1"}, {"coremlc-version", "3401.4.1"}, {"coremltools-component-torch", "2.5.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.1"}})]
{
func main<ios16>(tensor<fp16, [1, 160000]> waveforms) {
tensor<string, []> cast_0_dtype_0 = const()[name = tensor<string, []>("cast_0_dtype_0"), val = tensor<string, []>("fp32")];
tensor<fp32, []> var_2_promoted = const()[name = tensor<string, []>("op_2_promoted"), val = tensor<fp32, []>(0x1p+15)];
tensor<fp32, [1, 160000]> cast_0 = cast(dtype = cast_0_dtype_0, x = waveforms)[name = tensor<string, []>("cast_11")];
tensor<fp32, [1, 160000]> waveform_1 = mul(x = cast_0, y = var_2_promoted)[name = tensor<string, []>("waveform_1")];
tensor<int32, [2]> var_6_begin_0 = const()[name = tensor<string, []>("op_6_begin_0"), val = tensor<int32, [2]>([0, 0])];
tensor<int32, [2]> var_6_end_0 = const()[name = tensor<string, []>("op_6_end_0"), val = tensor<int32, [2]>([1, 160000])];
tensor<bool, [2]> var_6_end_mask_0 = const()[name = tensor<string, []>("op_6_end_mask_0"), val = tensor<bool, [2]>([false, true])];
tensor<bool, [2]> var_6_squeeze_mask_0 = const()[name = tensor<string, []>("op_6_squeeze_mask_0"), val = tensor<bool, [2]>([true, false])];
tensor<fp32, [160000]> var_6 = slice_by_index(begin = var_6_begin_0, end = var_6_end_0, end_mask = var_6_end_mask_0, squeeze_mask = var_6_squeeze_mask_0, x = waveform_1)[name = tensor<string, []>("op_6")];
tensor<int32, []> sliding_windows_0_axis_0 = const()[name = tensor<string, []>("sliding_windows_0_axis_0"), val = tensor<int32, []>(0)];
tensor<int32, []> sliding_windows_0_size_0 = const()[name = tensor<string, []>("sliding_windows_0_size_0"), val = tensor<int32, []>(400)];
tensor<int32, []> sliding_windows_0_stride_0 = const()[name = tensor<string, []>("sliding_windows_0_stride_0"), val = tensor<int32, []>(160)];
tensor<fp32, [998, 400]> sliding_windows_0 = sliding_windows(axis = sliding_windows_0_axis_0, size = sliding_windows_0_size_0, stride = sliding_windows_0_stride_0, x = var_6)[name = tensor<string, []>("sliding_windows_0")];
tensor<int32, [1]> var_42_axes_0 = const()[name = tensor<string, []>("op_42_axes_0"), val = tensor<int32, [1]>([1])];
tensor<bool, []> var_42_keep_dims_0 = const()[name = tensor<string, []>("op_42_keep_dims_0"), val = tensor<bool, []>(false)];
tensor<fp32, [998]> var_42 = reduce_mean(axes = var_42_axes_0, keep_dims = var_42_keep_dims_0, x = sliding_windows_0)[name = tensor<string, []>("op_42")];
tensor<int32, [1]> row_means_axes_0 = const()[name = tensor<string, []>("row_means_axes_0"), val = tensor<int32, [1]>([1])];
tensor<fp32, [998, 1]> row_means = expand_dims(axes = row_means_axes_0, x = var_42)[name = tensor<string, []>("row_means")];
tensor<fp32, [998, 400]> strided_input_3 = sub(x = sliding_windows_0, y = row_means)[name = tensor<string, []>("strided_input_3")];
tensor<int32, [1]> input_1_axes_0 = const()[name = tensor<string, []>("input_1_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp32, [1, 998, 400]> input_1 = expand_dims(axes = input_1_axes_0, x = strided_input_3)[name = tensor<string, []>("input_1")];
tensor<fp32, []> const_2 = const()[name = tensor<string, []>("const_2"), val = tensor<fp32, []>(0x0p+0)];
tensor<int32, [6]> var_54_pad_0 = const()[name = tensor<string, []>("op_54_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 1, 0])];
tensor<string, []> var_54_mode_0 = const()[name = tensor<string, []>("op_54_mode_0"), val = tensor<string, []>("replicate")];
tensor<fp32, [1, 998, 401]> var_54 = pad(constant_val = const_2, mode = var_54_mode_0, pad = var_54_pad_0, x = input_1)[name = tensor<string, []>("op_54")];
tensor<int32, [1]> offset_strided_input_axes_0 = const()[name = tensor<string, []>("offset_strided_input_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp32, [998, 401]> offset_strided_input = squeeze(axes = offset_strided_input_axes_0, x = var_54)[name = tensor<string, []>("offset_strided_input")];
tensor<int32, [2]> var_66_begin_0 = const()[name = tensor<string, []>("op_66_begin_0"), val = tensor<int32, [2]>([0, 0])];
tensor<int32, [2]> var_66_end_0 = const()[name = tensor<string, []>("op_66_end_0"), val = tensor<int32, [2]>([998, 400])];
tensor<bool, [2]> var_66_end_mask_0 = const()[name = tensor<string, []>("op_66_end_mask_0"), val = tensor<bool, [2]>([true, false])];
|
35 |
+
tensor<fp32, [998, 400]> var_66 = slice_by_index(begin = var_66_begin_0, end = var_66_end_0, end_mask = var_66_end_mask_0, x = offset_strided_input)[name = tensor<string, []>("op_66")];
|
36 |
+
tensor<fp32, []> var_67 = const()[name = tensor<string, []>("op_67"), val = tensor<fp32, []>(0x1.f0a3d8p-1)];
|
37 |
+
tensor<fp32, [998, 400]> var_68 = mul(x = var_66, y = var_67)[name = tensor<string, []>("op_68")];
|
38 |
+
tensor<fp32, [998, 400]> strided_input_5 = sub(x = strided_input_3, y = var_68)[name = tensor<string, []>("strided_input_5")];
|
39 |
+
tensor<fp32, [1, 400]> window_function = const()[name = tensor<string, []>("window_function"), val = tensor<fp32, [1, 400]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
|
40 |
+
tensor<fp32, [998, 400]> strided_input_7 = mul(x = strided_input_5, y = window_function)[name = tensor<string, []>("strided_input_7")];
|
41 |
+
tensor<int32, [1]> input_3_axes_0 = const()[name = tensor<string, []>("input_3_axes_0"), val = tensor<int32, [1]>([0])];
|
42 |
+
tensor<fp32, [1, 998, 400]> input_3 = expand_dims(axes = input_3_axes_0, x = strided_input_7)[name = tensor<string, []>("input_3")];
|
43 |
+
tensor<fp32, []> const_3 = const()[name = tensor<string, []>("const_3"), val = tensor<fp32, []>(0x0p+0)];
|
44 |
+
tensor<int32, [6]> var_90_pad_0 = const()[name = tensor<string, []>("op_90_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 0, 112])];
|
45 |
+
tensor<string, []> var_90_mode_0 = const()[name = tensor<string, []>("op_90_mode_0"), val = tensor<string, []>("constant")];
|
46 |
+
tensor<fp32, [1, 998, 512]> var_90 = pad(constant_val = const_3, mode = var_90_mode_0, pad = var_90_pad_0, x = input_3)[name = tensor<string, []>("op_90")];
|
47 |
+
tensor<int32, [1]> strided_input_axes_0 = const()[name = tensor<string, []>("strided_input_axes_0"), val = tensor<int32, [1]>([0])];
|
48 |
+
tensor<fp32, [998, 512]> strided_input = squeeze(axes = strided_input_axes_0, x = var_90)[name = tensor<string, []>("strided_input")];
|
49 |
+
tensor<fp32, [512, 512]> cos_0 = const()[name = tensor<string, []>("cos_0"), val = tensor<fp32, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1728)))];
|
50 |
+
tensor<fp32, [512, 512]> sin_0 = const()[name = tensor<string, []>("sin_0"), val = tensor<fp32, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1050368)))];
|
51 |
+
tensor<bool, []> matmul_1_transpose_x_1 = const()[name = tensor<string, []>("matmul_1_transpose_x_1"), val = tensor<bool, []>(false)];
|
52 |
+
tensor<bool, []> matmul_1_transpose_y_1 = const()[name = tensor<string, []>("matmul_1_transpose_y_1"), val = tensor<bool, []>(true)];
|
53 |
+
tensor<fp32, [512, 998]> matmul_1 = matmul(transpose_x = matmul_1_transpose_x_1, transpose_y = matmul_1_transpose_y_1, x = cos_0, y = strided_input)[name = tensor<string, []>("matmul_1")];
|
54 |
+
tensor<bool, []> matmul_3_transpose_x_1 = const()[name = tensor<string, []>("matmul_3_transpose_x_1"), val = tensor<bool, []>(false)];
|
55 |
+
tensor<bool, []> matmul_3_transpose_y_1 = const()[name = tensor<string, []>("matmul_3_transpose_y_1"), val = tensor<bool, []>(true)];
|
56 |
+
tensor<fp32, [512, 998]> matmul_3 = matmul(transpose_x = matmul_3_transpose_x_1, transpose_y = matmul_3_transpose_y_1, x = sin_0, y = strided_input)[name = tensor<string, []>("matmul_3")];
|
57 |
+
tensor<fp32, []> mul_1_y_0 = const()[name = tensor<string, []>("mul_1_y_0"), val = tensor<fp32, []>(-0x1p+0)];
|
58 |
+
tensor<fp32, [512, 998]> mul_1 = mul(x = matmul_3, y = mul_1_y_0)[name = tensor<string, []>("mul_1")];
|
59 |
+
tensor<int32, [2]> transpose_3_perm_0 = const()[name = tensor<string, []>("transpose_3_perm_0"), val = tensor<int32, [2]>([-1, 0])];
|
60 |
+
tensor<int32, [2]> transpose_4_perm_0 = const()[name = tensor<string, []>("transpose_4_perm_0"), val = tensor<int32, [2]>([-1, 0])];
|
61 |
+
tensor<int32, [257]> range_1d_2 = const()[name = tensor<string, []>("range_1d_2"), val = tensor<int32, [257]>([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256])];
|
62 |
+
tensor<int32, []> gather_0_axis_0 = const()[name = tensor<string, []>("gather_0_axis_0"), val = tensor<int32, []>(-1)];
|
63 |
+
tensor<int32, []> gather_0_batch_dims_0 = const()[name = tensor<string, []>("gather_0_batch_dims_0"), val = tensor<int32, []>(0)];
|
64 |
+
tensor<fp32, [998, 512]> transpose_3 = transpose(perm = transpose_3_perm_0, x = matmul_1)[name = tensor<string, []>("transpose_6")];
|
65 |
+
tensor<fp32, [998, 257]> gather_0 = gather(axis = gather_0_axis_0, batch_dims = gather_0_batch_dims_0, indices = range_1d_2, x = transpose_3)[name = tensor<string, []>("gather_0")];
|
66 |
+
tensor<int32, []> gather_1_axis_0 = const()[name = tensor<string, []>("gather_1_axis_0"), val = tensor<int32, []>(-1)];
|
67 |
+
tensor<int32, []> gather_1_batch_dims_0 = const()[name = tensor<string, []>("gather_1_batch_dims_0"), val = tensor<int32, []>(0)];
|
68 |
+
tensor<fp32, [998, 512]> transpose_4 = transpose(perm = transpose_4_perm_0, x = mul_1)[name = tensor<string, []>("transpose_5")];
|
69 |
+
tensor<fp32, [998, 257]> gather_1 = gather(axis = gather_1_axis_0, batch_dims = gather_1_batch_dims_0, indices = range_1d_2, x = transpose_4)[name = tensor<string, []>("gather_1")];
|
70 |
+
tensor<fp32, [998, 257]> square_0 = square(x = gather_0)[name = tensor<string, []>("square_0")];
|
71 |
+
tensor<fp32, [998, 257]> square_1 = square(x = gather_1)[name = tensor<string, []>("square_1")];
|
72 |
+
tensor<fp32, [998, 257]> add_1 = add(x = square_0, y = square_1)[name = tensor<string, []>("add_1")];
|
73 |
+
tensor<fp32, [998, 257]> spectrum = identity(x = add_1)[name = tensor<string, []>("spectrum")];
|
74 |
+
tensor<fp32, [80, 257]> mel_energies_3 = const()[name = tensor<string, []>("mel_energies_3"), val = tensor<fp32, [80, 257]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2099008)))];
|
75 |
+
tensor<fp32, [80]> mel_energies_bias_0 = const()[name = tensor<string, []>("mel_energies_bias_0"), val = tensor<fp32, [80]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2181312)))];
|
76 |
+
tensor<fp32, [998, 80]> mel_energies = linear(bias = mel_energies_bias_0, weight = mel_energies_3, x = spectrum)[name = tensor<string, []>("mel_energies")];
|
77 |
+
tensor<fp32, []> const_10 = const()[name = tensor<string, []>("const_10"), val = tensor<fp32, []>(0x1p-23)];
|
78 |
+
tensor<fp32, [998, 80]> var_186 = maximum(x = mel_energies, y = const_10)[name = tensor<string, []>("op_186")];
|
79 |
+
tensor<fp32, []> filter_banks_epsilon_0 = const()[name = tensor<string, []>("filter_banks_epsilon_0"), val = tensor<fp32, []>(0x1p-149)];
|
80 |
+
tensor<fp32, [998, 80]> filter_banks = log(epsilon = filter_banks_epsilon_0, x = var_186)[name = tensor<string, []>("filter_banks")];
|
81 |
+
tensor<int32, [1]> var_192_axes_0 = const()[name = tensor<string, []>("op_192_axes_0"), val = tensor<int32, [1]>([0])];
|
82 |
+
tensor<bool, []> var_192_keep_dims_0 = const()[name = tensor<string, []>("op_192_keep_dims_0"), val = tensor<bool, []>(true)];
|
83 |
+
tensor<fp32, [1, 80]> var_192 = reduce_mean(axes = var_192_axes_0, keep_dims = var_192_keep_dims_0, x = filter_banks)[name = tensor<string, []>("op_192")];
|
84 |
+
tensor<fp32, [998, 80]> var_194 = sub(x = filter_banks, y = var_192)[name = tensor<string, []>("op_194")];
|
85 |
+
tensor<int32, [1]> obj_axes_0 = const()[name = tensor<string, []>("obj_axes_0"), val = tensor<int32, [1]>([0])];
|
86 |
+
tensor<fp32, [1, 998, 80]> preprocessor_output_1_type_fp32 = expand_dims(axes = obj_axes_0, x = var_194)[name = tensor<string, []>("obj")];
|
87 |
+
tensor<string, []> cast_9_dtype_0 = const()[name = tensor<string, []>("cast_9_dtype_0"), val = tensor<string, []>("fp16")];
|
88 |
+
tensor<fp16, [1, 998, 80]> preprocessor_output_1 = cast(dtype = cast_9_dtype_0, x = preprocessor_output_1_type_fp32)[name = tensor<string, []>("cast_10")];
|
89 |
+
} -> (preprocessor_output_1);
|
90 |
+
}
|
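For readers skimming the graph above: it implements a Kaldi-style log-mel filterbank front end (400-sample frames at a 160-sample hop over a 160,000-sample input, presumably 10 s at 16 kHz; per-frame DC removal; ~0.97 pre-emphasis; a stored 400-point window; a 512-point DFT realized as matmuls against stored cos/sin matrices, with the first 257 bins kept as a power spectrum; an 80-band mel projection; a 2^-23 floor and log; and mean normalization over time), yielding the [1, 998, 80] fp16 preprocessor_output_1. The NumPy sketch below mirrors only that structure; the real window, DFT matrices, and mel filterbank are constants in weights/weight.bin, so the placeholder values here will not reproduce the model's output.

```python
import numpy as np

def fbank_sketch(waveform: np.ndarray) -> np.ndarray:
    """Structural sketch of SpeakerEmbedderPreprocessor (not numerically exact).

    waveform: float32, 160_000 samples.
    Returns a [1, 998, 80] float16 array of mean-normalized log-mel features.
    """
    # sliding_windows(size=400, stride=160) -> [998, 400]
    n_frames = (len(waveform) - 400) // 160 + 1               # 998
    idx = 160 * np.arange(n_frames)[:, None] + np.arange(400)[None, :]
    frames = waveform[idx]

    # per-frame DC removal (reduce_mean over axis 1, then sub)
    frames = frames - frames.mean(axis=1, keepdims=True)

    # pre-emphasis with coefficient ~0.97; the previous sample comes from
    # replicate-padding each frame by one on the left (the pad/slice ops above)
    prev = np.concatenate([frames[:, :1], frames[:, :-1]], axis=1)
    frames = frames - 0.97 * prev

    # stored 400-point window (placeholder: Hann; the real one is in weight.bin)
    frames = frames * np.hanning(400).astype(np.float32)

    # zero-pad each frame to 512 samples; the graph computes the DFT as two
    # matmuls against stored cos/sin matrices, equivalent to a real FFT here,
    # then keeps the first 257 bins and sums the squared real/imaginary parts
    frames = np.pad(frames, ((0, 0), (0, 112)))
    spectrum = np.abs(np.fft.rfft(frames, axis=1)) ** 2       # [998, 257]

    # 80-band mel projection (placeholder weights; the graph also adds a bias),
    # floor at 2**-23, then log
    mel_fb = np.random.rand(80, 257).astype(np.float32)
    feats = np.log(np.maximum(spectrum @ mel_fb.T, 2.0 ** -23))

    # mean-normalize over time and add the batch dimension -> [1, 998, 80]
    feats = feats - feats.mean(axis=0, keepdims=True)
    return feats[None].astype(np.float16)
```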
speaker_embedder/pyannote-v3/W6A16/SpeakerEmbedderPreprocessor.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f2c284bd22f1f7ab76901c1c6e57f82d4ebbf057fa0b924aad057f124f77a89
+size 2181696
speaker_embedder/pyannote-v3/W8A16/LICENSE_NOTICE.txt
ADDED
@@ -0,0 +1,7 @@
+Argmax proprietary and confidential. Under NDA.
+
+Copyright 2024 Argmax, Inc. All rights reserved.
+
+Unauthorized access, copying, use, distribution, and or commercialization of this file, via any medium or means is strictly prohibited.
+
+Please contact Argmax for licensing information at [email protected].
speaker_embedder/pyannote-v3/W8A16/README.txt
ADDED
@@ -0,0 +1,6 @@
+# License
+
+Original model weights: https://github.com/wenet-e2e/wespeaker/blob/master/docs/pretrained.md#model-license
+Argmax-optimized model asset (Assets with `.mlmodelc` extension): https://huggingface.co/argmaxinc/speakerkit-pro/blob/main/LICENSE_NOTICE.txt
+
+Please contact [email protected] for licensing SpeakerKit Pro assets
speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedder.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a06be051bd0fe5990b1778044af9d0d5ab0af40f9867cb190b6384ff6417619
+size 243
speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedder.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c354879615fe262bc5f8c92b69df8c58111d06331c5d20ddb2e0efe99ea4441c
+size 370
speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedder.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,87 @@
+[
+  {
+    "metadataOutputVersion" : "3.0",
+    "storagePrecision" : "Mixed (Float16, Palettized (8 bits))",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 3 × 256)",
+        "shortDescription" : "",
+        "shape" : "[1, 3, 256]",
+        "name" : "speaker_embeddings",
+        "type" : "MultiArray"
+      }
+    ],
+    "modelParameters" : [
+
+    ],
+    "specificationVersion" : 7,
+    "mlProgramOperationTypeHistogram" : {
+      "Concat" : 3,
+      "Ios16.mul" : 12,
+      "SliceByIndex" : 3,
+      "Ios16.constexprLutToDense" : 35,
+      "Transpose" : 1,
+      "Ios16.sub" : 6,
+      "Ios16.sqrt" : 3,
+      "Stack" : 1,
+      "UpsampleNearestNeighbor" : 1,
+      "Ios16.conv" : 36,
+      "Ios16.add" : 22,
+      "Squeeze" : 1,
+      "Ios16.relu" : 33,
+      "Ios16.realDiv" : 9,
+      "Ios16.reduceSum" : 12,
+      "ExpandDims" : 8,
+      "Ios16.linear" : 1,
+      "Ios16.reshape" : 1
+    },
+    "computePrecision" : "Mixed (Float16, Float32, Int32)",
+    "isUpdatable" : "0",
+    "stateSchema" : [
+
+    ],
+    "availability" : {
+      "macOS" : "13.0",
+      "tvOS" : "16.0",
+      "visionOS" : "1.0",
+      "watchOS" : "9.0",
+      "iOS" : "16.0",
+      "macCatalyst" : "16.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.source" : "torch==2.5.1",
+      "com.github.apple.coremltools.version" : "8.1"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 998 × 80)",
+        "shortDescription" : "",
+        "shape" : "[1, 998, 80]",
+        "name" : "preprocessor_output_1",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 3 × 589)",
+        "shortDescription" : "",
+        "shape" : "[1, 3, 589]",
+        "name" : "speaker_masks",
+        "type" : "MultiArray"
+      }
+    ],
+    "generatedClassName" : "SpeakerEmbedding_8_bit",
+    "method" : "predict"
+  }
+]
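The I/O schema above translates directly into a prediction call. A minimal sketch, assuming coremltools >= 7 on macOS (whose CompiledMLModel class can load pre-compiled .mlmodelc bundles); the zero/one inputs are shape placeholders only, so the returned embeddings are not meaningful:

```python
import numpy as np
import coremltools as ct

# Load the compiled bundle directly (coremltools >= 7, macOS only).
model = ct.models.CompiledMLModel(
    "speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedder.mlmodelc"
)

# Names and shapes follow metadata.json: log-mel features plus per-speaker
# activity masks in, one 256-dim embedding per speaker slot out.
features = np.zeros((1, 998, 80), dtype=np.float32)   # preprocessor_output_1
masks = np.ones((1, 3, 589), dtype=np.float32)        # speaker_masks

out = model.predict({"preprocessor_output_1": features, "speaker_masks": masks})
print(out["speaker_embeddings"].shape)                 # (1, 3, 256)
```

Presumably the masks come from the speaker segmenter in the full SpeakerKit pipeline; the all-ones array here is only for exercising the shapes.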
speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedder.mlmodelc/model.mil
ADDED
@@ -0,0 +1,473 @@
1 |
+
program(1.0)
|
2 |
+
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3401.3.1"}, {"coremlc-version", "3401.4.1"}})]
|
3 |
+
{
|
4 |
+
func main<ios16>(tensor<fp16, [1, 998, 80]> preprocessor_output_1, tensor<fp16, [1, 3, 589]> speaker_masks) {
|
5 |
+
tensor<int32, []> var_12 = const()[name = tensor<string, []>("op_12"), val = tensor<int32, []>(1)];
|
6 |
+
tensor<int32, [3]> var_22 = const()[name = tensor<string, []>("op_22"), val = tensor<int32, [3]>([0, 2, 1])];
|
7 |
+
tensor<int32, [1]> input_1_axes_0 = const()[name = tensor<string, []>("input_1_axes_0"), val = tensor<int32, [1]>([1])];
|
8 |
+
tensor<fp16, [1, 80, 998]> fbank_cast_fp16 = transpose(perm = var_22, x = preprocessor_output_1)[name = tensor<string, []>("transpose_0")];
|
9 |
+
tensor<fp16, [1, 1, 80, 998]> input_1_cast_fp16 = expand_dims(axes = input_1_axes_0, x = fbank_cast_fp16)[name = tensor<string, []>("input_1_cast_fp16")];
|
10 |
+
tensor<string, []> input_3_pad_type_0 = const()[name = tensor<string, []>("input_3_pad_type_0"), val = tensor<string, []>("custom")];
|
11 |
+
tensor<int32, [4]> input_3_pad_0 = const()[name = tensor<string, []>("input_3_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
12 |
+
tensor<int32, [2]> input_3_strides_0 = const()[name = tensor<string, []>("input_3_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
13 |
+
tensor<int32, [2]> input_3_dilations_0 = const()[name = tensor<string, []>("input_3_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
14 |
+
tensor<int32, []> input_3_groups_0 = const()[name = tensor<string, []>("input_3_groups_0"), val = tensor<int32, []>(1)];
|
15 |
+
tensor<fp16, [32, 1, 3, 3]> const_5_to_fp16 = const()[name = tensor<string, []>("const_5_to_fp16"), val = tensor<fp16, [32, 1, 3, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
|
16 |
+
tensor<fp16, [32]> const_6_to_fp16 = const()[name = tensor<string, []>("const_6_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(704)))];
|
17 |
+
tensor<fp16, [1, 32, 80, 998]> input_5_cast_fp16 = conv(bias = const_6_to_fp16, dilations = input_3_dilations_0, groups = input_3_groups_0, pad = input_3_pad_0, pad_type = input_3_pad_type_0, strides = input_3_strides_0, weight = const_5_to_fp16, x = input_1_cast_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
|
18 |
+
tensor<fp16, [1, 32, 80, 998]> input_7_cast_fp16 = relu(x = input_5_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
|
19 |
+
tensor<string, []> input_9_pad_type_0 = const()[name = tensor<string, []>("input_9_pad_type_0"), val = tensor<string, []>("custom")];
|
20 |
+
tensor<int32, [4]> input_9_pad_0 = const()[name = tensor<string, []>("input_9_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
21 |
+
tensor<int32, [2]> input_9_strides_0 = const()[name = tensor<string, []>("input_9_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
22 |
+
tensor<int32, [2]> input_9_dilations_0 = const()[name = tensor<string, []>("input_9_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
23 |
+
tensor<int32, []> input_9_groups_0 = const()[name = tensor<string, []>("input_9_groups_0"), val = tensor<int32, []>(1)];
|
24 |
+
tensor<fp16, [32, 32, 3, 3]> const_7_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [9216]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(832))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10112))), name = tensor<string, []>("const_7_to_fp16_palettized"), shape = tensor<uint32, [4]>([32, 32, 3, 3])];
|
25 |
+
tensor<fp16, [32]> const_8_to_fp16 = const()[name = tensor<string, []>("const_8_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10688)))];
|
26 |
+
tensor<fp16, [1, 32, 80, 998]> input_11_cast_fp16 = conv(bias = const_8_to_fp16, dilations = input_9_dilations_0, groups = input_9_groups_0, pad = input_9_pad_0, pad_type = input_9_pad_type_0, strides = input_9_strides_0, weight = const_7_to_fp16_palettized, x = input_7_cast_fp16)[name = tensor<string, []>("input_11_cast_fp16")];
|
27 |
+
tensor<fp16, [1, 32, 80, 998]> input_13_cast_fp16 = relu(x = input_11_cast_fp16)[name = tensor<string, []>("input_13_cast_fp16")];
|
28 |
+
tensor<string, []> input_15_pad_type_0 = const()[name = tensor<string, []>("input_15_pad_type_0"), val = tensor<string, []>("custom")];
|
29 |
+
tensor<int32, [4]> input_15_pad_0 = const()[name = tensor<string, []>("input_15_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
30 |
+
tensor<int32, [2]> input_15_strides_0 = const()[name = tensor<string, []>("input_15_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
31 |
+
tensor<int32, [2]> input_15_dilations_0 = const()[name = tensor<string, []>("input_15_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
32 |
+
tensor<int32, []> input_15_groups_0 = const()[name = tensor<string, []>("input_15_groups_0"), val = tensor<int32, []>(1)];
|
33 |
+
tensor<fp16, [32, 32, 3, 3]> const_9_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [9216]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10816))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(20096))), name = tensor<string, []>("const_9_to_fp16_palettized"), shape = tensor<uint32, [4]>([32, 32, 3, 3])];
|
34 |
+
tensor<fp16, [32]> const_10_to_fp16 = const()[name = tensor<string, []>("const_10_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(20672)))];
|
35 |
+
tensor<fp16, [1, 32, 80, 998]> out_1_cast_fp16 = conv(bias = const_10_to_fp16, dilations = input_15_dilations_0, groups = input_15_groups_0, pad = input_15_pad_0, pad_type = input_15_pad_type_0, strides = input_15_strides_0, weight = const_9_to_fp16_palettized, x = input_13_cast_fp16)[name = tensor<string, []>("out_1_cast_fp16")];
|
36 |
+
tensor<fp16, [1, 32, 80, 998]> input_17_cast_fp16 = add(x = out_1_cast_fp16, y = input_7_cast_fp16)[name = tensor<string, []>("input_17_cast_fp16")];
|
37 |
+
tensor<fp16, [1, 32, 80, 998]> input_19_cast_fp16 = relu(x = input_17_cast_fp16)[name = tensor<string, []>("input_19_cast_fp16")];
|
38 |
+
tensor<string, []> input_21_pad_type_0 = const()[name = tensor<string, []>("input_21_pad_type_0"), val = tensor<string, []>("custom")];
|
39 |
+
tensor<int32, [4]> input_21_pad_0 = const()[name = tensor<string, []>("input_21_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
40 |
+
tensor<int32, [2]> input_21_strides_0 = const()[name = tensor<string, []>("input_21_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
41 |
+
tensor<int32, [2]> input_21_dilations_0 = const()[name = tensor<string, []>("input_21_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
42 |
+
tensor<int32, []> input_21_groups_0 = const()[name = tensor<string, []>("input_21_groups_0"), val = tensor<int32, []>(1)];
|
43 |
+
tensor<fp16, [32, 32, 3, 3]> const_11_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [9216]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(20800))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30080))), name = tensor<string, []>("const_11_to_fp16_palettized"), shape = tensor<uint32, [4]>([32, 32, 3, 3])];
|
44 |
+
tensor<fp16, [32]> const_12_to_fp16 = const()[name = tensor<string, []>("const_12_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30656)))];
|
45 |
+
tensor<fp16, [1, 32, 80, 998]> input_23_cast_fp16 = conv(bias = const_12_to_fp16, dilations = input_21_dilations_0, groups = input_21_groups_0, pad = input_21_pad_0, pad_type = input_21_pad_type_0, strides = input_21_strides_0, weight = const_11_to_fp16_palettized, x = input_19_cast_fp16)[name = tensor<string, []>("input_23_cast_fp16")];
|
46 |
+
tensor<fp16, [1, 32, 80, 998]> input_25_cast_fp16 = relu(x = input_23_cast_fp16)[name = tensor<string, []>("input_25_cast_fp16")];
|
47 |
+
tensor<string, []> input_27_pad_type_0 = const()[name = tensor<string, []>("input_27_pad_type_0"), val = tensor<string, []>("custom")];
|
48 |
+
tensor<int32, [4]> input_27_pad_0 = const()[name = tensor<string, []>("input_27_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
49 |
+
tensor<int32, [2]> input_27_strides_0 = const()[name = tensor<string, []>("input_27_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
50 |
+
tensor<int32, [2]> input_27_dilations_0 = const()[name = tensor<string, []>("input_27_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
51 |
+
tensor<int32, []> input_27_groups_0 = const()[name = tensor<string, []>("input_27_groups_0"), val = tensor<int32, []>(1)];
|
52 |
+
tensor<fp16, [32, 32, 3, 3]> const_13_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [9216]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(30784))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(40064))), name = tensor<string, []>("const_13_to_fp16_palettized"), shape = tensor<uint32, [4]>([32, 32, 3, 3])];
|
53 |
+
tensor<fp16, [32]> const_14_to_fp16 = const()[name = tensor<string, []>("const_14_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(40640)))];
|
54 |
+
tensor<fp16, [1, 32, 80, 998]> out_3_cast_fp16 = conv(bias = const_14_to_fp16, dilations = input_27_dilations_0, groups = input_27_groups_0, pad = input_27_pad_0, pad_type = input_27_pad_type_0, strides = input_27_strides_0, weight = const_13_to_fp16_palettized, x = input_25_cast_fp16)[name = tensor<string, []>("out_3_cast_fp16")];
|
55 |
+
tensor<fp16, [1, 32, 80, 998]> input_29_cast_fp16 = add(x = out_3_cast_fp16, y = input_19_cast_fp16)[name = tensor<string, []>("input_29_cast_fp16")];
|
56 |
+
tensor<fp16, [1, 32, 80, 998]> input_31_cast_fp16 = relu(x = input_29_cast_fp16)[name = tensor<string, []>("input_31_cast_fp16")];
|
57 |
+
tensor<string, []> input_33_pad_type_0 = const()[name = tensor<string, []>("input_33_pad_type_0"), val = tensor<string, []>("custom")];
|
58 |
+
tensor<int32, [4]> input_33_pad_0 = const()[name = tensor<string, []>("input_33_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
59 |
+
tensor<int32, [2]> input_33_strides_0 = const()[name = tensor<string, []>("input_33_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
60 |
+
tensor<int32, [2]> input_33_dilations_0 = const()[name = tensor<string, []>("input_33_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
61 |
+
tensor<int32, []> input_33_groups_0 = const()[name = tensor<string, []>("input_33_groups_0"), val = tensor<int32, []>(1)];
|
62 |
+
tensor<fp16, [32, 32, 3, 3]> const_15_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [9216]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(40768))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(50048))), name = tensor<string, []>("const_15_to_fp16_palettized"), shape = tensor<uint32, [4]>([32, 32, 3, 3])];
|
63 |
+
tensor<fp16, [32]> const_16_to_fp16 = const()[name = tensor<string, []>("const_16_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(50624)))];
|
64 |
+
tensor<fp16, [1, 32, 80, 998]> input_35_cast_fp16 = conv(bias = const_16_to_fp16, dilations = input_33_dilations_0, groups = input_33_groups_0, pad = input_33_pad_0, pad_type = input_33_pad_type_0, strides = input_33_strides_0, weight = const_15_to_fp16_palettized, x = input_31_cast_fp16)[name = tensor<string, []>("input_35_cast_fp16")];
|
65 |
+
tensor<fp16, [1, 32, 80, 998]> input_37_cast_fp16 = relu(x = input_35_cast_fp16)[name = tensor<string, []>("input_37_cast_fp16")];
|
66 |
+
tensor<string, []> input_39_pad_type_0 = const()[name = tensor<string, []>("input_39_pad_type_0"), val = tensor<string, []>("custom")];
|
67 |
+
tensor<int32, [4]> input_39_pad_0 = const()[name = tensor<string, []>("input_39_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
68 |
+
tensor<int32, [2]> input_39_strides_0 = const()[name = tensor<string, []>("input_39_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
69 |
+
tensor<int32, [2]> input_39_dilations_0 = const()[name = tensor<string, []>("input_39_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
70 |
+
tensor<int32, []> input_39_groups_0 = const()[name = tensor<string, []>("input_39_groups_0"), val = tensor<int32, []>(1)];
|
71 |
+
tensor<fp16, [32, 32, 3, 3]> const_17_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [9216]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(50752))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(60032))), name = tensor<string, []>("const_17_to_fp16_palettized"), shape = tensor<uint32, [4]>([32, 32, 3, 3])];
|
72 |
+
tensor<fp16, [32]> const_18_to_fp16 = const()[name = tensor<string, []>("const_18_to_fp16"), val = tensor<fp16, [32]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(60608)))];
|
73 |
+
tensor<fp16, [1, 32, 80, 998]> out_5_cast_fp16 = conv(bias = const_18_to_fp16, dilations = input_39_dilations_0, groups = input_39_groups_0, pad = input_39_pad_0, pad_type = input_39_pad_type_0, strides = input_39_strides_0, weight = const_17_to_fp16_palettized, x = input_37_cast_fp16)[name = tensor<string, []>("out_5_cast_fp16")];
|
74 |
+
tensor<fp16, [1, 32, 80, 998]> input_41_cast_fp16 = add(x = out_5_cast_fp16, y = input_31_cast_fp16)[name = tensor<string, []>("input_41_cast_fp16")];
|
75 |
+
tensor<fp16, [1, 32, 80, 998]> input_43_cast_fp16 = relu(x = input_41_cast_fp16)[name = tensor<string, []>("input_43_cast_fp16")];
|
76 |
+
tensor<string, []> input_45_pad_type_0 = const()[name = tensor<string, []>("input_45_pad_type_0"), val = tensor<string, []>("custom")];
|
77 |
+
tensor<int32, [4]> input_45_pad_0 = const()[name = tensor<string, []>("input_45_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
78 |
+
tensor<int32, [2]> input_45_strides_0 = const()[name = tensor<string, []>("input_45_strides_0"), val = tensor<int32, [2]>([2, 2])];
|
79 |
+
tensor<int32, [2]> input_45_dilations_0 = const()[name = tensor<string, []>("input_45_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
80 |
+
tensor<int32, []> input_45_groups_0 = const()[name = tensor<string, []>("input_45_groups_0"), val = tensor<int32, []>(1)];
|
81 |
+
tensor<fp16, [64, 32, 3, 3]> const_19_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [18432]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(60736))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(79232))), name = tensor<string, []>("const_19_to_fp16_palettized"), shape = tensor<uint32, [4]>([64, 32, 3, 3])];
|
82 |
+
tensor<fp16, [64]> const_20_to_fp16 = const()[name = tensor<string, []>("const_20_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(79808)))];
|
83 |
+
tensor<fp16, [1, 64, 40, 499]> input_47_cast_fp16 = conv(bias = const_20_to_fp16, dilations = input_45_dilations_0, groups = input_45_groups_0, pad = input_45_pad_0, pad_type = input_45_pad_type_0, strides = input_45_strides_0, weight = const_19_to_fp16_palettized, x = input_43_cast_fp16)[name = tensor<string, []>("input_47_cast_fp16")];
|
84 |
+
tensor<fp16, [1, 64, 40, 499]> input_49_cast_fp16 = relu(x = input_47_cast_fp16)[name = tensor<string, []>("input_49_cast_fp16")];
|
85 |
+
tensor<string, []> input_51_pad_type_0 = const()[name = tensor<string, []>("input_51_pad_type_0"), val = tensor<string, []>("custom")];
|
86 |
+
tensor<int32, [4]> input_51_pad_0 = const()[name = tensor<string, []>("input_51_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
87 |
+
tensor<int32, [2]> input_51_strides_0 = const()[name = tensor<string, []>("input_51_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
88 |
+
tensor<int32, [2]> input_51_dilations_0 = const()[name = tensor<string, []>("input_51_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
89 |
+
tensor<int32, []> input_51_groups_0 = const()[name = tensor<string, []>("input_51_groups_0"), val = tensor<int32, []>(1)];
|
90 |
+
tensor<fp16, [64, 64, 3, 3]> const_21_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [36864]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(80000))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(116928))), name = tensor<string, []>("const_21_to_fp16_palettized"), shape = tensor<uint32, [4]>([64, 64, 3, 3])];
|
91 |
+
tensor<fp16, [64]> const_22_to_fp16 = const()[name = tensor<string, []>("const_22_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117504)))];
|
92 |
+
tensor<fp16, [1, 64, 40, 499]> out_7_cast_fp16 = conv(bias = const_22_to_fp16, dilations = input_51_dilations_0, groups = input_51_groups_0, pad = input_51_pad_0, pad_type = input_51_pad_type_0, strides = input_51_strides_0, weight = const_21_to_fp16_palettized, x = input_49_cast_fp16)[name = tensor<string, []>("out_7_cast_fp16")];
|
93 |
+
tensor<string, []> input_53_pad_type_0 = const()[name = tensor<string, []>("input_53_pad_type_0"), val = tensor<string, []>("valid")];
|
94 |
+
tensor<int32, [2]> input_53_strides_0 = const()[name = tensor<string, []>("input_53_strides_0"), val = tensor<int32, [2]>([2, 2])];
|
95 |
+
tensor<int32, [4]> input_53_pad_0 = const()[name = tensor<string, []>("input_53_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
96 |
+
tensor<int32, [2]> input_53_dilations_0 = const()[name = tensor<string, []>("input_53_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
97 |
+
tensor<int32, []> input_53_groups_0 = const()[name = tensor<string, []>("input_53_groups_0"), val = tensor<int32, []>(1)];
|
98 |
+
tensor<fp16, [64, 32, 1, 1]> const_23_to_fp16 = const()[name = tensor<string, []>("const_23_to_fp16"), val = tensor<fp16, [64, 32, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(117696)))];
|
99 |
+
tensor<fp16, [64]> const_24_to_fp16 = const()[name = tensor<string, []>("const_24_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(121856)))];
|
100 |
+
tensor<fp16, [1, 64, 40, 499]> var_171_cast_fp16 = conv(bias = const_24_to_fp16, dilations = input_53_dilations_0, groups = input_53_groups_0, pad = input_53_pad_0, pad_type = input_53_pad_type_0, strides = input_53_strides_0, weight = const_23_to_fp16, x = input_43_cast_fp16)[name = tensor<string, []>("op_171_cast_fp16")];
|
101 |
+
tensor<fp16, [1, 64, 40, 499]> input_55_cast_fp16 = add(x = out_7_cast_fp16, y = var_171_cast_fp16)[name = tensor<string, []>("input_55_cast_fp16")];
|
102 |
+
tensor<fp16, [1, 64, 40, 499]> input_57_cast_fp16 = relu(x = input_55_cast_fp16)[name = tensor<string, []>("input_57_cast_fp16")];
|
103 |
+
tensor<string, []> input_59_pad_type_0 = const()[name = tensor<string, []>("input_59_pad_type_0"), val = tensor<string, []>("custom")];
|
104 |
+
tensor<int32, [4]> input_59_pad_0 = const()[name = tensor<string, []>("input_59_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
105 |
+
tensor<int32, [2]> input_59_strides_0 = const()[name = tensor<string, []>("input_59_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
106 |
+
tensor<int32, [2]> input_59_dilations_0 = const()[name = tensor<string, []>("input_59_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
107 |
+
tensor<int32, []> input_59_groups_0 = const()[name = tensor<string, []>("input_59_groups_0"), val = tensor<int32, []>(1)];
|
108 |
+
tensor<fp16, [64, 64, 3, 3]> const_25_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [36864]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(122048))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(158976))), name = tensor<string, []>("const_25_to_fp16_palettized"), shape = tensor<uint32, [4]>([64, 64, 3, 3])];
|
109 |
+
tensor<fp16, [64]> const_26_to_fp16 = const()[name = tensor<string, []>("const_26_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(159552)))];
|
110 |
+
tensor<fp16, [1, 64, 40, 499]> input_61_cast_fp16 = conv(bias = const_26_to_fp16, dilations = input_59_dilations_0, groups = input_59_groups_0, pad = input_59_pad_0, pad_type = input_59_pad_type_0, strides = input_59_strides_0, weight = const_25_to_fp16_palettized, x = input_57_cast_fp16)[name = tensor<string, []>("input_61_cast_fp16")];
|
111 |
+
tensor<fp16, [1, 64, 40, 499]> input_63_cast_fp16 = relu(x = input_61_cast_fp16)[name = tensor<string, []>("input_63_cast_fp16")];
|
112 |
+
tensor<string, []> input_65_pad_type_0 = const()[name = tensor<string, []>("input_65_pad_type_0"), val = tensor<string, []>("custom")];
|
113 |
+
tensor<int32, [4]> input_65_pad_0 = const()[name = tensor<string, []>("input_65_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
114 |
+
tensor<int32, [2]> input_65_strides_0 = const()[name = tensor<string, []>("input_65_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
115 |
+
tensor<int32, [2]> input_65_dilations_0 = const()[name = tensor<string, []>("input_65_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
116 |
+
tensor<int32, []> input_65_groups_0 = const()[name = tensor<string, []>("input_65_groups_0"), val = tensor<int32, []>(1)];
|
117 |
+
tensor<fp16, [64, 64, 3, 3]> const_27_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [36864]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(159744))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(196672))), name = tensor<string, []>("const_27_to_fp16_palettized"), shape = tensor<uint32, [4]>([64, 64, 3, 3])];
|
118 |
+
tensor<fp16, [64]> const_28_to_fp16 = const()[name = tensor<string, []>("const_28_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(197248)))];
|
119 |
+
tensor<fp16, [1, 64, 40, 499]> out_9_cast_fp16 = conv(bias = const_28_to_fp16, dilations = input_65_dilations_0, groups = input_65_groups_0, pad = input_65_pad_0, pad_type = input_65_pad_type_0, strides = input_65_strides_0, weight = const_27_to_fp16_palettized, x = input_63_cast_fp16)[name = tensor<string, []>("out_9_cast_fp16")];
|
120 |
+
tensor<fp16, [1, 64, 40, 499]> input_67_cast_fp16 = add(x = out_9_cast_fp16, y = input_57_cast_fp16)[name = tensor<string, []>("input_67_cast_fp16")];
|
121 |
+
tensor<fp16, [1, 64, 40, 499]> input_69_cast_fp16 = relu(x = input_67_cast_fp16)[name = tensor<string, []>("input_69_cast_fp16")];
|
122 |
+
tensor<string, []> input_71_pad_type_0 = const()[name = tensor<string, []>("input_71_pad_type_0"), val = tensor<string, []>("custom")];
|
123 |
+
tensor<int32, [4]> input_71_pad_0 = const()[name = tensor<string, []>("input_71_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
124 |
+
tensor<int32, [2]> input_71_strides_0 = const()[name = tensor<string, []>("input_71_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
125 |
+
tensor<int32, [2]> input_71_dilations_0 = const()[name = tensor<string, []>("input_71_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
126 |
+
tensor<int32, []> input_71_groups_0 = const()[name = tensor<string, []>("input_71_groups_0"), val = tensor<int32, []>(1)];
|
127 |
+
tensor<fp16, [64, 64, 3, 3]> const_29_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [36864]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(197440))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(234368))), name = tensor<string, []>("const_29_to_fp16_palettized"), shape = tensor<uint32, [4]>([64, 64, 3, 3])];
|
128 |
+
tensor<fp16, [64]> const_30_to_fp16 = const()[name = tensor<string, []>("const_30_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(234944)))];
|
129 |
+
tensor<fp16, [1, 64, 40, 499]> input_73_cast_fp16 = conv(bias = const_30_to_fp16, dilations = input_71_dilations_0, groups = input_71_groups_0, pad = input_71_pad_0, pad_type = input_71_pad_type_0, strides = input_71_strides_0, weight = const_29_to_fp16_palettized, x = input_69_cast_fp16)[name = tensor<string, []>("input_73_cast_fp16")];
|
130 |
+
tensor<fp16, [1, 64, 40, 499]> input_75_cast_fp16 = relu(x = input_73_cast_fp16)[name = tensor<string, []>("input_75_cast_fp16")];
|
131 |
+
tensor<string, []> input_77_pad_type_0 = const()[name = tensor<string, []>("input_77_pad_type_0"), val = tensor<string, []>("custom")];
|
132 |
+
tensor<int32, [4]> input_77_pad_0 = const()[name = tensor<string, []>("input_77_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
133 |
+
tensor<int32, [2]> input_77_strides_0 = const()[name = tensor<string, []>("input_77_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
134 |
+
tensor<int32, [2]> input_77_dilations_0 = const()[name = tensor<string, []>("input_77_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
135 |
+
tensor<int32, []> input_77_groups_0 = const()[name = tensor<string, []>("input_77_groups_0"), val = tensor<int32, []>(1)];
|
136 |
+
tensor<fp16, [64, 64, 3, 3]> const_31_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [36864]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(235136))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(272064))), name = tensor<string, []>("const_31_to_fp16_palettized"), shape = tensor<uint32, [4]>([64, 64, 3, 3])];
|
137 |
+
tensor<fp16, [64]> const_32_to_fp16 = const()[name = tensor<string, []>("const_32_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(272640)))];
|
138 |
+
tensor<fp16, [1, 64, 40, 499]> out_11_cast_fp16 = conv(bias = const_32_to_fp16, dilations = input_77_dilations_0, groups = input_77_groups_0, pad = input_77_pad_0, pad_type = input_77_pad_type_0, strides = input_77_strides_0, weight = const_31_to_fp16_palettized, x = input_75_cast_fp16)[name = tensor<string, []>("out_11_cast_fp16")];
|
139 |
+
tensor<fp16, [1, 64, 40, 499]> input_79_cast_fp16 = add(x = out_11_cast_fp16, y = input_69_cast_fp16)[name = tensor<string, []>("input_79_cast_fp16")];
|
140 |
+
tensor<fp16, [1, 64, 40, 499]> input_81_cast_fp16 = relu(x = input_79_cast_fp16)[name = tensor<string, []>("input_81_cast_fp16")];
|
141 |
+
tensor<string, []> input_83_pad_type_0 = const()[name = tensor<string, []>("input_83_pad_type_0"), val = tensor<string, []>("custom")];
|
142 |
+
tensor<int32, [4]> input_83_pad_0 = const()[name = tensor<string, []>("input_83_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
143 |
+
tensor<int32, [2]> input_83_strides_0 = const()[name = tensor<string, []>("input_83_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
144 |
+
tensor<int32, [2]> input_83_dilations_0 = const()[name = tensor<string, []>("input_83_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
145 |
+
tensor<int32, []> input_83_groups_0 = const()[name = tensor<string, []>("input_83_groups_0"), val = tensor<int32, []>(1)];
|
146 |
+
tensor<fp16, [64, 64, 3, 3]> const_33_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [36864]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(272832))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(309760))), name = tensor<string, []>("const_33_to_fp16_palettized"), shape = tensor<uint32, [4]>([64, 64, 3, 3])];
|
147 |
+
tensor<fp16, [64]> const_34_to_fp16 = const()[name = tensor<string, []>("const_34_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(310336)))];
|
148 |
+
tensor<fp16, [1, 64, 40, 499]> input_85_cast_fp16 = conv(bias = const_34_to_fp16, dilations = input_83_dilations_0, groups = input_83_groups_0, pad = input_83_pad_0, pad_type = input_83_pad_type_0, strides = input_83_strides_0, weight = const_33_to_fp16_palettized, x = input_81_cast_fp16)[name = tensor<string, []>("input_85_cast_fp16")];
|
149 |
+
tensor<fp16, [1, 64, 40, 499]> input_87_cast_fp16 = relu(x = input_85_cast_fp16)[name = tensor<string, []>("input_87_cast_fp16")];
|
150 |
+
tensor<string, []> input_89_pad_type_0 = const()[name = tensor<string, []>("input_89_pad_type_0"), val = tensor<string, []>("custom")];
|
151 |
+
tensor<int32, [4]> input_89_pad_0 = const()[name = tensor<string, []>("input_89_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
152 |
+
tensor<int32, [2]> input_89_strides_0 = const()[name = tensor<string, []>("input_89_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
153 |
+
tensor<int32, [2]> input_89_dilations_0 = const()[name = tensor<string, []>("input_89_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
154 |
+
tensor<int32, []> input_89_groups_0 = const()[name = tensor<string, []>("input_89_groups_0"), val = tensor<int32, []>(1)];
|
155 |
+
tensor<fp16, [64, 64, 3, 3]> const_35_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [36864]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(310528))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(347456))), name = tensor<string, []>("const_35_to_fp16_palettized"), shape = tensor<uint32, [4]>([64, 64, 3, 3])];
|
156 |
+
tensor<fp16, [64]> const_36_to_fp16 = const()[name = tensor<string, []>("const_36_to_fp16"), val = tensor<fp16, [64]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(348032)))];
|
157 |
+
tensor<fp16, [1, 64, 40, 499]> out_13_cast_fp16 = conv(bias = const_36_to_fp16, dilations = input_89_dilations_0, groups = input_89_groups_0, pad = input_89_pad_0, pad_type = input_89_pad_type_0, strides = input_89_strides_0, weight = const_35_to_fp16_palettized, x = input_87_cast_fp16)[name = tensor<string, []>("out_13_cast_fp16")];
|
158 |
+
tensor<fp16, [1, 64, 40, 499]> input_91_cast_fp16 = add(x = out_13_cast_fp16, y = input_81_cast_fp16)[name = tensor<string, []>("input_91_cast_fp16")];
|
159 |
+
tensor<fp16, [1, 64, 40, 499]> input_93_cast_fp16 = relu(x = input_91_cast_fp16)[name = tensor<string, []>("input_93_cast_fp16")];
|
160 |
+
tensor<string, []> input_95_pad_type_0 = const()[name = tensor<string, []>("input_95_pad_type_0"), val = tensor<string, []>("custom")];
|
161 |
+
tensor<int32, [4]> input_95_pad_0 = const()[name = tensor<string, []>("input_95_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
162 |
+
tensor<int32, [2]> input_95_strides_0 = const()[name = tensor<string, []>("input_95_strides_0"), val = tensor<int32, [2]>([2, 2])];
|
163 |
+
tensor<int32, [2]> input_95_dilations_0 = const()[name = tensor<string, []>("input_95_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
164 |
+
tensor<int32, []> input_95_groups_0 = const()[name = tensor<string, []>("input_95_groups_0"), val = tensor<int32, []>(1)];
|
165 |
+
tensor<fp16, [128, 64, 3, 3]> const_37_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [73728]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(348224))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(422016))), name = tensor<string, []>("const_37_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 64, 3, 3])];
|
166 |
+
tensor<fp16, [128]> const_38_to_fp16 = const()[name = tensor<string, []>("const_38_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(422592)))];
|
167 |
+
tensor<fp16, [1, 128, 20, 250]> input_97_cast_fp16 = conv(bias = const_38_to_fp16, dilations = input_95_dilations_0, groups = input_95_groups_0, pad = input_95_pad_0, pad_type = input_95_pad_type_0, strides = input_95_strides_0, weight = const_37_to_fp16_palettized, x = input_93_cast_fp16)[name = tensor<string, []>("input_97_cast_fp16")];
|
168 |
+
tensor<fp16, [1, 128, 20, 250]> input_99_cast_fp16 = relu(x = input_97_cast_fp16)[name = tensor<string, []>("input_99_cast_fp16")];
|
169 |
+
tensor<string, []> input_101_pad_type_0 = const()[name = tensor<string, []>("input_101_pad_type_0"), val = tensor<string, []>("custom")];
|
170 |
+
tensor<int32, [4]> input_101_pad_0 = const()[name = tensor<string, []>("input_101_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
171 |
+
tensor<int32, [2]> input_101_strides_0 = const()[name = tensor<string, []>("input_101_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
172 |
+
tensor<int32, [2]> input_101_dilations_0 = const()[name = tensor<string, []>("input_101_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
173 |
+
tensor<int32, []> input_101_groups_0 = const()[name = tensor<string, []>("input_101_groups_0"), val = tensor<int32, []>(1)];
|
174 |
+
tensor<fp16, [128, 128, 3, 3]> const_39_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [147456]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(422912))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(570432))), name = tensor<string, []>("const_39_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
175 |
+
tensor<fp16, [128]> const_40_to_fp16 = const()[name = tensor<string, []>("const_40_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(571008)))];
|
176 |
+
tensor<fp16, [1, 128, 20, 250]> out_15_cast_fp16 = conv(bias = const_40_to_fp16, dilations = input_101_dilations_0, groups = input_101_groups_0, pad = input_101_pad_0, pad_type = input_101_pad_type_0, strides = input_101_strides_0, weight = const_39_to_fp16_palettized, x = input_99_cast_fp16)[name = tensor<string, []>("out_15_cast_fp16")];
|
177 |
+
tensor<string, []> input_103_pad_type_0 = const()[name = tensor<string, []>("input_103_pad_type_0"), val = tensor<string, []>("valid")];
|
178 |
+
tensor<int32, [2]> input_103_strides_0 = const()[name = tensor<string, []>("input_103_strides_0"), val = tensor<int32, [2]>([2, 2])];
|
179 |
+
tensor<int32, [4]> input_103_pad_0 = const()[name = tensor<string, []>("input_103_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
180 |
+
tensor<int32, [2]> input_103_dilations_0 = const()[name = tensor<string, []>("input_103_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
181 |
+
tensor<int32, []> input_103_groups_0 = const()[name = tensor<string, []>("input_103_groups_0"), val = tensor<int32, []>(1)];
|
182 |
+
tensor<fp16, [128, 64, 1, 1]> const_41_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [8192]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(571328))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(579584))), name = tensor<string, []>("const_41_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 64, 1, 1])];
|
183 |
+
tensor<fp16, [128]> const_42_to_fp16 = const()[name = tensor<string, []>("const_42_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(580160)))];
|
184 |
+
tensor<fp16, [1, 128, 20, 250]> var_307_cast_fp16 = conv(bias = const_42_to_fp16, dilations = input_103_dilations_0, groups = input_103_groups_0, pad = input_103_pad_0, pad_type = input_103_pad_type_0, strides = input_103_strides_0, weight = const_41_to_fp16_palettized, x = input_93_cast_fp16)[name = tensor<string, []>("op_307_cast_fp16")];
|
185 |
+
tensor<fp16, [1, 128, 20, 250]> input_105_cast_fp16 = add(x = out_15_cast_fp16, y = var_307_cast_fp16)[name = tensor<string, []>("input_105_cast_fp16")];
|
186 |
+
tensor<fp16, [1, 128, 20, 250]> input_107_cast_fp16 = relu(x = input_105_cast_fp16)[name = tensor<string, []>("input_107_cast_fp16")];
|
187 |
+
tensor<string, []> input_109_pad_type_0 = const()[name = tensor<string, []>("input_109_pad_type_0"), val = tensor<string, []>("custom")];
|
188 |
+
tensor<int32, [4]> input_109_pad_0 = const()[name = tensor<string, []>("input_109_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
189 |
+
tensor<int32, [2]> input_109_strides_0 = const()[name = tensor<string, []>("input_109_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
190 |
+
tensor<int32, [2]> input_109_dilations_0 = const()[name = tensor<string, []>("input_109_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
191 |
+
tensor<int32, []> input_109_groups_0 = const()[name = tensor<string, []>("input_109_groups_0"), val = tensor<int32, []>(1)];
|
192 |
+
tensor<fp16, [128, 128, 3, 3]> const_43_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [147456]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(580480))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(728000))), name = tensor<string, []>("const_43_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
193 |
+
tensor<fp16, [128]> const_44_to_fp16 = const()[name = tensor<string, []>("const_44_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(728576)))];
|
194 |
+
tensor<fp16, [1, 128, 20, 250]> input_111_cast_fp16 = conv(bias = const_44_to_fp16, dilations = input_109_dilations_0, groups = input_109_groups_0, pad = input_109_pad_0, pad_type = input_109_pad_type_0, strides = input_109_strides_0, weight = const_43_to_fp16_palettized, x = input_107_cast_fp16)[name = tensor<string, []>("input_111_cast_fp16")];
|
195 |
+
tensor<fp16, [1, 128, 20, 250]> input_113_cast_fp16 = relu(x = input_111_cast_fp16)[name = tensor<string, []>("input_113_cast_fp16")];
|
196 |
+
tensor<string, []> input_115_pad_type_0 = const()[name = tensor<string, []>("input_115_pad_type_0"), val = tensor<string, []>("custom")];
|
197 |
+
tensor<int32, [4]> input_115_pad_0 = const()[name = tensor<string, []>("input_115_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
198 |
+
tensor<int32, [2]> input_115_strides_0 = const()[name = tensor<string, []>("input_115_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
199 |
+
tensor<int32, [2]> input_115_dilations_0 = const()[name = tensor<string, []>("input_115_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
200 |
+
tensor<int32, []> input_115_groups_0 = const()[name = tensor<string, []>("input_115_groups_0"), val = tensor<int32, []>(1)];
|
201 |
+
tensor<fp16, [128, 128, 3, 3]> const_45_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [147456]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(728896))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(876416))), name = tensor<string, []>("const_45_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
202 |
+
tensor<fp16, [128]> const_46_to_fp16 = const()[name = tensor<string, []>("const_46_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(876992)))];
|
203 |
+
tensor<fp16, [1, 128, 20, 250]> out_17_cast_fp16 = conv(bias = const_46_to_fp16, dilations = input_115_dilations_0, groups = input_115_groups_0, pad = input_115_pad_0, pad_type = input_115_pad_type_0, strides = input_115_strides_0, weight = const_45_to_fp16_palettized, x = input_113_cast_fp16)[name = tensor<string, []>("out_17_cast_fp16")];
|
204 |
+
tensor<fp16, [1, 128, 20, 250]> input_117_cast_fp16 = add(x = out_17_cast_fp16, y = input_107_cast_fp16)[name = tensor<string, []>("input_117_cast_fp16")];
|
205 |
+
tensor<fp16, [1, 128, 20, 250]> input_119_cast_fp16 = relu(x = input_117_cast_fp16)[name = tensor<string, []>("input_119_cast_fp16")];
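The ops above repeat a fixed pattern: 3x3 conv, relu, 3x3 conv, add the block input back in, relu. A hedged PyTorch-style sketch of one such identity-shortcut block follows (layer names are illustrative, and any batch normalisation appears to have been folded into the convolution weights and biases during conversion):

import torch
import torch.nn as nn

class BasicBlock(nn.Module):
    """Sketch of the conv-relu-conv + skip + relu pattern in the MIL ops above."""
    def __init__(self, channels: int = 128):
        super().__init__()
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        out = torch.relu(self.conv1(x))   # input_111 / input_113
        out = self.conv2(out)             # out_17
        return torch.relu(out + x)        # input_117 / input_119

x = torch.randn(1, 128, 20, 250)
print(BasicBlock()(x).shape)              # torch.Size([1, 128, 20, 250])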
|
206 |
+
tensor<string, []> input_121_pad_type_0 = const()[name = tensor<string, []>("input_121_pad_type_0"), val = tensor<string, []>("custom")];
|
207 |
+
tensor<int32, [4]> input_121_pad_0 = const()[name = tensor<string, []>("input_121_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
208 |
+
tensor<int32, [2]> input_121_strides_0 = const()[name = tensor<string, []>("input_121_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
209 |
+
tensor<int32, [2]> input_121_dilations_0 = const()[name = tensor<string, []>("input_121_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
210 |
+
tensor<int32, []> input_121_groups_0 = const()[name = tensor<string, []>("input_121_groups_0"), val = tensor<int32, []>(1)];
|
211 |
+
tensor<fp16, [128, 128, 3, 3]> const_47_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [147456]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(877312))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1024832))), name = tensor<string, []>("const_47_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
212 |
+
tensor<fp16, [128]> const_48_to_fp16 = const()[name = tensor<string, []>("const_48_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1025408)))];
|
213 |
+
tensor<fp16, [1, 128, 20, 250]> input_123_cast_fp16 = conv(bias = const_48_to_fp16, dilations = input_121_dilations_0, groups = input_121_groups_0, pad = input_121_pad_0, pad_type = input_121_pad_type_0, strides = input_121_strides_0, weight = const_47_to_fp16_palettized, x = input_119_cast_fp16)[name = tensor<string, []>("input_123_cast_fp16")];
|
214 |
+
tensor<fp16, [1, 128, 20, 250]> input_125_cast_fp16 = relu(x = input_123_cast_fp16)[name = tensor<string, []>("input_125_cast_fp16")];
|
215 |
+
tensor<string, []> input_127_pad_type_0 = const()[name = tensor<string, []>("input_127_pad_type_0"), val = tensor<string, []>("custom")];
|
216 |
+
tensor<int32, [4]> input_127_pad_0 = const()[name = tensor<string, []>("input_127_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
217 |
+
tensor<int32, [2]> input_127_strides_0 = const()[name = tensor<string, []>("input_127_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
218 |
+
tensor<int32, [2]> input_127_dilations_0 = const()[name = tensor<string, []>("input_127_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
219 |
+
tensor<int32, []> input_127_groups_0 = const()[name = tensor<string, []>("input_127_groups_0"), val = tensor<int32, []>(1)];
|
220 |
+
tensor<fp16, [128, 128, 3, 3]> const_49_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [147456]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1025728))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1173248))), name = tensor<string, []>("const_49_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
221 |
+
tensor<fp16, [128]> const_50_to_fp16 = const()[name = tensor<string, []>("const_50_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1173824)))];
|
222 |
+
tensor<fp16, [1, 128, 20, 250]> out_19_cast_fp16 = conv(bias = const_50_to_fp16, dilations = input_127_dilations_0, groups = input_127_groups_0, pad = input_127_pad_0, pad_type = input_127_pad_type_0, strides = input_127_strides_0, weight = const_49_to_fp16_palettized, x = input_125_cast_fp16)[name = tensor<string, []>("out_19_cast_fp16")];
|
223 |
+
tensor<fp16, [1, 128, 20, 250]> input_129_cast_fp16 = add(x = out_19_cast_fp16, y = input_119_cast_fp16)[name = tensor<string, []>("input_129_cast_fp16")];
|
224 |
+
tensor<fp16, [1, 128, 20, 250]> input_131_cast_fp16 = relu(x = input_129_cast_fp16)[name = tensor<string, []>("input_131_cast_fp16")];
|
225 |
+
tensor<string, []> input_133_pad_type_0 = const()[name = tensor<string, []>("input_133_pad_type_0"), val = tensor<string, []>("custom")];
|
226 |
+
tensor<int32, [4]> input_133_pad_0 = const()[name = tensor<string, []>("input_133_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
227 |
+
tensor<int32, [2]> input_133_strides_0 = const()[name = tensor<string, []>("input_133_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
228 |
+
tensor<int32, [2]> input_133_dilations_0 = const()[name = tensor<string, []>("input_133_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
229 |
+
tensor<int32, []> input_133_groups_0 = const()[name = tensor<string, []>("input_133_groups_0"), val = tensor<int32, []>(1)];
|
230 |
+
tensor<fp16, [128, 128, 3, 3]> const_51_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [147456]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1174144))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1321664))), name = tensor<string, []>("const_51_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
231 |
+
tensor<fp16, [128]> const_52_to_fp16 = const()[name = tensor<string, []>("const_52_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1322240)))];
|
232 |
+
tensor<fp16, [1, 128, 20, 250]> input_135_cast_fp16 = conv(bias = const_52_to_fp16, dilations = input_133_dilations_0, groups = input_133_groups_0, pad = input_133_pad_0, pad_type = input_133_pad_type_0, strides = input_133_strides_0, weight = const_51_to_fp16_palettized, x = input_131_cast_fp16)[name = tensor<string, []>("input_135_cast_fp16")];
|
233 |
+
tensor<fp16, [1, 128, 20, 250]> input_137_cast_fp16 = relu(x = input_135_cast_fp16)[name = tensor<string, []>("input_137_cast_fp16")];
|
234 |
+
tensor<string, []> input_139_pad_type_0 = const()[name = tensor<string, []>("input_139_pad_type_0"), val = tensor<string, []>("custom")];
|
235 |
+
tensor<int32, [4]> input_139_pad_0 = const()[name = tensor<string, []>("input_139_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
236 |
+
tensor<int32, [2]> input_139_strides_0 = const()[name = tensor<string, []>("input_139_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
237 |
+
tensor<int32, [2]> input_139_dilations_0 = const()[name = tensor<string, []>("input_139_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
238 |
+
tensor<int32, []> input_139_groups_0 = const()[name = tensor<string, []>("input_139_groups_0"), val = tensor<int32, []>(1)];
|
239 |
+
tensor<fp16, [128, 128, 3, 3]> const_53_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [147456]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1322560))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1470080))), name = tensor<string, []>("const_53_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
240 |
+
tensor<fp16, [128]> const_54_to_fp16 = const()[name = tensor<string, []>("const_54_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1470656)))];
|
241 |
+
tensor<fp16, [1, 128, 20, 250]> out_21_cast_fp16 = conv(bias = const_54_to_fp16, dilations = input_139_dilations_0, groups = input_139_groups_0, pad = input_139_pad_0, pad_type = input_139_pad_type_0, strides = input_139_strides_0, weight = const_53_to_fp16_palettized, x = input_137_cast_fp16)[name = tensor<string, []>("out_21_cast_fp16")];
|
242 |
+
tensor<fp16, [1, 128, 20, 250]> input_141_cast_fp16 = add(x = out_21_cast_fp16, y = input_131_cast_fp16)[name = tensor<string, []>("input_141_cast_fp16")];
|
243 |
+
tensor<fp16, [1, 128, 20, 250]> input_143_cast_fp16 = relu(x = input_141_cast_fp16)[name = tensor<string, []>("input_143_cast_fp16")];
|
244 |
+
tensor<string, []> input_145_pad_type_0 = const()[name = tensor<string, []>("input_145_pad_type_0"), val = tensor<string, []>("custom")];
|
245 |
+
tensor<int32, [4]> input_145_pad_0 = const()[name = tensor<string, []>("input_145_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
246 |
+
tensor<int32, [2]> input_145_strides_0 = const()[name = tensor<string, []>("input_145_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
247 |
+
tensor<int32, [2]> input_145_dilations_0 = const()[name = tensor<string, []>("input_145_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
248 |
+
tensor<int32, []> input_145_groups_0 = const()[name = tensor<string, []>("input_145_groups_0"), val = tensor<int32, []>(1)];
|
249 |
+
tensor<fp16, [128, 128, 3, 3]> const_55_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [147456]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1470976))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1618496))), name = tensor<string, []>("const_55_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
250 |
+
tensor<fp16, [128]> const_56_to_fp16 = const()[name = tensor<string, []>("const_56_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1619072)))];
|
251 |
+
tensor<fp16, [1, 128, 20, 250]> input_147_cast_fp16 = conv(bias = const_56_to_fp16, dilations = input_145_dilations_0, groups = input_145_groups_0, pad = input_145_pad_0, pad_type = input_145_pad_type_0, strides = input_145_strides_0, weight = const_55_to_fp16_palettized, x = input_143_cast_fp16)[name = tensor<string, []>("input_147_cast_fp16")];
|
252 |
+
tensor<fp16, [1, 128, 20, 250]> input_149_cast_fp16 = relu(x = input_147_cast_fp16)[name = tensor<string, []>("input_149_cast_fp16")];
|
253 |
+
tensor<string, []> input_151_pad_type_0 = const()[name = tensor<string, []>("input_151_pad_type_0"), val = tensor<string, []>("custom")];
|
254 |
+
tensor<int32, [4]> input_151_pad_0 = const()[name = tensor<string, []>("input_151_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
255 |
+
tensor<int32, [2]> input_151_strides_0 = const()[name = tensor<string, []>("input_151_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
256 |
+
tensor<int32, [2]> input_151_dilations_0 = const()[name = tensor<string, []>("input_151_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
257 |
+
tensor<int32, []> input_151_groups_0 = const()[name = tensor<string, []>("input_151_groups_0"), val = tensor<int32, []>(1)];
|
258 |
+
tensor<fp16, [128, 128, 3, 3]> const_57_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [147456]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1619392))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1766912))), name = tensor<string, []>("const_57_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
259 |
+
tensor<fp16, [128]> const_58_to_fp16 = const()[name = tensor<string, []>("const_58_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1767488)))];
|
260 |
+
tensor<fp16, [1, 128, 20, 250]> out_23_cast_fp16 = conv(bias = const_58_to_fp16, dilations = input_151_dilations_0, groups = input_151_groups_0, pad = input_151_pad_0, pad_type = input_151_pad_type_0, strides = input_151_strides_0, weight = const_57_to_fp16_palettized, x = input_149_cast_fp16)[name = tensor<string, []>("out_23_cast_fp16")];
|
261 |
+
tensor<fp16, [1, 128, 20, 250]> input_153_cast_fp16 = add(x = out_23_cast_fp16, y = input_143_cast_fp16)[name = tensor<string, []>("input_153_cast_fp16")];
|
262 |
+
tensor<fp16, [1, 128, 20, 250]> input_155_cast_fp16 = relu(x = input_153_cast_fp16)[name = tensor<string, []>("input_155_cast_fp16")];
|
263 |
+
tensor<string, []> input_157_pad_type_0 = const()[name = tensor<string, []>("input_157_pad_type_0"), val = tensor<string, []>("custom")];
|
264 |
+
tensor<int32, [4]> input_157_pad_0 = const()[name = tensor<string, []>("input_157_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
265 |
+
tensor<int32, [2]> input_157_strides_0 = const()[name = tensor<string, []>("input_157_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
266 |
+
tensor<int32, [2]> input_157_dilations_0 = const()[name = tensor<string, []>("input_157_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
267 |
+
tensor<int32, []> input_157_groups_0 = const()[name = tensor<string, []>("input_157_groups_0"), val = tensor<int32, []>(1)];
|
268 |
+
tensor<fp16, [128, 128, 3, 3]> const_59_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [147456]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1767808))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1915328))), name = tensor<string, []>("const_59_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
269 |
+
tensor<fp16, [128]> const_60_to_fp16 = const()[name = tensor<string, []>("const_60_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1915904)))];
|
270 |
+
tensor<fp16, [1, 128, 20, 250]> input_159_cast_fp16 = conv(bias = const_60_to_fp16, dilations = input_157_dilations_0, groups = input_157_groups_0, pad = input_157_pad_0, pad_type = input_157_pad_type_0, strides = input_157_strides_0, weight = const_59_to_fp16_palettized, x = input_155_cast_fp16)[name = tensor<string, []>("input_159_cast_fp16")];
|
271 |
+
tensor<fp16, [1, 128, 20, 250]> input_161_cast_fp16 = relu(x = input_159_cast_fp16)[name = tensor<string, []>("input_161_cast_fp16")];
|
272 |
+
tensor<string, []> input_163_pad_type_0 = const()[name = tensor<string, []>("input_163_pad_type_0"), val = tensor<string, []>("custom")];
|
273 |
+
tensor<int32, [4]> input_163_pad_0 = const()[name = tensor<string, []>("input_163_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
274 |
+
tensor<int32, [2]> input_163_strides_0 = const()[name = tensor<string, []>("input_163_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
275 |
+
tensor<int32, [2]> input_163_dilations_0 = const()[name = tensor<string, []>("input_163_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
276 |
+
tensor<int32, []> input_163_groups_0 = const()[name = tensor<string, []>("input_163_groups_0"), val = tensor<int32, []>(1)];
|
277 |
+
tensor<fp16, [128, 128, 3, 3]> const_61_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [147456]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1916224))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2063744))), name = tensor<string, []>("const_61_to_fp16_palettized"), shape = tensor<uint32, [4]>([128, 128, 3, 3])];
|
278 |
+
tensor<fp16, [128]> const_62_to_fp16 = const()[name = tensor<string, []>("const_62_to_fp16"), val = tensor<fp16, [128]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2064320)))];
|
279 |
+
tensor<fp16, [1, 128, 20, 250]> out_25_cast_fp16 = conv(bias = const_62_to_fp16, dilations = input_163_dilations_0, groups = input_163_groups_0, pad = input_163_pad_0, pad_type = input_163_pad_type_0, strides = input_163_strides_0, weight = const_61_to_fp16_palettized, x = input_161_cast_fp16)[name = tensor<string, []>("out_25_cast_fp16")];
|
280 |
+
tensor<fp16, [1, 128, 20, 250]> input_165_cast_fp16 = add(x = out_25_cast_fp16, y = input_155_cast_fp16)[name = tensor<string, []>("input_165_cast_fp16")];
|
281 |
+
tensor<fp16, [1, 128, 20, 250]> input_167_cast_fp16 = relu(x = input_165_cast_fp16)[name = tensor<string, []>("input_167_cast_fp16")];
|
282 |
+
tensor<string, []> input_169_pad_type_0 = const()[name = tensor<string, []>("input_169_pad_type_0"), val = tensor<string, []>("custom")];
|
283 |
+
tensor<int32, [4]> input_169_pad_0 = const()[name = tensor<string, []>("input_169_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
284 |
+
tensor<int32, [2]> input_169_strides_0 = const()[name = tensor<string, []>("input_169_strides_0"), val = tensor<int32, [2]>([2, 2])];
|
285 |
+
tensor<int32, [2]> input_169_dilations_0 = const()[name = tensor<string, []>("input_169_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
286 |
+
tensor<int32, []> input_169_groups_0 = const()[name = tensor<string, []>("input_169_groups_0"), val = tensor<int32, []>(1)];
|
287 |
+
tensor<fp16, [256, 128, 3, 3]> const_63_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [294912]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2064640))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2359616))), name = tensor<string, []>("const_63_to_fp16_palettized"), shape = tensor<uint32, [4]>([256, 128, 3, 3])];
|
288 |
+
tensor<fp16, [256]> const_64_to_fp16 = const()[name = tensor<string, []>("const_64_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2360192)))];
|
289 |
+
tensor<fp16, [1, 256, 10, 125]> input_171_cast_fp16 = conv(bias = const_64_to_fp16, dilations = input_169_dilations_0, groups = input_169_groups_0, pad = input_169_pad_0, pad_type = input_169_pad_type_0, strides = input_169_strides_0, weight = const_63_to_fp16_palettized, x = input_167_cast_fp16)[name = tensor<string, []>("input_171_cast_fp16")];
|
290 |
+
tensor<fp16, [1, 256, 10, 125]> input_173_cast_fp16 = relu(x = input_171_cast_fp16)[name = tensor<string, []>("input_173_cast_fp16")];
|
291 |
+
tensor<string, []> input_175_pad_type_0 = const()[name = tensor<string, []>("input_175_pad_type_0"), val = tensor<string, []>("custom")];
|
292 |
+
tensor<int32, [4]> input_175_pad_0 = const()[name = tensor<string, []>("input_175_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
293 |
+
tensor<int32, [2]> input_175_strides_0 = const()[name = tensor<string, []>("input_175_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
294 |
+
tensor<int32, [2]> input_175_dilations_0 = const()[name = tensor<string, []>("input_175_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
295 |
+
tensor<int32, []> input_175_groups_0 = const()[name = tensor<string, []>("input_175_groups_0"), val = tensor<int32, []>(1)];
|
296 |
+
tensor<fp16, [256, 256, 3, 3]> const_65_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [589824]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2360768))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2950656))), name = tensor<string, []>("const_65_to_fp16_palettized"), shape = tensor<uint32, [4]>([256, 256, 3, 3])];
|
297 |
+
tensor<fp16, [256]> const_66_to_fp16 = const()[name = tensor<string, []>("const_66_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2951232)))];
|
298 |
+
tensor<fp16, [1, 256, 10, 125]> out_27_cast_fp16 = conv(bias = const_66_to_fp16, dilations = input_175_dilations_0, groups = input_175_groups_0, pad = input_175_pad_0, pad_type = input_175_pad_type_0, strides = input_175_strides_0, weight = const_65_to_fp16_palettized, x = input_173_cast_fp16)[name = tensor<string, []>("out_27_cast_fp16")];
|
299 |
+
tensor<string, []> input_177_pad_type_0 = const()[name = tensor<string, []>("input_177_pad_type_0"), val = tensor<string, []>("valid")];
|
300 |
+
tensor<int32, [2]> input_177_strides_0 = const()[name = tensor<string, []>("input_177_strides_0"), val = tensor<int32, [2]>([2, 2])];
|
301 |
+
tensor<int32, [4]> input_177_pad_0 = const()[name = tensor<string, []>("input_177_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
302 |
+
tensor<int32, [2]> input_177_dilations_0 = const()[name = tensor<string, []>("input_177_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
303 |
+
tensor<int32, []> input_177_groups_0 = const()[name = tensor<string, []>("input_177_groups_0"), val = tensor<int32, []>(1)];
|
304 |
+
tensor<fp16, [256, 128, 1, 1]> const_67_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [32768]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2951808))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2984640))), name = tensor<string, []>("const_67_to_fp16_palettized"), shape = tensor<uint32, [4]>([256, 128, 1, 1])];
|
305 |
+
tensor<fp16, [256]> const_68_to_fp16 = const()[name = tensor<string, []>("const_68_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2985216)))];
|
306 |
+
tensor<fp16, [1, 256, 10, 125]> var_498_cast_fp16 = conv(bias = const_68_to_fp16, dilations = input_177_dilations_0, groups = input_177_groups_0, pad = input_177_pad_0, pad_type = input_177_pad_type_0, strides = input_177_strides_0, weight = const_67_to_fp16_palettized, x = input_167_cast_fp16)[name = tensor<string, []>("op_498_cast_fp16")];
|
307 |
+
tensor<fp16, [1, 256, 10, 125]> input_179_cast_fp16 = add(x = out_27_cast_fp16, y = var_498_cast_fp16)[name = tensor<string, []>("input_179_cast_fp16")];
|
308 |
+
tensor<fp16, [1, 256, 10, 125]> input_181_cast_fp16 = relu(x = input_179_cast_fp16)[name = tensor<string, []>("input_181_cast_fp16")];
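At the stage transition above the stride changes to [2, 2] and the channel count doubles from 128 to 256, so the skip path is projected with a strided 1x1 convolution (op_498) before the add. A sketch under the same assumptions as the block above:

import torch
import torch.nn as nn

class DownsampleBlock(nn.Module):
    """Sketch of the strided block: 3x3/s2 conv -> relu -> 3x3 conv, plus a 1x1/s2 projection shortcut."""
    def __init__(self, c_in: int = 128, c_out: int = 256):
        super().__init__()
        self.conv1 = nn.Conv2d(c_in, c_out, kernel_size=3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(c_out, c_out, kernel_size=3, padding=1)
        self.proj = nn.Conv2d(c_in, c_out, kernel_size=1, stride=2)  # the "valid" 1x1 stride-2 conv

    def forward(self, x):
        out = torch.relu(self.conv1(x))         # input_171 / input_173
        out = self.conv2(out)                   # out_27
        return torch.relu(out + self.proj(x))   # input_179 / input_181

x = torch.randn(1, 128, 20, 250)
print(DownsampleBlock()(x).shape)               # torch.Size([1, 256, 10, 125])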
|
309 |
+
tensor<string, []> input_183_pad_type_0 = const()[name = tensor<string, []>("input_183_pad_type_0"), val = tensor<string, []>("custom")];
|
310 |
+
tensor<int32, [4]> input_183_pad_0 = const()[name = tensor<string, []>("input_183_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
311 |
+
tensor<int32, [2]> input_183_strides_0 = const()[name = tensor<string, []>("input_183_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
312 |
+
tensor<int32, [2]> input_183_dilations_0 = const()[name = tensor<string, []>("input_183_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
313 |
+
tensor<int32, []> input_183_groups_0 = const()[name = tensor<string, []>("input_183_groups_0"), val = tensor<int32, []>(1)];
|
314 |
+
tensor<fp16, [256, 256, 3, 3]> const_69_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [589824]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2985792))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3575680))), name = tensor<string, []>("const_69_to_fp16_palettized"), shape = tensor<uint32, [4]>([256, 256, 3, 3])];
|
315 |
+
tensor<fp16, [256]> const_70_to_fp16 = const()[name = tensor<string, []>("const_70_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3576256)))];
|
316 |
+
tensor<fp16, [1, 256, 10, 125]> input_185_cast_fp16 = conv(bias = const_70_to_fp16, dilations = input_183_dilations_0, groups = input_183_groups_0, pad = input_183_pad_0, pad_type = input_183_pad_type_0, strides = input_183_strides_0, weight = const_69_to_fp16_palettized, x = input_181_cast_fp16)[name = tensor<string, []>("input_185_cast_fp16")];
|
317 |
+
tensor<fp16, [1, 256, 10, 125]> input_187_cast_fp16 = relu(x = input_185_cast_fp16)[name = tensor<string, []>("input_187_cast_fp16")];
|
318 |
+
tensor<string, []> input_189_pad_type_0 = const()[name = tensor<string, []>("input_189_pad_type_0"), val = tensor<string, []>("custom")];
|
319 |
+
tensor<int32, [4]> input_189_pad_0 = const()[name = tensor<string, []>("input_189_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
320 |
+
tensor<int32, [2]> input_189_strides_0 = const()[name = tensor<string, []>("input_189_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
321 |
+
tensor<int32, [2]> input_189_dilations_0 = const()[name = tensor<string, []>("input_189_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
322 |
+
tensor<int32, []> input_189_groups_0 = const()[name = tensor<string, []>("input_189_groups_0"), val = tensor<int32, []>(1)];
|
323 |
+
tensor<fp16, [256, 256, 3, 3]> const_71_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [589824]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3576832))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4166720))), name = tensor<string, []>("const_71_to_fp16_palettized"), shape = tensor<uint32, [4]>([256, 256, 3, 3])];
|
324 |
+
tensor<fp16, [256]> const_72_to_fp16 = const()[name = tensor<string, []>("const_72_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4167296)))];
|
325 |
+
tensor<fp16, [1, 256, 10, 125]> out_29_cast_fp16 = conv(bias = const_72_to_fp16, dilations = input_189_dilations_0, groups = input_189_groups_0, pad = input_189_pad_0, pad_type = input_189_pad_type_0, strides = input_189_strides_0, weight = const_71_to_fp16_palettized, x = input_187_cast_fp16)[name = tensor<string, []>("out_29_cast_fp16")];
|
326 |
+
tensor<fp16, [1, 256, 10, 125]> input_191_cast_fp16 = add(x = out_29_cast_fp16, y = input_181_cast_fp16)[name = tensor<string, []>("input_191_cast_fp16")];
|
327 |
+
tensor<fp16, [1, 256, 10, 125]> input_193_cast_fp16 = relu(x = input_191_cast_fp16)[name = tensor<string, []>("input_193_cast_fp16")];
|
328 |
+
tensor<string, []> input_195_pad_type_0 = const()[name = tensor<string, []>("input_195_pad_type_0"), val = tensor<string, []>("custom")];
|
329 |
+
tensor<int32, [4]> input_195_pad_0 = const()[name = tensor<string, []>("input_195_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
330 |
+
tensor<int32, [2]> input_195_strides_0 = const()[name = tensor<string, []>("input_195_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
331 |
+
tensor<int32, [2]> input_195_dilations_0 = const()[name = tensor<string, []>("input_195_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
332 |
+
tensor<int32, []> input_195_groups_0 = const()[name = tensor<string, []>("input_195_groups_0"), val = tensor<int32, []>(1)];
|
333 |
+
tensor<fp16, [256, 256, 3, 3]> const_73_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [589824]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4167872))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4757760))), name = tensor<string, []>("const_73_to_fp16_palettized"), shape = tensor<uint32, [4]>([256, 256, 3, 3])];
|
334 |
+
tensor<fp16, [256]> const_74_to_fp16 = const()[name = tensor<string, []>("const_74_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4758336)))];
|
335 |
+
tensor<fp16, [1, 256, 10, 125]> input_197_cast_fp16 = conv(bias = const_74_to_fp16, dilations = input_195_dilations_0, groups = input_195_groups_0, pad = input_195_pad_0, pad_type = input_195_pad_type_0, strides = input_195_strides_0, weight = const_73_to_fp16_palettized, x = input_193_cast_fp16)[name = tensor<string, []>("input_197_cast_fp16")];
|
336 |
+
tensor<fp16, [1, 256, 10, 125]> input_199_cast_fp16 = relu(x = input_197_cast_fp16)[name = tensor<string, []>("input_199_cast_fp16")];
|
337 |
+
tensor<string, []> input_201_pad_type_0 = const()[name = tensor<string, []>("input_201_pad_type_0"), val = tensor<string, []>("custom")];
|
338 |
+
tensor<int32, [4]> input_201_pad_0 = const()[name = tensor<string, []>("input_201_pad_0"), val = tensor<int32, [4]>([1, 1, 1, 1])];
|
339 |
+
tensor<int32, [2]> input_201_strides_0 = const()[name = tensor<string, []>("input_201_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
340 |
+
tensor<int32, [2]> input_201_dilations_0 = const()[name = tensor<string, []>("input_201_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
341 |
+
tensor<int32, []> input_201_groups_0 = const()[name = tensor<string, []>("input_201_groups_0"), val = tensor<int32, []>(1)];
|
342 |
+
tensor<fp16, [256, 256, 3, 3]> const_75_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [589824]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4758912))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5348800))), name = tensor<string, []>("const_75_to_fp16_palettized"), shape = tensor<uint32, [4]>([256, 256, 3, 3])];
|
343 |
+
tensor<fp16, [256]> const_76_to_fp16 = const()[name = tensor<string, []>("const_76_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5349376)))];
|
344 |
+
tensor<fp16, [1, 256, 10, 125]> out_cast_fp16 = conv(bias = const_76_to_fp16, dilations = input_201_dilations_0, groups = input_201_groups_0, pad = input_201_pad_0, pad_type = input_201_pad_type_0, strides = input_201_strides_0, weight = const_75_to_fp16_palettized, x = input_199_cast_fp16)[name = tensor<string, []>("out_cast_fp16")];
|
345 |
+
tensor<fp16, [1, 256, 10, 125]> input_203_cast_fp16 = add(x = out_cast_fp16, y = input_193_cast_fp16)[name = tensor<string, []>("input_203_cast_fp16")];
|
346 |
+
tensor<fp16, [1, 256, 10, 125]> x_cast_fp16 = relu(x = input_203_cast_fp16)[name = tensor<string, []>("x_cast_fp16")];
|
347 |
+
tensor<int32, [3]> var_577 = const()[name = tensor<string, []>("op_577"), val = tensor<int32, [3]>([1, 2560, 125])];
|
348 |
+
tensor<fp16, [1, 2560, 125]> sequences_cast_fp16 = reshape(shape = var_577, x = x_cast_fp16)[name = tensor<string, []>("sequences_cast_fp16")];
|
349 |
+
tensor<int32, [1]> expand_dims_0_axes_0 = const()[name = tensor<string, []>("expand_dims_0_axes_0"), val = tensor<int32, [1]>([3])];
|
350 |
+
tensor<fp16, [1, 3, 589, 1]> expand_dims_0_cast_fp16 = expand_dims(axes = expand_dims_0_axes_0, x = speaker_masks)[name = tensor<string, []>("expand_dims_0_cast_fp16")];
|
351 |
+
tensor<fp32, []> upsample_nearest_neighbor_0_scale_factor_height_0 = const()[name = tensor<string, []>("upsample_nearest_neighbor_0_scale_factor_height_0"), val = tensor<fp32, []>(0x1.b2a2a4p-3)];
|
352 |
+
tensor<fp32, []> upsample_nearest_neighbor_0_scale_factor_width_0 = const()[name = tensor<string, []>("upsample_nearest_neighbor_0_scale_factor_width_0"), val = tensor<fp32, []>(0x1p+0)];
|
353 |
+
tensor<fp16, [1, 3, 125, 1]> upsample_nearest_neighbor_0_cast_fp16 = upsample_nearest_neighbor(scale_factor_height = upsample_nearest_neighbor_0_scale_factor_height_0, scale_factor_width = upsample_nearest_neighbor_0_scale_factor_width_0, x = expand_dims_0_cast_fp16)[name = tensor<string, []>("upsample_nearest_neighbor_0_cast_fp16")];
|
354 |
+
tensor<int32, [1]> weights_1_axes_0 = const()[name = tensor<string, []>("weights_1_axes_0"), val = tensor<int32, [1]>([3])];
|
355 |
+
tensor<fp16, [1, 3, 125]> weights_1_cast_fp16 = squeeze(axes = weights_1_axes_0, x = upsample_nearest_neighbor_0_cast_fp16)[name = tensor<string, []>("weights_1_cast_fp16")];
|
356 |
+
tensor<int32, [3]> var_583_begin_0 = const()[name = tensor<string, []>("op_583_begin_0"), val = tensor<int32, [3]>([0, 0, 0])];
|
357 |
+
tensor<int32, [3]> var_583_end_0 = const()[name = tensor<string, []>("op_583_end_0"), val = tensor<int32, [3]>([1, 1, 125])];
|
358 |
+
tensor<bool, [3]> var_583_end_mask_0 = const()[name = tensor<string, []>("op_583_end_mask_0"), val = tensor<bool, [3]>([true, false, true])];
|
359 |
+
tensor<bool, [3]> var_583_squeeze_mask_0 = const()[name = tensor<string, []>("op_583_squeeze_mask_0"), val = tensor<bool, [3]>([false, true, false])];
|
360 |
+
tensor<fp16, [1, 125]> var_583_cast_fp16 = slice_by_index(begin = var_583_begin_0, end = var_583_end_0, end_mask = var_583_end_mask_0, squeeze_mask = var_583_squeeze_mask_0, x = weights_1_cast_fp16)[name = tensor<string, []>("op_583_cast_fp16")];
|
361 |
+
tensor<int32, [1]> weights_5_axes_0 = const()[name = tensor<string, []>("weights_5_axes_0"), val = tensor<int32, [1]>([1])];
|
362 |
+
tensor<fp16, [1, 1, 125]> weights_5_cast_fp16 = expand_dims(axes = weights_5_axes_0, x = var_583_cast_fp16)[name = tensor<string, []>("weights_5_cast_fp16")];
|
363 |
+
tensor<int32, [1]> var_587_axes_0 = const()[name = tensor<string, []>("op_587_axes_0"), val = tensor<int32, [1]>([2])];
|
364 |
+
tensor<bool, []> var_587_keep_dims_0 = const()[name = tensor<string, []>("op_587_keep_dims_0"), val = tensor<bool, []>(false)];
|
365 |
+
tensor<fp16, [1, 1]> var_587_cast_fp16 = reduce_sum(axes = var_587_axes_0, keep_dims = var_587_keep_dims_0, x = weights_5_cast_fp16)[name = tensor<string, []>("op_587_cast_fp16")];
|
366 |
+
tensor<fp16, []> var_588_to_fp16 = const()[name = tensor<string, []>("op_588_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
367 |
+
tensor<fp16, [1, 1]> v1_1_cast_fp16 = add(x = var_587_cast_fp16, y = var_588_to_fp16)[name = tensor<string, []>("v1_1_cast_fp16")];
|
368 |
+
tensor<fp16, [1, 2560, 125]> var_590_cast_fp16 = mul(x = sequences_cast_fp16, y = weights_5_cast_fp16)[name = tensor<string, []>("op_590_cast_fp16")];
|
369 |
+
tensor<int32, [1]> var_592_axes_0 = const()[name = tensor<string, []>("op_592_axes_0"), val = tensor<int32, [1]>([2])];
|
370 |
+
tensor<bool, []> var_592_keep_dims_0 = const()[name = tensor<string, []>("op_592_keep_dims_0"), val = tensor<bool, []>(false)];
|
371 |
+
tensor<fp16, [1, 2560]> var_592_cast_fp16 = reduce_sum(axes = var_592_axes_0, keep_dims = var_592_keep_dims_0, x = var_590_cast_fp16)[name = tensor<string, []>("op_592_cast_fp16")];
|
372 |
+
tensor<fp16, [1, 2560]> mean_1_cast_fp16 = real_div(x = var_592_cast_fp16, y = v1_1_cast_fp16)[name = tensor<string, []>("mean_1_cast_fp16")];
|
373 |
+
tensor<int32, [1]> var_594_axes_0 = const()[name = tensor<string, []>("op_594_axes_0"), val = tensor<int32, [1]>([2])];
|
374 |
+
tensor<fp16, [1, 2560, 1]> var_594_cast_fp16 = expand_dims(axes = var_594_axes_0, x = mean_1_cast_fp16)[name = tensor<string, []>("op_594_cast_fp16")];
|
375 |
+
tensor<fp16, [1, 2560, 125]> var_595_cast_fp16 = sub(x = sequences_cast_fp16, y = var_594_cast_fp16)[name = tensor<string, []>("op_595_cast_fp16")];
|
376 |
+
tensor<fp16, [1, 2560, 125]> dx2_1_cast_fp16 = mul(x = var_595_cast_fp16, y = var_595_cast_fp16)[name = tensor<string, []>("dx2_1_cast_fp16")];
|
377 |
+
tensor<fp16, [1, 1, 125]> var_597_cast_fp16 = mul(x = weights_5_cast_fp16, y = weights_5_cast_fp16)[name = tensor<string, []>("op_597_cast_fp16")];
|
378 |
+
tensor<int32, [1]> v2_1_axes_0 = const()[name = tensor<string, []>("v2_1_axes_0"), val = tensor<int32, [1]>([2])];
|
379 |
+
tensor<bool, []> v2_1_keep_dims_0 = const()[name = tensor<string, []>("v2_1_keep_dims_0"), val = tensor<bool, []>(false)];
|
380 |
+
tensor<fp16, [1, 1]> v2_1_cast_fp16 = reduce_sum(axes = v2_1_axes_0, keep_dims = v2_1_keep_dims_0, x = var_597_cast_fp16)[name = tensor<string, []>("v2_1_cast_fp16")];
|
381 |
+
tensor<fp16, [1, 2560, 125]> var_600_cast_fp16 = mul(x = dx2_1_cast_fp16, y = weights_5_cast_fp16)[name = tensor<string, []>("op_600_cast_fp16")];
|
382 |
+
tensor<int32, [1]> var_602_axes_0 = const()[name = tensor<string, []>("op_602_axes_0"), val = tensor<int32, [1]>([2])];
|
383 |
+
tensor<bool, []> var_602_keep_dims_0 = const()[name = tensor<string, []>("op_602_keep_dims_0"), val = tensor<bool, []>(false)];
|
384 |
+
tensor<fp16, [1, 2560]> var_602_cast_fp16 = reduce_sum(axes = var_602_axes_0, keep_dims = var_602_keep_dims_0, x = var_600_cast_fp16)[name = tensor<string, []>("op_602_cast_fp16")];
|
385 |
+
tensor<fp16, [1, 1]> var_603_cast_fp16 = real_div(x = v2_1_cast_fp16, y = v1_1_cast_fp16)[name = tensor<string, []>("op_603_cast_fp16")];
|
386 |
+
tensor<fp16, [1, 1]> var_604_cast_fp16 = sub(x = v1_1_cast_fp16, y = var_603_cast_fp16)[name = tensor<string, []>("op_604_cast_fp16")];
|
387 |
+
tensor<fp16, []> var_605_to_fp16 = const()[name = tensor<string, []>("op_605_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
388 |
+
tensor<fp16, [1, 1]> var_606_cast_fp16 = add(x = var_604_cast_fp16, y = var_605_to_fp16)[name = tensor<string, []>("op_606_cast_fp16")];
|
389 |
+
tensor<fp16, [1, 2560]> var_1_cast_fp16 = real_div(x = var_602_cast_fp16, y = var_606_cast_fp16)[name = tensor<string, []>("var_1_cast_fp16")];
|
390 |
+
tensor<fp16, [1, 2560]> std_1_cast_fp16 = sqrt(x = var_1_cast_fp16)[name = tensor<string, []>("std_1_cast_fp16")];
|
391 |
+
tensor<bool, []> var_610_interleave_0 = const()[name = tensor<string, []>("op_610_interleave_0"), val = tensor<bool, []>(false)];
|
392 |
+
tensor<fp16, [1, 5120]> var_610_cast_fp16 = concat(axis = var_12, interleave = var_610_interleave_0, values = (mean_1_cast_fp16, std_1_cast_fp16))[name = tensor<string, []>("op_610_cast_fp16")];
|
393 |
+
tensor<int32, [3]> var_612_begin_0 = const()[name = tensor<string, []>("op_612_begin_0"), val = tensor<int32, [3]>([0, 1, 0])];
|
394 |
+
tensor<int32, [3]> var_612_end_0 = const()[name = tensor<string, []>("op_612_end_0"), val = tensor<int32, [3]>([1, 2, 125])];
|
395 |
+
tensor<bool, [3]> var_612_end_mask_0 = const()[name = tensor<string, []>("op_612_end_mask_0"), val = tensor<bool, [3]>([true, false, true])];
|
396 |
+
tensor<bool, [3]> var_612_squeeze_mask_0 = const()[name = tensor<string, []>("op_612_squeeze_mask_0"), val = tensor<bool, [3]>([false, true, false])];
|
397 |
+
tensor<fp16, [1, 125]> var_612_cast_fp16 = slice_by_index(begin = var_612_begin_0, end = var_612_end_0, end_mask = var_612_end_mask_0, squeeze_mask = var_612_squeeze_mask_0, x = weights_1_cast_fp16)[name = tensor<string, []>("op_612_cast_fp16")];
|
398 |
+
tensor<int32, [1]> weights_9_axes_0 = const()[name = tensor<string, []>("weights_9_axes_0"), val = tensor<int32, [1]>([1])];
|
399 |
+
tensor<fp16, [1, 1, 125]> weights_9_cast_fp16 = expand_dims(axes = weights_9_axes_0, x = var_612_cast_fp16)[name = tensor<string, []>("weights_9_cast_fp16")];
|
400 |
+
tensor<int32, [1]> var_616_axes_0 = const()[name = tensor<string, []>("op_616_axes_0"), val = tensor<int32, [1]>([2])];
|
401 |
+
tensor<bool, []> var_616_keep_dims_0 = const()[name = tensor<string, []>("op_616_keep_dims_0"), val = tensor<bool, []>(false)];
|
402 |
+
tensor<fp16, [1, 1]> var_616_cast_fp16 = reduce_sum(axes = var_616_axes_0, keep_dims = var_616_keep_dims_0, x = weights_9_cast_fp16)[name = tensor<string, []>("op_616_cast_fp16")];
|
403 |
+
tensor<fp16, []> var_617_to_fp16 = const()[name = tensor<string, []>("op_617_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
404 |
+
tensor<fp16, [1, 1]> v1_3_cast_fp16 = add(x = var_616_cast_fp16, y = var_617_to_fp16)[name = tensor<string, []>("v1_3_cast_fp16")];
|
405 |
+
tensor<fp16, [1, 2560, 125]> var_619_cast_fp16 = mul(x = sequences_cast_fp16, y = weights_9_cast_fp16)[name = tensor<string, []>("op_619_cast_fp16")];
|
406 |
+
tensor<int32, [1]> var_621_axes_0 = const()[name = tensor<string, []>("op_621_axes_0"), val = tensor<int32, [1]>([2])];
|
407 |
+
tensor<bool, []> var_621_keep_dims_0 = const()[name = tensor<string, []>("op_621_keep_dims_0"), val = tensor<bool, []>(false)];
|
408 |
+
tensor<fp16, [1, 2560]> var_621_cast_fp16 = reduce_sum(axes = var_621_axes_0, keep_dims = var_621_keep_dims_0, x = var_619_cast_fp16)[name = tensor<string, []>("op_621_cast_fp16")];
|
409 |
+
tensor<fp16, [1, 2560]> mean_3_cast_fp16 = real_div(x = var_621_cast_fp16, y = v1_3_cast_fp16)[name = tensor<string, []>("mean_3_cast_fp16")];
|
410 |
+
tensor<int32, [1]> var_623_axes_0 = const()[name = tensor<string, []>("op_623_axes_0"), val = tensor<int32, [1]>([2])];
|
411 |
+
tensor<fp16, [1, 2560, 1]> var_623_cast_fp16 = expand_dims(axes = var_623_axes_0, x = mean_3_cast_fp16)[name = tensor<string, []>("op_623_cast_fp16")];
|
412 |
+
tensor<fp16, [1, 2560, 125]> var_624_cast_fp16 = sub(x = sequences_cast_fp16, y = var_623_cast_fp16)[name = tensor<string, []>("op_624_cast_fp16")];
|
413 |
+
tensor<fp16, [1, 2560, 125]> dx2_3_cast_fp16 = mul(x = var_624_cast_fp16, y = var_624_cast_fp16)[name = tensor<string, []>("dx2_3_cast_fp16")];
|
414 |
+
tensor<fp16, [1, 1, 125]> var_626_cast_fp16 = mul(x = weights_9_cast_fp16, y = weights_9_cast_fp16)[name = tensor<string, []>("op_626_cast_fp16")];
|
415 |
+
tensor<int32, [1]> v2_3_axes_0 = const()[name = tensor<string, []>("v2_3_axes_0"), val = tensor<int32, [1]>([2])];
|
416 |
+
tensor<bool, []> v2_3_keep_dims_0 = const()[name = tensor<string, []>("v2_3_keep_dims_0"), val = tensor<bool, []>(false)];
|
417 |
+
tensor<fp16, [1, 1]> v2_3_cast_fp16 = reduce_sum(axes = v2_3_axes_0, keep_dims = v2_3_keep_dims_0, x = var_626_cast_fp16)[name = tensor<string, []>("v2_3_cast_fp16")];
|
418 |
+
tensor<fp16, [1, 2560, 125]> var_629_cast_fp16 = mul(x = dx2_3_cast_fp16, y = weights_9_cast_fp16)[name = tensor<string, []>("op_629_cast_fp16")];
|
419 |
+
tensor<int32, [1]> var_631_axes_0 = const()[name = tensor<string, []>("op_631_axes_0"), val = tensor<int32, [1]>([2])];
|
420 |
+
tensor<bool, []> var_631_keep_dims_0 = const()[name = tensor<string, []>("op_631_keep_dims_0"), val = tensor<bool, []>(false)];
|
421 |
+
tensor<fp16, [1, 2560]> var_631_cast_fp16 = reduce_sum(axes = var_631_axes_0, keep_dims = var_631_keep_dims_0, x = var_629_cast_fp16)[name = tensor<string, []>("op_631_cast_fp16")];
|
422 |
+
tensor<fp16, [1, 1]> var_632_cast_fp16 = real_div(x = v2_3_cast_fp16, y = v1_3_cast_fp16)[name = tensor<string, []>("op_632_cast_fp16")];
|
423 |
+
tensor<fp16, [1, 1]> var_633_cast_fp16 = sub(x = v1_3_cast_fp16, y = var_632_cast_fp16)[name = tensor<string, []>("op_633_cast_fp16")];
|
424 |
+
tensor<fp16, []> var_634_to_fp16 = const()[name = tensor<string, []>("op_634_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
425 |
+
tensor<fp16, [1, 1]> var_635_cast_fp16 = add(x = var_633_cast_fp16, y = var_634_to_fp16)[name = tensor<string, []>("op_635_cast_fp16")];
|
426 |
+
tensor<fp16, [1, 2560]> var_3_cast_fp16 = real_div(x = var_631_cast_fp16, y = var_635_cast_fp16)[name = tensor<string, []>("var_3_cast_fp16")];
|
427 |
+
tensor<fp16, [1, 2560]> std_3_cast_fp16 = sqrt(x = var_3_cast_fp16)[name = tensor<string, []>("std_3_cast_fp16")];
|
428 |
+
tensor<bool, []> var_639_interleave_0 = const()[name = tensor<string, []>("op_639_interleave_0"), val = tensor<bool, []>(false)];
|
429 |
+
tensor<fp16, [1, 5120]> var_639_cast_fp16 = concat(axis = var_12, interleave = var_639_interleave_0, values = (mean_3_cast_fp16, std_3_cast_fp16))[name = tensor<string, []>("op_639_cast_fp16")];
|
430 |
+
tensor<int32, [3]> var_641_begin_0 = const()[name = tensor<string, []>("op_641_begin_0"), val = tensor<int32, [3]>([0, 2, 0])];
|
431 |
+
tensor<int32, [3]> var_641_end_0 = const()[name = tensor<string, []>("op_641_end_0"), val = tensor<int32, [3]>([1, 3, 125])];
|
432 |
+
tensor<bool, [3]> var_641_end_mask_0 = const()[name = tensor<string, []>("op_641_end_mask_0"), val = tensor<bool, [3]>([true, false, true])];
|
433 |
+
tensor<bool, [3]> var_641_squeeze_mask_0 = const()[name = tensor<string, []>("op_641_squeeze_mask_0"), val = tensor<bool, [3]>([false, true, false])];
|
434 |
+
tensor<fp16, [1, 125]> var_641_cast_fp16 = slice_by_index(begin = var_641_begin_0, end = var_641_end_0, end_mask = var_641_end_mask_0, squeeze_mask = var_641_squeeze_mask_0, x = weights_1_cast_fp16)[name = tensor<string, []>("op_641_cast_fp16")];
|
435 |
+
tensor<int32, [1]> weights_axes_0 = const()[name = tensor<string, []>("weights_axes_0"), val = tensor<int32, [1]>([1])];
|
436 |
+
tensor<fp16, [1, 1, 125]> weights_cast_fp16 = expand_dims(axes = weights_axes_0, x = var_641_cast_fp16)[name = tensor<string, []>("weights_cast_fp16")];
|
437 |
+
tensor<int32, [1]> var_645_axes_0 = const()[name = tensor<string, []>("op_645_axes_0"), val = tensor<int32, [1]>([2])];
|
438 |
+
tensor<bool, []> var_645_keep_dims_0 = const()[name = tensor<string, []>("op_645_keep_dims_0"), val = tensor<bool, []>(false)];
|
439 |
+
tensor<fp16, [1, 1]> var_645_cast_fp16 = reduce_sum(axes = var_645_axes_0, keep_dims = var_645_keep_dims_0, x = weights_cast_fp16)[name = tensor<string, []>("op_645_cast_fp16")];
|
440 |
+
tensor<fp16, []> var_646_to_fp16 = const()[name = tensor<string, []>("op_646_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
441 |
+
tensor<fp16, [1, 1]> v1_cast_fp16 = add(x = var_645_cast_fp16, y = var_646_to_fp16)[name = tensor<string, []>("v1_cast_fp16")];
|
442 |
+
tensor<fp16, [1, 2560, 125]> var_648_cast_fp16 = mul(x = sequences_cast_fp16, y = weights_cast_fp16)[name = tensor<string, []>("op_648_cast_fp16")];
|
443 |
+
tensor<int32, [1]> var_650_axes_0 = const()[name = tensor<string, []>("op_650_axes_0"), val = tensor<int32, [1]>([2])];
|
444 |
+
tensor<bool, []> var_650_keep_dims_0 = const()[name = tensor<string, []>("op_650_keep_dims_0"), val = tensor<bool, []>(false)];
|
445 |
+
tensor<fp16, [1, 2560]> var_650_cast_fp16 = reduce_sum(axes = var_650_axes_0, keep_dims = var_650_keep_dims_0, x = var_648_cast_fp16)[name = tensor<string, []>("op_650_cast_fp16")];
|
446 |
+
tensor<fp16, [1, 2560]> mean_cast_fp16 = real_div(x = var_650_cast_fp16, y = v1_cast_fp16)[name = tensor<string, []>("mean_cast_fp16")];
|
447 |
+
tensor<int32, [1]> var_652_axes_0 = const()[name = tensor<string, []>("op_652_axes_0"), val = tensor<int32, [1]>([2])];
|
448 |
+
tensor<fp16, [1, 2560, 1]> var_652_cast_fp16 = expand_dims(axes = var_652_axes_0, x = mean_cast_fp16)[name = tensor<string, []>("op_652_cast_fp16")];
|
449 |
+
tensor<fp16, [1, 2560, 125]> var_653_cast_fp16 = sub(x = sequences_cast_fp16, y = var_652_cast_fp16)[name = tensor<string, []>("op_653_cast_fp16")];
|
450 |
+
tensor<fp16, [1, 2560, 125]> dx2_cast_fp16 = mul(x = var_653_cast_fp16, y = var_653_cast_fp16)[name = tensor<string, []>("dx2_cast_fp16")];
|
451 |
+
tensor<fp16, [1, 1, 125]> var_655_cast_fp16 = mul(x = weights_cast_fp16, y = weights_cast_fp16)[name = tensor<string, []>("op_655_cast_fp16")];
|
452 |
+
tensor<int32, [1]> v2_axes_0 = const()[name = tensor<string, []>("v2_axes_0"), val = tensor<int32, [1]>([2])];
|
453 |
+
tensor<bool, []> v2_keep_dims_0 = const()[name = tensor<string, []>("v2_keep_dims_0"), val = tensor<bool, []>(false)];
|
454 |
+
tensor<fp16, [1, 1]> v2_cast_fp16 = reduce_sum(axes = v2_axes_0, keep_dims = v2_keep_dims_0, x = var_655_cast_fp16)[name = tensor<string, []>("v2_cast_fp16")];
|
455 |
+
tensor<fp16, [1, 2560, 125]> var_658_cast_fp16 = mul(x = dx2_cast_fp16, y = weights_cast_fp16)[name = tensor<string, []>("op_658_cast_fp16")];
|
456 |
+
tensor<int32, [1]> var_660_axes_0 = const()[name = tensor<string, []>("op_660_axes_0"), val = tensor<int32, [1]>([2])];
|
457 |
+
tensor<bool, []> var_660_keep_dims_0 = const()[name = tensor<string, []>("op_660_keep_dims_0"), val = tensor<bool, []>(false)];
|
458 |
+
tensor<fp16, [1, 2560]> var_660_cast_fp16 = reduce_sum(axes = var_660_axes_0, keep_dims = var_660_keep_dims_0, x = var_658_cast_fp16)[name = tensor<string, []>("op_660_cast_fp16")];
|
459 |
+
tensor<fp16, [1, 1]> var_661_cast_fp16 = real_div(x = v2_cast_fp16, y = v1_cast_fp16)[name = tensor<string, []>("op_661_cast_fp16")];
|
460 |
+
tensor<fp16, [1, 1]> var_662_cast_fp16 = sub(x = v1_cast_fp16, y = var_661_cast_fp16)[name = tensor<string, []>("op_662_cast_fp16")];
|
461 |
+
tensor<fp16, []> var_663_to_fp16 = const()[name = tensor<string, []>("op_663_to_fp16"), val = tensor<fp16, []>(0x1p-24)];
|
462 |
+
tensor<fp16, [1, 1]> var_664_cast_fp16 = add(x = var_662_cast_fp16, y = var_663_to_fp16)[name = tensor<string, []>("op_664_cast_fp16")];
|
463 |
+
tensor<fp16, [1, 2560]> var_cast_fp16 = real_div(x = var_660_cast_fp16, y = var_664_cast_fp16)[name = tensor<string, []>("var_cast_fp16")];
|
464 |
+
tensor<fp16, [1, 2560]> std_cast_fp16 = sqrt(x = var_cast_fp16)[name = tensor<string, []>("std_cast_fp16")];
|
465 |
+
tensor<bool, []> var_668_interleave_0 = const()[name = tensor<string, []>("op_668_interleave_0"), val = tensor<bool, []>(false)];
|
466 |
+
tensor<fp16, [1, 5120]> var_668_cast_fp16 = concat(axis = var_12, interleave = var_668_interleave_0, values = (mean_cast_fp16, std_cast_fp16))[name = tensor<string, []>("op_668_cast_fp16")];
|
467 |
+
tensor<int32, []> input_axis_0 = const()[name = tensor<string, []>("input_axis_0"), val = tensor<int32, []>(1)];
|
468 |
+
tensor<fp16, [1, 3, 5120]> input_cast_fp16 = stack(axis = input_axis_0, values = (var_610_cast_fp16, var_639_cast_fp16, var_668_cast_fp16))[name = tensor<string, []>("input_cast_fp16")];
|
469 |
+
tensor<fp16, [256, 5120]> model_resnet_seg_1_weight_to_fp16_palettized = constexpr_lut_to_dense()[indices = tensor<uint8, [1310720]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5349952))), lut = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6660736))), name = tensor<string, []>("model_resnet_seg_1_weight_to_fp16_palettized"), shape = tensor<uint32, [2]>([256, 5120])];
|
470 |
+
tensor<fp16, [256]> model_resnet_seg_1_bias_to_fp16 = const()[name = tensor<string, []>("model_resnet_seg_1_bias_to_fp16"), val = tensor<fp16, [256]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6661312)))];
|
471 |
+
tensor<fp16, [1, 3, 256]> speaker_embeddings = linear(bias = model_resnet_seg_1_bias_to_fp16, weight = model_resnet_seg_1_weight_to_fp16_palettized, x = input_cast_fp16)[name = tensor<string, []>("linear_0_cast_fp16")];
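The tail of the program flattens the [1, 256, 10, 125] feature map to [1, 2560, 125] (256 channels times 10 frequency bins per frame) and then, for each of the three speaker masks (the [1, 3, 589] speaker_masks input, resampled to 125 frames by the nearest-neighbour upsample with scale approximately 0.212), computes a mask-weighted mean and standard deviation over time before the final 5120-to-256 linear layer. A hedged NumPy sketch of that weighted statistics pooling, with the epsilon and variance normalisation following the ops above and random stand-ins for the real tensors:

import numpy as np

def weighted_stats_pool(seq, w, eps=2.0**-24):
    """seq: [C, T] features, w: [T] mask weights -> [2*C] vector of (mean ++ std)."""
    v1 = w.sum() + eps                          # v1_1
    v2 = (w * w).sum()                          # v2_1
    mean = (seq * w).sum(axis=1) / v1           # mean_1
    dx2 = (seq - mean[:, None]) ** 2            # dx2_1
    var = (dx2 * w).sum(axis=1) / (v1 - v2 / v1 + eps)   # reliability-weighted variance
    return np.concatenate([mean, np.sqrt(var)])           # concat of mean and std, as in op_610

seq = np.random.randn(2560, 125).astype(np.float32)       # flattened ResNet features (stand-in)
masks = np.random.rand(3, 125).astype(np.float32)          # three speaker masks at 125 frames (stand-in)
pooled = np.stack([weighted_stats_pool(seq, m) for m in masks])   # [3, 5120]
W, b = np.random.randn(256, 5120), np.random.randn(256)           # stand-in for model_resnet_seg_1
speaker_embeddings = pooled @ W.T + b                              # [3, 256]
print(speaker_embeddings.shape)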
|
472 |
+
} -> (speaker_embeddings);
|
473 |
+
}
|
speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedder.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e733a81059912ad73b02ce82ae271b40e0767ac6b155dfca84a6c3a0d753d02f
size 6661888
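The large binaries in this commit are stored as Git LFS pointer files like the one above (spec version, SHA-256 of the payload, size in bytes). A quick way to confirm that a checkout contains the real weights rather than just the pointer is to compare the hash and size locally; a small sketch, assuming the repository root as the working directory:

import hashlib, os

path = "speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedder.mlmodelc/weights/weight.bin"
expected_size = 6661888
expected_sha = "e733a81059912ad73b02ce82ae271b40e0767ac6b155dfca84a6c3a0d753d02f"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print("size ok:", os.path.getsize(path) == expected_size)
print("sha ok :", h.hexdigest() == expected_sha)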
speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedderPreprocessor.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5dd1e6ea694479da669d42d9752db8ebffdc7582b80c90f06452e2ed1f72cf8f
size 243
speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedderPreprocessor.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f252f1834b495a132333af500f573a7218c2d3d1f7bfb0faaad89c51a989dac7
size 330
speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedderPreprocessor.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,77 @@
[
  {
    "metadataOutputVersion" : "3.0",
    "storagePrecision" : "Float32",
    "outputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 998 × 80)",
        "shortDescription" : "",
        "shape" : "[1, 998, 80]",
        "name" : "preprocessor_output_1",
        "type" : "MultiArray"
      }
    ],
    "modelParameters" : [

    ],
    "specificationVersion" : 7,
    "mlProgramOperationTypeHistogram" : {
      "Ios16.cast" : 2,
      "Ios16.mul" : 4,
      "SliceByIndex" : 2,
      "Transpose" : 2,
      "SlidingWindows" : 1,
      "Ios16.sub" : 3,
      "Ios16.log" : 1,
      "Ios16.reduceMean" : 2,
      "Ios16.square" : 2,
      "Squeeze" : 2,
      "Ios16.matmul" : 2,
      "Ios16.add" : 1,
      "Ios16.linear" : 1,
      "ExpandDims" : 4,
      "Ios16.gather" : 2,
      "Ios16.maximum" : 1,
      "Identity" : 1,
      "Pad" : 2
    },
    "computePrecision" : "Mixed (Float16, Float32, Int32)",
    "isUpdatable" : "0",
    "stateSchema" : [

    ],
    "availability" : {
      "macOS" : "13.0",
      "tvOS" : "16.0",
      "visionOS" : "1.0",
      "watchOS" : "9.0",
      "iOS" : "16.0",
      "macCatalyst" : "16.0"
    },
    "modelType" : {
      "name" : "MLModelType_mlProgram"
    },
    "userDefinedMetadata" : {
      "com.github.apple.coremltools.source_dialect" : "TorchScript",
      "com.github.apple.coremltools.version" : "8.1",
      "com.github.apple.coremltools.source" : "torch==2.5.1"
    },
    "inputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1 × 160000)",
        "shortDescription" : "",
        "shape" : "[1, 160000]",
        "name" : "waveforms",
        "type" : "MultiArray"
      }
    ],
    "generatedClassName" : "SpeakerEmbeddingPreprocessor",
    "method" : "predict"
  }
]
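The metadata above describes a Float16 [1, 160000] "waveforms" input (10 s at 16 kHz) mapped to a [1, 998, 80] feature output. As a minimal smoke test, assuming coremltools 7 or later on macOS 13+ (where CompiledMLModel can load .mlmodelc bundles directly), one might run:

import numpy as np
import coremltools as ct

# Load the compiled preprocessor; the path is relative to the repository root shown above.
model = ct.models.CompiledMLModel(
    "speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedderPreprocessor.mlmodelc"
)

waveform = np.zeros((1, 160000), dtype=np.float16)   # 10 s of silence at 16 kHz
out = model.predict({"waveforms": waveform})
print(out["preprocessor_output_1"].shape)            # expected (1, 998, 80)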
speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedderPreprocessor.mlmodelc/model.mil
ADDED
@@ -0,0 +1,90 @@
1 |
+
program(1.0)
|
2 |
+
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3401.3.1"}, {"coremlc-version", "3401.4.1"}, {"coremltools-component-torch", "2.5.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.1"}})]
|
3 |
+
{
|
4 |
+
func main<ios16>(tensor<fp16, [1, 160000]> waveforms) {
|
5 |
+
tensor<string, []> cast_0_dtype_0 = const()[name = tensor<string, []>("cast_0_dtype_0"), val = tensor<string, []>("fp32")];
|
6 |
+
tensor<fp32, []> var_2_promoted = const()[name = tensor<string, []>("op_2_promoted"), val = tensor<fp32, []>(0x1p+15)];
|
7 |
+
tensor<fp32, [1, 160000]> cast_0 = cast(dtype = cast_0_dtype_0, x = waveforms)[name = tensor<string, []>("cast_11")];
|
8 |
+
tensor<fp32, [1, 160000]> waveform_1 = mul(x = cast_0, y = var_2_promoted)[name = tensor<string, []>("waveform_1")];
|
9 |
+
tensor<int32, [2]> var_6_begin_0 = const()[name = tensor<string, []>("op_6_begin_0"), val = tensor<int32, [2]>([0, 0])];
|
10 |
+
tensor<int32, [2]> var_6_end_0 = const()[name = tensor<string, []>("op_6_end_0"), val = tensor<int32, [2]>([1, 160000])];
|
11 |
+
tensor<bool, [2]> var_6_end_mask_0 = const()[name = tensor<string, []>("op_6_end_mask_0"), val = tensor<bool, [2]>([false, true])];
|
12 |
+
tensor<bool, [2]> var_6_squeeze_mask_0 = const()[name = tensor<string, []>("op_6_squeeze_mask_0"), val = tensor<bool, [2]>([true, false])];
|
13 |
+
tensor<fp32, [160000]> var_6 = slice_by_index(begin = var_6_begin_0, end = var_6_end_0, end_mask = var_6_end_mask_0, squeeze_mask = var_6_squeeze_mask_0, x = waveform_1)[name = tensor<string, []>("op_6")];
|
14 |
+
tensor<int32, []> sliding_windows_0_axis_0 = const()[name = tensor<string, []>("sliding_windows_0_axis_0"), val = tensor<int32, []>(0)];
|
15 |
+
tensor<int32, []> sliding_windows_0_size_0 = const()[name = tensor<string, []>("sliding_windows_0_size_0"), val = tensor<int32, []>(400)];
|
16 |
+
tensor<int32, []> sliding_windows_0_stride_0 = const()[name = tensor<string, []>("sliding_windows_0_stride_0"), val = tensor<int32, []>(160)];
|
17 |
+
tensor<fp32, [998, 400]> sliding_windows_0 = sliding_windows(axis = sliding_windows_0_axis_0, size = sliding_windows_0_size_0, stride = sliding_windows_0_stride_0, x = var_6)[name = tensor<string, []>("sliding_windows_0")];
|
18 |
+
tensor<int32, [1]> var_42_axes_0 = const()[name = tensor<string, []>("op_42_axes_0"), val = tensor<int32, [1]>([1])];
|
19 |
+
tensor<bool, []> var_42_keep_dims_0 = const()[name = tensor<string, []>("op_42_keep_dims_0"), val = tensor<bool, []>(false)];
|
20 |
+
tensor<fp32, [998]> var_42 = reduce_mean(axes = var_42_axes_0, keep_dims = var_42_keep_dims_0, x = sliding_windows_0)[name = tensor<string, []>("op_42")];
|
21 |
+
tensor<int32, [1]> row_means_axes_0 = const()[name = tensor<string, []>("row_means_axes_0"), val = tensor<int32, [1]>([1])];
|
22 |
+
tensor<fp32, [998, 1]> row_means = expand_dims(axes = row_means_axes_0, x = var_42)[name = tensor<string, []>("row_means")];
|
23 |
+
tensor<fp32, [998, 400]> strided_input_3 = sub(x = sliding_windows_0, y = row_means)[name = tensor<string, []>("strided_input_3")];
|
24 |
+
tensor<int32, [1]> input_1_axes_0 = const()[name = tensor<string, []>("input_1_axes_0"), val = tensor<int32, [1]>([0])];
|
25 |
+
tensor<fp32, [1, 998, 400]> input_1 = expand_dims(axes = input_1_axes_0, x = strided_input_3)[name = tensor<string, []>("input_1")];
|
26 |
+
tensor<fp32, []> const_2 = const()[name = tensor<string, []>("const_2"), val = tensor<fp32, []>(0x0p+0)];
|
27 |
+
tensor<int32, [6]> var_54_pad_0 = const()[name = tensor<string, []>("op_54_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 1, 0])];
|
28 |
+
tensor<string, []> var_54_mode_0 = const()[name = tensor<string, []>("op_54_mode_0"), val = tensor<string, []>("replicate")];
|
29 |
+
tensor<fp32, [1, 998, 401]> var_54 = pad(constant_val = const_2, mode = var_54_mode_0, pad = var_54_pad_0, x = input_1)[name = tensor<string, []>("op_54")];
|
30 |
+
tensor<int32, [1]> offset_strided_input_axes_0 = const()[name = tensor<string, []>("offset_strided_input_axes_0"), val = tensor<int32, [1]>([0])];
|
31 |
+
tensor<fp32, [998, 401]> offset_strided_input = squeeze(axes = offset_strided_input_axes_0, x = var_54)[name = tensor<string, []>("offset_strided_input")];
|
32 |
+
tensor<int32, [2]> var_66_begin_0 = const()[name = tensor<string, []>("op_66_begin_0"), val = tensor<int32, [2]>([0, 0])];
|
33 |
+
tensor<int32, [2]> var_66_end_0 = const()[name = tensor<string, []>("op_66_end_0"), val = tensor<int32, [2]>([998, 400])];
|
34 |
+
tensor<bool, [2]> var_66_end_mask_0 = const()[name = tensor<string, []>("op_66_end_mask_0"), val = tensor<bool, [2]>([true, false])];
|
35 |
+
tensor<fp32, [998, 400]> var_66 = slice_by_index(begin = var_66_begin_0, end = var_66_end_0, end_mask = var_66_end_mask_0, x = offset_strided_input)[name = tensor<string, []>("op_66")];
|
36 |
+
tensor<fp32, []> var_67 = const()[name = tensor<string, []>("op_67"), val = tensor<fp32, []>(0x1.f0a3d8p-1)];
|
37 |
+
tensor<fp32, [998, 400]> var_68 = mul(x = var_66, y = var_67)[name = tensor<string, []>("op_68")];
|
38 |
+
tensor<fp32, [998, 400]> strided_input_5 = sub(x = strided_input_3, y = var_68)[name = tensor<string, []>("strided_input_5")];
|
39 |
+
tensor<fp32, [1, 400]> window_function = const()[name = tensor<string, []>("window_function"), val = tensor<fp32, [1, 400]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
|
40 |
+
tensor<fp32, [998, 400]> strided_input_7 = mul(x = strided_input_5, y = window_function)[name = tensor<string, []>("strided_input_7")];
|
41 |
+
tensor<int32, [1]> input_3_axes_0 = const()[name = tensor<string, []>("input_3_axes_0"), val = tensor<int32, [1]>([0])];
|
42 |
+
tensor<fp32, [1, 998, 400]> input_3 = expand_dims(axes = input_3_axes_0, x = strided_input_7)[name = tensor<string, []>("input_3")];
|
43 |
+
tensor<fp32, []> const_3 = const()[name = tensor<string, []>("const_3"), val = tensor<fp32, []>(0x0p+0)];
|
44 |
+
tensor<int32, [6]> var_90_pad_0 = const()[name = tensor<string, []>("op_90_pad_0"), val = tensor<int32, [6]>([0, 0, 0, 0, 0, 112])];
|
45 |
+
tensor<string, []> var_90_mode_0 = const()[name = tensor<string, []>("op_90_mode_0"), val = tensor<string, []>("constant")];
|
46 |
+
tensor<fp32, [1, 998, 512]> var_90 = pad(constant_val = const_3, mode = var_90_mode_0, pad = var_90_pad_0, x = input_3)[name = tensor<string, []>("op_90")];
|
47 |
+
tensor<int32, [1]> strided_input_axes_0 = const()[name = tensor<string, []>("strided_input_axes_0"), val = tensor<int32, [1]>([0])];
|
48 |
+
tensor<fp32, [998, 512]> strided_input = squeeze(axes = strided_input_axes_0, x = var_90)[name = tensor<string, []>("strided_input")];
|
49 |
+
tensor<fp32, [512, 512]> cos_0 = const()[name = tensor<string, []>("cos_0"), val = tensor<fp32, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1728)))];
|
50 |
+
tensor<fp32, [512, 512]> sin_0 = const()[name = tensor<string, []>("sin_0"), val = tensor<fp32, [512, 512]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1050368)))];
|
51 |
+
tensor<bool, []> matmul_1_transpose_x_1 = const()[name = tensor<string, []>("matmul_1_transpose_x_1"), val = tensor<bool, []>(false)];
|
52 |
+
tensor<bool, []> matmul_1_transpose_y_1 = const()[name = tensor<string, []>("matmul_1_transpose_y_1"), val = tensor<bool, []>(true)];
|
53 |
+
tensor<fp32, [512, 998]> matmul_1 = matmul(transpose_x = matmul_1_transpose_x_1, transpose_y = matmul_1_transpose_y_1, x = cos_0, y = strided_input)[name = tensor<string, []>("matmul_1")];
|
54 |
+
tensor<bool, []> matmul_3_transpose_x_1 = const()[name = tensor<string, []>("matmul_3_transpose_x_1"), val = tensor<bool, []>(false)];
|
55 |
+
tensor<bool, []> matmul_3_transpose_y_1 = const()[name = tensor<string, []>("matmul_3_transpose_y_1"), val = tensor<bool, []>(true)];
|
56 |
+
tensor<fp32, [512, 998]> matmul_3 = matmul(transpose_x = matmul_3_transpose_x_1, transpose_y = matmul_3_transpose_y_1, x = sin_0, y = strided_input)[name = tensor<string, []>("matmul_3")];
|
57 |
+
tensor<fp32, []> mul_1_y_0 = const()[name = tensor<string, []>("mul_1_y_0"), val = tensor<fp32, []>(-0x1p+0)];
|
58 |
+
tensor<fp32, [512, 998]> mul_1 = mul(x = matmul_3, y = mul_1_y_0)[name = tensor<string, []>("mul_1")];
|
59 |
+
tensor<int32, [2]> transpose_3_perm_0 = const()[name = tensor<string, []>("transpose_3_perm_0"), val = tensor<int32, [2]>([-1, 0])];
|
60 |
+
tensor<int32, [2]> transpose_4_perm_0 = const()[name = tensor<string, []>("transpose_4_perm_0"), val = tensor<int32, [2]>([-1, 0])];
|
61 |
+
tensor<int32, [257]> range_1d_2 = const()[name = tensor<string, []>("range_1d_2"), val = tensor<int32, [257]>([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256])];
tensor<int32, []> gather_0_axis_0 = const()[name = tensor<string, []>("gather_0_axis_0"), val = tensor<int32, []>(-1)];
tensor<int32, []> gather_0_batch_dims_0 = const()[name = tensor<string, []>("gather_0_batch_dims_0"), val = tensor<int32, []>(0)];
tensor<fp32, [998, 512]> transpose_3 = transpose(perm = transpose_3_perm_0, x = matmul_1)[name = tensor<string, []>("transpose_6")];
tensor<fp32, [998, 257]> gather_0 = gather(axis = gather_0_axis_0, batch_dims = gather_0_batch_dims_0, indices = range_1d_2, x = transpose_3)[name = tensor<string, []>("gather_0")];
tensor<int32, []> gather_1_axis_0 = const()[name = tensor<string, []>("gather_1_axis_0"), val = tensor<int32, []>(-1)];
tensor<int32, []> gather_1_batch_dims_0 = const()[name = tensor<string, []>("gather_1_batch_dims_0"), val = tensor<int32, []>(0)];
tensor<fp32, [998, 512]> transpose_4 = transpose(perm = transpose_4_perm_0, x = mul_1)[name = tensor<string, []>("transpose_5")];
tensor<fp32, [998, 257]> gather_1 = gather(axis = gather_1_axis_0, batch_dims = gather_1_batch_dims_0, indices = range_1d_2, x = transpose_4)[name = tensor<string, []>("gather_1")];
tensor<fp32, [998, 257]> square_0 = square(x = gather_0)[name = tensor<string, []>("square_0")];
tensor<fp32, [998, 257]> square_1 = square(x = gather_1)[name = tensor<string, []>("square_1")];
tensor<fp32, [998, 257]> add_1 = add(x = square_0, y = square_1)[name = tensor<string, []>("add_1")];
tensor<fp32, [998, 257]> spectrum = identity(x = add_1)[name = tensor<string, []>("spectrum")];
tensor<fp32, [80, 257]> mel_energies_3 = const()[name = tensor<string, []>("mel_energies_3"), val = tensor<fp32, [80, 257]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2099008)))];
tensor<fp32, [80]> mel_energies_bias_0 = const()[name = tensor<string, []>("mel_energies_bias_0"), val = tensor<fp32, [80]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2181312)))];
tensor<fp32, [998, 80]> mel_energies = linear(bias = mel_energies_bias_0, weight = mel_energies_3, x = spectrum)[name = tensor<string, []>("mel_energies")];
tensor<fp32, []> const_10 = const()[name = tensor<string, []>("const_10"), val = tensor<fp32, []>(0x1p-23)];
tensor<fp32, [998, 80]> var_186 = maximum(x = mel_energies, y = const_10)[name = tensor<string, []>("op_186")];
tensor<fp32, []> filter_banks_epsilon_0 = const()[name = tensor<string, []>("filter_banks_epsilon_0"), val = tensor<fp32, []>(0x1p-149)];
tensor<fp32, [998, 80]> filter_banks = log(epsilon = filter_banks_epsilon_0, x = var_186)[name = tensor<string, []>("filter_banks")];
tensor<int32, [1]> var_192_axes_0 = const()[name = tensor<string, []>("op_192_axes_0"), val = tensor<int32, [1]>([0])];
tensor<bool, []> var_192_keep_dims_0 = const()[name = tensor<string, []>("op_192_keep_dims_0"), val = tensor<bool, []>(true)];
tensor<fp32, [1, 80]> var_192 = reduce_mean(axes = var_192_axes_0, keep_dims = var_192_keep_dims_0, x = filter_banks)[name = tensor<string, []>("op_192")];
tensor<fp32, [998, 80]> var_194 = sub(x = filter_banks, y = var_192)[name = tensor<string, []>("op_194")];
tensor<int32, [1]> obj_axes_0 = const()[name = tensor<string, []>("obj_axes_0"), val = tensor<int32, [1]>([0])];
tensor<fp32, [1, 998, 80]> preprocessor_output_1_type_fp32 = expand_dims(axes = obj_axes_0, x = var_194)[name = tensor<string, []>("obj")];
tensor<string, []> cast_9_dtype_0 = const()[name = tensor<string, []>("cast_9_dtype_0"), val = tensor<string, []>("fp16")];
tensor<fp16, [1, 998, 80]> preprocessor_output_1 = cast(dtype = cast_9_dtype_0, x = preprocessor_output_1_type_fp32)[name = tensor<string, []>("cast_10")];
} -> (preprocessor_output_1);
}
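In plain terms, the program above is a Kaldi-style log-mel (fbank) front end: scale the Float16 waveform by 2^15, slice it into 998 frames of 400 samples with a 160-sample hop, remove each frame's mean, apply pre-emphasis with coefficient 0x1.f0a3d8p-1 ≈ 0.97, multiply by the stored 400-point window function, zero-pad each frame to 512 samples, form the 257-bin power spectrum via the cos/sin matmuls, project onto 80 mel filters (the linear op), clamp at 2^-23, take the log, and subtract the per-bin mean over time before emitting a [1, 998, 80] Float16 feature map. The rough NumPy equivalent below is a sketch, not a bit-exact reimplementation: it swaps the cos/sin matrices for np.fft.rfft, and a Hann window plus a librosa mel matrix stand in for the blobs stored in weights/weight.bin.

```python
# Rough NumPy sketch of the MIL preprocessing graph above.
# Assumptions: the real window function and mel filterbank live in weights/weight.bin;
# np.hanning and librosa.filters.mel are stand-ins, so outputs will differ slightly.
import numpy as np
import librosa

def fbank_features(waveforms: np.ndarray) -> np.ndarray:
    """waveforms: shape (1, 160000), 16 kHz mono audio in [-1, 1]."""
    x = waveforms.astype(np.float32)[0] * 32768.0                      # mul by 0x1p+15
    frames = np.lib.stride_tricks.sliding_window_view(x, 400)[::160]   # sliding_windows: 998 x 400
    frames = frames - frames.mean(axis=1, keepdims=True)               # per-frame mean removal
    prev = np.concatenate([frames[:, :1], frames[:, :-1]], axis=1)     # replicate-pad left by one
    frames = frames - 0.97 * prev                                      # pre-emphasis (0x1.f0a3d8p-1)
    frames = frames * np.hanning(400)                                  # stand-in window function
    frames = np.pad(frames, ((0, 0), (0, 112)))                        # zero-pad each frame to 512
    power = np.abs(np.fft.rfft(frames, n=512)) ** 2                    # cos/sin matmuls -> 257 bins
    mel = librosa.filters.mel(sr=16000, n_fft=512, n_mels=80)          # stand-in for the [80, 257] blob
    energies = np.maximum(power @ mel.T, 2.0 ** -23)                   # clamp before log
    feats = np.log(energies)
    feats = feats - feats.mean(axis=0, keepdims=True)                  # per-bin mean norm over time
    return feats[np.newaxis].astype(np.float16)                        # (1, 998, 80)
```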
speaker_embedder/pyannote-v3/W8A16/SpeakerEmbedderPreprocessor.mlmodelc/weights/weight.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5f2c284bd22f1f7ab76901c1c6e57f82d4ebbf057fa0b924aad057f124f77a89
size 2181696
speaker_segmenter/pyannote-v3/W32A32/LICENSE_NOTICE.txt
ADDED
@@ -0,0 +1,7 @@
Argmax proprietary and confidential. Under NDA.

Copyright 2024 Argmax, Inc. All rights reserved.

Unauthorized access, copying, use, distribution, and or commercialization of this file, via any medium or means is strictly prohibited.

Please contact Argmax for licensing information at [email protected].
speaker_segmenter/pyannote-v3/W32A32/README.txt
ADDED
@@ -0,0 +1,6 @@
# License

Original model weights: https://huggingface.co/pyannote/segmentation-3.0/blob/main/LICENSE
Argmax-optimized model asset (Assets with `.mlmodelc` extension): https://huggingface.co/argmaxinc/speakerkit-pro/blob/main/LICENSE_NOTICE.txt

Please contact [email protected] for licensing SpeakerKit Pro assets
speaker_segmenter/pyannote-v3/W32A32/SpeakerSegmenter.mlmodelc/LICENSE_NOTICE.txt
ADDED
@@ -0,0 +1,7 @@
Argmax proprietary and confidential. Under NDA.

Copyright 2024 Argmax, Inc. All rights reserved.

Unauthorized access, copying, use, distribution, and or commercialization of this file, via any medium or means is strictly prohibited.

Please contact Argmax for licensing information at [email protected].
speaker_segmenter/pyannote-v3/{SpeakerSegmenter.mlmodelc → W32A32/SpeakerSegmenter.mlmodelc}/analytics/coremldata.bin
RENAMED
File without changes
speaker_segmenter/pyannote-v3/{SpeakerSegmenter.mlmodelc → W32A32/SpeakerSegmenter.mlmodelc}/coremldata.bin
RENAMED
File without changes
speaker_segmenter/pyannote-v3/{SpeakerSegmenter.mlmodelc → W32A32/SpeakerSegmenter.mlmodelc}/metadata.json
RENAMED
File without changes
speaker_segmenter/pyannote-v3/{SpeakerSegmenter.mlmodelc → W32A32/SpeakerSegmenter.mlmodelc}/model.mil
RENAMED
File without changes
speaker_segmenter/pyannote-v3/{SpeakerSegmenter.mlmodelc → W32A32/SpeakerSegmenter.mlmodelc}/weights/weight.bin
RENAMED
File without changes
speaker_segmenter/pyannote-v3/W8A16/LICENSE_NOTICE.txt
ADDED
@@ -0,0 +1,7 @@
Argmax proprietary and confidential. Under NDA.

Copyright 2024 Argmax, Inc. All rights reserved.

Unauthorized access, copying, use, distribution, and or commercialization of this file, via any medium or means is strictly prohibited.

Please contact Argmax for licensing information at [email protected].
speaker_segmenter/pyannote-v3/W8A16/README.txt
ADDED
@@ -0,0 +1,6 @@
# License

Original model weights: https://huggingface.co/pyannote/segmentation-3.0/blob/main/LICENSE
Argmax-optimized model asset (Assets with `.mlmodelc` extension): https://huggingface.co/argmaxinc/speakerkit-pro/blob/main/LICENSE_NOTICE.txt

Please contact [email protected] for licensing SpeakerKit Pro assets
speaker_segmenter/pyannote-v3/W8A16/SpeakerSegmenter.mlmodelc/analytics/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:40637aa0cb2a073bc303c7ca9ee79da35fa81d2cad1ead180e93b134005b95de
size 243
speaker_segmenter/pyannote-v3/W8A16/SpeakerSegmenter.mlmodelc/coremldata.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6c356ed983b2a3332ce51299ca0f9747a35cb6c2a67b0ac24c69dbef3f989634
size 497
speaker_segmenter/pyannote-v3/W8A16/SpeakerSegmenter.mlmodelc/metadata.json
ADDED
@@ -0,0 +1,133 @@
[
  {
    "metadataOutputVersion" : "3.0",
    "storagePrecision" : "Mixed (Float16, Palettized (8 bits))",
    "outputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 21 × 589 × 3)",
        "shortDescription" : "",
        "shape" : "[21, 589, 3]",
        "name" : "speaker_probs",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 21 × 589 × 3)",
        "shortDescription" : "",
        "shape" : "[21, 589, 3]",
        "name" : "speaker_ids",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 21 × 3)",
        "shortDescription" : "",
        "shape" : "[21, 3]",
        "name" : "speaker_activity",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 21 × 589)",
        "shortDescription" : "",
        "shape" : "[21, 589]",
        "name" : "overlapped_speaker_activity",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 1767)",
        "shortDescription" : "",
        "shape" : "[1767]",
        "name" : "voice_activity",
        "type" : "MultiArray"
      },
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 21 × 1 × 160000)",
        "shortDescription" : "",
        "shape" : "[21, 1, 160000]",
        "name" : "sliding_window_waveform",
        "type" : "MultiArray"
      }
    ],
    "modelParameters" : [

    ],
    "specificationVersion" : 8,
    "mlProgramOperationTypeHistogram" : {
      "Ios17.reduceArgmax" : 1,
      "Ios16.maxPool" : 3,
      "Ios17.slidingWindows" : 1,
      "Ios17.instanceNorm" : 4,
      "Ios17.exp" : 1,
      "Ios16.softmax" : 1,
      "Ios17.scatter" : 42,
      "Ios17.transpose" : 2,
      "Ios17.expandDims" : 1,
      "Ios16.reduceMax" : 1,
      "Ios17.add" : 40,
      "Ios17.sliceByIndex" : 61,
      "Ios16.reduceSum" : 2,
      "Ios17.log" : 1,
      "Ios17.conv" : 3,
      "Ios17.lstm" : 4,
      "Ios16.constexprLutToDense" : 22,
      "OneHot" : 1,
      "Ios17.cast" : 2,
      "Ios17.linear" : 5,
      "Ios17.leakyRelu" : 5,
      "Ios17.abs" : 1,
      "Ios17.realDiv" : 1,
      "Ios17.greater" : 1
    },
    "computePrecision" : "Mixed (Float16, Float32, Int32)",
    "isUpdatable" : "0",
    "stateSchema" : [

    ],
    "availability" : {
      "macOS" : "14.0",
      "tvOS" : "17.0",
      "visionOS" : "1.0",
      "watchOS" : "10.0",
      "iOS" : "17.0",
      "macCatalyst" : "17.0"
    },
    "modelType" : {
      "name" : "MLModelType_mlProgram"
    },
    "userDefinedMetadata" : {
      "com.github.apple.coremltools.source_dialect" : "TorchScript",
      "com.github.apple.coremltools.source" : "torch==2.5.1",
      "com.github.apple.coremltools.version" : "8.1"
    },
    "inputSchema" : [
      {
        "hasShapeFlexibility" : "0",
        "isOptional" : "0",
        "dataType" : "Float16",
        "formattedType" : "MultiArray (Float16 480000)",
        "shortDescription" : "",
        "shape" : "[480000]",
        "name" : "waveform",
        "type" : "MultiArray"
      }
    ],
    "generatedClassName" : "SpeakerSegmenter_8_bit",
    "method" : "predict"
  }
]
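Reading the schema above: the segmenter consumes a fixed 480000-sample Float16 waveform (30 seconds at 16 kHz) and reports results over 21 overlapping 10-second windows; the [21, 1, 160000] sliding_window_waveform output is consistent with a 16000-sample (1-second) hop, since 20 × 16000 + 160000 = 480000. Each window is scored over 589 frames with a last dimension of 3 per the speaker_probs/speaker_ids shapes. A minimal calling sketch, under the same coremltools assumptions as the earlier example (path and zero input are placeholders):

```python
# Minimal sketch (assumption: coremltools >= 7 on macOS; path and values are illustrative).
import numpy as np
import coremltools as ct

segmenter = ct.models.CompiledMLModel(
    "speaker_segmenter/pyannote-v3/W8A16/SpeakerSegmenter.mlmodelc"
)

chunk = np.zeros((480000,), dtype=np.float32)     # exactly 30 s of 16 kHz mono audio
outputs = segmenter.predict({"waveform": chunk})

print(outputs["speaker_probs"].shape)             # (21, 589, 3) per the output schema above
print(outputs["voice_activity"].shape)            # (1767,) per the output schema above
print(outputs["sliding_window_waveform"].shape)   # (21, 1, 160000): 10 s windows for the embedder
```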
speaker_segmenter/pyannote-v3/W8A16/SpeakerSegmenter.mlmodelc/model.mil
ADDED
The diff for this file is too large to render.
See raw diff