Build (aarch64)
- build/torch26-cxx11-cu126-aarch64-linux/quantization_eetq/__init__.py +3 -0
- build/torch26-cxx11-cu126-aarch64-linux/quantization_eetq/_ops.py +9 -0
- build/torch26-cxx11-cu126-aarch64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so +3 -0
- build/torch26-cxx11-cu126-aarch64-linux/quantization_eetq/custom_ops.py +36 -0
- build/torch26-cxx98-cu126-aarch64-linux/quantization_eetq/__init__.py +3 -0
- build/torch26-cxx98-cu126-aarch64-linux/quantization_eetq/_ops.py +9 -0
- build/torch26-cxx98-cu126-aarch64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so +3 -0
- build/torch26-cxx98-cu126-aarch64-linux/quantization_eetq/custom_ops.py +36 -0
- build/torch27-cxx11-cu126-aarch64-linux/quantization_eetq/__init__.py +3 -0
- build/torch27-cxx11-cu126-aarch64-linux/quantization_eetq/_ops.py +9 -0
- build/torch27-cxx11-cu126-aarch64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so +3 -0
- build/torch27-cxx11-cu126-aarch64-linux/quantization_eetq/custom_ops.py +36 -0
- build/torch27-cxx11-cu128-aarch64-linux/quantization_eetq/__init__.py +3 -0
- build/torch27-cxx11-cu128-aarch64-linux/quantization_eetq/_ops.py +9 -0
- build/torch27-cxx11-cu128-aarch64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so +3 -0
- build/torch27-cxx11-cu128-aarch64-linux/quantization_eetq/custom_ops.py +36 -0
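Each build-variant directory name encodes the target toolchain: PyTorch version (torch26/torch27), libstdc++ ABI (cxx11 for the C++11 ABI, cxx98 for the pre-C++11 ABI), CUDA version (cu126/cu128), and platform (aarch64-linux). The .abi3.so entries are Git LFS pointers to the compiled extension, so only the pointer metadata appears in the hunks below.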
build/torch26-cxx11-cu126-aarch64-linux/quantization_eetq/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch26-cxx11-cu126-aarch64-linux/quantization_eetq/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_f87291e
+ops = torch.ops._quantization_eetq_f87291e
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_f87291e::{op_name}"
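The _ops.py shim binds the version-suffixed native library to a stable ops handle and exposes add_op_namespace_prefix for building fully qualified operator names. A minimal sketch of what the shim provides (assuming the package is importable so the native extension loads):

import quantization_eetq  # importing the package loads the native extension
from quantization_eetq._ops import ops, add_op_namespace_prefix

# Stable handle into the version-suffixed torch.ops namespace.
gemm = ops.w8_a16_gemm  # the raw native op behind custom_ops.w8_a16_gemm

# Fully qualified operator name, e.g. for torch.library lookups.
name = add_op_namespace_prefix("w8_a16_gemm")
assert name == "_quantization_eetq_f87291e::w8_a16_gemm"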
build/torch26-cxx11-cu126-aarch64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2b8b5343121a6ce83d7730e31913ed787ce1ebacd7bfb562ac9bab5fcbd4f7e
+size 26495968
build/torch26-cxx11-cu126-aarch64-linux/quantization_eetq/custom_ops.py
ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )
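For context, a hedged end-to-end sketch of the W8A16 flow these wrappers support: quantize an fp16 weight to int8 with quant_weights, then run the mixed-precision GEMM with w8_a16_gemm. The transpose convention, return values, and dtypes below follow common EETQ usage and are assumptions, not guarantees made by this commit:

import torch
from quantization_eetq import quant_weights, w8_a16_gemm

n, k, m = 4096, 4096, 8  # hypothetical layer sizes
weight = torch.randn(n, k, dtype=torch.float16)  # (out_features, in_features)

# Assumed convention: quantize the transposed, contiguous CPU weight;
# returns the packed int8 weight and per-channel fp16 scales.
qweight, scales = quant_weights(weight.t().contiguous().cpu(), torch.int8, False)

x = torch.randn(m, k, dtype=torch.float16, device="cuda")
y = w8_a16_gemm(x, qweight.cuda(), scales.cuda())  # assumed fp16 output, (m, n)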
build/torch26-cxx98-cu126-aarch64-linux/quantization_eetq/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch26-cxx98-cu126-aarch64-linux/quantization_eetq/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_f87291e
+ops = torch.ops._quantization_eetq_f87291e
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_f87291e::{op_name}"
build/torch26-cxx98-cu126-aarch64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0c6116e6e6fb38a5925a055966845b52920ca58e77f8a2bc097d22d82a45723
+size 26757632
build/torch26-cxx98-cu126-aarch64-linux/quantization_eetq/custom_ops.py
ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )
build/torch27-cxx11-cu126-aarch64-linux/quantization_eetq/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch27-cxx11-cu126-aarch64-linux/quantization_eetq/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_f87291e
+ops = torch.ops._quantization_eetq_f87291e
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_f87291e::{op_name}"
build/torch27-cxx11-cu126-aarch64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a73dd746900ddb1570192c2cb50736aa56f8854a1dcd0d84c746138217cd82b
+size 26496176
build/torch27-cxx11-cu126-aarch64-linux/quantization_eetq/custom_ops.py
ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )
build/torch27-cxx11-cu128-aarch64-linux/quantization_eetq/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch27-cxx11-cu128-aarch64-linux/quantization_eetq/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_f87291e
+ops = torch.ops._quantization_eetq_f87291e
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_f87291e::{op_name}"
build/torch27-cxx11-cu128-aarch64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4d40830eefb300f3c6e042e68baccf936811f3ea6de26f54d26b602b96e6e29
+size 27936256
build/torch27-cxx11-cu128-aarch64-linux/quantization_eetq/custom_ops.py
ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )