danieldk (HF Staff) committed
Commit dffc953 · 1 Parent(s): f87291e
Files changed (44)
  1. build/torch25-cxx11-cu118-x86_64-linux/quantization_eetq/_ops.py +3 -3
  2. build/torch25-cxx11-cu118-x86_64-linux/quantization_eetq/{_quantization_eetq_v7rnpcck3kry4.abi3.so → _quantization_eetq_f87291e.abi3.so} +2 -2
  3. build/torch25-cxx11-cu121-x86_64-linux/quantization_eetq/_ops.py +3 -3
  4. build/torch25-cxx11-cu121-x86_64-linux/quantization_eetq/{_quantization_eetq_zcfiojfkx55be.abi3.so → _quantization_eetq_f87291e.abi3.so} +2 -2
  5. build/torch25-cxx11-cu124-x86_64-linux/quantization_eetq/_ops.py +3 -3
  6. build/torch25-cxx11-cu124-x86_64-linux/quantization_eetq/{_quantization_eetq_btymam4x7xvs6.abi3.so → _quantization_eetq_f87291e.abi3.so} +2 -2
  7. build/torch25-cxx98-cu118-x86_64-linux/quantization_eetq/_ops.py +3 -3
  8. build/torch25-cxx98-cu118-x86_64-linux/quantization_eetq/{_quantization_eetq_yy3p6bsf622sq.abi3.so → _quantization_eetq_f87291e.abi3.so} +2 -2
  9. build/torch25-cxx98-cu121-x86_64-linux/quantization_eetq/_ops.py +3 -3
  10. build/torch25-cxx98-cu121-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so +3 -0
  11. build/torch25-cxx98-cu121-x86_64-linux/quantization_eetq/_quantization_eetq_imijtykkseqze.abi3.so +0 -3
  12. build/torch25-cxx98-cu124-x86_64-linux/quantization_eetq/_ops.py +3 -3
  13. build/torch25-cxx98-cu124-x86_64-linux/quantization_eetq/_quantization_eetq_4qerj3t7ddiry.abi3.so +0 -3
  14. build/torch25-cxx98-cu124-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so +3 -0
  15. build/torch26-cxx11-cu118-x86_64-linux/quantization_eetq/_ops.py +3 -3
  16. build/torch26-cxx11-cu118-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so +3 -0
  17. build/torch26-cxx11-cu118-x86_64-linux/quantization_eetq/_quantization_eetq_j23ltbqvrnixg.abi3.so +0 -3
  18. build/torch26-cxx11-cu124-x86_64-linux/quantization_eetq/_ops.py +3 -3
  19. build/torch26-cxx11-cu124-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so +3 -0
  20. build/torch26-cxx11-cu124-x86_64-linux/quantization_eetq/_quantization_eetq_p5neqtnhdgxv2.abi3.so +0 -3
  21. build/torch26-cxx11-cu126-x86_64-linux/quantization_eetq/_ops.py +3 -3
  22. build/torch26-cxx11-cu126-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so +3 -0
  23. build/torch26-cxx11-cu126-x86_64-linux/quantization_eetq/_quantization_eetq_idk3dezy35dfk.abi3.so +0 -3
  24. build/torch26-cxx98-cu118-x86_64-linux/quantization_eetq/_ops.py +3 -3
  25. build/torch26-cxx98-cu118-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so +3 -0
  26. build/torch26-cxx98-cu118-x86_64-linux/quantization_eetq/_quantization_eetq_fpjoxzd7nm2qa.abi3.so +0 -3
  27. build/torch26-cxx98-cu124-x86_64-linux/quantization_eetq/_ops.py +3 -3
  28. build/torch26-cxx98-cu124-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so +3 -0
  29. build/torch26-cxx98-cu124-x86_64-linux/quantization_eetq/_quantization_eetq_k7mlunxe2ye4s.abi3.so +0 -3
  30. build/torch26-cxx98-cu126-x86_64-linux/quantization_eetq/_ops.py +3 -3
  31. build/torch26-cxx98-cu126-x86_64-linux/quantization_eetq/_quantization_eetq_7m7hz3sbwkaio.abi3.so +0 -3
  32. build/torch26-cxx98-cu126-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so +3 -0
  33. build/torch27-cxx11-cu118-x86_64-linux/quantization_eetq/__init__.py +3 -0
  34. build/torch27-cxx11-cu118-x86_64-linux/quantization_eetq/_ops.py +9 -0
  35. build/torch27-cxx11-cu118-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so +3 -0
  36. build/torch27-cxx11-cu118-x86_64-linux/quantization_eetq/custom_ops.py +36 -0
  37. build/torch27-cxx11-cu126-x86_64-linux/quantization_eetq/__init__.py +3 -0
  38. build/torch27-cxx11-cu126-x86_64-linux/quantization_eetq/_ops.py +9 -0
  39. build/torch27-cxx11-cu126-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so +3 -0
  40. build/torch27-cxx11-cu126-x86_64-linux/quantization_eetq/custom_ops.py +36 -0
  41. build/torch27-cxx11-cu128-x86_64-linux/quantization_eetq/__init__.py +3 -0
  42. build/torch27-cxx11-cu128-x86_64-linux/quantization_eetq/_ops.py +9 -0
  43. build/torch27-cxx11-cu128-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so +3 -0
  44. build/torch27-cxx11-cu128-x86_64-linux/quantization_eetq/custom_ops.py +36 -0
build/torch25-cxx11-cu118-x86_64-linux/quantization_eetq/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_eetq_v7rnpcck3kry4
-ops = torch.ops._quantization_eetq_v7rnpcck3kry4
+from . import _quantization_eetq_f87291e
+ops = torch.ops._quantization_eetq_f87291e

 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_eetq_v7rnpcck3kry4::{op_name}"
+    return f"_quantization_eetq_f87291e::{op_name}"
build/torch25-cxx11-cu118-x86_64-linux/quantization_eetq/{_quantization_eetq_v7rnpcck3kry4.abi3.so → _quantization_eetq_f87291e.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ff073c997a4857cf7f88f5ca5b6a0e0aed3feb9739ba4187a3c1ec6fd2f1b64b
-size 28364752
+oid sha256:74d2a682ad383867f73c387b384877e0b516ec0c45aac25aab21245d8d190fb1
+size 26992568
build/torch25-cxx11-cu121-x86_64-linux/quantization_eetq/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_eetq_zcfiojfkx55be
-ops = torch.ops._quantization_eetq_zcfiojfkx55be
+from . import _quantization_eetq_f87291e
+ops = torch.ops._quantization_eetq_f87291e

 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_eetq_zcfiojfkx55be::{op_name}"
+    return f"_quantization_eetq_f87291e::{op_name}"
build/torch25-cxx11-cu121-x86_64-linux/quantization_eetq/{_quantization_eetq_zcfiojfkx55be.abi3.so → _quantization_eetq_f87291e.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:93f7cce612f7efed34ccfadc57ca6a17899feba7ba4780f33d4ff8f828171bfc
-size 27919784
+oid sha256:0ac014bd6e28e272aceebaf3be8a751945f555b6c765f742713b8d5457928d4a
+size 26596752
build/torch25-cxx11-cu124-x86_64-linux/quantization_eetq/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_eetq_btymam4x7xvs6
-ops = torch.ops._quantization_eetq_btymam4x7xvs6
+from . import _quantization_eetq_f87291e
+ops = torch.ops._quantization_eetq_f87291e

 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_eetq_btymam4x7xvs6::{op_name}"
+    return f"_quantization_eetq_f87291e::{op_name}"
build/torch25-cxx11-cu124-x86_64-linux/quantization_eetq/{_quantization_eetq_btymam4x7xvs6.abi3.so → _quantization_eetq_f87291e.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f3e693ee1f395169b985b0dadab379cc5d2be0858ebd0caa3dedc5c6b8bd7a7d
-size 27950768
+oid sha256:496286ab835ca1c192db340e5ce1b93bff77498ac6a05b60fb9ba458a5123b6a
+size 26652304
build/torch25-cxx98-cu118-x86_64-linux/quantization_eetq/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_eetq_yy3p6bsf622sq
-ops = torch.ops._quantization_eetq_yy3p6bsf622sq
+from . import _quantization_eetq_f87291e
+ops = torch.ops._quantization_eetq_f87291e

 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_eetq_yy3p6bsf622sq::{op_name}"
+    return f"_quantization_eetq_f87291e::{op_name}"
build/torch25-cxx98-cu118-x86_64-linux/quantization_eetq/{_quantization_eetq_yy3p6bsf622sq.abi3.so → _quantization_eetq_f87291e.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e3c69a2ef282a7d50795a5656514227bbdef8e5153a2bbd2f277c78ea39de4cd
-size 28368280
+oid sha256:74b06278488042b55657c1fbe64f9a257e31834a0bc5a92ae958059ba3f97d2d
+size 26996096
build/torch25-cxx98-cu121-x86_64-linux/quantization_eetq/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_eetq_imijtykkseqze
-ops = torch.ops._quantization_eetq_imijtykkseqze
+from . import _quantization_eetq_f87291e
+ops = torch.ops._quantization_eetq_f87291e

 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_eetq_imijtykkseqze::{op_name}"
+    return f"_quantization_eetq_f87291e::{op_name}"
build/torch25-cxx98-cu121-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eabd50837cbfe9bb85002713b760e36751f670361f525040cfc624da307d6747
+size 26740496
build/torch25-cxx98-cu121-x86_64-linux/quantization_eetq/_quantization_eetq_imijtykkseqze.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b4111fb3304cba2975f11275f8444505208d4fc4b31da4ff3e3d508a3838ef64
-size 28063536
build/torch25-cxx98-cu124-x86_64-linux/quantization_eetq/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_eetq_4qerj3t7ddiry
-ops = torch.ops._quantization_eetq_4qerj3t7ddiry
+from . import _quantization_eetq_f87291e
+ops = torch.ops._quantization_eetq_f87291e

 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_eetq_4qerj3t7ddiry::{op_name}"
+    return f"_quantization_eetq_f87291e::{op_name}"
build/torch25-cxx98-cu124-x86_64-linux/quantization_eetq/_quantization_eetq_4qerj3t7ddiry.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:933e5f40627ba92496a0fbd14c4a602d0ea81cbb66ec2e4e870e692d87601b3f
-size 28110408
build/torch25-cxx98-cu124-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2398f767ca6a630f56bc784fd08871a52da5a9cdf75c4f1ca424cc50c3ce8eba
+size 26811952
build/torch26-cxx11-cu118-x86_64-linux/quantization_eetq/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_eetq_j23ltbqvrnixg
-ops = torch.ops._quantization_eetq_j23ltbqvrnixg
+from . import _quantization_eetq_f87291e
+ops = torch.ops._quantization_eetq_f87291e

 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_eetq_j23ltbqvrnixg::{op_name}"
+    return f"_quantization_eetq_f87291e::{op_name}"
build/torch26-cxx11-cu118-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9421a4aab983eed100491cb55964465c3c34b26b32c003e3ff7529114315783e
+size 26996960
build/torch26-cxx11-cu118-x86_64-linux/quantization_eetq/_quantization_eetq_j23ltbqvrnixg.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9de877da7bf0e968bb9d307539860efa86b12bcd15ac017acee2377f0241495a
-size 28369152
build/torch26-cxx11-cu124-x86_64-linux/quantization_eetq/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_eetq_p5neqtnhdgxv2
-ops = torch.ops._quantization_eetq_p5neqtnhdgxv2
+from . import _quantization_eetq_f87291e
+ops = torch.ops._quantization_eetq_f87291e

 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_eetq_p5neqtnhdgxv2::{op_name}"
+    return f"_quantization_eetq_f87291e::{op_name}"
build/torch26-cxx11-cu124-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e44c7c996edfb2c6855e3ecad13ba7359bc6d045db0b7054cd69b1d5473eee8e
+size 26652600
build/torch26-cxx11-cu124-x86_64-linux/quantization_eetq/_quantization_eetq_p5neqtnhdgxv2.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:20e49f06c5ea24b8f93e1d1200b87ab284cbdbab87495e3ae34a162375b5634e
-size 27951064
build/torch26-cxx11-cu126-x86_64-linux/quantization_eetq/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_eetq_idk3dezy35dfk
-ops = torch.ops._quantization_eetq_idk3dezy35dfk
+from . import _quantization_eetq_f87291e
+ops = torch.ops._quantization_eetq_f87291e

 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_eetq_idk3dezy35dfk::{op_name}"
+    return f"_quantization_eetq_f87291e::{op_name}"
build/torch26-cxx11-cu126-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f0fda311dce4ca05b8349027876ae4b03ff98f40cbfc5660e4c91c010267248
+size 26679576
build/torch26-cxx11-cu126-x86_64-linux/quantization_eetq/_quantization_eetq_idk3dezy35dfk.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0da474df2d93afd24827cd9ce04844e32bd0d742cbf60504c14f670e884ddc3c
-size 27998512
build/torch26-cxx98-cu118-x86_64-linux/quantization_eetq/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_eetq_fpjoxzd7nm2qa
-ops = torch.ops._quantization_eetq_fpjoxzd7nm2qa
+from . import _quantization_eetq_f87291e
+ops = torch.ops._quantization_eetq_f87291e

 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_eetq_fpjoxzd7nm2qa::{op_name}"
+    return f"_quantization_eetq_f87291e::{op_name}"
build/torch26-cxx98-cu118-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9405bf1e69ebd59861db0a5ad593e046d2c4c13df629ed847649a001ece6e5bc
+size 26992344
build/torch26-cxx98-cu118-x86_64-linux/quantization_eetq/_quantization_eetq_fpjoxzd7nm2qa.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7fb594a0a3bcca4af967680d7b5865e6fcbed12bb9d59f8374bf4601516f7bc8
-size 28364528
build/torch26-cxx98-cu124-x86_64-linux/quantization_eetq/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_eetq_k7mlunxe2ye4s
-ops = torch.ops._quantization_eetq_k7mlunxe2ye4s
+from . import _quantization_eetq_f87291e
+ops = torch.ops._quantization_eetq_f87291e

 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_eetq_k7mlunxe2ye4s::{op_name}"
+    return f"_quantization_eetq_f87291e::{op_name}"
build/torch26-cxx98-cu124-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db5ace1e9459c3f50259b285072d957fe2799ca8487d6aa8f23d8e5ce1be2b69
+size 26816344
build/torch26-cxx98-cu124-x86_64-linux/quantization_eetq/_quantization_eetq_k7mlunxe2ye4s.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:865d3f89b2b8bbe79c7493416999cda076a106f0b3ecb5feb2664a2dee4d0fa5
-size 28114800
build/torch26-cxx98-cu126-x86_64-linux/quantization_eetq/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_eetq_7m7hz3sbwkaio
-ops = torch.ops._quantization_eetq_7m7hz3sbwkaio
+from . import _quantization_eetq_f87291e
+ops = torch.ops._quantization_eetq_f87291e

 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_eetq_7m7hz3sbwkaio::{op_name}"
+    return f"_quantization_eetq_f87291e::{op_name}"
build/torch26-cxx98-cu126-x86_64-linux/quantization_eetq/_quantization_eetq_7m7hz3sbwkaio.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f443eff55bcac99507a8a622333461922f6bcee610cab9334882628e58a54da6
-size 28162256
build/torch26-cxx98-cu126-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:636713866475cbcaa25d7bbf50b647dc834413467578c7780d66f40fdad79f40
+size 26843312
build/torch27-cxx11-cu118-x86_64-linux/quantization_eetq/__init__.py ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch27-cxx11-cu118-x86_64-linux/quantization_eetq/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_f87291e
+ops = torch.ops._quantization_eetq_f87291e
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_f87291e::{op_name}"
build/torch27-cxx11-cu118-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec3c04a6ac40bcbc963fcbbffcef5be3db446b1556f20bfda3b22e96841aac70
+size 26993040
build/torch27-cxx11-cu118-x86_64-linux/quantization_eetq/custom_ops.py ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )
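
The three `custom_ops.py` copies added for the torch27 variants are identical thin wrappers and form the package's public API (re-exported by `__init__.py`). A rough usage sketch, not part of the commit, assuming upstream EETQ semantics where `quant_weights(w, torch.int8, False)` returns the preprocessed int8 weight together with per-channel fp16 scales that `w8_a16_gemm` consumes; shapes and device handling are illustrative:

import torch
from quantization_eetq import quant_weights, w8_a16_gemm

# fp16 activations and an fp16 weight to quantize ahead of time.
x = torch.randn(8, 4096, dtype=torch.float16, device="cuda")
w = torch.randn(4096, 4096, dtype=torch.float16)  # quantized on CPU, then moved to GPU

# Per-channel int8 quantization; the returned weight is assumed to come back
# already laid out for the weight-only GEMM kernel (EETQ behaviour).
w_q, scales = quant_weights(w, torch.int8, False)

# y is approximately x @ dequant(w_q, scales), computed by the
# int8-weight / fp16-activation kernel.
y = w8_a16_gemm(x, w_q.cuda(), scales.cuda())
print(y.shape)  # torch.Size([8, 4096])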
build/torch27-cxx11-cu126-x86_64-linux/quantization_eetq/__init__.py ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch27-cxx11-cu126-x86_64-linux/quantization_eetq/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_f87291e
+ops = torch.ops._quantization_eetq_f87291e
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_f87291e::{op_name}"
build/torch27-cxx11-cu126-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d5c62fa45dce86379638d29d692561b332b477875bff509da3c58dd634ca65e5
+size 26679608
build/torch27-cxx11-cu126-x86_64-linux/quantization_eetq/custom_ops.py ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )
build/torch27-cxx11-cu128-x86_64-linux/quantization_eetq/__init__.py ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch27-cxx11-cu128-x86_64-linux/quantization_eetq/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_f87291e
+ops = torch.ops._quantization_eetq_f87291e
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_f87291e::{op_name}"
build/torch27-cxx11-cu128-x86_64-linux/quantization_eetq/_quantization_eetq_f87291e.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:777d5f3f582112682699ccb9570fedf5c0d5efb2ba8cdded2f680282cf802766
+size 28052048
build/torch27-cxx11-cu128-x86_64-linux/quantization_eetq/custom_ops.py ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )