danieldk committed
Commit 329999a · 1 Parent(s): b2e431d
Files changed (42)
  1. build/torch24-cxx11-cu118-x86_64-linux/quantization/__init__.py +7 -0
  2. build/torch24-cxx11-cu118-x86_64-linux/quantization/_ops.py +3 -3
  3. build/torch24-cxx11-cu118-x86_64-linux/quantization/{_quantization_rte56vgl6pwy2.abi3.so → _quantization_nikdbevszxcfa.abi3.so} +1 -1
  4. build/torch24-cxx11-cu121-x86_64-linux/quantization/__init__.py +7 -0
  5. build/torch24-cxx11-cu121-x86_64-linux/quantization/_ops.py +3 -3
  6. build/{torch25-cxx11-cu121-x86_64-linux/quantization/_quantization_psmex5aqqy2hw.abi3.so → torch24-cxx11-cu121-x86_64-linux/quantization/_quantization_uqov56fwnow5m.abi3.so} +1 -1
  7. build/torch24-cxx11-cu124-x86_64-linux/quantization/__init__.py +7 -0
  8. build/torch24-cxx11-cu124-x86_64-linux/quantization/_ops.py +3 -3
  9. build/torch24-cxx11-cu124-x86_64-linux/quantization/{_quantization_wncmj6f5msx4s.abi3.so → _quantization_fsprdzcdnww3s.abi3.so} +1 -1
  10. build/torch24-cxx98-cu118-x86_64-linux/quantization/__init__.py +7 -0
  11. build/torch24-cxx98-cu118-x86_64-linux/quantization/_ops.py +3 -3
  12. build/torch24-cxx98-cu118-x86_64-linux/quantization/{_quantization_ysomlsabnzmcy.abi3.so → _quantization_3atrjdvtz7xxc.abi3.so} +1 -1
  13. build/torch24-cxx98-cu121-x86_64-linux/quantization/__init__.py +7 -0
  14. build/torch24-cxx98-cu121-x86_64-linux/quantization/_ops.py +3 -3
  15. build/torch24-cxx98-cu121-x86_64-linux/quantization/{_quantization_fdmxhndozpkiy.abi3.so → _quantization_ni5tfi7up4a5a.abi3.so} +1 -1
  16. build/torch24-cxx98-cu124-x86_64-linux/quantization/__init__.py +7 -0
  17. build/torch24-cxx98-cu124-x86_64-linux/quantization/_ops.py +3 -3
  18. build/torch24-cxx98-cu124-x86_64-linux/quantization/_quantization_fczw55tnfikom.abi3.so +0 -3
  19. build/{torch24-cxx11-cu121-x86_64-linux/quantization/_quantization_bjzgrvvxc7yoc.abi3.so → torch24-cxx98-cu124-x86_64-linux/quantization/_quantization_ydkmqrh62v5ym.abi3.so} +2 -2
  20. build/torch25-cxx11-cu118-x86_64-linux/quantization/__init__.py +7 -0
  21. build/torch25-cxx11-cu118-x86_64-linux/quantization/_ops.py +3 -3
  22. build/torch25-cxx11-cu118-x86_64-linux/quantization/_quantization_2upd4ndb7oxlw.abi3.so +3 -0
  23. build/torch25-cxx11-cu118-x86_64-linux/quantization/_quantization_svdmwkynmr5iq.abi3.so +0 -3
  24. build/torch25-cxx11-cu121-x86_64-linux/quantization/__init__.py +7 -0
  25. build/torch25-cxx11-cu121-x86_64-linux/quantization/_ops.py +3 -3
  26. build/torch25-cxx11-cu121-x86_64-linux/quantization/_quantization_srrzik3uw7tac.abi3.so +3 -0
  27. build/torch25-cxx11-cu124-x86_64-linux/quantization/__init__.py +7 -0
  28. build/torch25-cxx11-cu124-x86_64-linux/quantization/_ops.py +3 -3
  29. build/torch25-cxx11-cu124-x86_64-linux/quantization/_quantization_jotyxqrlkrs6e.abi3.so +3 -0
  30. build/torch25-cxx11-cu124-x86_64-linux/quantization/_quantization_weycjcyvicgdu.abi3.so +0 -3
  31. build/torch25-cxx98-cu118-x86_64-linux/quantization/__init__.py +7 -0
  32. build/torch25-cxx98-cu118-x86_64-linux/quantization/_ops.py +3 -3
  33. build/torch25-cxx98-cu118-x86_64-linux/quantization/_quantization_nxw74vvv2coka.abi3.so +3 -0
  34. build/torch25-cxx98-cu118-x86_64-linux/quantization/_quantization_p475gzgjpctze.abi3.so +0 -3
  35. build/torch25-cxx98-cu121-x86_64-linux/quantization/__init__.py +7 -0
  36. build/torch25-cxx98-cu121-x86_64-linux/quantization/_ops.py +3 -3
  37. build/torch25-cxx98-cu121-x86_64-linux/quantization/_quantization_6q7kuci5ztlzi.abi3.so +0 -3
  38. build/torch25-cxx98-cu121-x86_64-linux/quantization/_quantization_7fvkxfdjondcu.abi3.so +3 -0
  39. build/torch25-cxx98-cu124-x86_64-linux/quantization/__init__.py +7 -0
  40. build/torch25-cxx98-cu124-x86_64-linux/quantization/_ops.py +3 -3
  41. build/torch25-cxx98-cu124-x86_64-linux/quantization/_quantization_cf2wponrik3xk.abi3.so +3 -0
  42. build/torch25-cxx98-cu124-x86_64-linux/quantization/_quantization_j2g4sezvqowbi.abi3.so +0 -3
build/torch24-cxx11-cu118-x86_64-linux/quantization/__init__.py CHANGED
@@ -13,9 +13,15 @@ from .marlin import (
     marlin_qqq_gemm,
     marlin_gemm,
 )
+from .scalar_type import (
+    ScalarType,
+    scalar_types,
+)
 from ._ops import ops
 
+
 __all__ = [
+    "ScalarType",
     "awq_marlin_repack",
     "cutlass_scaled_mm",
     "cutlass_scaled_mm_azp",
@@ -27,6 +33,7 @@ __all__ = [
     "marlin_gemm",
     "marlin_qqq_gemm",
     "ops",
+    "scalar_types",
     "scaled_fp8_quant",
     "scaled_int8_quant",
 ]
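This `__init__.py` change is repeated verbatim for every build variant below: it re-exports `ScalarType` and `scalar_types` from the bundled `scalar_type` module. A minimal usage sketch follows; the import path and the `uint4b8`/`size_bits` names mirror the vLLM-style scalar-type API and are assumptions, not taken from this diff:

```python
# Sketch only; assumes a vLLM-style scalar_type API in which
# `scalar_types` exposes predefined ScalarType instances as attributes.
from quantization import ScalarType, scalar_types  # package assumed importable

wtype = scalar_types.uint4b8   # assumed: predefined 4-bit unsigned type, bias 8
assert isinstance(wtype, ScalarType)
print(wtype.size_bits)         # assumed attribute: bit width of the type
```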
build/torch24-cxx11-cu118-x86_64-linux/quantization/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_rte56vgl6pwy2
-ops = torch.ops._quantization_rte56vgl6pwy2
+from . import _quantization_nikdbevszxcfa
+ops = torch.ops._quantization_nikdbevszxcfa
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_rte56vgl6pwy2::{op_name}"
+    return f"_quantization_nikdbevszxcfa::{op_name}"
build/torch24-cxx11-cu118-x86_64-linux/quantization/{_quantization_rte56vgl6pwy2.abi3.so → _quantization_nikdbevszxcfa.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:979af121eb31f81b7b44446ed0ededc0ce6e7076cf9ba9a8bf27fa8ee59d9cf0
+oid sha256:0185299e3f54c083e9c72372faae69c2340542260532313ef8648aa25eee6e13
 size 87696688
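The `.abi3.so` entries in this commit are Git LFS pointer files, so a rebuilt binary shows up as a one-line `oid` change (the `size` stays the same when the binary length is unchanged). A small sketch, assuming a locally materialized checkout, for verifying a downloaded shared object against the pointer's `oid`:

```python
# Sketch: compare a local file's SHA-256 against the LFS pointer oid above.
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
    return h.hexdigest()

so_path = "build/torch24-cxx11-cu118-x86_64-linux/quantization/_quantization_nikdbevszxcfa.abi3.so"
expected = "0185299e3f54c083e9c72372faae69c2340542260532313ef8648aa25eee6e13"
# assert sha256_of(so_path) == expected  # holds only for the LFS-smudged file
```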
build/torch24-cxx11-cu121-x86_64-linux/quantization/__init__.py CHANGED
@@ -13,9 +13,15 @@ from .marlin import (
     marlin_qqq_gemm,
     marlin_gemm,
 )
+from .scalar_type import (
+    ScalarType,
+    scalar_types,
+)
 from ._ops import ops
 
+
 __all__ = [
+    "ScalarType",
     "awq_marlin_repack",
     "cutlass_scaled_mm",
     "cutlass_scaled_mm_azp",
@@ -27,6 +33,7 @@ __all__ = [
     "marlin_gemm",
     "marlin_qqq_gemm",
     "ops",
+    "scalar_types",
     "scaled_fp8_quant",
     "scaled_int8_quant",
 ]
build/torch24-cxx11-cu121-x86_64-linux/quantization/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_bjzgrvvxc7yoc
-ops = torch.ops._quantization_bjzgrvvxc7yoc
+from . import _quantization_uqov56fwnow5m
+ops = torch.ops._quantization_uqov56fwnow5m
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_bjzgrvvxc7yoc::{op_name}"
+    return f"_quantization_uqov56fwnow5m::{op_name}"
build/{torch25-cxx11-cu121-x86_64-linux/quantization/_quantization_psmex5aqqy2hw.abi3.so → torch24-cxx11-cu121-x86_64-linux/quantization/_quantization_uqov56fwnow5m.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ded817c3f20400e3ba45a3ac2bf2771f8f987c45b58a7625a7633a8b73959d58
+oid sha256:0d66e42d9a808218d02d852565fcc0762ed4bcccf588dbaadce388199a114102
 size 89323344
build/torch24-cxx11-cu124-x86_64-linux/quantization/__init__.py CHANGED
@@ -13,9 +13,15 @@ from .marlin import (
     marlin_qqq_gemm,
     marlin_gemm,
 )
+from .scalar_type import (
+    ScalarType,
+    scalar_types,
+)
 from ._ops import ops
 
+
 __all__ = [
+    "ScalarType",
     "awq_marlin_repack",
     "cutlass_scaled_mm",
     "cutlass_scaled_mm_azp",
@@ -27,6 +33,7 @@ __all__ = [
     "marlin_gemm",
     "marlin_qqq_gemm",
     "ops",
+    "scalar_types",
     "scaled_fp8_quant",
     "scaled_int8_quant",
 ]
build/torch24-cxx11-cu124-x86_64-linux/quantization/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_wncmj6f5msx4s
-ops = torch.ops._quantization_wncmj6f5msx4s
+from . import _quantization_fsprdzcdnww3s
+ops = torch.ops._quantization_fsprdzcdnww3s
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_wncmj6f5msx4s::{op_name}"
+    return f"_quantization_fsprdzcdnww3s::{op_name}"
build/torch24-cxx11-cu124-x86_64-linux/quantization/{_quantization_wncmj6f5msx4s.abi3.so → _quantization_fsprdzcdnww3s.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:222d9473bc56a309dbd912533c6fc204ac9816c5e0e3b8a6f7440db84d400853
+oid sha256:134b41c03ea27aff03837f6fe7ad3cb3515709bcb93f2df6beccd8394b2d9db8
 size 92015360
build/torch24-cxx98-cu118-x86_64-linux/quantization/__init__.py CHANGED
@@ -13,9 +13,15 @@ from .marlin import (
     marlin_qqq_gemm,
     marlin_gemm,
 )
+from .scalar_type import (
+    ScalarType,
+    scalar_types,
+)
 from ._ops import ops
 
+
 __all__ = [
+    "ScalarType",
     "awq_marlin_repack",
     "cutlass_scaled_mm",
     "cutlass_scaled_mm_azp",
@@ -27,6 +33,7 @@ __all__ = [
     "marlin_gemm",
     "marlin_qqq_gemm",
     "ops",
+    "scalar_types",
     "scaled_fp8_quant",
     "scaled_int8_quant",
 ]
build/torch24-cxx98-cu118-x86_64-linux/quantization/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_ysomlsabnzmcy
-ops = torch.ops._quantization_ysomlsabnzmcy
+from . import _quantization_3atrjdvtz7xxc
+ops = torch.ops._quantization_3atrjdvtz7xxc
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_ysomlsabnzmcy::{op_name}"
+    return f"_quantization_3atrjdvtz7xxc::{op_name}"
build/torch24-cxx98-cu118-x86_64-linux/quantization/{_quantization_ysomlsabnzmcy.abi3.so → _quantization_3atrjdvtz7xxc.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f7365b575e30686fbba78ee99fb6e07be911ae616b17f9cc574ed08f21f82975
+oid sha256:4e28134d3a57a6a4eb4d6e9a9dd9ff76f91693b794c9c7160dba78276f23e9d3
 size 87680472
build/torch24-cxx98-cu121-x86_64-linux/quantization/__init__.py CHANGED
@@ -13,9 +13,15 @@ from .marlin import (
     marlin_qqq_gemm,
     marlin_gemm,
 )
+from .scalar_type import (
+    ScalarType,
+    scalar_types,
+)
 from ._ops import ops
 
+
 __all__ = [
+    "ScalarType",
     "awq_marlin_repack",
     "cutlass_scaled_mm",
     "cutlass_scaled_mm_azp",
@@ -27,6 +33,7 @@ __all__ = [
     "marlin_gemm",
     "marlin_qqq_gemm",
     "ops",
+    "scalar_types",
     "scaled_fp8_quant",
     "scaled_int8_quant",
 ]
build/torch24-cxx98-cu121-x86_64-linux/quantization/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_fdmxhndozpkiy
-ops = torch.ops._quantization_fdmxhndozpkiy
+from . import _quantization_ni5tfi7up4a5a
+ops = torch.ops._quantization_ni5tfi7up4a5a
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_fdmxhndozpkiy::{op_name}"
+    return f"_quantization_ni5tfi7up4a5a::{op_name}"
build/torch24-cxx98-cu121-x86_64-linux/quantization/{_quantization_fdmxhndozpkiy.abi3.so → _quantization_ni5tfi7up4a5a.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:581b5f65faa0c0c3b03e9b756a77361a58f880625faef541ccd1ec667ce10e42
+oid sha256:b82858c351ca2de075ef835be7a0e677c956a44b5dd267278f790ab8fb43631b
 size 89319064
build/torch24-cxx98-cu124-x86_64-linux/quantization/__init__.py CHANGED
@@ -13,9 +13,15 @@ from .marlin import (
     marlin_qqq_gemm,
     marlin_gemm,
 )
+from .scalar_type import (
+    ScalarType,
+    scalar_types,
+)
 from ._ops import ops
 
+
 __all__ = [
+    "ScalarType",
     "awq_marlin_repack",
     "cutlass_scaled_mm",
     "cutlass_scaled_mm_azp",
@@ -27,6 +33,7 @@ __all__ = [
     "marlin_gemm",
     "marlin_qqq_gemm",
     "ops",
+    "scalar_types",
     "scaled_fp8_quant",
     "scaled_int8_quant",
 ]
build/torch24-cxx98-cu124-x86_64-linux/quantization/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_fczw55tnfikom
-ops = torch.ops._quantization_fczw55tnfikom
+from . import _quantization_ydkmqrh62v5ym
+ops = torch.ops._quantization_ydkmqrh62v5ym
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_fczw55tnfikom::{op_name}"
+    return f"_quantization_ydkmqrh62v5ym::{op_name}"
build/torch24-cxx98-cu124-x86_64-linux/quantization/_quantization_fczw55tnfikom.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:fe1352cc22ca1b9921b23a17f352b1e6d11fabfecdc5567bc0eb7d03947e0e4c
-size 92008832
build/{torch24-cxx11-cu121-x86_64-linux/quantization/_quantization_bjzgrvvxc7yoc.abi3.so → torch24-cxx98-cu124-x86_64-linux/quantization/_quantization_ydkmqrh62v5ym.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f247122c58b1e54de4f499d788827cd11e11b89b8e7f39478d1fd7e5eafd3528
-size 89315152
+oid sha256:8f8fff0b6b20f06ac5e2831d1801ac5522f28e14c66469c21152de42055802ff
+size 92000640
build/torch25-cxx11-cu118-x86_64-linux/quantization/__init__.py CHANGED
@@ -13,9 +13,15 @@ from .marlin import (
     marlin_qqq_gemm,
     marlin_gemm,
 )
+from .scalar_type import (
+    ScalarType,
+    scalar_types,
+)
 from ._ops import ops
 
+
 __all__ = [
+    "ScalarType",
    "awq_marlin_repack",
     "cutlass_scaled_mm",
     "cutlass_scaled_mm_azp",
@@ -27,6 +33,7 @@ __all__ = [
     "marlin_gemm",
     "marlin_qqq_gemm",
     "ops",
+    "scalar_types",
     "scaled_fp8_quant",
     "scaled_int8_quant",
 ]
build/torch25-cxx11-cu118-x86_64-linux/quantization/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_svdmwkynmr5iq
-ops = torch.ops._quantization_svdmwkynmr5iq
+from . import _quantization_2upd4ndb7oxlw
+ops = torch.ops._quantization_2upd4ndb7oxlw
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_svdmwkynmr5iq::{op_name}"
+    return f"_quantization_2upd4ndb7oxlw::{op_name}"
build/torch25-cxx11-cu118-x86_64-linux/quantization/_quantization_2upd4ndb7oxlw.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:104b2c0a279647f0ffecaf3a3f9c599d70f7a5f8fb3676923192af102c1e0c29
+size 87692592
build/torch25-cxx11-cu118-x86_64-linux/quantization/_quantization_svdmwkynmr5iq.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ccb41dfb79a95184171e2470926dbe7302a6d129a5e511f1a4778fcd70022e98
-size 87696688
build/torch25-cxx11-cu121-x86_64-linux/quantization/__init__.py CHANGED
@@ -13,9 +13,15 @@ from .marlin import (
     marlin_qqq_gemm,
     marlin_gemm,
 )
+from .scalar_type import (
+    ScalarType,
+    scalar_types,
+)
 from ._ops import ops
 
+
 __all__ = [
+    "ScalarType",
     "awq_marlin_repack",
     "cutlass_scaled_mm",
     "cutlass_scaled_mm_azp",
@@ -27,6 +33,7 @@ __all__ = [
     "marlin_gemm",
     "marlin_qqq_gemm",
     "ops",
+    "scalar_types",
     "scaled_fp8_quant",
     "scaled_int8_quant",
 ]
build/torch25-cxx11-cu121-x86_64-linux/quantization/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_psmex5aqqy2hw
-ops = torch.ops._quantization_psmex5aqqy2hw
+from . import _quantization_srrzik3uw7tac
+ops = torch.ops._quantization_srrzik3uw7tac
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_psmex5aqqy2hw::{op_name}"
+    return f"_quantization_srrzik3uw7tac::{op_name}"
build/torch25-cxx11-cu121-x86_64-linux/quantization/_quantization_srrzik3uw7tac.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67a5b844f470191316ce745241a34298fd326906ff1ba28c7020de19fc13b154
+size 89323344
build/torch25-cxx11-cu124-x86_64-linux/quantization/__init__.py CHANGED
@@ -13,9 +13,15 @@ from .marlin import (
     marlin_qqq_gemm,
     marlin_gemm,
 )
+from .scalar_type import (
+    ScalarType,
+    scalar_types,
+)
 from ._ops import ops
 
+
 __all__ = [
+    "ScalarType",
     "awq_marlin_repack",
     "cutlass_scaled_mm",
     "cutlass_scaled_mm_azp",
@@ -27,6 +33,7 @@ __all__ = [
     "marlin_gemm",
     "marlin_qqq_gemm",
     "ops",
+    "scalar_types",
     "scaled_fp8_quant",
     "scaled_int8_quant",
 ]
build/torch25-cxx11-cu124-x86_64-linux/quantization/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_weycjcyvicgdu
-ops = torch.ops._quantization_weycjcyvicgdu
+from . import _quantization_jotyxqrlkrs6e
+ops = torch.ops._quantization_jotyxqrlkrs6e
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_weycjcyvicgdu::{op_name}"
+    return f"_quantization_jotyxqrlkrs6e::{op_name}"
build/torch25-cxx11-cu124-x86_64-linux/quantization/_quantization_jotyxqrlkrs6e.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f890ddeb18dd70c04887f8c9266b28fe891c230022a44bd4e064ccafea941b3
+size 92011256
build/torch25-cxx11-cu124-x86_64-linux/quantization/_quantization_weycjcyvicgdu.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d7694ff09eaa5c3047cc49fcce4f60d0f68e6643dec563e9d5f825a7223dbaa3
-size 92003064
build/torch25-cxx98-cu118-x86_64-linux/quantization/__init__.py CHANGED
@@ -13,9 +13,15 @@ from .marlin import (
     marlin_qqq_gemm,
     marlin_gemm,
 )
+from .scalar_type import (
+    ScalarType,
+    scalar_types,
+)
 from ._ops import ops
 
+
 __all__ = [
+    "ScalarType",
     "awq_marlin_repack",
     "cutlass_scaled_mm",
     "cutlass_scaled_mm_azp",
@@ -27,6 +33,7 @@ __all__ = [
     "marlin_gemm",
     "marlin_qqq_gemm",
     "ops",
+    "scalar_types",
     "scaled_fp8_quant",
     "scaled_int8_quant",
 ]
build/torch25-cxx98-cu118-x86_64-linux/quantization/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_p475gzgjpctze
-ops = torch.ops._quantization_p475gzgjpctze
+from . import _quantization_nxw74vvv2coka
+ops = torch.ops._quantization_nxw74vvv2coka
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_p475gzgjpctze::{op_name}"
+    return f"_quantization_nxw74vvv2coka::{op_name}"
build/torch25-cxx98-cu118-x86_64-linux/quantization/_quantization_nxw74vvv2coka.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f236aad3266d710f87b1c4eed449a251b1bd56d28631cd6eb03ed50443a27f07
+size 87680472
build/torch25-cxx98-cu118-x86_64-linux/quantization/_quantization_p475gzgjpctze.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:aaea78c36cc6a439e0ac7cf70006c4631d943d44867585fdfe305ae0f8f9cf8d
-size 87684656
build/torch25-cxx98-cu121-x86_64-linux/quantization/__init__.py CHANGED
@@ -13,9 +13,15 @@ from .marlin import (
     marlin_qqq_gemm,
     marlin_gemm,
 )
+from .scalar_type import (
+    ScalarType,
+    scalar_types,
+)
 from ._ops import ops
 
+
 __all__ = [
+    "ScalarType",
     "awq_marlin_repack",
     "cutlass_scaled_mm",
     "cutlass_scaled_mm_azp",
@@ -27,6 +33,7 @@ __all__ = [
     "marlin_gemm",
     "marlin_qqq_gemm",
     "ops",
+    "scalar_types",
     "scaled_fp8_quant",
     "scaled_int8_quant",
 ]
build/torch25-cxx98-cu121-x86_64-linux/quantization/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_6q7kuci5ztlzi
-ops = torch.ops._quantization_6q7kuci5ztlzi
+from . import _quantization_7fvkxfdjondcu
+ops = torch.ops._quantization_7fvkxfdjondcu
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_6q7kuci5ztlzi::{op_name}"
+    return f"_quantization_7fvkxfdjondcu::{op_name}"
build/torch25-cxx98-cu121-x86_64-linux/quantization/_quantization_6q7kuci5ztlzi.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:38fb86c1ddce174f8f6a42d49ba4757a45e94d741615109670588838076c3627
-size 89314968
build/torch25-cxx98-cu121-x86_64-linux/quantization/_quantization_7fvkxfdjondcu.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58d9c6e3e114ed0bcad34b237c0e4dcf261e474bedc9fc0c240565c3c4168e5b
+size 89323160
build/torch25-cxx98-cu124-x86_64-linux/quantization/__init__.py CHANGED
@@ -13,9 +13,15 @@ from .marlin import (
     marlin_qqq_gemm,
     marlin_gemm,
 )
+from .scalar_type import (
+    ScalarType,
+    scalar_types,
+)
 from ._ops import ops
 
+
 __all__ = [
+    "ScalarType",
     "awq_marlin_repack",
     "cutlass_scaled_mm",
     "cutlass_scaled_mm_azp",
@@ -27,6 +33,7 @@ __all__ = [
     "marlin_gemm",
     "marlin_qqq_gemm",
     "ops",
+    "scalar_types",
     "scaled_fp8_quant",
     "scaled_int8_quant",
 ]
build/torch25-cxx98-cu124-x86_64-linux/quantization/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_j2g4sezvqowbi
-ops = torch.ops._quantization_j2g4sezvqowbi
+from . import _quantization_cf2wponrik3xk
+ops = torch.ops._quantization_cf2wponrik3xk
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_j2g4sezvqowbi::{op_name}"
+    return f"_quantization_cf2wponrik3xk::{op_name}"
build/torch25-cxx98-cu124-x86_64-linux/quantization/_quantization_cf2wponrik3xk.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e69789d17c330e17384c20579ae7a8c06c7bcc8ebf09ea8ded6df28e44d088d0
+size 92004584
build/torch25-cxx98-cu124-x86_64-linux/quantization/_quantization_j2g4sezvqowbi.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2323b5a48e6e21a0909e64c502991d6c9086c6eb47bca319b89948aa2989c5d9
-size 92008680