kernel
David Holtz committed on
Commit
dd2a3b2
·
1 Parent(s): ab4cc6a

feat: bump build

Browse files
Files changed (44) hide show
  1. build/torch25-cxx11-cu118-x86_64-linux/flash_attn/__init__.py +2 -2
  2. build/torch25-cxx11-cu118-x86_64-linux/flash_attn/{_flash_attn_a7165c8_dirty.abi3.so → _flash_attn_ab4cc6a_dirty.abi3.so} +2 -2
  3. build/torch25-cxx11-cu118-x86_64-linux/flash_attn/_ops.py +3 -3
  4. build/torch25-cxx11-cu121-x86_64-linux/flash_attn/__init__.py +2 -2
  5. build/torch25-cxx11-cu121-x86_64-linux/flash_attn/{_flash_attn_a7165c8_dirty.abi3.so → _flash_attn_ab4cc6a_dirty.abi3.so} +2 -2
  6. build/torch25-cxx11-cu121-x86_64-linux/flash_attn/_ops.py +3 -3
  7. build/torch25-cxx11-cu124-x86_64-linux/flash_attn/__init__.py +2 -2
  8. build/torch25-cxx11-cu124-x86_64-linux/flash_attn/{_flash_attn_a7165c8_dirty.abi3.so → _flash_attn_ab4cc6a_dirty.abi3.so} +2 -2
  9. build/torch25-cxx11-cu124-x86_64-linux/flash_attn/_ops.py +3 -3
  10. build/torch25-cxx98-cu118-x86_64-linux/flash_attn/__init__.py +2 -2
  11. build/torch25-cxx98-cu118-x86_64-linux/flash_attn/{_flash_attn_a7165c8_dirty.abi3.so → _flash_attn_ab4cc6a_dirty.abi3.so} +2 -2
  12. build/torch25-cxx98-cu118-x86_64-linux/flash_attn/_ops.py +3 -3
  13. build/torch25-cxx98-cu121-x86_64-linux/flash_attn/__init__.py +2 -2
  14. build/torch25-cxx98-cu121-x86_64-linux/flash_attn/_flash_attn_a7165c8_dirty.abi3.so +0 -3
  15. build/torch25-cxx98-cu121-x86_64-linux/flash_attn/_flash_attn_ab4cc6a_dirty.abi3.so +3 -0
  16. build/torch25-cxx98-cu121-x86_64-linux/flash_attn/_ops.py +3 -3
  17. build/torch25-cxx98-cu124-x86_64-linux/flash_attn/__init__.py +2 -2
  18. build/torch25-cxx98-cu124-x86_64-linux/flash_attn/_flash_attn_a7165c8_dirty.abi3.so +0 -3
  19. build/torch25-cxx98-cu124-x86_64-linux/flash_attn/_flash_attn_ab4cc6a_dirty.abi3.so +3 -0
  20. build/torch25-cxx98-cu124-x86_64-linux/flash_attn/_ops.py +3 -3
  21. build/torch26-cxx11-cu118-x86_64-linux/flash_attn/__init__.py +2 -2
  22. build/torch26-cxx11-cu118-x86_64-linux/flash_attn/_flash_attn_a7165c8_dirty.abi3.so +0 -3
  23. build/torch26-cxx11-cu118-x86_64-linux/flash_attn/_flash_attn_ab4cc6a_dirty.abi3.so +3 -0
  24. build/torch26-cxx11-cu118-x86_64-linux/flash_attn/_ops.py +3 -3
  25. build/torch26-cxx11-cu124-x86_64-linux/flash_attn/__init__.py +2 -2
  26. build/torch26-cxx11-cu124-x86_64-linux/flash_attn/_flash_attn_a7165c8_dirty.abi3.so +0 -3
  27. build/torch26-cxx11-cu124-x86_64-linux/flash_attn/_flash_attn_ab4cc6a_dirty.abi3.so +3 -0
  28. build/torch26-cxx11-cu124-x86_64-linux/flash_attn/_ops.py +3 -3
  29. build/torch26-cxx11-cu126-x86_64-linux/flash_attn/__init__.py +2 -2
  30. build/torch26-cxx11-cu126-x86_64-linux/flash_attn/_flash_attn_a7165c8_dirty.abi3.so +0 -3
  31. build/torch26-cxx11-cu126-x86_64-linux/flash_attn/_flash_attn_ab4cc6a_dirty.abi3.so +3 -0
  32. build/torch26-cxx11-cu126-x86_64-linux/flash_attn/_ops.py +3 -3
  33. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/__init__.py +2 -2
  34. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/_flash_attn_a7165c8_dirty.abi3.so +0 -3
  35. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/_flash_attn_ab4cc6a_dirty.abi3.so +3 -0
  36. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/_ops.py +3 -3
  37. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/__init__.py +2 -2
  38. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/_flash_attn_a7165c8_dirty.abi3.so +0 -3
  39. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/_flash_attn_ab4cc6a_dirty.abi3.so +3 -0
  40. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/_ops.py +3 -3
  41. build/torch26-cxx98-cu126-x86_64-linux/flash_attn/__init__.py +2 -2
  42. build/torch26-cxx98-cu126-x86_64-linux/flash_attn/_flash_attn_a7165c8_dirty.abi3.so +0 -3
  43. build/torch26-cxx98-cu126-x86_64-linux/flash_attn/_flash_attn_ab4cc6a_dirty.abi3.so +3 -0
  44. build/torch26-cxx98-cu126-x86_64-linux/flash_attn/_ops.py +3 -3
build/torch25-cxx11-cu118-x86_64-linux/flash_attn/__init__.py CHANGED
@@ -19,7 +19,7 @@ def mha_fwd(
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
- return ops.mha_fwd(
23
  q,
24
  k,
25
  v,
@@ -34,4 +34,4 @@ def mha_fwd(
34
  return_softmax,
35
  gen,
36
  )
37
- return out
 
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
+ ops.mha_fwd(
23
  q,
24
  k,
25
  v,
 
34
  return_softmax,
35
  gen,
36
  )
37
+ return out
build/torch25-cxx11-cu118-x86_64-linux/flash_attn/{_flash_attn_a7165c8_dirty.abi3.so → _flash_attn_ab4cc6a_dirty.abi3.so} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c2a6f11f1665f62c8f3b96cd843c806b737966575c28804c602bc68d089c1759
3
- size 17469320
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:85f70ae9ee6f5b27b149808f14aedf0dbb327fcfac6e6320c48d17810009dc77
3
+ size 1301385392
build/torch25-cxx11-cu118-x86_64-linux/flash_attn/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _flash_attn_a7165c8_dirty
3
- ops = torch.ops._flash_attn_a7165c8_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_flash_attn_a7165c8_dirty::{op_name}"
 
1
  import torch
2
+ from . import _flash_attn_ab4cc6a_dirty
3
+ ops = torch.ops._flash_attn_ab4cc6a_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_flash_attn_ab4cc6a_dirty::{op_name}"
build/torch25-cxx11-cu121-x86_64-linux/flash_attn/__init__.py CHANGED
@@ -19,7 +19,7 @@ def mha_fwd(
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
- return ops.mha_fwd(
23
  q,
24
  k,
25
  v,
@@ -34,4 +34,4 @@ def mha_fwd(
34
  return_softmax,
35
  gen,
36
  )
37
- return out
 
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
+ ops.mha_fwd(
23
  q,
24
  k,
25
  v,
 
34
  return_softmax,
35
  gen,
36
  )
37
+ return out
build/torch25-cxx11-cu121-x86_64-linux/flash_attn/{_flash_attn_a7165c8_dirty.abi3.so → _flash_attn_ab4cc6a_dirty.abi3.so} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:2e481af6967a53e2017631ade57897e3ef32e1a13e8badb11310df46e8748dab
3
- size 17561616
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:91b3c70a49f7d039bc7a238d0147dabe94cffd2485463bdd641bb74b395ada99
3
+ size 1295653368
build/torch25-cxx11-cu121-x86_64-linux/flash_attn/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _flash_attn_a7165c8_dirty
3
- ops = torch.ops._flash_attn_a7165c8_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_flash_attn_a7165c8_dirty::{op_name}"
 
1
  import torch
2
+ from . import _flash_attn_ab4cc6a_dirty
3
+ ops = torch.ops._flash_attn_ab4cc6a_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_flash_attn_ab4cc6a_dirty::{op_name}"
build/torch25-cxx11-cu124-x86_64-linux/flash_attn/__init__.py CHANGED
@@ -19,7 +19,7 @@ def mha_fwd(
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
- return ops.mha_fwd(
23
  q,
24
  k,
25
  v,
@@ -34,4 +34,4 @@ def mha_fwd(
34
  return_softmax,
35
  gen,
36
  )
37
- return out
 
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
+ ops.mha_fwd(
23
  q,
24
  k,
25
  v,
 
34
  return_softmax,
35
  gen,
36
  )
37
+ return out
build/torch25-cxx11-cu124-x86_64-linux/flash_attn/{_flash_attn_a7165c8_dirty.abi3.so → _flash_attn_ab4cc6a_dirty.abi3.so} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:df22c84c094e57e3e08c4adb615637c8e1a10fc914f9601a372eb1749ffcda12
3
- size 17820800
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e074eed034da9275d49c87d904babd0a718c8e22d12cdedfae01e7c38260113
3
+ size 1262747328
build/torch25-cxx11-cu124-x86_64-linux/flash_attn/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _flash_attn_a7165c8_dirty
3
- ops = torch.ops._flash_attn_a7165c8_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_flash_attn_a7165c8_dirty::{op_name}"
 
1
  import torch
2
+ from . import _flash_attn_ab4cc6a_dirty
3
+ ops = torch.ops._flash_attn_ab4cc6a_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_flash_attn_ab4cc6a_dirty::{op_name}"
build/torch25-cxx98-cu118-x86_64-linux/flash_attn/__init__.py CHANGED
@@ -19,7 +19,7 @@ def mha_fwd(
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
- return ops.mha_fwd(
23
  q,
24
  k,
25
  v,
@@ -34,4 +34,4 @@ def mha_fwd(
34
  return_softmax,
35
  gen,
36
  )
37
- return out
 
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
+ ops.mha_fwd(
23
  q,
24
  k,
25
  v,
 
34
  return_softmax,
35
  gen,
36
  )
37
+ return out
build/torch25-cxx98-cu118-x86_64-linux/flash_attn/{_flash_attn_a7165c8_dirty.abi3.so → _flash_attn_ab4cc6a_dirty.abi3.so} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:dd9920d56ee47082c06be48f07d20a869864954713bb8d05991dfcf01992cc6b
3
- size 17461960
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b245e7fe66f20cef74aaab7c86d1e33913faeff9d6dae530763d4a5dd256af5
3
+ size 1301380832
build/torch25-cxx98-cu118-x86_64-linux/flash_attn/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _flash_attn_a7165c8_dirty
3
- ops = torch.ops._flash_attn_a7165c8_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_flash_attn_a7165c8_dirty::{op_name}"
 
1
  import torch
2
+ from . import _flash_attn_ab4cc6a_dirty
3
+ ops = torch.ops._flash_attn_ab4cc6a_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_flash_attn_ab4cc6a_dirty::{op_name}"
build/torch25-cxx98-cu121-x86_64-linux/flash_attn/__init__.py CHANGED
@@ -19,7 +19,7 @@ def mha_fwd(
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
- return ops.mha_fwd(
23
  q,
24
  k,
25
  v,
@@ -34,4 +34,4 @@ def mha_fwd(
34
  return_softmax,
35
  gen,
36
  )
37
- return out
 
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
+ ops.mha_fwd(
23
  q,
24
  k,
25
  v,
 
34
  return_softmax,
35
  gen,
36
  )
37
+ return out
build/torch25-cxx98-cu121-x86_64-linux/flash_attn/_flash_attn_a7165c8_dirty.abi3.so DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:806a71437827eb1724e80bbaf1cee7f1ef0242cd7c9a34b7e6ff696a8536f16a
3
- size 17558544
 
 
 
 
build/torch25-cxx98-cu121-x86_64-linux/flash_attn/_flash_attn_ab4cc6a_dirty.abi3.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:347160ae8e05c11d1a99da542ecb4c2f6dbd30627cc6002b08c107b9d3d8af3c
3
+ size 1295640880
build/torch25-cxx98-cu121-x86_64-linux/flash_attn/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _flash_attn_a7165c8_dirty
3
- ops = torch.ops._flash_attn_a7165c8_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_flash_attn_a7165c8_dirty::{op_name}"
 
1
  import torch
2
+ from . import _flash_attn_ab4cc6a_dirty
3
+ ops = torch.ops._flash_attn_ab4cc6a_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_flash_attn_ab4cc6a_dirty::{op_name}"
build/torch25-cxx98-cu124-x86_64-linux/flash_attn/__init__.py CHANGED
@@ -19,7 +19,7 @@ def mha_fwd(
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
- return ops.mha_fwd(
23
  q,
24
  k,
25
  v,
@@ -34,4 +34,4 @@ def mha_fwd(
34
  return_softmax,
35
  gen,
36
  )
37
- return out
 
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
+ ops.mha_fwd(
23
  q,
24
  k,
25
  v,
 
34
  return_softmax,
35
  gen,
36
  )
37
+ return out
build/torch25-cxx98-cu124-x86_64-linux/flash_attn/_flash_attn_a7165c8_dirty.abi3.so DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:098cc28e134482be440715e9df0fe5b3e4023c1b5ca2c562da39571b630c4d73
3
- size 17817728
 
 
 
 
build/torch25-cxx98-cu124-x86_64-linux/flash_attn/_flash_attn_ab4cc6a_dirty.abi3.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c79d6703c033ea9e1bfcc6fc3006ac88a9713d8371ea3a96d70e8495c7692f68
3
+ size 1262738936
build/torch25-cxx98-cu124-x86_64-linux/flash_attn/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _flash_attn_a7165c8_dirty
3
- ops = torch.ops._flash_attn_a7165c8_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_flash_attn_a7165c8_dirty::{op_name}"
 
1
  import torch
2
+ from . import _flash_attn_ab4cc6a_dirty
3
+ ops = torch.ops._flash_attn_ab4cc6a_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_flash_attn_ab4cc6a_dirty::{op_name}"
build/torch26-cxx11-cu118-x86_64-linux/flash_attn/__init__.py CHANGED
@@ -19,7 +19,7 @@ def mha_fwd(
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
- return ops.mha_fwd(
23
  q,
24
  k,
25
  v,
@@ -34,4 +34,4 @@ def mha_fwd(
34
  return_softmax,
35
  gen,
36
  )
37
- return out
 
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
+ ops.mha_fwd(
23
  q,
24
  k,
25
  v,
 
34
  return_softmax,
35
  gen,
36
  )
37
+ return out
build/torch26-cxx11-cu118-x86_64-linux/flash_attn/_flash_attn_a7165c8_dirty.abi3.so DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:db8e9a06cafa5dffe988c22df459745deb3ee1b22b084e53ed6429e49867aae7
3
- size 17469464
 
 
 
 
build/torch26-cxx11-cu118-x86_64-linux/flash_attn/_flash_attn_ab4cc6a_dirty.abi3.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d8cc0f02a6eea5c9fe8e5bc7b0138cef9bf77c026dc26b08f878bd809799189e
3
+ size 1301389752
build/torch26-cxx11-cu118-x86_64-linux/flash_attn/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _flash_attn_a7165c8_dirty
3
- ops = torch.ops._flash_attn_a7165c8_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_flash_attn_a7165c8_dirty::{op_name}"
 
1
  import torch
2
+ from . import _flash_attn_ab4cc6a_dirty
3
+ ops = torch.ops._flash_attn_ab4cc6a_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_flash_attn_ab4cc6a_dirty::{op_name}"
build/torch26-cxx11-cu124-x86_64-linux/flash_attn/__init__.py CHANGED
@@ -19,7 +19,7 @@ def mha_fwd(
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
- return ops.mha_fwd(
23
  q,
24
  k,
25
  v,
@@ -34,4 +34,4 @@ def mha_fwd(
34
  return_softmax,
35
  gen,
36
  )
37
- return out
 
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
+ ops.mha_fwd(
23
  q,
24
  k,
25
  v,
 
34
  return_softmax,
35
  gen,
36
  )
37
+ return out
build/torch26-cxx11-cu124-x86_64-linux/flash_attn/_flash_attn_a7165c8_dirty.abi3.so DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:3177cf407996b4f51ee139bfa4dcaf647fd659429cf9901ade2ac08117e20f9d
3
- size 17821096
 
 
 
 
build/torch26-cxx11-cu124-x86_64-linux/flash_attn/_flash_attn_ab4cc6a_dirty.abi3.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc6aaa5d51f3d329ec4d6fe7422ff8ff5223fa1a1e01644da196504534bd4fb6
3
+ size 1262747768
build/torch26-cxx11-cu124-x86_64-linux/flash_attn/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _flash_attn_a7165c8_dirty
3
- ops = torch.ops._flash_attn_a7165c8_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_flash_attn_a7165c8_dirty::{op_name}"
 
1
  import torch
2
+ from . import _flash_attn_ab4cc6a_dirty
3
+ ops = torch.ops._flash_attn_ab4cc6a_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_flash_attn_ab4cc6a_dirty::{op_name}"
build/torch26-cxx11-cu126-x86_64-linux/flash_attn/__init__.py CHANGED
@@ -19,7 +19,7 @@ def mha_fwd(
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
- return ops.mha_fwd(
23
  q,
24
  k,
25
  v,
@@ -34,4 +34,4 @@ def mha_fwd(
34
  return_softmax,
35
  gen,
36
  )
37
- return out
 
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
+ ops.mha_fwd(
23
  q,
24
  k,
25
  v,
 
34
  return_softmax,
35
  gen,
36
  )
37
+ return out
build/torch26-cxx11-cu126-x86_64-linux/flash_attn/_flash_attn_a7165c8_dirty.abi3.so DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:43f21a0f290a6f42e004303c760e5aacc851ad55bd9093cea4752c0a7d6b202e
3
- size 17981304
 
 
 
 
build/torch26-cxx11-cu126-x86_64-linux/flash_attn/_flash_attn_ab4cc6a_dirty.abi3.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afce8d0bc6516f4e2ade3b45453d6370ead51ab9d368786b20109544cc8b4772
3
+ size 1273150064
build/torch26-cxx11-cu126-x86_64-linux/flash_attn/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _flash_attn_a7165c8_dirty
3
- ops = torch.ops._flash_attn_a7165c8_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_flash_attn_a7165c8_dirty::{op_name}"
 
1
  import torch
2
+ from . import _flash_attn_ab4cc6a_dirty
3
+ ops = torch.ops._flash_attn_ab4cc6a_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_flash_attn_ab4cc6a_dirty::{op_name}"
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/__init__.py CHANGED
@@ -19,7 +19,7 @@ def mha_fwd(
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
- return ops.mha_fwd(
23
  q,
24
  k,
25
  v,
@@ -34,4 +34,4 @@ def mha_fwd(
34
  return_softmax,
35
  gen,
36
  )
37
- return out
 
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
+ ops.mha_fwd(
23
  q,
24
  k,
25
  v,
 
34
  return_softmax,
35
  gen,
36
  )
37
+ return out
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/_flash_attn_a7165c8_dirty.abi3.so DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:434702696304310402d3ce50496e7f9f113b632ebc90ef602e255562a54d480a
3
- size 17462256
 
 
 
 
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/_flash_attn_ab4cc6a_dirty.abi3.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0f65a659aa158221085014ade1e92475fe08871894796ca8db38ef2d2dbbcb99
3
+ size 1301381128
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _flash_attn_a7165c8_dirty
3
- ops = torch.ops._flash_attn_a7165c8_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_flash_attn_a7165c8_dirty::{op_name}"
 
1
  import torch
2
+ from . import _flash_attn_ab4cc6a_dirty
3
+ ops = torch.ops._flash_attn_ab4cc6a_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_flash_attn_ab4cc6a_dirty::{op_name}"
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/__init__.py CHANGED
@@ -19,7 +19,7 @@ def mha_fwd(
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
- return ops.mha_fwd(
23
  q,
24
  k,
25
  v,
@@ -34,4 +34,4 @@ def mha_fwd(
34
  return_softmax,
35
  gen,
36
  )
37
- return out
 
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
+ ops.mha_fwd(
23
  q,
24
  k,
25
  v,
 
34
  return_softmax,
35
  gen,
36
  )
37
+ return out
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/_flash_attn_a7165c8_dirty.abi3.so DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:d90d1be6a4a87ec538a3b009356af76b6c1a1b5b18ce1e69b0fe8b0316972090
3
- size 17817920
 
 
 
 
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/_flash_attn_ab4cc6a_dirty.abi3.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ad532518c0a821e096e21c16bd89ec4c0b57b5b9cae92daa4c75100cfe712c6
3
+ size 1262739232
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _flash_attn_a7165c8_dirty
3
- ops = torch.ops._flash_attn_a7165c8_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_flash_attn_a7165c8_dirty::{op_name}"
 
1
  import torch
2
+ from . import _flash_attn_ab4cc6a_dirty
3
+ ops = torch.ops._flash_attn_ab4cc6a_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_flash_attn_ab4cc6a_dirty::{op_name}"
build/torch26-cxx98-cu126-x86_64-linux/flash_attn/__init__.py CHANGED
@@ -19,7 +19,7 @@ def mha_fwd(
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
- return ops.mha_fwd(
23
  q,
24
  k,
25
  v,
@@ -34,4 +34,4 @@ def mha_fwd(
34
  return_softmax,
35
  gen,
36
  )
37
- return out
 
19
  return_softmax: bool,
20
  gen: Optional[torch.Generator],
21
  ) -> torch.Tensor:
22
+ ops.mha_fwd(
23
  q,
24
  k,
25
  v,
 
34
  return_softmax,
35
  gen,
36
  )
37
+ return out
build/torch26-cxx98-cu126-x86_64-linux/flash_attn/_flash_attn_a7165c8_dirty.abi3.so DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:ff763dccb46211a07fab8e63cfd96f76984cd994525d6c8ce0e274489e8099ca
3
- size 17978128
 
 
 
 
build/torch26-cxx98-cu126-x86_64-linux/flash_attn/_flash_attn_ab4cc6a_dirty.abi3.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:300868f1f33c620a923efa0629916bb0afda4763af425de233e48389eede6db4
3
+ size 1273141520
build/torch26-cxx98-cu126-x86_64-linux/flash_attn/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _flash_attn_a7165c8_dirty
3
- ops = torch.ops._flash_attn_a7165c8_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_flash_attn_a7165c8_dirty::{op_name}"
 
1
  import torch
2
+ from . import _flash_attn_ab4cc6a_dirty
3
+ ops = torch.ops._flash_attn_ab4cc6a_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_flash_attn_ab4cc6a_dirty::{op_name}"