mohitsha (HF Staff) committed
Commit 97312d7 · verified · 1 Parent(s): 52ffbc5

Upload 3 files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ build/torch27-cxx11-rocm63-x86_64-linux/aiter_pa/_aiter_pa_a120ba4f5566b.so filter=lfs diff=lfs merge=lfs -text
build/torch27-cxx11-rocm63-x86_64-linux/aiter_pa/__init__.py ADDED
@@ -0,0 +1,52 @@
+ # This is a Python package for the kernel.
+ from typing import Optional
+ import torch
+
+ from ._ops import ops
+
+ def paged_attention_rocm(
+     out: torch.Tensor,
+     exp_sums: torch.Tensor,
+     max_logits: torch.Tensor,
+     tmp_out: torch.Tensor,
+     query: torch.Tensor,
+     key_cache: torch.Tensor,
+     value_cache: torch.Tensor,
+     num_kv_heads: int,
+     scale: float,
+     block_tables: torch.Tensor,
+     context_lens: torch.Tensor,
+     block_size: int,
+     max_context_len: int,
+     alibi_slopes: Optional[torch.Tensor],
+     kv_cache_dtype: str,
+     k_scale: float,
+     v_scale: float,
+     fp8_out_scale: Optional[torch.Tensor],
+     partition_size: int,
+ ):
+     return ops.paged_attention_rocm(
+         out,
+         exp_sums,
+         max_logits,
+         tmp_out,
+         query,
+         key_cache,
+         value_cache,
+         num_kv_heads,
+         scale,
+         block_tables,
+         context_lens,
+         block_size,
+         max_context_len,
+         alibi_slopes,
+         kv_cache_dtype,
+         k_scale,
+         v_scale,
+         fp8_out_scale,
+         partition_size,
+     )
+
+ __all__ = [
+     "paged_attention_rocm",
+ ]
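Note: the sketch below illustrates how this wrapper might be called. The buffer shapes follow the common vLLM-style paged-KV-cache layout and are assumptions, not taken from this commit; only the paged_attention_rocm signature above comes from the diff, and executing the call requires the built ROCm extension.

# Illustrative sketch only. Shapes assume a vLLM-style paged KV-cache layout;
# the layout actually expected by this kernel is not documented in the diff.
import torch
from aiter_pa import paged_attention_rocm  # assumes the built package imports as `aiter_pa`

num_seqs, num_heads, num_kv_heads, head_size = 2, 8, 8, 128
block_size, num_blocks, max_context_len, partition_size = 16, 64, 256, 256
max_num_partitions = (max_context_len + partition_size - 1) // partition_size

device, dtype = "cuda", torch.float16  # ROCm GPUs are exposed through the "cuda" device type

query = torch.randn(num_seqs, num_heads, head_size, device=device, dtype=dtype)
out = torch.empty_like(query)

# Per-partition scratch buffers used by the split reduction over the context.
exp_sums = torch.empty(num_seqs, num_heads, max_num_partitions, device=device, dtype=torch.float32)
max_logits = torch.empty_like(exp_sums)
tmp_out = torch.empty(num_seqs, num_heads, max_num_partitions, head_size, device=device, dtype=dtype)

# Paged KV cache (assumed layout); x packs contiguous key elements into 16-byte groups.
x = 16 // query.element_size()
key_cache = torch.randn(num_blocks, num_kv_heads, head_size // x, block_size, x, device=device, dtype=dtype)
value_cache = torch.randn(num_blocks, num_kv_heads, head_size, block_size, device=device, dtype=dtype)

block_tables = torch.randint(0, num_blocks, (num_seqs, max_context_len // block_size), device=device, dtype=torch.int32)
context_lens = torch.full((num_seqs,), max_context_len, device=device, dtype=torch.int32)

paged_attention_rocm(
    out, exp_sums, max_logits, tmp_out, query, key_cache, value_cache,
    num_kv_heads, head_size ** -0.5, block_tables, context_lens,
    block_size, max_context_len,
    None,       # alibi_slopes
    "auto",     # kv_cache_dtype
    1.0, 1.0,   # k_scale, v_scale
    None,       # fp8_out_scale
    partition_size,
)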
build/torch27-cxx11-rocm63-x86_64-linux/aiter_pa/_aiter_pa_a120ba4f5566b.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cbe37179bb4344cdd65280558bd853fd6f489de7725e2df72347a620e70fee8e
+ size 2919776
build/torch27-cxx11-rocm63-x86_64-linux/aiter_pa/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _aiter_pa_a120ba4f5566b
+ ops = torch.ops._aiter_pa_a120ba4f5566b
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_aiter_pa_a120ba4f5566b::{op_name}"
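The add_op_namespace_prefix helper is not exercised elsewhere in this commit; as a hedged illustration, it simply builds the fully qualified operator name that torch.library-style registration APIs expect.

# Hypothetical usage (not part of this commit): building the qualified op name.
from aiter_pa._ops import add_op_namespace_prefix

qualname = add_op_namespace_prefix("paged_attention_rocm")
print(qualname)  # -> "_aiter_pa_a120ba4f5566b::paged_attention_rocm"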