diff --git a/build/torch26-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc b/build/torch26-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc
deleted file mode 100644
index cfe7a33504e57c0e583728719de54e97e877af5f..0000000000000000000000000000000000000000
Binary files a/build/torch26-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc and /dev/null differ
diff --git a/build/torch26-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc b/build/torch26-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc
deleted file mode 100644
index 70b023a100bbc9344377d69b765d1731f881b1ff..0000000000000000000000000000000000000000
Binary files a/build/torch26-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc and /dev/null differ
diff --git a/build/torch26-cxx11-cu118-x86_64-linux/paged_attention/_ops.py b/build/torch26-cxx11-cu118-x86_64-linux/paged_attention/_ops.py
deleted file mode 100644
index 56d920dbf5b60b10a3444a94a1035d9e72a0df99..0000000000000000000000000000000000000000
--- a/build/torch26-cxx11-cu118-x86_64-linux/paged_attention/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _paged_attention_6677800
-ops = torch.ops._paged_attention_6677800
-
-def add_op_namespace_prefix(op_name: str):
-    """
-    Prefix op by namespace.
-    """
-    return f"_paged_attention_6677800::{op_name}"
\ No newline at end of file
diff --git a/build/torch26-cxx11-cu118-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so b/build/torch26-cxx11-cu118-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so
deleted file mode 100755
index 451db0c23548212c70d50877ba38b4c6a479107f..0000000000000000000000000000000000000000
--- a/build/torch26-cxx11-cu118-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:08e178e566aba62bbe98ba07bb0f83784095cb0d2fec7946a8d5773ca8e550ae
-size 91845160
diff --git a/build/torch26-cxx11-cu124-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc b/build/torch26-cxx11-cu124-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc
deleted file mode 100644
index 32c9d0e881deaa82d4b9b8a62bfcca1b7a4b415c..0000000000000000000000000000000000000000
Binary files a/build/torch26-cxx11-cu124-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc and /dev/null differ
diff --git a/build/torch26-cxx11-cu124-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc b/build/torch26-cxx11-cu124-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc
deleted file mode 100644
index 41099788420022287a62a31c9a94ecf7c50b9547..0000000000000000000000000000000000000000
Binary files a/build/torch26-cxx11-cu124-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc and /dev/null differ
diff --git a/build/torch26-cxx11-cu124-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc b/build/torch26-cxx11-cu124-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc
deleted file mode 100644
index ae0a49c6d1a33b84c622550d099a2a55ab0dc864..0000000000000000000000000000000000000000
Binary files a/build/torch26-cxx11-cu124-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc and /dev/null differ
diff --git a/build/torch26-cxx11-cu124-x86_64-linux/paged_attention/_ops.py b/build/torch26-cxx11-cu124-x86_64-linux/paged_attention/_ops.py
deleted file mode 100644
index 56d920dbf5b60b10a3444a94a1035d9e72a0df99..0000000000000000000000000000000000000000
--- a/build/torch26-cxx11-cu124-x86_64-linux/paged_attention/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _paged_attention_6677800
-ops = torch.ops._paged_attention_6677800
-
-def add_op_namespace_prefix(op_name: str):
-    """
-    Prefix op by namespace.
-    """
-    return f"_paged_attention_6677800::{op_name}"
\ No newline at end of file
diff --git a/build/torch26-cxx11-cu124-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so b/build/torch26-cxx11-cu124-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so
deleted file mode 100755
index 683e4227f6f6118161d7aa77f017ca8669d0d0d5..0000000000000000000000000000000000000000
--- a/build/torch26-cxx11-cu124-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1bc02dd09d997be7c2d3c996d5716ff269b4e4094b6cab70f4ae73c3763c36aa
-size 88666456
diff --git a/build/torch26-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc b/build/torch26-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc
deleted file mode 100644
index 7966f507207a15bc57b4637c3973612f222c3ec1..0000000000000000000000000000000000000000
Binary files a/build/torch26-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc and /dev/null differ
diff --git a/build/torch26-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc b/build/torch26-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc
deleted file mode 100644
index b20df8d2dfd9780cddc31133742a6d4d84bebc0a..0000000000000000000000000000000000000000
Binary files a/build/torch26-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc and /dev/null differ
diff --git a/build/torch26-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc b/build/torch26-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc
deleted file mode 100644
index c69ef7d3ff7ee868afff255af248a3d27c513fa1..0000000000000000000000000000000000000000
Binary files a/build/torch26-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc and /dev/null differ
diff --git a/build/torch26-cxx11-cu126-x86_64-linux/paged_attention/_ops.py b/build/torch26-cxx11-cu126-x86_64-linux/paged_attention/_ops.py
deleted file mode 100644
index 56d920dbf5b60b10a3444a94a1035d9e72a0df99..0000000000000000000000000000000000000000
--- a/build/torch26-cxx11-cu126-x86_64-linux/paged_attention/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _paged_attention_6677800
-ops = torch.ops._paged_attention_6677800
-
-def add_op_namespace_prefix(op_name: str):
-    """
-    Prefix op by namespace.
- """ - return f"_paged_attention_6677800::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so b/build/torch26-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so deleted file mode 100755 index 57c64eaf0d2415e30608222f19f3ee08788363ce..0000000000000000000000000000000000000000 --- a/build/torch26-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5077b6b8fffb349c79738c345b02903f643aa9530d10269ea143e8f3125d10e9 -size 88425448 diff --git a/build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc b/build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc deleted file mode 100644 index f845aecac7acd96da227c219ba333273b4a8d478..0000000000000000000000000000000000000000 Binary files a/build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc and /dev/null differ diff --git a/build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc b/build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc deleted file mode 100644 index 6653f396da33cc12cd837d2ed4d24ae4c3168cfc..0000000000000000000000000000000000000000 Binary files a/build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc and /dev/null differ diff --git a/build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc b/build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc deleted file mode 100644 index 257bc2c6462caf7eec6fe4e43057fabea4de8587..0000000000000000000000000000000000000000 Binary files a/build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc and /dev/null differ diff --git a/build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/_ops.py b/build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/_ops.py deleted file mode 100644 index 56d920dbf5b60b10a3444a94a1035d9e72a0df99..0000000000000000000000000000000000000000 --- a/build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _paged_attention_6677800 -ops = torch.ops._paged_attention_6677800 - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_paged_attention_6677800::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so b/build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so deleted file mode 100755 index 31f13481b3d3dc81468ea82fc3002d70f313c0a4..0000000000000000000000000000000000000000 --- a/build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3b4781f634ed1cceabfe9b00e485748ac4e179714ec5e8a3a67475b5a1072fb5 -size 133021344 diff --git a/build/torch26-cxx98-cu118-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc b/build/torch26-cxx98-cu118-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc deleted file mode 100644 index 74ae7c917695858b14316cfe297f06d556a2f83c..0000000000000000000000000000000000000000 Binary files a/build/torch26-cxx98-cu118-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc and /dev/null differ diff --git a/build/torch26-cxx98-cu118-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc b/build/torch26-cxx98-cu118-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc deleted file mode 100644 index 2a2b987edcaf3deca391b77095bfd95791e6ce83..0000000000000000000000000000000000000000 Binary files a/build/torch26-cxx98-cu118-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc and /dev/null differ diff --git a/build/torch26-cxx98-cu118-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc b/build/torch26-cxx98-cu118-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc deleted file mode 100644 index 697d384ad4f801a99899b26227f79128286c37f0..0000000000000000000000000000000000000000 Binary files a/build/torch26-cxx98-cu118-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc and /dev/null differ diff --git a/build/torch26-cxx98-cu118-x86_64-linux/paged_attention/_ops.py b/build/torch26-cxx98-cu118-x86_64-linux/paged_attention/_ops.py deleted file mode 100644 index 56d920dbf5b60b10a3444a94a1035d9e72a0df99..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu118-x86_64-linux/paged_attention/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _paged_attention_6677800 -ops = torch.ops._paged_attention_6677800 - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_paged_attention_6677800::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx98-cu118-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so b/build/torch26-cxx98-cu118-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so deleted file mode 100755 index a84155e9d34f24ccbf02173a98e96343bb56438e..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu118-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e5ad161ff6226eb3f697c3fcec6051c70ca5bc0a66332f927a3cc3ecb39c34dd -size 91821840 diff --git a/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/__init__.py b/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/__init__.py deleted file mode 100644 index 9de56043369487facc1f163df6bd319c9806e5ca..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -from ._custom_ops import ( - convert_fp8, - copy_blocks, - paged_attention_v1, - paged_attention_v2, - reshape_and_cache, - reshape_and_cache_flash, - swap_blocks, -) -from ._ops import ops - -__all__ = [ - "convert_fp8", - "copy_blocks", - "ops", - "paged_attention_v1", - "paged_attention_v2", - "reshape_and_cache", - "reshape_and_cache_flash", - "swap_blocks", -] diff --git a/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc b/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc deleted file mode 100644 index bab534f86e8f1a8efb4b4fdb047b4d5fcdc421eb..0000000000000000000000000000000000000000 Binary files a/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc and /dev/null differ diff --git a/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc b/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc deleted file mode 100644 index 765bc670ab5469ddd17eb1446a4b66fabc2225d5..0000000000000000000000000000000000000000 Binary files a/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc and /dev/null differ diff --git a/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc b/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc deleted file mode 100644 index 6f2f0bd6e3c9efcb39a84af2b74b3b2751f29db8..0000000000000000000000000000000000000000 Binary files a/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc and /dev/null differ diff --git a/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/_custom_ops.py b/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/_custom_ops.py deleted file mode 100644 index a0c0b8db085468dee5100c98d14106a9ee917bf2..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/_custom_ops.py +++ /dev/null @@ -1,173 +0,0 @@ -from typing import List, Optional - -import torch - -from ._ops import ops - - -# page attention ops -def paged_attention_v1( - out: torch.Tensor, - query: torch.Tensor, - key_cache: torch.Tensor, - value_cache: torch.Tensor, - num_kv_heads: int, - scale: float, - block_tables: torch.Tensor, - seq_lens: torch.Tensor, - block_size: int, - max_seq_len: int, - alibi_slopes: Optional[torch.Tensor], - kv_cache_dtype: str, - k_scale: float, - v_scale: float, - tp_rank: int = 
0, - blocksparse_local_blocks: int = 0, - blocksparse_vert_stride: int = 0, - blocksparse_block_size: int = 64, - blocksparse_head_sliding_step: int = 0, -) -> None: - ops.paged_attention_v1( - out, - query, - key_cache, - value_cache, - num_kv_heads, - scale, - block_tables, - seq_lens, - block_size, - max_seq_len, - alibi_slopes, - kv_cache_dtype, - k_scale, - v_scale, - tp_rank, - blocksparse_local_blocks, - blocksparse_vert_stride, - blocksparse_block_size, - blocksparse_head_sliding_step, - ) - - -def paged_attention_v2( - out: torch.Tensor, - exp_sum: torch.Tensor, - max_logits: torch.Tensor, - tmp_out: torch.Tensor, - query: torch.Tensor, - key_cache: torch.Tensor, - value_cache: torch.Tensor, - num_kv_heads: int, - scale: float, - block_tables: torch.Tensor, - seq_lens: torch.Tensor, - block_size: int, - max_seq_len: int, - alibi_slopes: Optional[torch.Tensor], - kv_cache_dtype: str, - k_scale: float, - v_scale: float, - tp_rank: int = 0, - blocksparse_local_blocks: int = 0, - blocksparse_vert_stride: int = 0, - blocksparse_block_size: int = 64, - blocksparse_head_sliding_step: int = 0, -) -> None: - ops.paged_attention_v2( - out, - exp_sum, - max_logits, - tmp_out, - query, - key_cache, - value_cache, - num_kv_heads, - scale, - block_tables, - seq_lens, - block_size, - max_seq_len, - alibi_slopes, - kv_cache_dtype, - k_scale, - v_scale, - tp_rank, - blocksparse_local_blocks, - blocksparse_vert_stride, - blocksparse_block_size, - blocksparse_head_sliding_step, - ) - - -def reshape_and_cache( - key: torch.Tensor, - value: torch.Tensor, - key_cache: torch.Tensor, - value_cache: torch.Tensor, - slot_mapping: torch.Tensor, - kv_cache_dtype: str, - k_scale: float, - v_scale: float, -) -> None: - ops.reshape_and_cache( - key, - value, - key_cache, - value_cache, - slot_mapping, - kv_cache_dtype, - k_scale, - v_scale, - ) - - -def reshape_and_cache_flash( - key: torch.Tensor, - value: torch.Tensor, - key_cache: torch.Tensor, - value_cache: torch.Tensor, - slot_mapping: torch.Tensor, - kv_cache_dtype: str, - k_scale: torch.Tensor, - v_scale: torch.Tensor, -) -> None: - ops.reshape_and_cache_flash( - key, - value, - key_cache, - value_cache, - slot_mapping, - kv_cache_dtype, - k_scale, - v_scale, - ) - - -def copy_blocks( - key_caches: List[torch.Tensor], - value_caches: List[torch.Tensor], - block_mapping: torch.Tensor, -) -> None: - ops.copy_blocks(key_caches, value_caches, block_mapping) - - -def swap_blocks( - src: torch.Tensor, dst: torch.Tensor, block_mapping: torch.Tensor -) -> None: - ops.swap_blocks(src, dst, block_mapping) - - -def convert_fp8( - output: torch.Tensor, input: torch.Tensor, scale: float = 1.0, kv_dtype: str = "fp8" -) -> None: - ops.convert_fp8(output, input, scale, kv_dtype) - - -__all__ = [ - "convert_fp8", - "paged_attention_v1", - "paged_attention_v2", - "reshape_and_cache", - "copy_blocks", -] diff --git a/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/_ops.py b/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/_ops.py deleted file mode 100644 index 56d920dbf5b60b10a3444a94a1035d9e72a0df99..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _paged_attention_6677800 -ops = torch.ops._paged_attention_6677800 - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_paged_attention_6677800::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so b/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so deleted file mode 100755 index 7dc22c44a910d4ef938effe0bde68289f627886c..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9dc095cafd06c184ecb8e2268bf1a4dbded38098c84880bdd9beb87a856a553f -size 88631224 diff --git a/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/platforms.py b/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/platforms.py deleted file mode 100644 index 6277d5f50ff3ddc265bb39fa1c4d17e0341b7767..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu124-x86_64-linux/paged_attention/platforms.py +++ /dev/null @@ -1,92 +0,0 @@ -import os -import random -from abc import ABC, abstractmethod -from functools import lru_cache, wraps -from typing import Callable, ParamSpec, TypeVar - -import numpy as np -import torch - -IS_ROCM = torch.version.hip is not None -IS_MPS = torch.backends.mps.is_available() - - -class Platform(ABC): - @classmethod - def seed_everything(cls, seed: int) -> None: - """ - Set the seed of each random module. - `torch.manual_seed` will set seed on all devices. - - Loosely based on: https://github.com/Lightning-AI/pytorch-lightning/blob/2.4.0/src/lightning/fabric/utilities/seed.py#L20 - """ - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - - @abstractmethod - def get_device_name(self, device_id: int = 0) -> str: ... - - @abstractmethod - def is_cuda(self) -> bool: ... - - @abstractmethod - def is_rocm(self) -> bool: ... - - @abstractmethod - def is_mps(self) -> bool: ... 
- - -class CudaPlatform(Platform): - @classmethod - @lru_cache(maxsize=8) - def get_device_name(cls, device_id: int = 0) -> str: - return torch.cuda.get_device_name(0) - - def is_cuda(self) -> bool: - return True - - def is_rocm(self) -> bool: - return False - - def is_mps(self) -> bool: - return False - - -class RocmPlatform(Platform): - @classmethod - @lru_cache(maxsize=8) - def get_device_name(cls, device_id: int = 0) -> str: - return torch.cuda.get_device_name(device_id) - - def is_cuda(self) -> bool: - return False - - def is_rocm(self) -> bool: - return True - - def is_mps(self) -> bool: - return False - - -class MpsPlatform(Platform): - @classmethod - @lru_cache(maxsize=8) - def get_device_name(cls, device_id: int = 0) -> str: - return torch.cuda.get_device_name(device_id) - - def is_cuda(self) -> bool: - return False - - def is_rocm(self) -> bool: - return False - - def is_mps(self) -> bool: - return True - -current_platform = ( - RocmPlatform() if IS_ROCM else - MpsPlatform() if IS_MPS else - CudaPlatform() if torch.cuda.is_available() else - None -) diff --git a/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/__init__.py b/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/__init__.py deleted file mode 100644 index 9de56043369487facc1f163df6bd319c9806e5ca..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -from ._custom_ops import ( - convert_fp8, - copy_blocks, - paged_attention_v1, - paged_attention_v2, - reshape_and_cache, - reshape_and_cache_flash, - swap_blocks, -) -from ._ops import ops - -__all__ = [ - "convert_fp8", - "copy_blocks", - "ops", - "paged_attention_v1", - "paged_attention_v2", - "reshape_and_cache", - "reshape_and_cache_flash", - "swap_blocks", -] diff --git a/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc b/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc deleted file mode 100644 index 2ecca74d686eae4c004b3f179b1be558cb3796e9..0000000000000000000000000000000000000000 Binary files a/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc and /dev/null differ diff --git a/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc b/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc deleted file mode 100644 index dbd7935a0ff33223d9849d3907c8026a18b88214..0000000000000000000000000000000000000000 Binary files a/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc and /dev/null differ diff --git a/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/_custom_ops.py b/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/_custom_ops.py deleted file mode 100644 index a0c0b8db085468dee5100c98d14106a9ee917bf2..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/_custom_ops.py +++ /dev/null @@ -1,173 +0,0 @@ -from typing import List, Optional - -import torch - -from ._ops import ops - - -# page attention ops -def paged_attention_v1( - out: torch.Tensor, - query: torch.Tensor, - key_cache: torch.Tensor, - value_cache: torch.Tensor, - num_kv_heads: int, - scale: float, - block_tables: torch.Tensor, - seq_lens: torch.Tensor, - block_size: int, - max_seq_len: int, - alibi_slopes: Optional[torch.Tensor], - kv_cache_dtype: str, - k_scale: float, - v_scale: float, - tp_rank: int = 0, - 
blocksparse_local_blocks: int = 0, - blocksparse_vert_stride: int = 0, - blocksparse_block_size: int = 64, - blocksparse_head_sliding_step: int = 0, -) -> None: - ops.paged_attention_v1( - out, - query, - key_cache, - value_cache, - num_kv_heads, - scale, - block_tables, - seq_lens, - block_size, - max_seq_len, - alibi_slopes, - kv_cache_dtype, - k_scale, - v_scale, - tp_rank, - blocksparse_local_blocks, - blocksparse_vert_stride, - blocksparse_block_size, - blocksparse_head_sliding_step, - ) - - -def paged_attention_v2( - out: torch.Tensor, - exp_sum: torch.Tensor, - max_logits: torch.Tensor, - tmp_out: torch.Tensor, - query: torch.Tensor, - key_cache: torch.Tensor, - value_cache: torch.Tensor, - num_kv_heads: int, - scale: float, - block_tables: torch.Tensor, - seq_lens: torch.Tensor, - block_size: int, - max_seq_len: int, - alibi_slopes: Optional[torch.Tensor], - kv_cache_dtype: str, - k_scale: float, - v_scale: float, - tp_rank: int = 0, - blocksparse_local_blocks: int = 0, - blocksparse_vert_stride: int = 0, - blocksparse_block_size: int = 64, - blocksparse_head_sliding_step: int = 0, -) -> None: - ops.paged_attention_v2( - out, - exp_sum, - max_logits, - tmp_out, - query, - key_cache, - value_cache, - num_kv_heads, - scale, - block_tables, - seq_lens, - block_size, - max_seq_len, - alibi_slopes, - kv_cache_dtype, - k_scale, - v_scale, - tp_rank, - blocksparse_local_blocks, - blocksparse_vert_stride, - blocksparse_block_size, - blocksparse_head_sliding_step, - ) - - -def reshape_and_cache( - key: torch.Tensor, - value: torch.Tensor, - key_cache: torch.Tensor, - value_cache: torch.Tensor, - slot_mapping: torch.Tensor, - kv_cache_dtype: str, - k_scale: float, - v_scale: float, -) -> None: - ops.reshape_and_cache( - key, - value, - key_cache, - value_cache, - slot_mapping, - kv_cache_dtype, - k_scale, - v_scale, - ) - - -def reshape_and_cache_flash( - key: torch.Tensor, - value: torch.Tensor, - key_cache: torch.Tensor, - value_cache: torch.Tensor, - slot_mapping: torch.Tensor, - kv_cache_dtype: str, - k_scale: torch.Tensor, - v_scale: torch.Tensor, -) -> None: - ops.reshape_and_cache_flash( - key, - value, - key_cache, - value_cache, - slot_mapping, - kv_cache_dtype, - k_scale, - v_scale, - ) - - -def copy_blocks( - key_caches: List[torch.Tensor], - value_caches: List[torch.Tensor], - block_mapping: torch.Tensor, -) -> None: - ops.copy_blocks(key_caches, value_caches, block_mapping) - - -def swap_blocks( - src: torch.Tensor, dst: torch.Tensor, block_mapping: torch.Tensor -) -> None: - ops.swap_blocks(src, dst, block_mapping) - - -def convert_fp8( - output: torch.Tensor, input: torch.Tensor, scale: float = 1.0, kv_dtype: str = "fp8" -) -> None: - ops.convert_fp8(output, input, scale, kv_dtype) - - -__all__ = [ - "convert_fp8", - "paged_attention_v1", - "paged_attention_v2", - "reshape_and_cache", - "copy_blocks", -] diff --git a/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/_ops.py b/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/_ops.py deleted file mode 100644 index 56d920dbf5b60b10a3444a94a1035d9e72a0df99..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/_ops.py +++ /dev/null @@ -1,9 +0,0 @@ -import torch -from . import _paged_attention_6677800 -ops = torch.ops._paged_attention_6677800 - -def add_op_namespace_prefix(op_name: str): - """ - Prefix op by namespace. 
- """ - return f"_paged_attention_6677800::{op_name}" \ No newline at end of file diff --git a/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so b/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so deleted file mode 100755 index 17d65fda6d18a56ca70a37150b1cb60012cb68d7..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fe20bc582f45af4d0819856e77a7aa31a7d7ef4f821e362e7972a4e8bb6c2eba -size 88390208 diff --git a/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/platforms.py b/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/platforms.py deleted file mode 100644 index 6277d5f50ff3ddc265bb39fa1c4d17e0341b7767..0000000000000000000000000000000000000000 --- a/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/platforms.py +++ /dev/null @@ -1,92 +0,0 @@ -import os -import random -from abc import ABC, abstractmethod -from functools import lru_cache, wraps -from typing import Callable, ParamSpec, TypeVar - -import numpy as np -import torch - -IS_ROCM = torch.version.hip is not None -IS_MPS = torch.backends.mps.is_available() - - -class Platform(ABC): - @classmethod - def seed_everything(cls, seed: int) -> None: - """ - Set the seed of each random module. - `torch.manual_seed` will set seed on all devices. - - Loosely based on: https://github.com/Lightning-AI/pytorch-lightning/blob/2.4.0/src/lightning/fabric/utilities/seed.py#L20 - """ - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - - @abstractmethod - def get_device_name(self, device_id: int = 0) -> str: ... - - @abstractmethod - def is_cuda(self) -> bool: ... - - @abstractmethod - def is_rocm(self) -> bool: ... - - @abstractmethod - def is_mps(self) -> bool: ... 
- - -class CudaPlatform(Platform): - @classmethod - @lru_cache(maxsize=8) - def get_device_name(cls, device_id: int = 0) -> str: - return torch.cuda.get_device_name(0) - - def is_cuda(self) -> bool: - return True - - def is_rocm(self) -> bool: - return False - - def is_mps(self) -> bool: - return False - - -class RocmPlatform(Platform): - @classmethod - @lru_cache(maxsize=8) - def get_device_name(cls, device_id: int = 0) -> str: - return torch.cuda.get_device_name(device_id) - - def is_cuda(self) -> bool: - return False - - def is_rocm(self) -> bool: - return True - - def is_mps(self) -> bool: - return False - - -class MpsPlatform(Platform): - @classmethod - @lru_cache(maxsize=8) - def get_device_name(cls, device_id: int = 0) -> str: - return torch.cuda.get_device_name(device_id) - - def is_cuda(self) -> bool: - return False - - def is_rocm(self) -> bool: - return False - - def is_mps(self) -> bool: - return True - -current_platform = ( - RocmPlatform() if IS_ROCM else - MpsPlatform() if IS_MPS else - CudaPlatform() if torch.cuda.is_available() else - None -) diff --git a/build/torch26-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc b/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc similarity index 55% rename from build/torch26-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc rename to build/torch27-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc index 552263781d95d35d01fd8c2ae0798549527d5ced..3a42f6c23989d05e0c4180a8e07ce104a7ecd99c 100644 Binary files a/build/torch26-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc and b/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc differ diff --git a/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc b/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc deleted file mode 100644 index 731970eb28e385f3e9f247f81f62632cca0d2495..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc b/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c698ce4a5f2b3db77ed9861efc143265073f6a8 Binary files /dev/null and b/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc differ diff --git a/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc b/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc deleted file mode 100644 index 53249ceb7dff4629772aad669fa0736f6d76af11..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4cbb5b24a571485fb2d7712b46631a1eedf0b41 Binary files /dev/null and b/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc 
differ diff --git a/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/_ops.py b/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/_ops.py index 56d920dbf5b60b10a3444a94a1035d9e72a0df99..a883d5cb5d351eb18b40e1d4b621a7d1544385ab 100644 --- a/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/_ops.py +++ b/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/_ops.py @@ -1,9 +1,9 @@ import torch -from . import _paged_attention_6677800 -ops = torch.ops._paged_attention_6677800 +from . import _paged_attention_0041e3f +ops = torch.ops._paged_attention_0041e3f def add_op_namespace_prefix(op_name: str): """ Prefix op by namespace. """ - return f"_paged_attention_6677800::{op_name}" \ No newline at end of file + return f"_paged_attention_0041e3f::{op_name}" \ No newline at end of file diff --git a/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so b/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..71626cdbf291f8b39d80a6b511439c2148f25112 --- /dev/null +++ b/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b899f376425b8d7a213b8d26909d84ffb1213e03d3e5b33675e9408426747501 +size 113844912 diff --git a/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so b/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so deleted file mode 100755 index ef90bd1abefd47d7a65d18855b4abe933604a779..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:f7e9598cbcf88d836c2cbf737dafc4513e373b81f383acfce0aa74227600a166 -size 91845296 diff --git a/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc b/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc similarity index 55% rename from build/torch26-cxx98-cu126-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc rename to build/torch27-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc index 86adb2c008ed0396c83d39b83a3df64479afee7f..428ec0cbce6fbd264193a66297408d47d9f9882b 100644 Binary files a/build/torch26-cxx98-cu126-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc and b/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc differ diff --git a/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc b/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc deleted file mode 100644 index 0614a9070b49b5ba96f839d60329b733c422f48c..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc b/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4257fb3f872d1ec4e346d6f3c5ec2046ab82f36 Binary files /dev/null and b/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc 
differ diff --git a/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc b/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc deleted file mode 100644 index e6fd366b9e45a56263407a972e98b1ffc5e2303e..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..987598889f8379c07efa94bad6a3b6f17dec9860 Binary files /dev/null and b/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc differ diff --git a/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/_ops.py b/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/_ops.py index 56d920dbf5b60b10a3444a94a1035d9e72a0df99..a883d5cb5d351eb18b40e1d4b621a7d1544385ab 100644 --- a/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/_ops.py +++ b/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/_ops.py @@ -1,9 +1,9 @@ import torch -from . import _paged_attention_6677800 -ops = torch.ops._paged_attention_6677800 +from . import _paged_attention_0041e3f +ops = torch.ops._paged_attention_0041e3f def add_op_namespace_prefix(op_name: str): """ Prefix op by namespace. """ - return f"_paged_attention_6677800::{op_name}" \ No newline at end of file + return f"_paged_attention_0041e3f::{op_name}" \ No newline at end of file diff --git a/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so b/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..d68462effa6cb3bc865b2cdca6137261d51c2edc --- /dev/null +++ b/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f98a11d55d75ee841515253dfd16b5dd17a811925a445ef765b90b4b56a10e35 +size 110732296 diff --git a/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so b/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so deleted file mode 100755 index 7eb73d0955616254c8a8614ebd96d909c4078b77..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e14b5caf109855ba826adef443fc4568a170ba2bbd1738b7bbdcec5e43f38cfd -size 88425480 diff --git a/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc b/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc similarity index 55% rename from build/torch27-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc rename to build/torch27-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc index 70c6994c7462eaf4b46ea7a83ace57daeab52fb1..7c13b4dd0a5c62749c4fffaed83061e327465cfa 100644 Binary files a/build/torch27-cxx11-cu118-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc and b/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc differ diff --git 
a/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc b/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc deleted file mode 100644 index 5ad287ac40a8bc17b688a8e9c94b96bcf9302839..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc b/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb10ad97588c556a857e62d03549f1c72efeced9 Binary files /dev/null and b/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc differ diff --git a/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc b/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc deleted file mode 100644 index 523a67b87a2fb452b488bb3d4fc4f5529a036561..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc and /dev/null differ diff --git a/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dbe695e89609fad8076312195af5a04690a744ee Binary files /dev/null and b/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc differ diff --git a/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/_ops.py b/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/_ops.py index 56d920dbf5b60b10a3444a94a1035d9e72a0df99..a883d5cb5d351eb18b40e1d4b621a7d1544385ab 100644 --- a/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/_ops.py +++ b/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/_ops.py @@ -1,9 +1,9 @@ import torch -from . import _paged_attention_6677800 -ops = torch.ops._paged_attention_6677800 +from . import _paged_attention_0041e3f +ops = torch.ops._paged_attention_0041e3f def add_op_namespace_prefix(op_name: str): """ Prefix op by namespace. 
""" - return f"_paged_attention_6677800::{op_name}" \ No newline at end of file + return f"_paged_attention_0041e3f::{op_name}" \ No newline at end of file diff --git a/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so b/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..e2fbd54b5c6e0379de056949d3d7991b31b546e6 --- /dev/null +++ b/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15f5767adca82992a20bb3b0ff047e5cd076c386f1bdff70f63c3052c47e1ac3 +size 138291040 diff --git a/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so b/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so deleted file mode 100755 index fcaa5bfcae4da9cb0dd496b3e539a8aa66c49731..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5747d9347ecab35a021f746cbb37b7498c8e6e29558071c5845cf2ccc613aa61 -size 120461168 diff --git a/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc b/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc similarity index 55% rename from build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc rename to build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc index a76b5a7833d5bb073bf304e511c99a89cfe195a0..8a487f6dd62db7a85db734773b9718a43478a689 100644 Binary files a/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc and b/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc differ diff --git a/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc b/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc deleted file mode 100644 index 8872c0d27f9f5d0de6c7d51782a8d5066523ab7a..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-312.pyc and /dev/null differ diff --git a/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc b/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8ae1d42b0f5f3fb681398076bef151d34cc9325 Binary files /dev/null and b/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc differ diff --git a/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc b/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc deleted file mode 100644 index cd4e26d7f74f774767789700960d43827a9d3e8d..0000000000000000000000000000000000000000 Binary files a/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_ops.cpython-312.pyc and /dev/null differ diff --git a/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..7a4ea49b96acc139399eedbd529c4af03da4944b Binary files /dev/null and b/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc differ diff --git a/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/_ops.py b/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/_ops.py index 56d920dbf5b60b10a3444a94a1035d9e72a0df99..a883d5cb5d351eb18b40e1d4b621a7d1544385ab 100644 --- a/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/_ops.py +++ b/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/_ops.py @@ -1,9 +1,9 @@ import torch -from . import _paged_attention_6677800 -ops = torch.ops._paged_attention_6677800 +from . import _paged_attention_0041e3f +ops = torch.ops._paged_attention_0041e3f def add_op_namespace_prefix(op_name: str): """ Prefix op by namespace. """ - return f"_paged_attention_6677800::{op_name}" \ No newline at end of file + return f"_paged_attention_0041e3f::{op_name}" \ No newline at end of file diff --git a/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so b/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so new file mode 100755 index 0000000000000000000000000000000000000000..f8326ce54482175c1763ff12b7789f71c61dc9ca --- /dev/null +++ b/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5955ede80b62f77cbcf76ffc3961b20af2d5a17ceb95c5c2fc06e7b12e9d3bec +size 120178304 diff --git a/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so b/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so deleted file mode 100755 index af88a817b2d5bf4ade32920c6dd1af1a79cbf770..0000000000000000000000000000000000000000 --- a/build/torch27-cxx11-rocm63-x86_64-linux/paged_attention/_paged_attention_6677800.abi3.so +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2905e631976eeeac3816fc1907f9c23818151ecb7e54d3fece7a42e0bcf10553 -size 120280352 diff --git a/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc index 6e06d21cf688318027727e6d29af442a65cc4c6a..b9ca0ebe4eaa227b9fb750184ad70ba42e1c2205 100644 Binary files a/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc and b/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc differ diff --git a/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc b/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc index 5f7abb5c212b78b49f6216d3974194d20af62385..e7ffbb72c9f7906a9049e8244e750d1aa706130f 100644 Binary files a/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc and b/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc differ diff --git a/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc index 5d09199567f3c255bc54ca65dfe4bee1ebc8c7e9..3791d9d063e969f7ae2544ea5a019aebd20ea818 100644 Binary files 
a/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc and b/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/_ops.py b/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/_ops.py
index fb15200f51b4ecf38aee6989985e41283ce3db12..a883d5cb5d351eb18b40e1d4b621a7d1544385ab 100644
--- a/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/_ops.py
+++ b/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/_ops.py
@@ -1,9 +1,9 @@
 import torch
-from . import _paged_attention_8b32f11_dirty
-ops = torch.ops._paged_attention_8b32f11_dirty
+from . import _paged_attention_0041e3f
+ops = torch.ops._paged_attention_0041e3f
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_paged_attention_8b32f11_dirty::{op_name}"
\ No newline at end of file
+    return f"_paged_attention_0041e3f::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so b/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..c3bdbd626ebb380e1807f4d7bcfcab0403ec3cfc
--- /dev/null
+++ b/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4c703fe6a83f86f7cc72d70e98d5ffb09dbc5e23047a49d92a1a3edd3425d88
+size 110720952
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_8b32f11_dirty.abi3.so b/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_8b32f11_dirty.abi3.so
deleted file mode 100755
index e4890ac22eea19e3605169857cb7dd719d722e56..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_8b32f11_dirty.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7b6c7bd31d6af79211868bcd2e699c47ddbbabcee833320b51e13e5cc1515cea
-size 88319960
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc
index 8da60493d28f1888aa237d69d44c5b44309191e1..275cac059e37b1867c556d2b1ce9490a5b2c3df3 100644
Binary files a/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc and b/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc b/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc
index a46c1f5b3b5a254c568af2e328cc73efb483234a..4b862f913256d79af394e7dc3af21b78f4e9c5ec 100644
Binary files a/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc and b/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc
index 12696d2d364d01884201c473a3e8e2660015c47f..dc8a7e343c0dca4b6d27c16c5b34fd90a0fc73bf 100644
Binary files
a/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc and b/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/_ops.py b/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/_ops.py
index fb15200f51b4ecf38aee6989985e41283ce3db12..a883d5cb5d351eb18b40e1d4b621a7d1544385ab 100644
--- a/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/_ops.py
+++ b/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/_ops.py
@@ -1,9 +1,9 @@
 import torch
-from . import _paged_attention_8b32f11_dirty
-ops = torch.ops._paged_attention_8b32f11_dirty
+from . import _paged_attention_0041e3f
+ops = torch.ops._paged_attention_0041e3f
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_paged_attention_8b32f11_dirty::{op_name}"
\ No newline at end of file
+    return f"_paged_attention_0041e3f::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so b/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..ed7df755516235f38266fb32b54e4ada33cf2cb0
--- /dev/null
+++ b/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1aae42e8db3445e710a66ae53d7eaccbdac9694c58ea759017650a1a46a5af69
+size 138280008
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/_paged_attention_8b32f11_dirty.abi3.so b/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/_paged_attention_8b32f11_dirty.abi3.so
deleted file mode 100755
index 1a9497471d4fdbc60401f4472be229dbc188139a..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu128-x86_64-linux/paged_attention/_paged_attention_8b32f11_dirty.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e8337615463ef75a6dcb91242d1aa4d5596ecbadb91249016481016ab016d403
-size 120355944
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc
index bbf71a19295bf50a5ea975bbf648cef6b36c7d00..971f1be1118ed39add7ac7136c274dd84b717666 100644
Binary files a/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc and b/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc b/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc
index 491703360f50a5b99625f588b25fdb8851635983..1f171978b68ee2bf49bbf9d1a62278e4d75869cb 100644
Binary files a/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc and b/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc
index 97cc65a3b5a16b8285a6672c3235ba19fc08bfa1..a753bdd50c6c3ef64ebb4f026260eafc403057d4 100644
Binary files a/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc and b/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/_ops.py b/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/_ops.py
index fb15200f51b4ecf38aee6989985e41283ce3db12..a883d5cb5d351eb18b40e1d4b621a7d1544385ab 100644
--- a/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/_ops.py
+++ b/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/_ops.py
@@ -1,9 +1,9 @@
 import torch
-from . import _paged_attention_8b32f11_dirty
-ops = torch.ops._paged_attention_8b32f11_dirty
+from . import _paged_attention_0041e3f
+ops = torch.ops._paged_attention_0041e3f
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_paged_attention_8b32f11_dirty::{op_name}"
\ No newline at end of file
+    return f"_paged_attention_0041e3f::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so b/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..e40cd64c8a81f250cc6736781da97148fd58e03f
--- /dev/null
+++ b/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6adbf2844ccb26b11787f5322650bac70b78e25d8938667764bb84f0a092bd71
+size 149921816
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/_paged_attention_8b32f11_dirty.abi3.so b/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/_paged_attention_8b32f11_dirty.abi3.so
deleted file mode 100755
index 4ec08e1028e08719406df06db0223086c2a21b96..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu129-x86_64-linux/paged_attention/_paged_attention_8b32f11_dirty.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a5607d3db4b4f9d1d4d1b49b7e824773ff0c3666d0a53ebd43f04af204c2dbed
-size 130523184
diff --git a/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc
index 6346188129df0b864fc35f72f97fcf937e862b53..b8a344b75196bd5dd61974a13336cfcacf2e8d3b 100644
Binary files a/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc and b/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc b/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc
index 3a0469611a0cbf183c9271f807e1479a5fde27d8..bc65ab1681d8405458c32c35d17660dadc761a76 100644
Binary files a/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc and b/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc
index c09293044c10d7e3125d92ac7043828274ef40ef..ec9d7e8c2c3e9bdf96b42fcdc353f152666424e0 100644
Binary files a/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc and b/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/_ops.py b/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/_ops.py
index fb15200f51b4ecf38aee6989985e41283ce3db12..a883d5cb5d351eb18b40e1d4b621a7d1544385ab 100644
--- a/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/_ops.py
+++ b/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/_ops.py
@@ -1,9 +1,9 @@
 import torch
-from . import _paged_attention_8b32f11_dirty
-ops = torch.ops._paged_attention_8b32f11_dirty
+from . import _paged_attention_0041e3f
+ops = torch.ops._paged_attention_0041e3f
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_paged_attention_8b32f11_dirty::{op_name}"
\ No newline at end of file
+    return f"_paged_attention_0041e3f::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so b/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..8e9177ea8fc4d10cd6a8150c7006c625899c7aa7
--- /dev/null
+++ b/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8805b0430cd4891491e7e2ec4af3caeaba2f079def2d643d0368363aa783baac
+size 120179032
diff --git a/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/_paged_attention_8b32f11_dirty.abi3.so b/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/_paged_attention_8b32f11_dirty.abi3.so
deleted file mode 100755
index ed47a0373ed20f3cd4e2f94d1dd55089c89dad6e..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-rocm63-x86_64-linux/paged_attention/_paged_attention_8b32f11_dirty.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d4b8a56f56adc60a2829d61a7e47711508744c4d43a2dde8501cf696632bb443
-size 120179064
diff --git a/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc
index d61af51404068ee22a51c8a7b310150eab71c38f..fefbce0d1862e94edd0937d21be7c5b7c0b64a93 100644
Binary files a/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc and b/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc b/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc
index 2223f0b4da20be559d24d54db2d425d988d6e881..5df6b16274a16964110a212a181a33b4b1c16f75 100644
Binary files a/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc and b/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc
index f037e98ce6f6e699dc0add013f1884c63f31ead6..3284f8d39a2e84472c719b472a0629ed79a067aa 100644
Binary files a/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc and b/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/_ops.py b/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/_ops.py
index fb15200f51b4ecf38aee6989985e41283ce3db12..a883d5cb5d351eb18b40e1d4b621a7d1544385ab 100644
--- a/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/_ops.py
+++ b/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/_ops.py
@@ -1,9 +1,9 @@
 import torch
-from . import _paged_attention_8b32f11_dirty
-ops = torch.ops._paged_attention_8b32f11_dirty
+from . import _paged_attention_0041e3f
+ops = torch.ops._paged_attention_0041e3f
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_paged_attention_8b32f11_dirty::{op_name}"
\ No newline at end of file
+    return f"_paged_attention_0041e3f::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so b/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..cc0c8252e728892c9d34d52d4980501e679d40b6
--- /dev/null
+++ b/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfa214ea3bdbe48a4b7ab1e4591aeb0112173a102b96e6ad27d3e5136d9021d7
+size 121016728
diff --git a/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/_paged_attention_8b32f11_dirty.abi3.so b/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/_paged_attention_8b32f11_dirty.abi3.so
deleted file mode 100755
index e4ea6b8f1b2a7af1e58308398accd478fbbb5939..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-rocm64-x86_64-linux/paged_attention/_paged_attention_8b32f11_dirty.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e64d968748aa3fd84b8a92e98f1fec4bd0fbccacdf7bb27d07f04adaf1247f6d
-size 121016752
diff --git a/build/torch26-cxx11-cu118-x86_64-linux/paged_attention/__init__.py b/build/torch29-cxx11-cu126-x86_64-linux/paged_attention/__init__.py
similarity index 100%
rename from build/torch26-cxx11-cu118-x86_64-linux/paged_attention/__init__.py
rename to build/torch29-cxx11-cu126-x86_64-linux/paged_attention/__init__.py
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc b/build/torch29-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc
similarity index 55%
rename from build/torch27-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc
rename to build/torch29-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc
index 562bf3258683ad6cc595aa771f3ba0772b909e1f..35911092fae3038cdd30c25048f7bf63181a4029 100644
Binary files a/build/torch27-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc and b/build/torch29-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc b/build/torch29-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..92757c08140c8d6d2c033cd3d46ad1ec5e9825e3
Binary files /dev/null and b/build/torch29-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc differ
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc b/build/torch29-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5407f06ca3cf8de3f29350a177e654416b85b8bd
Binary files /dev/null and b/build/torch29-cxx11-cu126-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch26-cxx11-cu118-x86_64-linux/paged_attention/_custom_ops.py b/build/torch29-cxx11-cu126-x86_64-linux/paged_attention/_custom_ops.py
similarity index 100%
rename from build/torch26-cxx11-cu118-x86_64-linux/paged_attention/_custom_ops.py
rename to build/torch29-cxx11-cu126-x86_64-linux/paged_attention/_custom_ops.py
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/paged_attention/_ops.py b/build/torch29-cxx11-cu126-x86_64-linux/paged_attention/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..a883d5cb5d351eb18b40e1d4b621a7d1544385ab
--- /dev/null
+++ b/build/torch29-cxx11-cu126-x86_64-linux/paged_attention/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _paged_attention_0041e3f
+ops = torch.ops._paged_attention_0041e3f
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_paged_attention_0041e3f::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so b/build/torch29-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..99173d2e34f4794d0ab57422847ca218b9b88a9e
--- /dev/null
+++ b/build/torch29-cxx11-cu126-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b2aa3a6240028cccdab057705a26057090f551120d6f2506e471a1e74f2ae7f
+size 110720928
diff --git a/build/torch26-cxx11-cu118-x86_64-linux/paged_attention/platforms.py b/build/torch29-cxx11-cu126-x86_64-linux/paged_attention/platforms.py
similarity index 100%
rename from build/torch26-cxx11-cu118-x86_64-linux/paged_attention/platforms.py
rename to build/torch29-cxx11-cu126-x86_64-linux/paged_attention/platforms.py
diff --git a/build/torch26-cxx11-cu124-x86_64-linux/paged_attention/__init__.py b/build/torch29-cxx11-cu128-x86_64-linux/paged_attention/__init__.py
similarity index 100%
rename from build/torch26-cxx11-cu124-x86_64-linux/paged_attention/__init__.py
rename to build/torch29-cxx11-cu128-x86_64-linux/paged_attention/__init__.py
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc b/build/torch29-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc
similarity index 55%
rename from build/torch27-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc
rename to build/torch29-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc
index 702e7abb792e16df32b113a1305f8e38eedf536f..ce2d15d81d2f49c1016c40d2c9d5f8b4b767b1b6 100644
Binary files a/build/torch27-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/__init__.cpython-312.pyc and b/build/torch29-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc b/build/torch29-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7cca305b5a01d863c44aac5163aa5335dd549813
Binary files /dev/null and b/build/torch29-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc differ
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc b/build/torch29-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d5f215905183d000e55d867a3bfcf36b886b49a9
Binary files /dev/null and b/build/torch29-cxx11-cu128-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch26-cxx11-cu124-x86_64-linux/paged_attention/_custom_ops.py b/build/torch29-cxx11-cu128-x86_64-linux/paged_attention/_custom_ops.py
similarity index 100%
rename from build/torch26-cxx11-cu124-x86_64-linux/paged_attention/_custom_ops.py
rename to build/torch29-cxx11-cu128-x86_64-linux/paged_attention/_custom_ops.py
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/paged_attention/_ops.py b/build/torch29-cxx11-cu128-x86_64-linux/paged_attention/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..a883d5cb5d351eb18b40e1d4b621a7d1544385ab
--- /dev/null
+++ b/build/torch29-cxx11-cu128-x86_64-linux/paged_attention/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _paged_attention_0041e3f
+ops = torch.ops._paged_attention_0041e3f
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_paged_attention_0041e3f::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so b/build/torch29-cxx11-cu128-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..23158fda11a3123a0768177f1a42b62e6d001da4
--- /dev/null
+++ b/build/torch29-cxx11-cu128-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:192972dc4a6db1ec6be97e036b222ceb570c0a17c19c66589e1068f8d5f93e8b
+size 138279984
diff --git a/build/torch26-cxx11-cu124-x86_64-linux/paged_attention/platforms.py b/build/torch29-cxx11-cu128-x86_64-linux/paged_attention/platforms.py
similarity index 100%
rename from build/torch26-cxx11-cu124-x86_64-linux/paged_attention/platforms.py
rename to build/torch29-cxx11-cu128-x86_64-linux/paged_attention/platforms.py
diff --git a/build/torch26-cxx11-cu126-x86_64-linux/paged_attention/__init__.py b/build/torch29-cxx11-cu130-x86_64-linux/paged_attention/__init__.py
similarity index 100%
rename from build/torch26-cxx11-cu126-x86_64-linux/paged_attention/__init__.py
rename to build/torch29-cxx11-cu130-x86_64-linux/paged_attention/__init__.py
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc b/build/torch29-cxx11-cu130-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bee01b1d0c80247145d42c5b857e888834892e22
Binary files /dev/null and b/build/torch29-cxx11-cu130-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc b/build/torch29-cxx11-cu130-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8e2ccdaa2adb49c880da03db31c5283c68866f1e
Binary files /dev/null and b/build/torch29-cxx11-cu130-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc differ
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc b/build/torch29-cxx11-cu130-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7d832cafa849552d73896be30bd66e1696264166
Binary files /dev/null and b/build/torch29-cxx11-cu130-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch26-cxx11-cu126-x86_64-linux/paged_attention/_custom_ops.py b/build/torch29-cxx11-cu130-x86_64-linux/paged_attention/_custom_ops.py
similarity index 100%
rename from build/torch26-cxx11-cu126-x86_64-linux/paged_attention/_custom_ops.py
rename to build/torch29-cxx11-cu130-x86_64-linux/paged_attention/_custom_ops.py
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/paged_attention/_ops.py b/build/torch29-cxx11-cu130-x86_64-linux/paged_attention/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..a883d5cb5d351eb18b40e1d4b621a7d1544385ab
--- /dev/null
+++ b/build/torch29-cxx11-cu130-x86_64-linux/paged_attention/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _paged_attention_0041e3f
+ops = torch.ops._paged_attention_0041e3f
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_paged_attention_0041e3f::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so b/build/torch29-cxx11-cu130-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..ec81746bbf1232a4ddbc0a3297b21d9dd6916651
--- /dev/null
+++ b/build/torch29-cxx11-cu130-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:383bb9d2b65c688fd86a9c8f654018754d6f9ef7983aa3b64b11736cd779fc27
+size 77143312
diff --git a/build/torch26-cxx11-cu126-x86_64-linux/paged_attention/platforms.py b/build/torch29-cxx11-cu130-x86_64-linux/paged_attention/platforms.py
similarity index 100%
rename from build/torch26-cxx11-cu126-x86_64-linux/paged_attention/platforms.py
rename to build/torch29-cxx11-cu130-x86_64-linux/paged_attention/platforms.py
diff --git a/build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/__init__.py b/build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/__init__.py
similarity index 100%
rename from build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/__init__.py
rename to build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/__init__.py
diff --git a/build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc b/build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b7440e531df92c4ce0a4a23fa4ca8dfe9017013
Binary files /dev/null and b/build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc b/build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d0996b1b019b038a1301218935a0c6dfbfd563d
Binary files /dev/null and b/build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc differ
diff --git a/build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc b/build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..613ac91c1843c8f9de6755b0d63e1ec657e40525
Binary files /dev/null and b/build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/_custom_ops.py b/build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/_custom_ops.py
similarity index 100%
rename from build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/_custom_ops.py
rename to build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/_custom_ops.py
diff --git a/build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/_ops.py b/build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..a883d5cb5d351eb18b40e1d4b621a7d1544385ab
--- /dev/null
+++ b/build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _paged_attention_0041e3f
+ops = torch.ops._paged_attention_0041e3f
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_paged_attention_0041e3f::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so b/build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..54dfa1ea4aada5de839d42cb28dd1f623787afe6
--- /dev/null
+++ b/build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:660c385fa3361e457cc32a01c5a2024a3c2abc9aa9bfa2efb72c63796df1f3da
+size 120179008
diff --git a/build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/platforms.py b/build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/platforms.py
similarity index 100%
rename from build/torch26-cxx11-rocm62-x86_64-linux/paged_attention/platforms.py
rename to build/torch29-cxx11-rocm63-x86_64-linux/paged_attention/platforms.py
diff --git a/build/torch26-cxx98-cu118-x86_64-linux/paged_attention/__init__.py b/build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/__init__.py
similarity index 100%
rename from build/torch26-cxx98-cu118-x86_64-linux/paged_attention/__init__.py
rename to build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/__init__.py
diff --git a/build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc b/build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a065b4365a88afffaa3753f30179cf54b185f71e
Binary files /dev/null and b/build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/__init__.cpython-313.pyc differ
diff --git a/build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc b/build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e8ca6cf765f2787851950a2fcddca29bf824cfa3
Binary files /dev/null and b/build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/_custom_ops.cpython-313.pyc differ
diff --git a/build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc b/build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6fe2fbf0550bef61d60593caff8881bc6bc6a883
Binary files /dev/null and b/build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/__pycache__/_ops.cpython-313.pyc differ
diff --git a/build/torch26-cxx98-cu118-x86_64-linux/paged_attention/_custom_ops.py b/build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/_custom_ops.py
similarity index 100%
rename from build/torch26-cxx98-cu118-x86_64-linux/paged_attention/_custom_ops.py
rename to build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/_custom_ops.py
diff --git a/build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/_ops.py b/build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..a883d5cb5d351eb18b40e1d4b621a7d1544385ab
--- /dev/null
+++ b/build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _paged_attention_0041e3f
+ops = torch.ops._paged_attention_0041e3f
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_paged_attention_0041e3f::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so b/build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..1c91912bdc8dcb9f8b8ee051ce0f30cd3fc7f9f0
--- /dev/null
+++ b/build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/_paged_attention_0041e3f.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2fde7b0a7f6775acfeb55770cf399c6c3a2cdf4c6b4d8158fa29e0888bd281f6
+size 121016696
diff --git a/build/torch26-cxx98-cu118-x86_64-linux/paged_attention/platforms.py b/build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/platforms.py
similarity index 100%
rename from build/torch26-cxx98-cu118-x86_64-linux/paged_attention/platforms.py
rename to build/torch29-cxx11-rocm64-x86_64-linux/paged_attention/platforms.py
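Note on the regenerated shim: every `_ops.py` touched above is the same small loader, re-emitted with the new build hash `_paged_attention_0041e3f`. The sketch below shows how such a shim is typically consumed; it assumes one of the built variants is importable as the `paged_attention` package, and the op name `paged_attention_v1` is purely illustrative (this diff does not show which operators the compiled extension registers).

# Minimal usage sketch; names marked hypothetical are not taken from this diff.
from paged_attention._ops import ops, add_op_namespace_prefix

# Build the fully qualified operator name inside the versioned namespace,
# e.g. for torch.library lookups: "_paged_attention_0041e3f::paged_attention_v1".
qualified_name = add_op_namespace_prefix("paged_attention_v1")  # hypothetical op name
print(qualified_name)

# Operators registered by the compiled .abi3.so are reachable through the
# `ops` handle; the call and its arguments below are hypothetical:
# out = ops.paged_attention_v1(query, key_cache, value_cache, block_tables, seq_lens)

Keeping the hash in the namespace means two builds of the extension can be loaded side by side without their `torch.ops` registrations colliding, which is presumably why the shim is regenerated per build rather than using a fixed namespace.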