| """ PagedAttention: 基于虚拟内存管理的注意力机制 核心思想:将KV Cache分成固定大小的"页",实现高效内存管理 """
import torch
import torch.nn as nn
from typing import Dict, List, Optional
class PagedAttentionConfig:
    """Configuration for PagedAttention."""

    def __init__(self, block_size: int = 16, num_blocks: int = 1024,
                 head_dim: int = 128, num_heads: int = 32):
        self.block_size = block_size  # tokens per physical block (one "page")
        self.num_blocks = num_blocks  # total physical blocks in the pool
        self.head_dim = head_dim
        self.num_heads = num_heads
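# Example sizing with the defaults above, assuming fp16 storage (2 bytes/element):
#   per block: 16 tokens * 32 heads * 128 dims * 2 (K and V) * 2 bytes = 256 KiB
#   pool total: 1024 blocks * 256 KiB = 256 MiB of KV cache per layer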
class KVCache:
    """Paged KV cache backed by a fixed pool of physical blocks."""

    def __init__(self, config: PagedAttentionConfig):
        self.config = config
        self.block_size = config.block_size
        self.num_blocks = config.num_blocks
        # Physical storage: [num_blocks, block_size, num_heads, head_dim].
        self.k_cache = torch.zeros(
            config.num_blocks, config.block_size, config.num_heads, config.head_dim)
        self.v_cache = torch.zeros_like(self.k_cache)
        self.block_tables: Dict[int, List[int]] = {}  # seq_id -> logical->physical map
        self.used_blocks: set = set()  # ids of currently allocated physical blocks

    def alloc(self, seq_id: int, seq_len: int) -> dict:
        """Allocate cache blocks for a new sequence and register its block table."""
        num_blocks_needed = (seq_len + self.block_size - 1) // self.block_size
        physical_blocks = []
        for _ in range(num_blocks_needed):
            block_id = self._alloc_physical_block()
            if block_id is None:
                # Roll back the partial allocation before failing.
                for b in physical_blocks:
                    self.used_blocks.discard(b)
                raise RuntimeError("Out of memory: no free physical blocks")
            physical_blocks.append(block_id)
        self.block_tables[seq_id] = physical_blocks
        return {
            "num_blocks": num_blocks_needed,
            "block_table": physical_blocks.copy()
        }

    def _alloc_physical_block(self) -> Optional[int]:
        """Reserve a free physical block; return None if the pool is exhausted."""
        for i in range(self.num_blocks):
            if i not in self.used_blocks:
                self.used_blocks.add(i)
                return i
        return None

    def update(self, seq_id: int, start_pos: int, k: torch.Tensor, v: torch.Tensor):
        """Write new K/V entries ([num_tokens, num_heads, head_dim]) from start_pos."""
        block_table = self.block_tables[seq_id]
        block_idx = start_pos // self.block_size
        block_offset = start_pos % self.block_size
        num_tokens = k.shape[0]
        for i in range(num_tokens):
            if block_offset == self.block_size:
                block_idx += 1
                block_offset = 0
            physical_block = block_table[block_idx]
            self.k_cache[physical_block, block_offset] = k[i]
            self.v_cache[physical_block, block_offset] = v[i]
            block_offset += 1

    def free(self, seq_id: int):
        """Return a sequence's physical blocks to the pool."""
        if seq_id in self.block_tables:
            for block_id in self.block_tables[seq_id]:
                self.used_blocks.discard(block_id)
            del self.block_tables[seq_id]
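# A minimal sketch of the page-table-style address translation that
# KVCache.update performs per token (hypothetical helper, not used above):
def translate(block_table: List[int], pos: int, block_size: int = 16) -> tuple:
    """Map a logical token position to (physical_block, offset) in the cache."""
    return block_table[pos // block_size], pos % block_size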
class PagedAttention(nn.Module):
    """Reference (non-fused) PagedAttention implementation."""

    def __init__(self, config: PagedAttentionConfig):
        super().__init__()
        self.config = config
        self.kv_cache = KVCache(config)

    def forward(self, query: torch.Tensor,
                block_tables: Dict[int, List[int]],
                seq_lens: Dict[int, int]) -> torch.Tensor:
        """
        PagedAttention forward pass. K/V are read from the paged cache via each
        sequence's block table rather than passed as dense tensors.

        Args:
            query: query tensor [batch, heads, q_len, head_dim]
                   (q_len is typically 1 during decoding)
            block_tables: per batch index, the logical-to-physical block mapping
            seq_lens: number of cached tokens per batch index
        Returns:
            attention output [batch, heads, q_len, head_dim]
        """
        B, H, T, D = query.shape
        scale = 1.0 / (D ** 0.5)
        block_size = self.config.block_size
        output = torch.zeros_like(query)
        for batch_idx in range(B):
            seq_len = seq_lens[batch_idx]
            block_table = block_tables[batch_idx]
            q = query[batch_idx]  # [H, T, D]
            # Gather this sequence's K/V from its (possibly non-contiguous)
            # physical blocks into contiguous [H, seq_len, D] tensors.
            k_parts, v_parts = [], []
            num_blocks = (seq_len + block_size - 1) // block_size
            for block_idx in range(num_blocks):
                physical_block = block_table[block_idx]
                valid = min(block_size, seq_len - block_idx * block_size)
                # [valid, H, D] -> [H, valid, D]
                k_parts.append(self.kv_cache.k_cache[physical_block, :valid].transpose(0, 1))
                v_parts.append(self.kv_cache.v_cache[physical_block, :valid].transpose(0, 1))
            k_seq = torch.cat(k_parts, dim=1)
            v_seq = torch.cat(v_parts, dim=1)
            # One softmax over the full sequence; a fused kernel would instead
            # use an online softmax while streaming blocks, avoiding this gather.
            attn = torch.matmul(q, k_seq.transpose(-2, -1)) * scale  # [H, T, seq_len]
            attn = torch.softmax(attn, dim=-1)
            output[batch_idx] = torch.matmul(attn, v_seq)  # [H, T, D]
        return output
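# A minimal end-to-end sketch under assumed (small) shapes: allocate a
# sequence, write its K/V into the paged cache, then attend with a single
# decode-step query.
if __name__ == "__main__":
    config = PagedAttentionConfig(block_size=16, num_blocks=64, head_dim=8, num_heads=2)
    attn = PagedAttention(config)

    seq_id, seq_len = 0, 20  # 20 tokens -> 2 blocks of 16
    info = attn.kv_cache.alloc(seq_id, seq_len)
    k = torch.randn(seq_len, config.num_heads, config.head_dim)
    v = torch.randn(seq_len, config.num_heads, config.head_dim)
    attn.kv_cache.update(seq_id, 0, k, v)

    query = torch.randn(1, config.num_heads, 1, config.head_dim)  # one decode step
    out = attn(query, {0: info["block_table"]}, {0: seq_len})
    print(out.shape)  # torch.Size([1, 2, 1, 8])

    attn.kv_cache.free(seq_id)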