Skip to content

Commit 2938bd5

Browse files
authored
remove get_metadata_cls (#4087)
remove get_metadata_cls. It is only used by the V0 engine and has already been removed from vLLM. - vLLM version: v0.11.0 - vLLM main: vllm-project/vllm@83f478b Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
1 parent 1cdf9ff commit 2938bd5

File tree

10 files changed

+4
-52
lines changed

10 files changed

+4
-52
lines changed

tests/ut/attention/test_attention_v1.py

Lines changed: 1 addition & 6 deletions
Original file line number · Diff line number · Diff line change
@@ -7,8 +7,7 @@
77
from vllm_ascend.attention.attention_v1 import (AscendAttentionBackend,
88
AscendAttentionBackendImpl,
99
AscendAttentionMetadataBuilder,
10-
AscendAttentionState,
11-
AscendMetadata)
10+
AscendAttentionState)
1211
from vllm_ascend.attention.utils import AscendCommonAttentionMetadata
1312

1413

@@ -21,10 +20,6 @@ def test_get_impl_cls(self):
2120
self.assertEqual(AscendAttentionBackend.get_impl_cls(),
2221
AscendAttentionBackendImpl)
2322

24-
def test_get_metadata_cls(self):
25-
self.assertEqual(AscendAttentionBackend.get_metadata_cls(),
26-
AscendMetadata)
27-
2823
def test_get_builder_cls(self):
2924
self.assertEqual(AscendAttentionBackend.get_builder_cls(),
3025
AscendAttentionMetadataBuilder)

tests/ut/attention/test_mla_v1.py

Lines changed: 0 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -18,10 +18,6 @@ class TestAscendMLABackend(TestBase):
1818
def test_get_name(self):
1919
self.assertEqual(AscendMLABackend.get_name(), "ASCEND_MLA")
2020

21-
def test_get_metadata_cls(self):
22-
self.assertEqual(AscendMLABackend.get_metadata_cls(),
23-
AscendMLAMetadata)
24-
2521
def test_get_builder_cls(self):
2622
self.assertEqual(AscendMLABackend.get_builder_cls(),
2723
AscendMLAMetadataBuilder)

tests/ut/attention/test_sfa_v1.py

Lines changed: 0 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -15,10 +15,6 @@ class TestAscendSFABackend(TestBase):
1515
def test_get_name(self):
1616
self.assertEqual(AscendSFABackend.get_name(), "ASCEND_SFA")
1717

18-
def test_get_metadata_cls(self):
19-
self.assertEqual(AscendSFABackend.get_metadata_cls(),
20-
AscendSFAMetadata)
21-
2218
def test_get_builder_cls(self):
2319
self.assertEqual(AscendSFABackend.get_builder_cls(),
2420
AscendSFAMetadataBuilder)

tests/ut/torchair/test_torchair_mla.py

Lines changed: 0 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -21,10 +21,6 @@ def test_get_name(self):
2121
self.assertEqual(AscendMLATorchairBackend.get_name(),
2222
"ASCEND_MLA_TORCHAIR")
2323

24-
def test_get_metadata_cls(self):
25-
self.assertEqual(AscendMLATorchairBackend.get_metadata_cls(),
26-
AscendMLATorchairMetadata)
27-
2824
def test_get_builder_cls(self):
2925
self.assertEqual(AscendMLATorchairBackend.get_builder_cls(),
3026
AscendMLATorchairMetadataBuilder)

vllm_ascend/attention/attention_v1.py

Lines changed: 0 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -75,10 +75,6 @@ def get_name() -> str:
7575
def get_impl_cls() -> Type["AscendAttentionBackendImpl"]:
7676
return AscendAttentionBackendImpl
7777

78-
@staticmethod
79-
def get_metadata_cls() -> Type["AscendMetadata"]:
80-
return AscendMetadata
81-
8278
@staticmethod
8379
def get_builder_cls() -> type["AscendAttentionMetadataBuilder"]:
8480
return AscendAttentionMetadataBuilder

vllm_ascend/attention/mla_v1.py

Lines changed: 1 addition & 7 deletions
Original file line number · Diff line number · Diff line change
@@ -7,9 +7,7 @@
77
import torch.distributed as dist
88
import torch_npu
99
from torch import nn
10-
from vllm.attention.backends.abstract import (AttentionBackend,
11-
AttentionMetadata,
12-
MLAAttentionImpl)
10+
from vllm.attention.backends.abstract import AttentionBackend, MLAAttentionImpl
1311
from vllm.config import VllmConfig, get_current_vllm_config
1412
from vllm.distributed import (get_dcp_group,
1513
get_decode_context_model_parallel_rank,
@@ -69,10 +67,6 @@ class AscendMLABackend(AttentionBackend):
6967
def get_name() -> str:
7068
return "ASCEND_MLA"
7169

72-
@staticmethod
73-
def get_metadata_cls() -> type["AttentionMetadata"]:
74-
return AscendMLAMetadata
75-
7670
@staticmethod
7771
def get_builder_cls():
7872
return AscendMLAMetadataBuilder

vllm_ascend/attention/sfa_v1.py

Lines changed: 1 addition & 7 deletions
Original file line number · Diff line number · Diff line change
@@ -4,9 +4,7 @@
44
import torch
55
import torch_npu
66
from torch import nn
7-
from vllm.attention.backends.abstract import (AttentionBackend,
8-
AttentionMetadata,
9-
MLAAttentionImpl)
7+
from vllm.attention.backends.abstract import AttentionBackend, MLAAttentionImpl
108
from vllm.config import VllmConfig
119
from vllm.distributed import get_tensor_model_parallel_world_size
1210
from vllm.model_executor.layers.linear import (LinearBase,
@@ -35,10 +33,6 @@ class AscendSFABackend(AttentionBackend):
3533
def get_name() -> str:
3634
return "ASCEND_SFA"
3735

38-
@staticmethod
39-
def get_metadata_cls() -> type["AttentionMetadata"]:
40-
return AscendSFAMetadata
41-
4236
@staticmethod
4337
def get_builder_cls():
4438
return AscendSFAMetadataBuilder

vllm_ascend/torchair/torchair_attention.py

Lines changed: 0 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -55,10 +55,6 @@ def get_name() -> str:
5555
def get_impl_cls() -> Type["AscendAttentionTorchairBackendImpl"]:
5656
return AscendAttentionTorchairBackendImpl
5757

58-
@staticmethod
59-
def get_metadata_cls() -> Type["AscendTorchairMetadata"]:
60-
return AscendTorchairMetadata
61-
6258
@staticmethod
6359
def get_builder_cls() -> type["AscendAttentionTorchairMetadataBuilder"]:
6460
return AscendAttentionTorchairMetadataBuilder

vllm_ascend/torchair/torchair_mla.py

Lines changed: 0 additions & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -6,7 +6,6 @@
66
import torch.nn as nn
77
import torch_npu
88
from vllm.attention.backends.abstract import (AttentionBackend, AttentionLayer,
9-
AttentionMetadata,
109
MLAAttentionImpl)
1110
from vllm.attention.backends.utils import PAD_SLOT_ID
1211
from vllm.config import VllmConfig, get_current_vllm_config
@@ -43,10 +42,6 @@ class AscendMLATorchairBackend(AttentionBackend):
4342
def get_name() -> str:
4443
return "ASCEND_MLA_TORCHAIR"
4544

46-
@staticmethod
47-
def get_metadata_cls() -> type["AttentionMetadata"]:
48-
return AscendMLATorchairMetadata
49-
5045
@staticmethod
5146
def get_builder_cls():
5247
return AscendMLATorchairMetadataBuilder

vllm_ascend/torchair/torchair_sfa.py

Lines changed: 1 addition & 7 deletions
Original file line number · Diff line number · Diff line change
@@ -6,9 +6,7 @@
66
import torch.nn as nn
77
import torch.nn.functional as F
88
import torch_npu
9-
from vllm.attention.backends.abstract import (AttentionBackend,
10-
AttentionMetadata,
11-
MLAAttentionImpl)
9+
from vllm.attention.backends.abstract import AttentionBackend, MLAAttentionImpl
1210
from vllm.attention.backends.utils import PAD_SLOT_ID
1311
from vllm.config import VllmConfig, get_current_vllm_config
1412
from vllm.distributed import get_tensor_model_parallel_world_size, get_tp_group
@@ -43,10 +41,6 @@ class AscendSFATorchairBackend(AttentionBackend):
4341
def get_name() -> str:
4442
return "ASCEND_SFA_TORCHAIR"
4543

46-
@staticmethod
47-
def get_metadata_cls() -> type["AttentionMetadata"]:
48-
return AscendSFATorchairMetadata
49-
5044
@staticmethod
5145
def get_builder_cls():
5246
return AscendSFATorchairMetadataBuilder

0 commit comments

Comments (0)