From dfe03c518f785cb81969aafda5c09d69ae58dad7 Mon Sep 17 00:00:00 2001
From: Junhong
Date: Tue, 22 Jul 2025 11:59:18 +0800
Subject: [PATCH] Comments for DLLM's changes

---
 vllm_mindspore/entrypoints/__main__.py                | 3 +++
 .../model_executor/models/mf_models/deepseek_v3.py    | 2 ++
 .../model_executor/models/mf_models/mf_model_base.py  | 5 +++++
 vllm_mindspore/utils.py                               | 7 ++++++-
 vllm_mindspore/v1/worker/gpu_worker.py                | 1 +
 5 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/vllm_mindspore/entrypoints/__main__.py b/vllm_mindspore/entrypoints/__main__.py
index 921817617..ada6e76f1 100644
--- a/vllm_mindspore/entrypoints/__main__.py
+++ b/vllm_mindspore/entrypoints/__main__.py
@@ -25,6 +25,9 @@ from pathlib import Path
 
 _original_run_api_server_worker_proc = None
 
+# DLLM
+# use a monkey patch to add "import vllm_mindspore" so that
+# initialization is not skipped when multiple API servers are used
 def monkey_patch_server_run_api_server_worker_proc(*arg, **kwargs):
     import vllm_mindspore
     assert _original_run_api_server_worker_proc is not None
diff --git a/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py b/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py
index dfc8be5d4..e0e098854 100644
--- a/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py
+++ b/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py
@@ -57,6 +57,7 @@ from vllm_mindspore.model_executor.models.mf_models.mf_model_base import (
 from vllm_mindspore.model_executor.models.model_base import MLAAttentionWrapper
 
 with contextlib.suppress(ImportError):
+    # DLLM
     # Need to apply dllm pd patch on vllm to use pd disagg related functions
     from vllm.attention.layer import maybe_save_kv_layer_to_connector
 
@@ -192,6 +193,7 @@ class DeepseekV3ForCausalLM(MfModelBase):
             key_cache.append(k_cache)
         return mutable(key_cache), None
 
+    # DLLM
     def connector_send_kvcache(self):
         logger.debug("reached deepseek_v3 connector_send_kvcache")
         _pynative_executor.sync()
diff --git a/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py b/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py
index 38e979569..f82d2d9f5 100644
--- a/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py
+++ b/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py
@@ -105,12 +105,14 @@ class MfModelBase(MsModelBase):
         raise NotImplementedError(
             "Function _create_network should be Implemented!")
 
+    # DLLM
     def is_decoder_task(self) -> bool:
         if self.kv_transfer_config is None:
             return False
 
         return self.kv_transfer_config.is_kv_consumer
 
+    # DLLM
     def is_prefill_task(self) -> bool:
         if self.kv_transfer_config is None:
             return False
@@ -131,6 +133,7 @@ class MfModelBase(MsModelBase):
     def update_model_inputs(self, model_inputs, **kwargs):
         return model_inputs
 
+    # DLLM
     def connector_send_kvcache(self):
         logger.debug("reached connector_send_kvcache")
         _pynative_executor.sync()
@@ -143,6 +146,7 @@
             v_cache = kv_cache.kv_cache[forward_context.virtual_engine][1]
             maybe_save_kv_layer_to_connector(str(i), (k_cache, v_cache))
 
+    # DLLM
     def connector_wait_for_kv_layer(self):
         logger.debug("reached connector_wait_for_kv_layer")
         if not hasattr(self, 'mf_model_config'):
@@ -170,6 +174,7 @@
             self.set_flags = True
             if kv_transfer_supported and is_v1_kv_transfer_group():
                 self.connector_send_kvcache()
+        # DLLM
         else:
             if kv_transfer_supported:
                 if is_v1_kv_transfer_group() and self.is_prefill_task():
diff --git a/vllm_mindspore/utils.py b/vllm_mindspore/utils.py
index b2615b467..496dacdc2 100644
--- a/vllm_mindspore/utils.py
+++ b/vllm_mindspore/utils.py
@@ -183,14 +183,19 @@ def is_mindone_model_backend():
             == vllmModelBackendEnum.MIND_ONE)
 
 
+# DLLM
 def register_connector():
     try:
         from vllm.distributed.kv_transfer.kv_connector.factory import (
             KVConnectorFactory)
-
+        # use D2H for KV transfer
         KVConnectorFactory.register_connector(
             "DLLMDsConnector", "dllm.dkvc.v1.dllm_ds_connector",
             "DLLMDsConnector")
+        # use D2D for KV transfer
+        KVConnectorFactory.register_connector(
+            "DLLMDsD2DConnector", "dllm.dkvc.v1.dllm_ds_d2d_connector",
+            "DLLMDsD2DConnector")
     except:  # noqa: E722
         pass
 
diff --git a/vllm_mindspore/v1/worker/gpu_worker.py b/vllm_mindspore/v1/worker/gpu_worker.py
index ab3bd566a..2a49d5117 100644
--- a/vllm_mindspore/v1/worker/gpu_worker.py
+++ b/vllm_mindspore/v1/worker/gpu_worker.py
@@ -38,6 +38,7 @@ def init_device(self):
 
     config = get_current_vllm_config()
     if config is not None and config.parallel_config.data_parallel_size > 1:
+        # DLLM
        self.local_rank = (self.parallel_config.data_parallel_rank_local *
                            self.parallel_config.world_size + self.local_rank)
         self.device = torch.device(f"cuda:{self.local_rank}")
--
Gitee
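
Note (not part of the patch): a minimal sketch of how a connector registered by register_connector() could be selected at runtime, assuming vLLM's standard KVTransferConfig / kv_transfer_config plumbing. The model name and the kv_role value below are placeholder assumptions, not something this patch defines.

    # Illustrative sketch only: pick the D2D connector registered above
    # through vLLM's KV-transfer configuration.
    from vllm import LLM
    from vllm.config import KVTransferConfig

    llm = LLM(
        model="deepseek-ai/DeepSeek-V3",  # placeholder model
        kv_transfer_config=KVTransferConfig(
            kv_connector="DLLMDsD2DConnector",  # name registered in register_connector()
            kv_role="kv_producer",  # prefill side; a decode instance would use "kv_consumer"
        ),
    )

The D2H connector ("DLLMDsConnector") would be selected the same way, only with a different kv_connector name.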