diff --git a/vllm_mindspore/entrypoints/__main__.py b/vllm_mindspore/entrypoints/__main__.py
index 921817617ec7ea87b59c88cb5abd58a37b0d3043..ada6e76f180c443c87b6c7eab09b1fd6dc04dd08 100644
--- a/vllm_mindspore/entrypoints/__main__.py
+++ b/vllm_mindspore/entrypoints/__main__.py
@@ -25,6 +25,9 @@ from pathlib import Path
 
 _original_run_api_server_worker_proc = None
 
+# DLLM
+# Use a monkey patch to add "import vllm_mindspore", so initialization is
+# not skipped when multiple API servers are used.
 def monkey_patch_server_run_api_server_worker_proc(*arg, **kwargs):
     import vllm_mindspore
     assert _original_run_api_server_worker_proc is not None
diff --git a/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py b/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py
index dfc8be5d4606e5d331a361a8a342fc019b925b4e..e0e0988545c23e60f93ea8ca979598fec76619a2 100644
--- a/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py
+++ b/vllm_mindspore/model_executor/models/mf_models/deepseek_v3.py
@@ -57,6 +57,7 @@ from vllm_mindspore.model_executor.models.mf_models.mf_model_base import (
 from vllm_mindspore.model_executor.models.model_base import MLAAttentionWrapper
 
 with contextlib.suppress(ImportError):
+    # DLLM
     # Need to apply dllm pd patch on vllm to use pd disagg related functions
     from vllm.attention.layer import maybe_save_kv_layer_to_connector
 
@@ -192,6 +193,7 @@ class DeepseekV3ForCausalLM(MfModelBase):
             key_cache.append(k_cache)
         return mutable(key_cache), None
 
+    # DLLM
     def connector_send_kvcache(self):
         logger.debug("reached deepseek_v3 connector_send_kvcache")
         _pynative_executor.sync()
diff --git a/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py b/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py
index 38e979569ba05f18d5d48db1d4e5f9de794058e4..f82d2d9f5192485666e1c41879e4de54f690e05b 100644
--- a/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py
+++ b/vllm_mindspore/model_executor/models/mf_models/mf_model_base.py
@@ -105,12 +105,14 @@ class MfModelBase(MsModelBase):
         raise NotImplementedError(
             "Function _create_network should be Implemented!")
 
+    # DLLM
     def is_decoder_task(self) -> bool:
         if self.kv_transfer_config is None:
             return False
 
         return self.kv_transfer_config.is_kv_consumer
 
+    # DLLM
     def is_prefill_task(self) -> bool:
         if self.kv_transfer_config is None:
             return False
@@ -131,6 +133,7 @@ class MfModelBase(MsModelBase):
     def update_model_inputs(self, model_inputs, **kwargs):
         return model_inputs
 
+    # DLLM
     def connector_send_kvcache(self):
         logger.debug("reached connector_send_kvcache")
         _pynative_executor.sync()
@@ -143,6 +146,7 @@ class MfModelBase(MsModelBase):
             v_cache = kv_cache.kv_cache[forward_context.virtual_engine][1]
             maybe_save_kv_layer_to_connector(str(i), (k_cache, v_cache))
 
+    # DLLM
     def connector_wait_for_kv_layer(self):
         logger.debug("reached connector_wait_for_kv_layer")
         if not hasattr(self, 'mf_model_config'):
@@ -170,6 +174,7 @@ class MfModelBase(MsModelBase):
                 self.set_flags = True
             if kv_transfer_supported and is_v1_kv_transfer_group():
                 self.connector_send_kvcache()
+        # DLLM
         else:
             if kv_transfer_supported:
                 if is_v1_kv_transfer_group() and self.is_prefill_task():
diff --git a/vllm_mindspore/utils.py b/vllm_mindspore/utils.py
index b2615b467df92900b39ae88f97941cdc94b8c9ec..496dacdc224e7a7178e707bdd1aee1c0724ce390 100644
--- a/vllm_mindspore/utils.py
+++ b/vllm_mindspore/utils.py
@@ -183,14 +183,19 @@ def is_mindone_model_backend():
             == vllmModelBackendEnum.MIND_ONE)
 
 
+# DLLM
 def register_connector():
     try:
         from vllm.distributed.kv_transfer.kv_connector.factory import (
             KVConnectorFactory)
-
+        # use D2H for KV transfer
         KVConnectorFactory.register_connector(
             "DLLMDsConnector", "dllm.dkvc.v1.dllm_ds_connector",
             "DLLMDsConnector")
+        # use D2D for KV transfer
+        KVConnectorFactory.register_connector(
+            "DLLMDsD2DConnector", "dllm.dkvc.v1.dllm_ds_d2d_connector",
+            "DLLMDsD2DConnector")
     except:  # noqa: E722
         pass
 
diff --git a/vllm_mindspore/v1/worker/gpu_worker.py b/vllm_mindspore/v1/worker/gpu_worker.py
index ab3bd566a9ca7fd07ad632fa38a41ba70589a39d..2a49d51171ab9dfccb5d30b9d0cc51880131c84a 100644
--- a/vllm_mindspore/v1/worker/gpu_worker.py
+++ b/vllm_mindspore/v1/worker/gpu_worker.py
@@ -38,6 +38,7 @@ def init_device(self):
 
     config = get_current_vllm_config()
     if config is not None and config.parallel_config.data_parallel_size > 1:
+        # DLLM
         self.local_rank = (self.parallel_config.data_parallel_rank_local *
                            self.parallel_config.world_size + self.local_rank)
     self.device = torch.device(f"cuda:{self.local_rank}")
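
The local-rank remap in gpu_worker.py gives each data-parallel replica its own contiguous device range: for example, with world_size=8 and data_parallel_rank_local=1, local worker 3 is moved to device 1 * 8 + 3 = 11.

Below is a minimal usage sketch for the connectors registered in utils.py. It assumes the dllm package is installed so the registered module paths resolve, and it uses vLLM's KVTransferConfig to select a connector and role; the deployment wiring shown is illustrative and not part of this patch.

```python
from vllm.config import KVTransferConfig

# Prefill (producer) instance: pushes KV blocks through the D2D connector.
prefill_cfg = KVTransferConfig(
    kv_connector="DLLMDsD2DConnector",
    kv_role="kv_producer",
)

# Decode (consumer) instance: receives the transferred KV blocks. This is
# the role that is_decoder_task() in mf_model_base.py detects via
# kv_transfer_config.is_kv_consumer.
decode_cfg = KVTransferConfig(
    kv_connector="DLLMDsD2DConnector",
    kv_role="kv_consumer",
)
```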