diff --git a/24.1.RC1/spdk-21.01.patch b/24.1.RC1/spdk-21.01.patch new file mode 100644 index 0000000000000000000000000000000000000000..b14ed2bea0b4d1abbcb054e1e38ee431e33276e5 --- /dev/null +++ b/24.1.RC1/spdk-21.01.patch @@ -0,0 +1,19175 @@ +diff --git a/CONFIG b/CONFIG +index 92b5c97..f56a956 100644 +--- a/CONFIG ++++ b/CONFIG +@@ -74,7 +74,7 @@ CONFIG_TESTS=y + CONFIG_UNIT_TESTS=y + + # Build examples +-CONFIG_EXAMPLES=y ++CONFIG_EXAMPLES=n + + # Build with Control-flow Enforcement Technology (CET) + CONFIG_CET=n +@@ -117,6 +117,9 @@ CONFIG_RBD=n + # Build vhost library. + CONFIG_VHOST=y + ++# Build ssam library. ++CONFIG_SSAM=y ++ + # Build vhost initiator (Virtio) driver. + CONFIG_VIRTIO=y + +diff --git a/app/Makefile b/app/Makefile +index 8ff318d..9850d2c 100644 +--- a/app/Makefile ++++ b/app/Makefile +@@ -42,6 +42,7 @@ DIRS-y += iscsi_tgt + DIRS-y += spdk_tgt + DIRS-y += spdk_lspci + DIRS-y += spdk_top ++DIRS-y += ssam + ifeq ($(OS),Linux) + DIRS-$(CONFIG_VHOST) += vhost + DIRS-y += spdk_dd +diff --git a/app/ssam/Makefile b/app/ssam/Makefile +new file mode 100644 +index 0000000..94d114d +--- /dev/null ++++ b/app/ssam/Makefile +@@ -0,0 +1,58 @@ ++# ++# BSD LICENSE ++# ++# Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions ++# are met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above copyright ++# notice, this list of conditions and the following disclaimer in ++# the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Intel Corporation nor the names of its ++# contributors may be used to endorse or promote products derived ++# from this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++# ++ ++SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..) 
++include $(SPDK_ROOT_DIR)/mk/spdk.common.mk ++include $(SPDK_ROOT_DIR)/mk/spdk.modules.mk ++ ++APP = ssam ++ ++C_SRCS := ssam.c ++ ++SYS_LIBS += -lcap -lfuse3 ++SPDK_LIB_LIST = $(ALL_MODULES_LIST) event_ssam event ssam ++ ++ifeq ($(OS),Linux) ++SPDK_LIB_LIST += event_nbd ++endif ++ ++ifeq ($(SPDK_ROOT_DIR)/lib/env_dpdk,$(CONFIG_ENV)) ++SPDK_LIB_LIST += env_dpdk_rpc ++endif ++ ++include $(SPDK_ROOT_DIR)/mk/spdk.app.mk ++ ++install: $(APP) ++ $(INSTALL_APP) ++ ++uninstall: ++ $(UNINSTALL_APP) +diff --git a/app/ssam/ssam.c b/app/ssam/ssam.c +new file mode 100644 +index 0000000..ac1e108 +--- /dev/null ++++ b/app/ssam/ssam.c +@@ -0,0 +1,86 @@ ++/*- ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include "spdk/ssam.h" ++#include "spdk/string.h" ++ ++#define IOVA_MODE_PA "pa" ++ ++static void ++ssam_started(void *ctx) ++{ ++ spdk_ssam_poller_start(); ++ SPDK_NOTICELOG("%s server started.\n", SSAM_SERVER_NAME); ++} ++ ++int ++main(int argc, char *argv[]) ++{ ++ struct spdk_app_opts opts = {}; ++ int rc; ++ int shm_id; ++ ++ spdk_app_opts_init(&opts, sizeof(opts)); ++ opts.name = SSAM_SERVER_NAME; ++ opts.iova_mode = IOVA_MODE_PA; ++ opts.num_entries = 0; ++ ++ spdk_ssam_user_config_init(); ++ ++ shm_id = shm_open(SSAM_SHM, O_RDWR, SSAM_SHM_PERMIT); ++ if (shm_id < 0) { ++ SPDK_NOTICELOG("ssam share memory hasn't been created.\n"); ++ } else { ++ spdk_ssam_set_shm_created(true); ++ SPDK_NOTICELOG("ssam share memory has been created.\n"); ++ } ++ ++ rc = spdk_ssam_rc_preinit(); ++ if (rc < 0) { ++ exit(rc); ++ } ++ ++ rc = spdk_app_parse_args(argc, argv, &opts, NULL, NULL, NULL, NULL); ++ if (rc != SPDK_APP_PARSE_ARGS_SUCCESS) { ++ SPDK_ERRLOG("spdk app parse args fail: %d\n", rc); ++ exit(rc); ++ } ++ ++ /* Blocks until the application is exiting */ ++ rc = spdk_app_start(&opts, ssam_started, NULL); ++ spdk_ssam_exit(); ++ ++ spdk_app_fini(); ++ SPDK_NOTICELOG("%s server exited.\n", SSAM_SERVER_NAME); ++ ++ return rc; ++} +diff --git a/configure b/configure +index 723bc45..923e006 100644 +--- a/configure ++++ b/configure +@@ -97,6 +97,7 @@ function usage() + echo " No path required." + echo " raid5 Build with bdev_raid module RAID5 support." + echo " No path required." ++ echo " ssam Support to build ssam for DPU storage accel." + echo "" + echo "Environment variables:" + echo "" +@@ -419,6 +420,15 @@ for i in "$@"; do + --without-fuse) + CONFIG[FUSE]=n + ;; ++ --with-ssam) ++ CONFIG[SSAM]=y ++ ;; ++ --without-ssam) ++ CONFIG[SSAM]=n ++ ;; ++ --with-ssam-only) ++ CONFIG[SSAM_ONLY]=y ++ ;; + --with-nvme-cuse) + CONFIG[NVME_CUSE]=y + ;; +@@ -784,6 +794,13 @@ if [[ "${CONFIG[FUSE]}" = "y" ]]; then + fi + fi + ++if [[ "${CONFIG[SSAM_ONLY]}" = "y" ]]; then ++ if [[ "${CONFIG[SSAM]}" = "n" ]]; then ++ echo "--with-ssam-only requires --with-ssam." ++ exit 1 ++ fi ++fi ++ + if [ "${CONFIG[CET]}" = "y" ]; then + if ! echo -e 'int main(void) { return 0; }\n' | ${BUILD_CMD[@]} -fcf-protection - 2>/dev/null; then + echo --enable-cet requires compiler/linker that supports CET. +diff --git a/include/spdk/event.h b/include/spdk/event.h +index f757b33..cc55752 100644 +--- a/include/spdk/event.h ++++ b/include/spdk/event.h +@@ -232,6 +232,8 @@ void spdk_app_stop(int rc); + */ + int spdk_app_get_shm_id(void); + ++bool spdk_get_shutdown_sig_received(void); ++ + /** + * Convert a string containing a CPU core mask into a bitmask + * +diff --git a/include/spdk/ssam.h b/include/spdk/ssam.h +new file mode 100644 +index 0000000..4449a06 +--- /dev/null ++++ b/include/spdk/ssam.h +@@ -0,0 +1,254 @@ ++/*- ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. 
++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef SSAM_H ++#define SSAM_H ++ ++#include ++ ++#include "spdk/stdinc.h" ++#include "spdk/cpuset.h" ++#include "spdk/json.h" ++#include "spdk/thread.h" ++#include "spdk/event.h" ++ ++#include "../../lib/ssam/ssam_driver/dpak_ssam.h" ++ ++#ifdef DEBUG ++#define ASSERT(f) assert(f) ++#else ++#define ASSERT(f) ((void)0) ++#endif ++ ++#define SPDK_INVALID_TID UINT16_MAX ++#define SPDK_SESSION_TYPE_MAX_LEN 64 ++ ++#define SPDK_SESSION_TYPE_BLK "blk" ++#define SPDK_SESSION_TYPE_SCSI "scsi" ++#define SPDK_SESSION_TYPE_FS "fs" ++ ++#define SSAM_SHM "ssam_shm" ++#define SSAM_SHM_PERMIT 0640 ++#define SSAM_STORAGE_READY_FILE "/proc/sdi_storage/storage_ready" ++ ++enum virtio_type { ++ VIRTIO_TYPE_UNKNOWN = 0, ++ VIRTIO_TYPE_BLK = (1U << 0), ++ VIRTIO_TYPE_SCSI = (1U << 1), ++ VIRTIO_TYPE_FS = (1U << 2), ++}; ++ ++/** ++ * ssam subsystem init callback ++ * ++ * \param rc The preceding processing result, ++ * 0 on success, negative errno on error. ++ */ ++typedef void (*ssam_init_cb)(int rc); ++ ++/** ++ * ssam subsystem fini callback ++ */ ++typedef void (*ssam_fini_cb)(void); ++ ++/** ++ * ssam dump config json ++ */ ++void spdk_ssam_config_json(struct spdk_json_write_ctx *w); ++ ++/** ++ * Check if ssam support the global vf id. ++ * ++ * \param gfunc_id ssam global vf id. ++ * ++ * \return -EINVAL indicate gfunc_id invalid, -ENODEV indicate no such vf or ++ * 0 indicate gfunc_id valid. ++ */ ++int ssam_check_gfunc_id(uint16_t gfunc_id); ++ ++/** ++ * Find a ssam session by global vf id. ++ * ++ * \param gfunc_id ssam global vf id. ++ * ++ * \return ssam session or NULL indicate not find. ++ */ ++struct spdk_ssam_session *ssam_session_find(uint16_t gfunc_id); ++ ++/** ++ * Get gfunc id by controller name. ++ * ++ * \param name controller name. ++ * ++ * \return gfunc id or SPDK_INVALID_GFUNC_ID gfunc id not find. ++ */ ++uint16_t ssam_get_gfunc_id_by_name(char *name); ++ ++/** ++ * Get the next ssam device. If there's no more devices to iterate ++ * through, NULL will be returned. ++ * ++ * \param smdev ssam device. If NULL, this function will return the ++ * very first device. ++ * ++ * \return smdev ssam device or NULL indicate no more devices ++ */ ++struct spdk_ssam_dev *ssam_dev_next(const struct spdk_ssam_dev *smdev); ++ ++/** ++ * Lock the global ssam mutex synchronizing all the ssam device accesses. ++ */ ++void ssam_lock(void); ++ ++/** ++ * Lock the global ssam mutex synchronizing all the ssam device accesses. 
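++ * Unlike ssam_lock(), this call does not block if the mutex is already held.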
++ * ++ * \return 0 if the mutex could be locked immediately, negative errno otherwise. ++ */ ++int ssam_trylock(void); ++ ++/** ++ * Unlock the global ssam mutex. ++ */ ++void ssam_unlock(void); ++ ++/** ++ * \param smsession ssam session. ++ * \param arg user-provided parameter. ++ * ++ * \return 0 on success, negative if failed ++ */ ++typedef int (*spdk_ssam_session_fn)(struct spdk_ssam_session *smsession, void **arg); ++ ++/** ++ * \param smsession ssam session. ++ * \param arg user-provided parameter. ++ */ ++typedef void (*spdk_ssam_session_cpl_fn)(struct spdk_ssam_session *smsession, void **arg); ++ ++/** ++ * \param arg user-provided parameter. ++ * \param rsp spdk_ssam_session_fn call back response value, 0 success, negative if failed. ++ */ ++typedef void (*spdk_ssam_session_rsp_fn)(void *arg, int rsp); ++ ++struct spdk_ssam_session_reg_info { ++ char type_name[SPDK_SESSION_TYPE_MAX_LEN]; ++ spdk_ssam_session_rsp_fn rsp_fn; ++ void *rsp_ctx; ++ uint16_t gfunc_id; ++ uint16_t tid; ++ uint16_t queues; ++ const struct spdk_ssam_session_backend *backend; ++ uint32_t session_ctx_size; ++ char *name; ++ char *dbdf; ++}; ++ ++struct ssam_fs_construct_info { ++ uint16_t gfunc_id; ++ uint16_t max_threads; ++ char *fs_name; ++ char *name; ++ char *dbdf; ++}; ++ ++/** ++ * Construct a ssam blk device. This will create a ssam ++ * blk device and then create a session. Creating the smdev will ++ * start an I/O poller and hog a CPU. If already exist a ssam ++ * blk device, then it will only create a session to this device. ++ * All sessions in the same device share one I/O poller and one CPU. ++ * ssam blk device is tightly associated with given SPDK bdev. ++ * Given bdev can not be changed, unless it has been hotremoved. This ++ * would result in all I/O failing with virtio VIRTIO_BLK_S_IOERR ++ * error code. ++ * ++ * This function is thread-safe. ++ * ++ * \param info session register information. ++ * \param dev_name bdev name to associate with this vhost device ++ * \param readonly if set, all writes to the device will fail with ++ * VIRTIO_BLK_S_IOERR error code. ++ * \param serial means volume id. ++ * ++ * \return 0 on success, negative errno on error. ++ */ ++int ssam_blk_construct(struct spdk_ssam_session_reg_info *info, ++ const char *dev_name, bool readonly, char *serial); ++ ++/** ++ * ssam user config init. ++ */ ++void spdk_ssam_user_config_init(void); ++ ++/** ++ * ssam get tid which has minimum device. 
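++ *
++ * \return tid of the ssam device with the fewest active sessions, or
++ * SPDK_INVALID_TID if the configured CPU core count is invalid.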
++ */ ++uint16_t ssam_get_tid(void); ++ ++uint32_t ssam_get_tids(uint16_t max_threads); ++ ++void spdk_ssam_exit(void); ++ ++void spdk_ssam_subsystem_fini(ssam_fini_cb fini_cb); ++ ++void spdk_ssam_subsystem_init(ssam_init_cb init_cb); ++ ++int ssam_scsi_construct(struct spdk_ssam_session_reg_info *info); ++ ++int ssam_scsi_dev_add_tgt(struct spdk_ssam_session *smsession, int target_num, ++ const char *bdev_name); ++ ++int ssam_scsi_dev_remove_tgt(struct spdk_ssam_session *smsession, ++ unsigned scsi_tgt_num, spdk_ssam_session_rsp_fn cb_fn, void *cb_arg); ++ ++void spdk_ssam_set_shm_created(bool shm_created); ++ ++bool spdk_ssam_get_shm_created(void); ++ ++void spdk_ssam_poller_start(void); ++ ++void ssam_deinit_device_pcie_list(void); ++ ++int ssam_init_device_pcie_list(void); ++ ++void ssam_dump_device_pcie_list(struct spdk_json_write_ctx *w); ++ ++uint32_t ssam_get_device_pcie_list_size(void); ++ ++int ssam_fs_construct(struct ssam_fs_construct_info *info); ++ ++int ssam_fs_destory(char *name, bool force, void *request, ++ spdk_ssam_session_rsp_fn rpc_ssam_send_response_cb); ++ ++#endif /* SSAM_H */ +diff --git a/lib/Makefile b/lib/Makefile +index eab297e..1cb7f87 100644 +--- a/lib/Makefile ++++ b/lib/Makefile +@@ -45,6 +45,7 @@ endif + DIRS-$(CONFIG_OCF) += env_ocf + DIRS-$(CONFIG_IDXD) += idxd + DIRS-$(CONFIG_VHOST) += vhost ++DIRS-$(CONFIG_SSAM) += ssam + DIRS-$(CONFIG_VIRTIO) += virtio + DIRS-$(CONFIG_REDUCE) += reduce + DIRS-$(CONFIG_RDMA) += rdma +diff --git a/lib/bdev/bdev.c b/lib/bdev/bdev.c +index 191520d..934571f 100644 +--- a/lib/bdev/bdev.c ++++ b/lib/bdev/bdev.c +@@ -49,6 +49,7 @@ + #include "spdk/bdev_module.h" + #include "spdk/log.h" + #include "spdk/string.h" ++#include "spdk/event.h" + + #include "bdev_internal.h" + +@@ -2568,6 +2569,7 @@ bdev_channel_destroy_resource(struct spdk_bdev_channel *ch) + { + struct spdk_bdev_shared_resource *shared_resource; + struct lba_range *range; ++ struct spdk_bdev_io *bdev_io, *tmp; + + while (!TAILQ_EMPTY(&ch->locked_ranges)) { + range = TAILQ_FIRST(&ch->locked_ranges); +@@ -2578,6 +2580,11 @@ bdev_channel_destroy_resource(struct spdk_bdev_channel *ch) + spdk_put_io_channel(ch->channel); + + shared_resource = ch->shared_resource; ++ ch->shared_resource = NULL; ++ ++ TAILQ_FOREACH_SAFE(bdev_io, &ch->io_submitted, internal.ch_link, tmp) { ++ spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_ABORTED); ++ } + + assert(TAILQ_EMPTY(&ch->io_locked)); + assert(TAILQ_EMPTY(&ch->io_submitted)); +@@ -5183,6 +5190,15 @@ spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status sta + struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch; + struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource; + ++ if (spdk_unlikely(spdk_get_shutdown_sig_received())) { ++ /* ++ * In the hot restart process, when this callback is triggered, ++ * the bdev buf memory may have been released. ++ * Therefore, do not need to continue. 
++ */ ++ return; ++ } ++ + bdev_io->internal.status = status; + + if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_RESET)) { +diff --git a/lib/event/app.c b/lib/event/app.c +index 03f9692..15b0826 100644 +--- a/lib/event/app.c ++++ b/lib/event/app.c +@@ -81,6 +81,11 @@ spdk_app_get_shm_id(void) + return g_spdk_app.shm_id; + } + ++bool spdk_get_shutdown_sig_received(void) ++{ ++ return g_shutdown_sig_received; ++} ++ + /* append one empty option to indicate the end of the array */ + static const struct option g_cmdline_options[] = { + #define CONFIG_FILE_OPT_IDX 'c' +diff --git a/lib/event/json_config.c b/lib/event/json_config.c +index 67890de..5e1b959 100644 +--- a/lib/event/json_config.c ++++ b/lib/event/json_config.c +@@ -355,6 +355,15 @@ app_json_config_load_subsystem_config_entry(void *_ctx) + size_t params_len = 0; + int rc; + ++ if (spdk_get_shutdown_sig_received()) { ++ /* ++ * In the hot restart process, when this callback is triggered, ++ * rpc and thread may have been released. ++ * Therefore, dont continue. ++ */ ++ return; ++ } ++ + if (ctx->config_it == NULL) { + SPDK_DEBUG_APP_CFG("Subsystem '%.*s': configuration done.\n", ctx->subsystem_name->len, + (char *)ctx->subsystem_name->start); +diff --git a/lib/event/spdk_event.map b/lib/event/spdk_event.map +index 9a4ba56..54a6d73 100644 +--- a/lib/event/spdk_event.map ++++ b/lib/event/spdk_event.map +@@ -40,6 +40,7 @@ + spdk_subsystem_config_json; + spdk_rpc_initialize; + spdk_rpc_finish; ++ spdk_get_shutdown_sig_received; + + local: *; + }; +diff --git a/lib/scsi/lun.c b/lib/scsi/lun.c +index fef179e..7264f10 100644 +--- a/lib/scsi/lun.c ++++ b/lib/scsi/lun.c +@@ -38,6 +38,8 @@ + #include "spdk/thread.h" + #include "spdk/util.h" + #include "spdk/likely.h" ++#include "spdk/event.h" ++#include "spdk/bdev_module.h" + + static void scsi_lun_execute_tasks(struct spdk_scsi_lun *lun); + static void _scsi_lun_execute_mgmt_task(struct spdk_scsi_lun *lun); +@@ -352,6 +354,16 @@ _scsi_lun_hot_remove(void *arg1) + { + struct spdk_scsi_lun *lun = arg1; + ++ if (spdk_unlikely(spdk_get_shutdown_sig_received())) { ++ /* ++ * In the hot restart process, when this callback is triggered, ++ * the task and bdev_io memory may have been released. ++ * Therefore, outstanding task are not executed in this scenario. ++ */ ++ scsi_lun_notify_hot_remove(lun); ++ return; ++ } ++ + /* If lun->removed is set, no new task can be submitted to the LUN. + * Execute previously queued tasks, which will be immediately aborted. + */ +diff --git a/lib/ssam/Makefile b/lib/ssam/Makefile +new file mode 100644 +index 0000000..23487d9 +--- /dev/null ++++ b/lib/ssam/Makefile +@@ -0,0 +1,54 @@ ++# ++# BSD LICENSE ++# ++# Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions ++# are met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above copyright ++# notice, this list of conditions and the following disclaimer in ++# the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Intel Corporation nor the names of its ++# contributors may be used to endorse or promote products derived ++# from this software without specific prior written permission. 
++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++# ++ ++SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..) ++include $(SPDK_ROOT_DIR)/mk/spdk.common.mk ++ ++SO_VER := 1 ++SO_MINOR := 0 ++ ++CFLAGS += -I. -I../../dpdk/lib/eal/common ++CFLAGS += $(ENV_CFLAGS) ++ ++C_SRCS = ssam.c ssam_blk.c ssam_rpc.c \ ++ ssam_config.c ssam_scsi.c ssam_malloc.c ssam_device_pcie.c \ ++ ssam_fs.c ssam_fuse_adapter.c ++C_SRCS += ssam_driver/ssam_driver.c ++C_SRCS += ssam_driver/ssam_dbdf.c ++C_SRCS += ssam_driver/ssam_mempool.c ++C_SRCS += ssam_driver/ssam_driver_adapter.c ++ ++LIBNAME = ssam ++ ++SPDK_MAP_FILE = $(abspath $(CURDIR)/spdk_ssam.map) ++ ++include $(SPDK_ROOT_DIR)/mk/spdk.lib.mk +diff --git a/lib/ssam/spdk_ssam.map b/lib/ssam/spdk_ssam.map +new file mode 100644 +index 0000000..9bef6f9 +--- /dev/null ++++ b/lib/ssam/spdk_ssam.map +@@ -0,0 +1,16 @@ ++{ ++ global: ++ ++ # public functions ++ spdk_ssam_user_config_init; ++ spdk_ssam_exit; ++ spdk_ssam_subsystem_fini; ++ spdk_ssam_subsystem_init; ++ spdk_ssam_config_json; ++ spdk_ssam_set_shm_created; ++ spdk_ssam_get_shm_created; ++ spdk_ssam_poller_start; ++ spdk_ssam_rc_preinit; ++ ++ local: *; ++}; +diff --git a/lib/ssam/ssam.c b/lib/ssam/ssam.c +new file mode 100644 +index 0000000..e9e5bc7 +--- /dev/null ++++ b/lib/ssam/ssam.c +@@ -0,0 +1,1833 @@ ++/*- ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include ++#include ++ ++#include "spdk/scsi_spec.h" ++#include "spdk/scsi.h" ++#include "spdk/stdinc.h" ++#include "spdk/env.h" ++#include "spdk/likely.h" ++#include "spdk/string.h" ++#include "spdk/util.h" ++#include "spdk/memory.h" ++#include "spdk/barrier.h" ++#include "spdk/bdev_module.h" ++#include "spdk/bdev.h" ++#include "spdk/endian.h" ++ ++#include "ssam_internal.h" ++ ++#define SSAM_PF_NUM_MAX_VAL 31 ++#define SSAM_PF_PLUS_VF_NUM_MAX_VAL 4096 ++#define SSAM_CPU_NUM_MAX 24 ++ ++#define INQUIRY_OFFSET(field) \ ++ offsetof(struct spdk_scsi_cdb_inquiry_data, field) + \ ++ sizeof(((struct spdk_scsi_cdb_inquiry_data *)0x0)->field) ++ ++#define IO_STUCK_TIMEOUT 120 ++#define SEND_EVENT_WAIT_TIME 10 ++#define VMIO_TYPE_VIRTIO_SCSI_CTRL 4 ++#define DEVICE_READY_TIMEOUT 15 ++#define DEVICE_READY_WAIT_TIME 100000 ++ ++bool g_ssam_subsystem_exit = false; ++ ++struct ssam_event_user_ctx { ++ bool session_freed; /* true if session has been freed */ ++ bool async_done; /* true if session event done */ ++ void *ctx; /* store user context pointer */ ++}; ++ ++struct ssam_session_fn_ctx { ++ /* Device session pointer obtained before enqueuing the event */ ++ struct spdk_ssam_session *smsession; ++ ++ spdk_ssam_session_rsp_fn *rsp_fn; ++ ++ void *rsp_ctx; ++ ++ /* User provided function to be executed on session's thread. */ ++ spdk_ssam_session_fn cb_fn; ++ /** ++ * User provided function to be called on the init thread ++ * after iterating through all sessions. 
++	 */
++	spdk_ssam_session_cpl_fn cpl_fn;
++
++	/* Custom user context */
++	struct ssam_event_user_ctx user_ctx;
++
++	/* Session start event time */
++	uint64_t start_tsc;
++
++	bool need_async;
++
++	int rsp;
++};
++
++/* ssam total information */
++struct spdk_ssam_info {
++	ssam_mempool_t *mp[SSAM_MAX_CORE_NUM];
++};
++
++static struct spdk_ssam_info g_ssam_info;
++
++/* Thread performing all ssam management operations */
++static struct spdk_thread *g_ssam_init_thread;
++
++static TAILQ_HEAD(, spdk_ssam_dev) g_ssam_devices =
++	TAILQ_HEAD_INITIALIZER(g_ssam_devices);
++
++static pthread_mutex_t g_ssam_mutex = PTHREAD_MUTEX_INITIALIZER;
++
++/* Save cpu mask when ssam management thread started */
++static struct spdk_cpuset g_ssam_core_mask;
++
++/* Call back when ssam_fini complete */
++static ssam_fini_cb g_ssam_fini_cpl_cb;
++
++static int ssam_init(void);
++
++static int
++ssam_sessions_init(struct spdk_ssam_session ***smsession)
++{
++	*smsession = (struct spdk_ssam_session **)calloc(
++		SSAM_MAX_SESSION_PER_DEV, sizeof(struct spdk_ssam_session *));
++	if (*smsession == NULL) {
++		SPDK_ERRLOG("calloc sessions failed\n");
++		return -ENOMEM;
++	}
++	return 0;
++}
++
++static int
++ssam_sessions_insert(struct spdk_ssam_session **smsessions, struct spdk_ssam_session *smsession)
++{
++	uint16_t i = smsession->gfunc_id;
++
++	if (smsessions[i] != NULL) {
++		SPDK_ERRLOG("smsessions already has such a session\n");
++		return -ENOSPC;
++	}
++
++	smsessions[i] = smsession;
++
++	return 0;
++}
++
++void
++ssam_sessions_remove(struct spdk_ssam_session **smsessions, struct spdk_ssam_session *smsession)
++{
++	uint16_t i = smsession->gfunc_id;
++
++	if (smsessions[i] == NULL) {
++		SPDK_WARNLOG("smsessions has no such session\n");
++		return;
++	}
++
++	smsessions[i] = NULL;
++	return;
++}
++
++static struct spdk_ssam_session *
++ssam_sessions_first(int begin, struct spdk_ssam_session **smsessions)
++{
++	int i;
++
++	for (i = begin; i < SSAM_MAX_SESSION_PER_DEV; i++) {
++		if (smsessions[i] != NULL) {
++			return smsessions[i];
++		}
++	}
++	return NULL;
++}
++
++bool
++ssam_sessions_empty(struct spdk_ssam_session **smsessions)
++{
++	struct spdk_ssam_session *session;
++
++	session = ssam_sessions_first(0, smsessions);
++	if (session == NULL) {
++		return true;
++	}
++
++	return false;
++}
++
++struct spdk_ssam_session *
++ssam_sessions_next(struct spdk_ssam_session **smsessions, struct spdk_ssam_session *smsession)
++{
++	if (smsession == NULL) {
++		return ssam_sessions_first(0, smsessions);
++	}
++	if (smsession->gfunc_id == SSAM_MAX_SESSION_PER_DEV) {
++		return NULL;
++	}
++	return ssam_sessions_first(smsession->gfunc_id + 1, smsessions);
++}
++
++void
++ssam_session_insert_io_wait(struct spdk_ssam_session *smsession,
++	struct spdk_ssam_session_io_wait *io_wait)
++{
++	TAILQ_INSERT_TAIL(&smsession->smdev->io_wait_queue, io_wait, link);
++	smsession->smdev->io_wait_cnt++;
++}
++
++static void
++ssam_session_remove_io_wait(struct spdk_ssam_dev *smdev,
++	struct spdk_ssam_session_io_wait *session_io_wait)
++{
++	TAILQ_REMOVE(&smdev->io_wait_queue, session_io_wait, link);
++	smdev->io_wait_cnt--;
++}
++
++void
++ssam_session_insert_io_wait_r(struct spdk_ssam_dev *smdev,
++	struct spdk_ssam_session_io_wait_r *io_wait_r)
++{
++	TAILQ_INSERT_TAIL(&smdev->io_wait_queue_r, io_wait_r, link);
++	smdev->io_wait_r_cnt++;
++}
++
++static void
++ssam_session_remove_io_wait_r(struct spdk_ssam_dev *smdev,
++	struct spdk_ssam_session_io_wait_r *session_io_wait_r)
++{
++	TAILQ_REMOVE(&smdev->io_wait_queue_r,
session_io_wait_r, link); ++ smdev->io_wait_r_cnt--; ++} ++ ++void ++ssam_session_destroy(struct spdk_ssam_session *smsession) ++{ ++ if (smsession == NULL || smsession->smdev == NULL) { ++ return; ++ } ++ /* Remove smsession from the queue in advance to prevent access by the poller thread. */ ++ if (!ssam_sessions_empty(smsession->smdev->smsessions)) { ++ ssam_sessions_remove(smsession->smdev->smsessions, smsession); ++ } ++ /* The smdev poller is not deleted here, but at the end of the app. */ ++} ++ ++uint64_t ++ssam_get_diff_tsc(uint64_t tsc) ++{ ++ return spdk_get_ticks() - tsc; ++} ++ ++int ++ssam_check_gfunc_id(uint16_t gfunc_id) ++{ ++ enum ssam_device_type type; ++ ++ if (gfunc_id == SPDK_INVALID_GFUNC_ID) { ++ SPDK_ERRLOG("Check gfunc_id(%u) error\n", gfunc_id); ++ return -EINVAL; ++ } ++ ++ type = ssam_get_virtio_type(gfunc_id); ++ if (type >= SSAM_DEVICE_VIRTIO_MAX) { ++ SPDK_ERRLOG("Check gfunc_id(%u) virtio type(%d) error\n", gfunc_id, type); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ ++/* Find a tid which has minimum device */ ++static uint16_t ++ssam_get_min_payload_tid(uint16_t cpu_num) ++{ ++ if (cpu_num == 0) { ++ return SPDK_INVALID_TID; ++ } ++ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_dev *tmp = NULL; ++ /* All tid have been used, find a tid which has minimum device */ ++ uint32_t min = UINT32_MAX; ++ uint16_t tid = 0; ++ ++ TAILQ_FOREACH_SAFE(smdev, &g_ssam_devices, tailq, tmp) { ++ if (smdev->active_session_num < min) { ++ min = smdev->active_session_num; ++ tid = smdev->tid; ++ } ++ } ++ ++ return tid; ++} ++ ++/* Get a tid number */ ++uint16_t ++ssam_get_tid(void) ++{ ++ uint32_t cpu_num; ++ ++ cpu_num = spdk_cpuset_count(&g_ssam_core_mask); ++ if ((cpu_num == 0) || (cpu_num > UINT16_MAX)) { ++ /* If cpu_num > UINT16_MAX, the result of tid will overflow */ ++ SPDK_ERRLOG("CPU num %u not valid.\n", cpu_num); ++ return SPDK_INVALID_TID; ++ } ++ ++ return ssam_get_min_payload_tid((uint16_t)cpu_num); ++} ++ ++struct spdk_ssam_dev_info { ++ uint32_t active_session_num; ++ uint32_t tids; ++}; ++ ++static int compare(const void *a, const void *b) ++{ ++ const struct spdk_ssam_dev_info *dev1 = (const struct spdk_ssam_dev_info *)a; ++ const struct spdk_ssam_dev_info *dev2 = (const struct spdk_ssam_dev_info *)b; ++ return dev1->active_session_num - dev2->active_session_num; ++} ++ ++static uint32_t ++ssam_get_min_payload_tids(uint16_t cpu_num, uint16_t thread_num) ++{ ++ int real_thread_num = thread_num < cpu_num ? 
thread_num : cpu_num; ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_dev *tmp = NULL; ++ /* All tid have been used, find a tid which has minimum device */ ++ struct spdk_ssam_dev_info smdevs[cpu_num]; ++ int count = 0; ++ uint32_t mask = 0; ++ ++ TAILQ_FOREACH_SAFE(smdev, &g_ssam_devices, tailq, tmp) { ++ smdevs[count].active_session_num = smdev->active_session_num; ++ smdevs[count].tids = smdev->tid; ++ count++; ++ } ++ ++ qsort(smdevs, cpu_num, sizeof(struct spdk_ssam_dev_info), compare); ++ ++ for (uint16_t i = 0; i < real_thread_num; i++) { ++ mask |= (1U << smdevs[i].tids); ++ } ++ ++ return mask; ++} ++ ++uint32_t ++ssam_get_tids(uint16_t thread_num) ++{ ++ uint32_t cpu_num; ++ ++ cpu_num = ssam_get_core_num(); ++ if ((cpu_num == 0) || (cpu_num > SSAM_CPU_NUM_MAX) || (thread_num == 0)) { ++ SPDK_ERRLOG("CPU num %u or thread num %u not valid.\n", cpu_num, thread_num); ++ return SPDK_INVALID_TID; ++ } ++ ++ return ssam_get_min_payload_tids((uint16_t)cpu_num, thread_num); ++} ++ ++ ++void ++ssam_lock(void) ++{ ++ pthread_mutex_lock(&g_ssam_mutex); ++} ++ ++int ++ssam_trylock(void) ++{ ++ return pthread_mutex_trylock(&g_ssam_mutex); ++} ++ ++void ++ssam_unlock(void) ++{ ++ pthread_mutex_unlock(&g_ssam_mutex); ++} ++ ++static struct spdk_ssam_session * ++ssam_session_find_in_dev(const struct spdk_ssam_dev *smdev, ++ uint16_t gfunc_id) ++{ ++ return smdev->smsessions[gfunc_id]; ++} ++ ++void ++ssam_dump_info_json(struct spdk_ssam_dev *smdev, uint16_t gfunc_id, ++ struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_session *smsession = NULL; ++ ++ if (gfunc_id == UINT16_MAX) { ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ bool is_smsession_exit = 0; ++ while (smsession != NULL) { ++ is_smsession_exit = 1; ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ if (is_smsession_exit == 0) { ++ return; ++ } ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "ctrlr", ssam_dev_get_name(smdev)); ++ spdk_json_write_named_string_fmt(w, "cpumask", "0x%s", ++ spdk_cpuset_fmt(spdk_thread_get_cpumask(smdev->thread))); ++ spdk_json_write_named_uint32(w, "session_num", (uint32_t)smdev->active_session_num); ++ spdk_json_write_named_object_begin(w, "backend_specific"); ++ spdk_json_write_named_array_begin(w, "session"); ++ while (smsession != NULL) { ++ smsession->backend->dump_info_json(smsession, w); ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ spdk_json_write_array_end(w); ++ spdk_json_write_object_end(w); ++ spdk_json_write_object_end(w); ++ } else { ++ smsession = ssam_session_find_in_dev(smdev, gfunc_id); ++ while (smsession != NULL) { ++ if (smsession->gfunc_id == gfunc_id) { ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "ctrlr", ssam_dev_get_name(smdev)); ++ spdk_json_write_named_string_fmt(w, "cpumask", "0x%s", ++ spdk_cpuset_fmt(spdk_thread_get_cpumask(smdev->thread))); ++ spdk_json_write_named_uint32(w, "session_num", (uint32_t)smdev->active_session_num); ++ spdk_json_write_named_object_begin(w, "backend_specific"); ++ spdk_json_write_named_array_begin(w, "session"); ++ smsession->backend->dump_info_json(smsession, w); ++ spdk_json_write_array_end(w); ++ spdk_json_write_object_end(w); ++ spdk_json_write_object_end(w); ++ } ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ } ++ ++} ++ ++const char * ++ssam_dev_get_name(const struct spdk_ssam_dev *smdev) ++{ ++ if (!smdev) { ++ return ""; ++ } 
++ return smdev->name; ++} ++ ++const char * ++ssam_session_get_name(const struct spdk_ssam_session *smsession) ++{ ++ if (!smsession) { ++ return ""; ++ } ++ return smsession->name; ++} ++ ++struct spdk_ssam_dev * ++ssam_dev_next(const struct spdk_ssam_dev *smdev) ++{ ++ if (smdev == NULL) { ++ return TAILQ_FIRST(&g_ssam_devices); ++ } ++ ++ return TAILQ_NEXT(smdev, tailq); ++} ++ ++struct spdk_ssam_session * ++ssam_session_find(uint16_t gfunc_id) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_dev *tmp = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ TAILQ_FOREACH_SAFE(smdev, &g_ssam_devices, tailq, tmp) { ++ smsession = ssam_session_find_in_dev(smdev, gfunc_id); ++ if (smsession != NULL) { ++ return smsession; ++ } ++ } ++ ++ return NULL; ++} ++ ++uint16_t ++ssam_get_gfunc_id_by_name(char *name) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_dev *tmp = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ uint16_t gfunc_id; ++ TAILQ_FOREACH_SAFE(smdev, &g_ssam_devices, tailq, tmp) { ++ if (smdev != NULL && smdev->active_session_num > 0) { ++ for (gfunc_id = 0; gfunc_id <= SSAM_PF_NUM_MAX_VAL; gfunc_id++) { ++ smsession = ssam_session_find_in_dev(smdev, gfunc_id); ++ if (smsession != NULL && strcmp(name, smsession->name) == 0) { ++ return gfunc_id; ++ } ++ } ++ } ++ } ++ ++ return SPDK_INVALID_GFUNC_ID; ++} ++ ++static struct spdk_ssam_dev * ++ssam_dev_find(uint16_t tid) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_dev *tmp = NULL; ++ ++ TAILQ_FOREACH_SAFE(smdev, &g_ssam_devices, tailq, tmp) { ++ if (smdev->tid == tid) { ++ return smdev; ++ } ++ } ++ ++ return NULL; ++} ++ ++int ++ssam_mount_normal(struct spdk_ssam_session *smsession, uint32_t lun_id) ++{ ++ uint16_t gfunc_id = smsession->gfunc_id; ++ uint16_t tid = smsession->smdev->tid; ++ ++ return ssam_function_mount(gfunc_id, lun_id, SSAM_MOUNT_NORMAL, tid); ++} ++ ++int ++ssam_umount_normal(struct spdk_ssam_session *smsession, uint32_t lun_id) ++{ ++ int rc; ++ ++ rc = ssam_function_umount(smsession->gfunc_id, lun_id); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: function umount failed when add scsi tgt, %d.\n", smsession->name, rc); ++ return rc; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_remount_normal(struct spdk_ssam_session *smsession, uint32_t lun_id) ++{ ++ return ssam_function_mount(smsession->gfunc_id, lun_id, SSAM_MOUNT_NORMAL, smsession->smdev->tid); ++} ++ ++static int ++ssam_remove_session(struct spdk_ssam_session *smsession) ++{ ++ int rc; ++ ++ if (smsession->backend->remove_session != NULL) { ++ rc = smsession->backend->remove_session(smsession); ++ if (rc != 0) { ++ SPDK_ERRLOG("session: %s can not be removed, task cnt %d.\n", ++ smsession->name, smsession->task_cnt); ++ return rc; ++ } ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_dev_thread_exit(void *unused) ++{ ++ (void)unused; ++ spdk_thread_exit(spdk_get_thread()); ++} ++ ++static uint32_t ++ssam_tid_to_cpumask(uint16_t tid, struct spdk_cpuset *cpumask) ++{ ++ uint32_t core; ++ uint32_t lcore; ++ uint32_t cnt; ++ ++ for (lcore = 0, cnt = 0; lcore < SPDK_CPUSET_SIZE - 1; lcore++) { ++ if (spdk_cpuset_get_cpu(&g_ssam_core_mask, lcore)) { ++ if (cnt == tid) { ++ core = lcore; ++ spdk_cpuset_set_cpu(cpumask, core, true); ++ return lcore; ++ } ++ cnt++; ++ } ++ } ++ ++ return SPDK_CPUSET_SIZE; ++} ++ ++void ++ssam_session_start_done(struct spdk_ssam_session *smsession, int response) ++{ ++ if (response == 0) { ++ if (smsession->smdev->active_session_num == UINT32_MAX) { ++ SPDK_ERRLOG("smsession %s: active 
session num reached upper limit %u\n", ++ smsession->name, smsession->smdev->active_session_num); ++ return; ++ } ++ smsession->smdev->active_session_num++; ++ } ++} ++ ++void ++ssam_set_session_be_freed(void **ctx) ++{ ++ struct ssam_event_user_ctx *_ctx; ++ ++ if (ctx == NULL) { ++ return; ++ } ++ ++ _ctx = SPDK_CONTAINEROF(ctx, struct ssam_event_user_ctx, ctx); ++ _ctx->session_freed = true; ++} ++ ++void ++ssam_send_event_async_done(void **ctx) ++{ ++ struct ssam_event_user_ctx *_ctx; ++ ++ if (ctx == NULL) { ++ return; ++ } ++ ++ _ctx = SPDK_CONTAINEROF(ctx, struct ssam_event_user_ctx, ctx); ++ _ctx->async_done = true; ++} ++ ++void ++ssam_session_stop_done(struct spdk_ssam_session *smsession, int rsp, void **ctx) ++{ ++ if (rsp == 0) { ++ if (smsession->smdev->active_session_num > 0) { ++ smsession->smdev->active_session_num--; ++ } else { ++ SPDK_ERRLOG("smsession %s: active session num reached lower limit %u\n", ++ smsession->name, smsession->smdev->active_session_num); ++ } ++ } ++ /* Smdev cannot be free here */ ++ ++ /* Stop process need async */ ++ ssam_send_event_async_done(ctx); ++} ++ ++void ++ssam_session_unreg_response_cb(struct spdk_ssam_session *smsession) ++{ ++ smsession->rsp_fn = NULL; ++ smsession->rsp_ctx = NULL; ++} ++ ++static int ++ssam_dev_create_register(struct spdk_ssam_dev *smdev, uint16_t tid) ++{ ++ char name[NAME_MAX]; ++ struct spdk_cpuset cpumask; ++ int rc; ++ ++ smdev->tid = tid; ++ ++ rc = snprintf(name, NAME_MAX, "%s%u", "ssam.", smdev->tid); ++ if (rc < 0 || rc >= NAME_MAX) { ++ SPDK_ERRLOG("ssam dev name is too long, tid %u\n", tid); ++ return -EINVAL; ++ } ++ ++ spdk_cpuset_zero(&cpumask); ++ smdev->lcore_id = ssam_tid_to_cpumask(tid, &cpumask); ++ if (smdev->lcore_id == SPDK_CPUSET_SIZE) { ++ SPDK_ERRLOG("Can not find cpu for tid %u\n", tid); ++ return -EINVAL; ++ } ++ ++ smdev->name = strdup(name); ++ if (smdev->name == NULL) { ++ SPDK_ERRLOG("Failed to create name for ssam controller %s.\n", name); ++ return -EIO; ++ } ++ ++ smdev->thread = spdk_thread_create(smdev->name, &cpumask); ++ if (smdev->thread == NULL) { ++ SPDK_ERRLOG("Failed to create thread for ssam controller %s.\n", name); ++ free(smdev->name); ++ smdev->name = NULL; ++ return -EIO; ++ } ++ ++ rc = ssam_sessions_init(&smdev->smsessions); ++ if (rc != 0) { ++ return rc; ++ } ++ TAILQ_INSERT_TAIL(&g_ssam_devices, smdev, tailq); ++ TAILQ_INIT(&smdev->io_wait_queue); ++ TAILQ_INIT(&smdev->io_wait_queue_r); ++ ++ SPDK_NOTICELOG("Controller %s: new controller added, tid %u\n", smdev->name, tid); ++ ++ return 0; ++} ++ ++void ++ssam_dev_unregister(struct spdk_ssam_dev **dev) ++{ ++ struct spdk_ssam_dev *smdev = *dev; ++ struct spdk_thread *thread = smdev->thread; ++ ++ if (!ssam_sessions_empty(smdev->smsessions)) { ++ SPDK_NOTICELOG("Controller %s still has valid session.\n", ++ smdev->name); ++ return; ++ } ++ memset(smdev->smsessions, 0, SSAM_MAX_SESSION_PER_DEV * sizeof(struct spdk_ssam_session *)); ++ free(smdev->smsessions); ++ smdev->smsessions = NULL; ++ ++ /* Used for hot restart. 
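++	 * The stop_poller registered by ssam_dev_stop_worker_poller() keeps running
++	 * until every session has been removed (see ssam_dev_stop_poller()), so it is
++	 * released here together with the rest of the device.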
*/ ++ if (smdev->stop_poller != NULL) { ++ spdk_poller_unregister(&smdev->stop_poller); ++ smdev->stop_poller = NULL; ++ } ++ ++ SPDK_NOTICELOG("Controller %s: removed\n", smdev->name); ++ ++ free(smdev->name); ++ smdev->name = NULL; ++ ssam_lock(); ++ TAILQ_REMOVE(&g_ssam_devices, smdev, tailq); ++ ssam_unlock(); ++ ++ free(smdev); ++ smdev = NULL; ++ *dev = NULL; ++ ++ spdk_thread_send_msg(thread, ssam_dev_thread_exit, NULL); ++ ++ return; ++} ++ ++static int ++ssam_init_session_fields(struct spdk_ssam_session_reg_info *info, ++ struct spdk_ssam_dev *smdev, struct spdk_ssam_session *smsession) ++{ ++ if (info->backend->type == VIRTIO_TYPE_FS) { ++ smsession->mp = ssam_get_fs_mp(info->gfunc_id); ++ } else { ++ smsession->mp = g_ssam_info.mp[smdev->tid % ssam_get_core_num()]; ++ } ++ smsession->initialized = true; ++ smsession->registered = true; ++ smsession->thread = smdev->thread; ++ smsession->backend = info->backend; ++ smsession->smdev = smdev; ++ smsession->gfunc_id = info->gfunc_id; ++ smsession->started = false; ++ smsession->rsp_fn = info->rsp_fn; ++ smsession->rsp_ctx = info->rsp_ctx; ++ smsession->max_queues = info->queues; ++ smsession->queue_size = SPDK_SSAM_DEFAULT_VQ_SIZE; ++ if (info->name == NULL) { ++ smsession->name = spdk_sprintf_alloc("%s_%s_%d", smdev->name, info->type_name, info->gfunc_id); ++ } else { ++ smsession->name = strdup(info->name); ++ } ++ if (smsession->name == NULL) { ++ SPDK_ERRLOG("smsession name alloc failed\n"); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_add_session(struct spdk_ssam_session_reg_info *info, ++ struct spdk_ssam_dev *smdev, struct spdk_ssam_session **smsession) ++{ ++ struct spdk_ssam_session *l_stsession = NULL; ++ size_t with_ctx_len = sizeof(*l_stsession) + info->session_ctx_size; ++ int rc; ++ ++ if (smdev->active_session_num == SSAM_MAX_SESSION_PER_DEV) { ++ SPDK_ERRLOG("%s reached upper limit %u\n", smdev->name, SSAM_MAX_SESSION_PER_DEV); ++ return -EAGAIN; ++ } ++ ++ if (g_ssam_info.mp == NULL) { ++ SPDK_ERRLOG("No memory pool\n"); ++ return -ENOMEM; ++ } ++ ++ rc = posix_memalign((void **)&l_stsession, SPDK_CACHE_LINE_SIZE, with_ctx_len); ++ if (rc != 0) { ++ SPDK_ERRLOG("smsession alloc failed\n"); ++ return -ENOMEM; ++ } ++ memset(l_stsession, 0, with_ctx_len); ++ ++ rc = ssam_init_session_fields(info, smdev, l_stsession); ++ if (rc != 0) { ++ free(l_stsession); ++ l_stsession = NULL; ++ return rc; ++ } ++ ++ rc = ssam_sessions_insert(smdev->smsessions, l_stsession); ++ if (rc != 0) { ++ return rc; ++ } ++ *smsession = l_stsession; ++ smdev->type |= info->backend->type; ++ ++ return 0; ++} ++ ++static int ++ssam_dev_register(struct spdk_ssam_dev **dev, uint16_t tid) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ int rc; ++ ++ smdev = calloc(1, sizeof(*smdev)); ++ if (smdev == NULL) { ++ SPDK_ERRLOG("Couldn't alloc device for tid %u.\n", tid); ++ return -1; ++ } ++ ++ rc = ssam_dev_create_register(smdev, tid); ++ if (rc != 0) { ++ free(smdev); ++ smdev = NULL; ++ return -1; ++ } ++ ++ *dev = smdev; ++ ++ return 0; ++} ++ ++int ++ssam_session_register(struct spdk_ssam_session_reg_info *info, ++ struct spdk_ssam_session **smsession) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ int rc; ++ ++ if (strcmp(info->type_name, SPDK_SESSION_TYPE_FS) != 0) { ++ if (ssam_session_find(info->gfunc_id)) { ++ SPDK_ERRLOG("Session with function id %d already exists.\n", info->gfunc_id); ++ return -EEXIST; ++ } ++ } ++ ++ smdev = ssam_dev_find(info->tid); ++ if (smdev == NULL) { ++ /* The smdev has been started during 
process initialization. Do not need to start the poller here. */ ++ SPDK_ERRLOG("No device with function id %d tid %u.\n", info->gfunc_id, info->tid); ++ return -ENODEV; ++ } ++ ++ rc = ssam_add_session(info, smdev, smsession); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_session_unregister(struct spdk_ssam_session *smsession) ++{ ++ int rc; ++ ++ if (smsession == NULL) { ++ SPDK_ERRLOG("smsession null.\n"); ++ return -EINVAL; ++ } ++ ++ if (smsession->task_cnt > 0) { ++ SPDK_ERRLOG("%s is processing I/O(%d) and cannot be deleted.\n", ++ smsession->name, smsession->task_cnt); ++ return -EBUSY; ++ } ++ ++ if (smsession->pending_async_op_num != 0) { ++ SPDK_ERRLOG("[OFFLOAD_SNIC] %s has internal events(%d) and cannot be deleted.\n", ++ smsession->name, smsession->pending_async_op_num); ++ return -EBUSY; ++ } ++ ++ rc = ssam_remove_session(smsession); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static void ssam_io_queue_handle(struct spdk_ssam_dev *smdev) ++{ ++ uint64_t count = 0; ++ uint64_t io_wait_cnt = smdev->io_wait_cnt; ++ while (count < io_wait_cnt) { ++ struct spdk_ssam_session_io_wait *io_wait = TAILQ_FIRST(&smdev->io_wait_queue); ++ ssam_session_remove_io_wait(smdev, io_wait); ++ if (io_wait->cb_fn != NULL) { ++ io_wait->cb_fn(io_wait->cb_arg); ++ } ++ count++; ++ } ++} ++ ++struct forward_ctx { ++ struct spdk_ssam_session *smsession; ++ struct ssam_request *io_req; ++}; ++ ++static void ++ssam_handle_forward_req(void *_ctx) ++{ ++ struct forward_ctx *ctx = (struct forward_ctx *)_ctx; ++ ctx->smsession->backend->request_worker(ctx->smsession, ctx->io_req); ++ free(ctx); ++} ++/* The resent request that is polled at the beginning of the hot restart is not the smsession of this smdev ++ * and needs to be forwarded to the corresponding smdev. ++ * If the forwarding is successful, true is returned. Otherwise, false is returned. 
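++ * The hand-off uses spdk_thread_send_msg(): a forward_ctx allocated here is consumed
++ * by ssam_handle_forward_req() on the owning session's thread and freed there.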
++ */ ++static bool ++ssam_dev_forward_req(struct ssam_request *io_req) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct forward_ctx *ctx = NULL; ++ int rc; ++ ssam_lock(); ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ if (smdev->smsessions[io_req->gfunc_id] != NULL && ++ smdev->smsessions[io_req->gfunc_id]->started == true) { ++ ctx = calloc(1, sizeof(struct forward_ctx)); ++ if (!ctx) { ++ SPDK_ERRLOG("%s: calloc failed.\n", smdev->name); ++ goto out; ++ } ++ ctx->smsession = smdev->smsessions[io_req->gfunc_id]; ++ ctx->io_req = io_req; ++ rc = spdk_thread_send_msg(smdev->smsessions[io_req->gfunc_id]->thread, ssam_handle_forward_req, ++ ctx); ++ if (rc) { ++ SPDK_ERRLOG("%s: send msg error %d.\n", smdev->name, rc); ++ free(ctx); ++ goto out; ++ } ++ ssam_unlock(); ++ return true; ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++out: ++ ssam_unlock(); ++ return false; ++} ++ ++struct ssam_dev_io_complete_arg { ++ struct spdk_ssam_dev *smdev; ++ struct ssam_io_response io_resp; ++}; ++ ++static void ++ssam_dev_io_complete_cb(void *arg) ++{ ++ struct ssam_dev_io_complete_arg *cb_arg = (struct ssam_dev_io_complete_arg *)arg; ++ int rc = ssam_io_complete(cb_arg->smdev->tid, &cb_arg->io_resp); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_dev_io_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(cb_arg->smdev, io_wait_r); ++ return; ++ } ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ssam_dev_io_complete(struct spdk_ssam_dev *smdev, struct ssam_request *io_req, ++ bool success) ++{ ++ struct ssam_io_response io_resp; ++ struct ssam_virtio_res *virtio_res = (struct ssam_virtio_res *)&io_resp.data; ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ struct iovec io_vec; ++ struct virtio_scsi_cmd_resp resp = {0}; ++ enum ssam_device_type type; ++ uint8_t res_status; ++ int rc; ++ type = ssam_get_virtio_type(io_req->gfunc_id); ++ ++ if (success) { ++ switch (type) { ++ case SSAM_DEVICE_VIRTIO_BLK: ++ res_status = VIRTIO_BLK_S_OK; ++ break; ++ case SSAM_DEVICE_VIRTIO_SCSI: ++ res_status = VIRTIO_SCSI_S_OK; ++ break; ++ default: ++ res_status = 0; /* unknown type, maybe 0 means ok */ ++ } ++ } else { ++ SPDK_INFOLOG(ssam, "%s: io complete return error gfunc_id %u type %d.\n", ++ smdev->name, io_req->gfunc_id, type); ++ switch (type) { ++ case SSAM_DEVICE_VIRTIO_BLK: ++ res_status = VIRTIO_BLK_S_IOERR; ++ break; ++ case SSAM_DEVICE_VIRTIO_SCSI: ++ res_status = VIRTIO_SCSI_S_FAILURE; ++ break; ++ default: ++ res_status = 1; /* unknown type, maybe 1 means error */ ++ } ++ } ++ ++ memset(&io_resp, 0, sizeof(io_resp)); ++ io_resp.gfunc_id = io_req->gfunc_id; ++ io_resp.iocb_id = io_req->iocb_id; ++ io_resp.status = io_req->status; ++ io_resp.flr_seq = io_req->flr_seq; ++ io_resp.req = io_req; ++ ++ virtio_res->iovs = &io_vec; ++ if (type == SSAM_DEVICE_VIRTIO_SCSI && io_cmd->writable) { ++ virtio_res->iovs->iov_base = io_cmd->iovs[1].iov_base; ++ virtio_res->iovs->iov_len = io_cmd->iovs[1].iov_len; ++ } else { ++ virtio_res->iovs->iov_base = io_cmd->iovs[io_cmd->iovcnt - 1].iov_base; ++ virtio_res->iovs->iov_len = io_cmd->iovs[io_cmd->iovcnt - 1].iov_len; ++ } ++ virtio_res->iovcnt = 1; ++ if (type == SSAM_DEVICE_VIRTIO_SCSI && io_req->type != VMIO_TYPE_VIRTIO_SCSI_CTRL) { ++ resp.response = res_status; ++ virtio_res->rsp = &resp; 
++ virtio_res->rsp_len = sizeof(struct virtio_scsi_cmd_resp); ++ } else { ++ virtio_res->rsp = &res_status; ++ virtio_res->rsp_len = sizeof(res_status); ++ } ++ ++ rc = ssam_io_complete(smdev->tid, &io_resp); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_dev_io_complete_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_dev_io_complete_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smdev; ++ cb_arg->io_resp = io_resp; ++ io_wait_r->cb_fn = ssam_dev_io_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smdev, io_wait_r); ++ } ++} ++ ++static void ++ssam_dev_io_request(struct spdk_ssam_dev *smdev, struct ssam_request *io_req) ++{ ++ struct spdk_ssam_session *smsession = NULL; ++ ++ SPDK_INFOLOG(ssam_blk_data, "handling io tid=%u gfunc_id=%u type=%d rw=%u vqid=%u reqid=%u.\n", ++ smdev->tid, io_req->gfunc_id, io_req->type, io_req->req.cmd.writable, ++ io_req->req.cmd.virtio.vq_idx, io_req->req.cmd.virtio.req_idx); ++ ++ smsession = smdev->smsessions[io_req->gfunc_id]; ++ if (smsession == NULL || smsession->started == false) { ++ if (!ssam_dev_forward_req(io_req)) { ++ SPDK_INFOLOG(ssam, "%s: not have gfunc_id %u yet in io request.\n", ++ smdev->name, io_req->gfunc_id); ++ ssam_dev_io_complete(smdev, io_req, false); ++ } ++ return; ++ } ++ ++ smsession->backend->request_worker(smsession, io_req); ++ return; ++} ++ ++static void ssam_io_wait_r_queue_handle(struct spdk_ssam_dev *smdev) ++{ ++ uint64_t count = 0; ++ uint64_t io_wait_r_cnt = smdev->io_wait_r_cnt > SSAM_MAX_REQ_POLL_SIZE ? SSAM_MAX_REQ_POLL_SIZE : ++ smdev->io_wait_r_cnt; ++ while (count < io_wait_r_cnt) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = TAILQ_FIRST(&smdev->io_wait_queue_r); ++ ssam_session_remove_io_wait_r(smdev, io_wait_r); ++ if (io_wait_r->cb_fn != NULL) { ++ io_wait_r->cb_fn(io_wait_r->cb_arg); ++ } ++ count++; ++ free(io_wait_r); ++ io_wait_r = NULL; ++ } ++} ++ ++static int ++ssam_dev_request_worker(void *arg) ++{ ++ int io_num; ++ struct ssam_request *io_req[SSAM_MAX_REQ_POLL_SIZE] = {0}; ++ struct spdk_ssam_dev *smdev = arg; ++ ++ /* The I/O waiting due to insufficient memory needs to be processed first. */ ++ if (spdk_unlikely(smdev->io_wait_cnt > 0)) { ++ ssam_io_queue_handle(smdev); ++ return SPDK_POLLER_BUSY; ++ } ++ ++ io_num = ssam_request_poll(smdev->tid, SSAM_MAX_REQ_POLL_SIZE, io_req); ++ if ((io_num <= 0) || (io_num > SSAM_MAX_REQ_POLL_SIZE)) { ++ /* ++ * The rpc delete callback is registered when the bdev deleting. spdk_put_io_channel ++ * executed the RPC delete callback.The stdev_io_no_data_request function continuously ++ * determines whether to perform the spdk_put_io_channel operation to ensure that the ++ * deletion of the bdev does not time out. 
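++		 * In short: completions queued on io_wait_queue_r are still drained here even
++		 * when no new request was polled, so pending completions keep being retried.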
++ */ ++ if (spdk_unlikely(smdev->io_wait_r_cnt > 0)) { ++ ssam_io_wait_r_queue_handle(smdev); ++ } ++ return SPDK_POLLER_BUSY; ++ } ++ ++ if (spdk_unlikely(smdev->io_wait_r_cnt > 0)) { ++ ssam_io_wait_r_queue_handle(smdev); ++ } ++ ++ for (int i = 0; i < io_num; i++) { ++ ssam_dev_io_request(smdev, io_req[i]); ++ } ++ ++ return SPDK_POLLER_BUSY; ++} ++ ++static void ++ssam_dev_io_response(struct spdk_ssam_dev *smdev, const struct ssam_dma_rsp *dma_rsp) ++{ ++ struct spdk_ssam_session *smsession = NULL; ++ const struct spdk_ssam_dma_cb *dma_cb = (const struct spdk_ssam_dma_cb *)&dma_rsp->cb; ++ ++ SPDK_INFOLOG(ssam_blk_data, ++ "handle dma resp tid=%u gfunc_id=%u rw=%u vqid=%u task_idx=%u statuc=%u.\n", ++ smdev->tid, dma_cb->gfunc_id, dma_cb->req_dir, ++ dma_cb->vq_idx, dma_cb->task_idx, dma_cb->status); ++ ++ smsession = smdev->smsessions[dma_cb->gfunc_id]; ++ if (smsession == NULL) { ++ smdev->discard_io_num++; ++ SPDK_ERRLOG("smsessions not have gfunc_id %u yet in io response.\n", dma_cb->gfunc_id); ++ return; ++ } ++ ++ smsession->backend->response_worker(smsession, (void *)dma_rsp); ++ ++ return; ++} ++ ++static void ++ssam_dev_print_stuck_io(struct spdk_ssam_dev *smdev) ++{ ++ struct spdk_ssam_session *smsession = NULL; ++ int i; ++ ++ for (i = 0; i < SSAM_MAX_SESSION_PER_DEV; i++) { ++ smsession = smdev->smsessions[i]; ++ if (smsession == NULL) { ++ continue; ++ } ++ if (smsession->task_cnt > 0) { ++ SPDK_ERRLOG("%s: %d IO stuck for %ds\n", smsession->name, ++ smsession->task_cnt, IO_STUCK_TIMEOUT); ++ if (smsession->backend->print_stuck_io_info != NULL) { ++ smsession->backend->print_stuck_io_info(smsession); ++ } ++ } ++ } ++} ++ ++static void ++ssam_dev_io_stuck_check(struct spdk_ssam_dev *smdev) ++{ ++ uint64_t diff_tsc = spdk_get_ticks() - smdev->io_stuck_tsc; ++ ++ if (smdev->io_num == 0) { ++ smdev->io_stuck_tsc = spdk_get_ticks(); ++ return; ++ } ++ ++ if ((diff_tsc / IO_STUCK_TIMEOUT) > spdk_get_ticks_hz()) { ++ ssam_dev_print_stuck_io(smdev); ++ smdev->io_stuck_tsc = spdk_get_ticks(); ++ } ++} ++ ++void ++ssam_dev_io_dec(struct spdk_ssam_dev *smdev) ++{ ++ smdev->io_num--; ++} ++ ++static int ++ssam_dev_response_worker(void *arg) ++{ ++ int io_num; ++ struct spdk_ssam_dev *smdev = arg; ++ struct ssam_dma_rsp dma_rsp[SSAM_MAX_RESP_POLL_SIZE] = {0}; ++ ++ uint64_t ticks = spdk_get_ticks(); ++ if (smdev->stat.poll_cur_tsc == 0) { ++ smdev->stat.poll_cur_tsc = ticks; ++ } else { ++ smdev->stat.poll_tsc += ticks - smdev->stat.poll_cur_tsc; ++ smdev->stat.poll_count++; ++ smdev->stat.poll_cur_tsc = ticks; ++ } ++ ++ io_num = ssam_dma_rsp_poll(smdev->tid, SSAM_MAX_RESP_POLL_SIZE, dma_rsp); ++ if (io_num <= 0 || io_num > SSAM_MAX_RESP_POLL_SIZE) { ++ ssam_dev_io_stuck_check(smdev); ++ return SPDK_POLLER_BUSY; ++ } ++ ++ if (smdev->io_num < ((uint64_t)(uint32_t)io_num)) { ++ SPDK_ERRLOG("%s: DMA response IO num too much, should be %lu but %d\n", ++ smdev->name, smdev->io_num, io_num); ++ smdev->discard_io_num += io_num; ++ return SPDK_POLLER_BUSY; ++ } ++ smdev->io_stuck_tsc = spdk_get_ticks(); ++ ++ for (int i = 0; i < io_num; i++) { ++ ssam_dev_io_response(smdev, dma_rsp + i); ++ } ++ ++ return SPDK_POLLER_BUSY; ++} ++ ++int ++ssam_dev_register_worker_poller(struct spdk_ssam_dev *smdev) ++{ ++ SPDK_NOTICELOG("%s: worker starting.\n", smdev->name); ++ if (smdev->requestq_poller == NULL) { ++ smdev->requestq_poller = SPDK_POLLER_REGISTER(ssam_dev_request_worker, smdev, 0); ++ if (smdev->requestq_poller == NULL) { ++ SPDK_WARNLOG("%s: stdev_request_worker start failed.\n", 
smdev->name);
++			return -1;
++		}
++
++		SPDK_INFOLOG(ssam, "%s: started stdev_request_worker poller on lcore %d\n",
++			smdev->name, spdk_env_get_current_core());
++	}
++
++	if (smdev->responseq_poller == NULL) {
++		smdev->responseq_poller = SPDK_POLLER_REGISTER(ssam_dev_response_worker, smdev, 0);
++		if (smdev->responseq_poller == NULL) {
++			SPDK_WARNLOG("%s: stdev_response_worker start failed.\n", smdev->name);
++			return -1;
++		}
++
++		SPDK_INFOLOG(ssam, "%s: started stdev_response_worker poller on lcore %d\n",
++			smdev->name, spdk_env_get_current_core());
++	}
++	return 0;
++}
++
++void
++ssam_dev_unregister_worker_poller(struct spdk_ssam_dev *smdev)
++{
++	if (!ssam_sessions_empty(smdev->smsessions)) {
++		return;
++	}
++
++	if (smdev->requestq_poller != NULL) {
++		spdk_poller_unregister(&smdev->requestq_poller);
++		smdev->requestq_poller = NULL;
++	}
++
++	if (smdev->responseq_poller != NULL) {
++		spdk_poller_unregister(&smdev->responseq_poller);
++		smdev->responseq_poller = NULL;
++	}
++}
++/* When stopping the worker, we need to stop the two pollers first,
++ * wait until all sessions are deleted, and then free smdev.
++ */
++static int
++ssam_dev_stop_poller(void *arg)
++{
++	struct spdk_ssam_dev *smdev = arg;
++	struct spdk_ssam_session *smsession = NULL;
++
++	/* Special processing is required for virtio-scsi, because in SCSI
++	 * scenarios smsessions are not actively or passively removed.
++	 */
++	if ((smdev->type & (VIRTIO_TYPE_SCSI | VIRTIO_TYPE_FS)) != 0 && smdev->active_session_num > 0) {
++		for (int i = 0; i < SSAM_MAX_SESSION_PER_DEV; i++) {
++			if (smdev->smsessions[i] != NULL) {
++				smsession = smdev->smsessions[i];
++				if (smsession->backend->remove_self != NULL) {
++					smsession->backend->remove_self(smsession); /* remove session */
++				}
++			}
++		}
++	}
++
++	/* Wait until all sessions have been removed */
++	if (smdev->active_session_num != 0) {
++		return SPDK_POLLER_BUSY;
++	}
++
++	/* Release the smdev resources */
++	ssam_dev_unregister(&smdev);
++
++	return SPDK_POLLER_BUSY;
++}
++
++static void
++ssam_dev_stop_worker_poller(void *args)
++{
++	struct spdk_ssam_dev *smdev = (struct spdk_ssam_dev *)args;
++
++	if (smdev->requestq_poller != NULL) {
++		spdk_poller_unregister(&smdev->requestq_poller);
++		smdev->requestq_poller = NULL;
++	}
++
++	if (smdev->responseq_poller != NULL) {
++		spdk_poller_unregister(&smdev->responseq_poller);
++		smdev->responseq_poller = NULL;
++	}
++
++	SPDK_NOTICELOG("%s: poller stopped.\n", smdev->name);
++	smdev->stop_poller = SPDK_POLLER_REGISTER(ssam_dev_stop_poller, smdev, 0);
++	if (smdev->stop_poller == NULL) {
++		SPDK_WARNLOG("%s: ssam_dev stop failed.\n", smdev->name);
++	}
++}
++/* When starting the worker, we need to start the two pollers first */
++static void
++ssam_dev_start_worker_poller(void *args)
++{
++	struct spdk_ssam_dev *smdev = (struct spdk_ssam_dev *)args;
++	ssam_dev_register_worker_poller(smdev);
++}
++
++static void
++ssam_send_event_response(struct ssam_session_fn_ctx *ev_ctx)
++{
++	if (ev_ctx->user_ctx.session_freed == true) {
++		goto out;
++	}
++
++	if (*ev_ctx->rsp_fn != NULL) {
++		(*ev_ctx->rsp_fn)(ev_ctx->rsp_ctx, ev_ctx->rsp);
++		*ev_ctx->rsp_fn = NULL;
++	}
++
++out:
++	/* ev_ctx was allocated by another thread */
++	free(ev_ctx);
++	ev_ctx = NULL;
++}
++
++static void
++ssam_check_send_event_timeout(struct ssam_session_fn_ctx *ev_ctx, spdk_msg_fn fn)
++{
++	uint64_t diff_tsc = spdk_get_ticks() - ev_ctx->start_tsc;
++	struct spdk_ssam_session *smsession = ev_ctx->smsession;
++
++	if ((diff_tsc / SEND_EVENT_WAIT_TIME) > spdk_get_ticks_hz()) {
++		/* If timeout,
finish send msg, end the process */ ++ SPDK_ERRLOG("Send event to session %s time out.\n", smsession->name); ++ ev_ctx->rsp = -ETIMEDOUT; ++ ssam_send_event_response(ev_ctx); ++ return; ++ } ++ ++ spdk_thread_send_msg(spdk_get_thread(), fn, (void *)ev_ctx); ++ ++ return; ++} ++ ++static void ++ssam_send_event_finish(void *ctx) ++{ ++ struct ssam_session_fn_ctx *ev_ctx = ctx; ++ struct spdk_ssam_session *smsession = ev_ctx->smsession; ++ ++ if ((ev_ctx->rsp == 0) && (ev_ctx->need_async) && (ev_ctx->user_ctx.async_done == false)) { ++ ssam_check_send_event_timeout(ev_ctx, ssam_send_event_finish); ++ return; ++ } ++ ++ if (ssam_trylock() != 0) { ++ ssam_check_send_event_timeout(ev_ctx, ssam_send_event_finish); ++ return; ++ } ++ ++ if (smsession->pending_async_op_num > 0) { ++ smsession->pending_async_op_num--; ++ } else { ++ SPDK_ERRLOG("[OFFLOAD_SNIC] smsession %s: internal error.\n", smsession->name); ++ } ++ ++ /* If ev_ctx->cb_fn proccess failed, ev_ctx->cpl_fn will not excute */ ++ if ((ev_ctx->rsp == 0) && (ev_ctx->cpl_fn != NULL)) { ++ ev_ctx->cpl_fn(smsession, &ev_ctx->user_ctx.ctx); ++ } ++ ++ ssam_unlock(); ++ ++ ssam_send_event_response(ev_ctx); ++} ++ ++static void ++ssam_send_event(void *ctx) ++{ ++ struct ssam_session_fn_ctx *ev_ctx = ctx; ++ struct spdk_ssam_session *smsession = ev_ctx->smsession; ++ ++ if (ssam_trylock() != 0) { ++ ssam_check_send_event_timeout(ev_ctx, ssam_send_event); ++ return; ++ } ++ ++ if (smsession->initialized && (ev_ctx->cb_fn != NULL)) { ++ ev_ctx->user_ctx.async_done = false; ++ ev_ctx->rsp = ev_ctx->cb_fn(smsession, &ev_ctx->user_ctx.ctx); ++ } else { ++ ev_ctx->rsp = 0; ++ ev_ctx->user_ctx.async_done = true; ++ } ++ ++ ssam_unlock(); ++ /* The judgment logic is used to adapt to the hot-restart. ++ * Because the session has been released during the hot restart, ++ * the following ssam_send_event_finish is not required. 
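++	 * In that case ev_ctx is freed directly on this thread and neither cpl_fn nor rsp_fn is invoked.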
++ */ ++ if (ev_ctx->user_ctx.session_freed) { ++ free(ev_ctx); ++ return; ++ } else { ++ ev_ctx->start_tsc = spdk_get_ticks(); ++ spdk_thread_send_msg(g_ssam_init_thread, ssam_send_event_finish, ctx); ++ } ++} ++ ++static spdk_ssam_session_rsp_fn g_rsp_fn = NULL; ++ ++int ++ssam_send_event_to_session(struct spdk_ssam_session *smsession, spdk_ssam_session_fn fn, ++ spdk_ssam_session_cpl_fn cpl_fn, struct spdk_ssam_send_event_flag send_event_flag, void *ctx) ++{ ++ struct ssam_session_fn_ctx *ev_ctx; ++ int rc; ++ ++ ev_ctx = calloc(1, sizeof(*ev_ctx)); ++ if (ev_ctx == NULL) { ++ SPDK_ERRLOG("Failed to alloc ssam event.\n"); ++ return -ENOMEM; ++ } ++ ++ ev_ctx->smsession = smsession; ++ ev_ctx->cb_fn = fn; ++ ev_ctx->cpl_fn = cpl_fn; ++ ev_ctx->need_async = send_event_flag.need_async; ++ if (send_event_flag.need_rsp == true) { ++ ev_ctx->rsp_fn = &smsession->rsp_fn; ++ ev_ctx->rsp_ctx = smsession->rsp_ctx; ++ } else { ++ ev_ctx->rsp_fn = &g_rsp_fn; ++ ev_ctx->rsp_ctx = NULL; ++ } ++ ++ ev_ctx->user_ctx.ctx = ctx; ++ ev_ctx->user_ctx.session_freed = false; ++ ++ if (smsession->pending_async_op_num < UINT32_MAX) { ++ smsession->pending_async_op_num++; ++ } else { ++ SPDK_ERRLOG("[OFFLOAD_SNIC] smsession %s: internel error, events stuck too much\n", ++ smsession->name); ++ } ++ ++ ev_ctx->start_tsc = spdk_get_ticks(); ++ rc = spdk_thread_send_msg(smsession->thread, ssam_send_event, ev_ctx); ++ if (rc != 0) { ++ SPDK_ERRLOG("send thread msg failed\n"); ++ free(ev_ctx); ++ return rc; ++ } ++ return 0; ++} ++ ++void ++spdk_ssam_config_json(struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ spdk_json_write_array_begin(w); ++ ++ ssam_lock(); ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ while (smsession != NULL) { ++ smsession->backend->write_config_json(smsession, w); ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ ++ smdev = ssam_dev_next(smdev); ++ } ++ ++ ssam_unlock(); ++ ++ spdk_json_write_array_end(w); ++} ++ ++int ++ssam_get_config(struct spdk_ssam_session *smsession, uint8_t *config, ++ uint32_t len, uint16_t queues) ++{ ++ const struct spdk_ssam_session_backend *backend = smsession->backend; ++ ++ if (backend->ssam_get_config == NULL) { ++ return -1; ++ } ++ ++ return backend->ssam_get_config(smsession, config, len, queues); ++} ++ ++struct dev_destroy_ctx { ++ struct spdk_ssam_session *smsession; ++ void *args; ++}; ++ ++static void spdk_ssam_dev_destroy(void *arg) ++{ ++ struct dev_destroy_ctx *ctx = (struct dev_destroy_ctx *)arg; ++ ctx->smsession->backend->destroy_bdev_device(ctx->smsession, ctx->args); ++ free(ctx); ++} ++ ++void ++ssam_send_dev_destroy_msg(struct spdk_ssam_session *smsession, void *args) ++{ ++ struct dev_destroy_ctx *ctx = calloc(1, sizeof(struct dev_destroy_ctx)); ++ if (ctx == NULL) { ++ SPDK_ERRLOG("%s: out of memory, destroy dev failed\n", smsession->name); ++ return; ++ } ++ ctx->smsession = smsession; ++ ctx->args = args; ++ spdk_thread_send_msg(g_ssam_init_thread, spdk_ssam_dev_destroy, ctx); ++} ++ ++void ++spdk_ssam_poller_start(void) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_dev *tmp = NULL; ++ ssam_lock(); ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ tmp = ssam_dev_next(smdev); ++ /* Send the message to each smdev to start the worker on the smdev. 
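++		 * Each smdev carries an SPDK thread (smdev->thread), so the pollers end up registered on that thread.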
++		 */
++		spdk_thread_send_msg(smdev->thread, ssam_dev_start_worker_poller, smdev);
++		smdev = tmp;
++	}
++	ssam_unlock();
++}
++
++static void
++ssam_fini(void *arg)
++{
++	struct spdk_ssam_dev *smdev = NULL;
++	struct spdk_ssam_dev *tmp = NULL;
++	SPDK_WARNLOG("ssam is finishing\n");
++	ssam_lock();
++	smdev = ssam_dev_next(NULL);
++	while (smdev != NULL) {
++		tmp = ssam_dev_next(smdev);
++		/* Send the message to each smdev to stop the worker on the smdev. */
++		spdk_thread_send_msg(smdev->thread, ssam_dev_stop_worker_poller, smdev);
++		smdev = tmp;
++	}
++	ssam_unlock();
++
++	spdk_cpuset_zero(&g_ssam_core_mask);
++
++	g_ssam_fini_cpl_cb();
++}
++
++static void *
++ssam_session_shutdown(void *arg)
++{
++	SPDK_INFOLOG(ssam, "ssam session exiting\n");
++	spdk_thread_send_msg(g_ssam_init_thread, ssam_fini, NULL);
++
++	return NULL;
++}
++
++void
++spdk_ssam_subsystem_fini(ssam_fini_cb fini_cb)
++{
++	if (spdk_get_thread() != g_ssam_init_thread) {
++		SPDK_ERRLOG("ssam fini thread is not the init thread, internal error\n");
++	}
++
++	g_ssam_fini_cpl_cb = fini_cb;
++
++	ssam_session_shutdown(NULL);
++
++	spdk_ssam_fs_poller_destroy();
++}
++
++void
++spdk_ssam_subsystem_init(ssam_init_cb init_cb)
++{
++	uint32_t i;
++	int ret;
++	int shm_id;
++
++	g_ssam_init_thread = spdk_get_thread();
++	if (g_ssam_init_thread == NULL) {
++		ret = -EBUSY;
++		SPDK_ERRLOG("get thread error\n");
++		goto exit;
++	}
++
++	/* init ssam core mask */
++	spdk_cpuset_zero(&g_ssam_core_mask);
++	SPDK_ENV_FOREACH_CORE(i) {
++		spdk_cpuset_set_cpu(&g_ssam_core_mask, i, true);
++	}
++
++	ret = ssam_set_core_num(spdk_cpuset_count(&g_ssam_core_mask));
++	if (ret != 0) {
++		goto exit;
++	}
++
++	ret = ssam_init();
++	if (ret != 0) {
++		goto exit;
++	}
++
++	if (!spdk_ssam_get_shm_created()) {
++		shm_id = shm_open(SSAM_SHM, O_CREAT | O_EXCL | O_RDWR, SSAM_SHM_PERMIT);
++		if (shm_id < 0) {
++			SPDK_ERRLOG("failed to create shared memory %s\n", SSAM_SHM);
++			ret = -1;
++			goto exit;
++		}
++		spdk_ssam_set_shm_created(true);
++	}
++
++exit:
++	init_cb(ret);
++	return;
++}
++
++/* Initialize all smdev modules during submodule initialization.
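++ * One spdk_ssam_dev is registered per ssam core; if any registration fails,
++ * the devices registered so far are unregistered again before returning.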
*/ ++static int ++ssam_smdev_init(void) ++{ ++ int rc = 0; ++ struct spdk_ssam_dev *smdev; ++ struct spdk_ssam_dev *tmp = NULL; ++ uint16_t core_num = ssam_get_core_num(); ++ for (uint16_t i = 0; i < core_num; ++i) { ++ rc = ssam_dev_register(&smdev, i); ++ if (rc != 0) { ++ goto out; ++ } ++ } ++ ++ rc = ssam_get_hot_upgrade_state(); ++ if (rc != 0) { ++ SPDK_ERRLOG(": virtio upgrade state failed.\n"); ++ return rc; ++ } ++ ++ return 0; ++out: ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ tmp = ssam_dev_next(smdev); ++ ssam_dev_unregister(&smdev); ++ smdev = tmp; ++ } ++ return rc; ++} ++ ++static int ++ssam_server_init(void) ++{ ++ uint32_t core_num = ssam_get_core_num(); ++ uint32_t mempool_size = (ssam_get_mempool_size() / core_num) & (~0U - 1); ++ uint32_t i; ++ ++ /* Disable dummy I/O for hot restart */ ++ ++ for (i = 0; i < core_num; i++) { ++ g_ssam_info.mp[i] = ssam_mempool_create(mempool_size * SSAM_MB, SSAM_DEFAULT_MEMPOOL_EXTRA_SIZE); ++ if (g_ssam_info.mp[i] == NULL) { ++ SPDK_ERRLOG("ssam create mempool[%d] failed, mempool_size = %uMB.\n", i, mempool_size); ++ return -ENOMEM; ++ } ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_server_exit(void) ++{ ++ uint32_t core_num = ssam_get_core_num(); ++ uint32_t i; ++ ++ for (i = 0; i < core_num; i++) { ++ if (g_ssam_info.mp[i] != NULL) { ++ ssam_mempool_destroy(g_ssam_info.mp[i]); ++ g_ssam_info.mp[i] = NULL; ++ } ++ } ++ ++ memset(&g_ssam_info, 0x0, sizeof(struct spdk_ssam_info)); ++} ++ ++ ++static int ++ssam_check_device_status(void) ++{ ++ uint8_t ready = 0; ++ int times = 0; ++ int rc; ++ ++ do { ++ rc = ssam_check_device_ready(0, 0, &ready); ++ if (rc != 0) { ++ SPDK_ERRLOG("device check failed.\n"); ++ return rc; ++ } ++ ++ if (ready != 0) { ++ break; ++ } ++ ++ usleep(DEVICE_READY_WAIT_TIME); ++ times++; ++ } while (times < DEVICE_READY_TIMEOUT); ++ ++ if (ready == 0) { ++ SPDK_ERRLOG("device has not been ready after 1.5s.\n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++ ++static int ++ssam_init(void) ++{ ++ int rc; ++ ++ rc = ssam_check_device_status(); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ rc = ssam_config_init(); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ rc = ssam_server_init(); ++ if (rc != 0) { ++ ssam_config_exit(); ++ return rc; ++ } ++ ++ rc = spdk_ssam_fs_poller_init(); ++ if (rc != 0) { ++ ssam_server_exit(); ++ ssam_config_exit(); ++ return rc; ++ } ++ ++ rc = ssam_smdev_init(); ++ if (rc != 0) { ++ ssam_server_exit(); ++ ssam_config_exit(); ++ spdk_ssam_fs_poller_destroy(); ++ } ++ ++ return rc; ++} ++ ++void ++spdk_ssam_exit(void) ++{ ++ ssam_deinit_device_pcie_list(); ++ ssam_config_exit(); ++ ssam_server_exit(); ++} ++ ++SPDK_LOG_REGISTER_COMPONENT(ssam) +diff --git a/lib/ssam/ssam_blk.c b/lib/ssam/ssam_blk.c +new file mode 100644 +index 0000000..e52a54d +--- /dev/null ++++ b/lib/ssam/ssam_blk.c +@@ -0,0 +1,2130 @@ ++/*- ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. 
++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include ++#include ++ ++#include "spdk/env.h" ++#include "spdk/bdev.h" ++#include "spdk/bdev_module.h" ++#include "spdk/thread.h" ++#include "spdk/likely.h" ++#include "spdk/string.h" ++#include "spdk/util.h" ++ ++#include "ssam_internal.h" ++ ++#define SESSION_STOP_POLLER_PERIOD 1000 ++#define ENQUEUE_TIMES_PER_IO 1000 ++ ++#define IOV_HEADER_TAIL_NUM 2 ++ ++#define SECTOR_SIZE 512 ++#define ALIGNMENT_2M (2048 * 1024) ++#define SERIAL_STRING_LEN 128 ++#define SMSESSION_STOP_TIMEOUT 2 /* s */ ++ ++/* Related to (SPDK_SSAM_IOVS_MAX * SPDK_SSAM_MAX_SEG_SIZE) */ ++#define PAYLOAD_SIZE_MAX (2048U * 2048) ++ ++#define RETRY_TIMEOUT 120 ++ ++/* Minimal set of features supported by every virtio-blk device */ ++#define SPDK_SSAM_BLK_FEATURES_BASE (SPDK_SSAM_FEATURES | \ ++ (1ULL << VIRTIO_BLK_F_SIZE_MAX) | (1ULL << VIRTIO_BLK_F_SEG_MAX) | \ ++ (1ULL << VIRTIO_BLK_F_GEOMETRY) | (1ULL << VIRTIO_BLK_F_BLK_SIZE) | \ ++ (1ULL << VIRTIO_BLK_F_TOPOLOGY) | (1ULL << VIRTIO_BLK_F_BARRIER) | \ ++ (1ULL << VIRTIO_BLK_F_SCSI) | (1ULL << VIRTIO_BLK_F_CONFIG_WCE) | \ ++ (1ULL << VIRTIO_BLK_F_MQ)) ++ ++extern bool g_ssam_subsystem_exit; ++ ++struct ssam_task_stat { ++ uint64_t start_tsc; ++ uint64_t dma_start_tsc; ++ uint64_t dma_end_tsc; ++ uint64_t bdev_start_tsc; ++ uint64_t bdev_func_tsc; ++ uint64_t bdev_end_tsc; ++ uint64_t complete_start_tsc; ++ uint64_t complete_end_tsc; ++}; ++ ++struct spdk_ssam_blk_task { ++ /* Returned status of I/O processing, it can be VIRTIO_BLK_S_OK, ++ * VIRTIO_BLK_S_IOERR or VIRTIO_BLK_S_UNSUPP ++ */ ++ volatile uint8_t *status; ++ ++ /* Number of bytes processed successfully */ ++ uint32_t used_len; ++ ++ /* Records the amount of valid data in the struct iovec iovs array. */ ++ uint32_t iovcnt; ++ struct ssam_iovec iovs; ++ ++ /* If set, the task is currently used for I/O processing. 
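++	 * Set in ssam_blk_task_init() and cleared in ssam_blk_task_finish().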
*/ ++ bool used; ++ ++ /* For bdev io wait */ ++ struct spdk_bdev_io_wait_entry bdev_io_wait; ++ struct spdk_ssam_session_io_wait session_io_wait; ++ struct spdk_ssam_blk_session *bsmsession; ++ ++ /* Size of whole payload in bytes */ ++ uint32_t payload_size; ++ ++ /* ssam request data */ ++ struct ssam_request *io_req; ++ ++ uint16_t vq_idx; ++ uint16_t req_idx; ++ uint16_t task_idx; ++ struct ssam_task_stat task_stat; ++}; ++ ++struct ssam_blk_stat { ++ uint64_t count; ++ uint64_t start_count; ++ uint64_t total_tsc; /* pre_dma <- -> post_return */ ++ uint64_t dma_tsc; /* pre_dma <- -> post_dma */ ++ uint64_t dma_count; ++ uint64_t dma_complete_count; ++ uint64_t bdev_tsc; /* pre_bdev <- -> post_bdev */ ++ uint64_t bdev_submit_tsc; /* <- spdk_bdev_xxx -> */ ++ uint64_t bdev_count; ++ uint64_t bdev_complete_count; ++ uint64_t complete_tsc; /* pre_return <- -> post_return */ ++ uint64_t internel_tsc; /* total_tsc - dma_tsc - bdev_tsc - complete_tsc */ ++ ++ uint64_t complete_read_ios; /* Number of successfully completed read requests */ ++ uint64_t err_read_ios; /* Number of failed completed read requests */ ++ uint64_t complete_write_ios; /* Number of successfully completed write requests */ ++ uint64_t err_write_ios; /* Number of failed completed write requests */ ++ uint64_t flush_ios; /* Total number of flush requests */ ++ uint64_t complete_flush_ios; /* Number of successfully completed flush requests */ ++ uint64_t err_flush_ios; /* Number of failed completed flush requests */ ++ uint64_t other_ios; ++ uint64_t complete_other_ios; ++ uint64_t err_other_ios; ++ uint64_t fatal_ios; /* Number of discarded requests */ ++ uint64_t io_retry; ++}; ++ ++struct spdk_ssam_blk_session { ++ /* The parent session must be the very first field in this struct */ ++ struct spdk_ssam_session smsession; ++ struct spdk_poller *stop_poller; ++ struct spdk_bdev *bdev; ++ struct spdk_bdev_desc *bdev_desc; ++ struct spdk_io_channel *io_channel; ++ ++ /* volume id */ ++ char *serial; ++ ++ /* accumulated I/O statistics */ ++ struct spdk_bdev_io_stat stat; ++ ++ /* Current count of bdev operations for hot-restart. */ ++ int32_t bdev_count; ++ ++ /* poller for waiting bdev finish when hot-restart */ ++ struct spdk_poller *stop_bdev_poller; ++ ++ /* controller statistics. 
*/ ++ struct ssam_blk_stat blk_stat; ++ ++ /* if set, all writes to the device will fail with ++ * VIRTIO_BLK_S_IOERR error code ++ */ ++ bool readonly; ++ ++ /* if set, indicate the session not have a bdev, all writes to the device ++ * will fail with VIRTIO_BLK_S_IOERR error code ++ */ ++ bool no_bdev; ++}; ++ ++struct ssam_blk_session_ctx { ++ struct spdk_ssam_blk_session *bsmsession; ++ void **user_ctx; ++}; ++ ++static const struct spdk_ssam_session_backend g_ssam_blk_session_backend; ++static int ssam_blk_remove_session(struct spdk_ssam_session *smsession); ++static void ssam_blk_request_worker(struct spdk_ssam_session *smsession, void *arg); ++static void ssam_blk_destroy_bdev_device(struct spdk_ssam_session *smsession, void *args); ++static void ssam_blk_response_worker(struct spdk_ssam_session *smsession, void *arg); ++static void ssam_blk_no_data_request_worker(struct spdk_ssam_session *smsession); ++static inline void ssam_request_queue_io(struct spdk_ssam_blk_task *task); ++static void ssam_task_complete(struct spdk_ssam_blk_task *task, uint8_t status); ++static void ssam_data_request_para(struct ssam_dma_request *dma_req, ++ struct spdk_ssam_blk_task *task, uint32_t type, uint8_t status); ++static void ssam_blk_print_stuck_io_info(struct spdk_ssam_session *smsession); ++static int ssam_process_blk_request(struct spdk_ssam_blk_task *task); ++static void ssam_free_task_pool(struct spdk_ssam_blk_session *bsmsession); ++static int ssam_blk_io_complete(struct spdk_ssam_dev *smdev, struct ssam_request *io_req, ++ uint8_t status); ++static void ssam_session_io_resubmit(void *arg); ++ ++static inline struct spdk_ssam_blk_session * ++ssam_to_blk_session(struct spdk_ssam_session *smsession) ++{ ++ return (struct spdk_ssam_blk_session *)smsession; ++} ++ ++static void ++ssam_blk_dump_info_json(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "name", ssam_session_get_name(smsession)); ++ spdk_json_write_named_uint32(w, "function_id", (uint32_t)smsession->gfunc_id); ++ spdk_json_write_named_uint32(w, "queues", (uint32_t)smsession->max_queues); ++ ++ spdk_json_write_named_object_begin(w, "block"); ++ spdk_json_write_named_bool(w, "readonly", bsmsession->readonly); ++ spdk_json_write_name(w, "bdev"); ++ if (bsmsession->bdev != NULL) { ++ spdk_json_write_string(w, spdk_bdev_get_name(bsmsession->bdev)); ++ } else { ++ spdk_json_write_null(w); ++ } ++ spdk_json_write_object_end(w); ++ ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_dev_bdev_remove_cpl_cb(struct spdk_ssam_session *smsession, void **unnused) ++{ ++ /* All sessions have been notified, time to close the bdev */ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ ++ if (bsmsession == NULL) { ++ return; ++ } ++ ++ if (bsmsession->bdev_desc != NULL) { ++ spdk_bdev_close(bsmsession->bdev_desc); ++ bsmsession->bdev_desc = NULL; ++ } ++ ++ /* bdev not create by ssam blk, no need be freed here */ ++ bsmsession->bdev = NULL; ++} ++ ++static void ++ssam_blk_stop_cpl_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ spdk_ssam_session_rsp_fn rsp_fn = smsession->rsp_fn; ++ void *rsp_ctx = smsession->rsp_ctx; ++ int rc; ++ ++ ssam_dev_bdev_remove_cpl_cb(smsession, NULL); ++ rc = ssam_virtio_blk_resize(smsession->gfunc_id, 0); ++ if (rc != 0) 
{ ++ SPDK_WARNLOG("%s: virtio blk resize failed when remove session.\n", smsession->name); ++ } ++ ++ /* Can not umount function here, whenever the gfunc_id must be mounted to ++ * the dummy tid or to the specific tid ++ */ ++ ++ SPDK_NOTICELOG("BLK controller %s deleted\n", smsession->name); ++ ++ if (smsession->name != NULL) { ++ free(smsession->name); ++ smsession->name = NULL; ++ } ++ ++ ssam_set_session_be_freed(ctx); ++ memset(bsmsession, 0, sizeof(*bsmsession)); ++ free(bsmsession); ++ ++ if (rsp_fn != NULL) { ++ rsp_fn(rsp_ctx, 0); ++ rsp_fn = NULL; ++ } ++} ++ ++static void ++ssam_task_stat_tick(uint64_t *tsc) ++{ ++#ifdef PERF_STAT ++ *tsc = spdk_get_ticks(); ++#endif ++ return; ++} ++ ++static void ++ssam_blk_stat_statistics(struct spdk_ssam_blk_task *task, uint8_t status) ++{ ++#ifdef PERF_STAT ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ uint64_t dma_tsc = task->task_stat.dma_end_tsc - task->task_stat.dma_start_tsc; ++ uint64_t bdev_tsc = task->task_stat.bdev_end_tsc - task->task_stat.bdev_start_tsc; ++ uint64_t bdev_submit_tsc = task->task_stat.bdev_func_tsc - task->task_stat.bdev_start_tsc; ++ uint64_t complete_tsc = task->task_stat.complete_end_tsc - task->task_stat.complete_start_tsc; ++ uint64_t total_tsc = task->task_stat.complete_end_tsc - task->task_stat.start_tsc; ++ struct virtio_blk_outhdr *req = (struct virtio_blk_outhdr *)task->io_req->req.cmd.header; ++ ++ if (req->type == VIRTIO_BLK_T_IN) { /* read */ ++ bsmsession->stat.read_latency_ticks += total_tsc; ++ bsmsession->stat.bytes_read += task->payload_size; ++ bsmsession->stat.num_read_ops++; ++ if (status == VIRTIO_BLK_S_OK) { ++ bsmsession->blk_stat.complete_read_ios++; ++ } else { ++ bsmsession->blk_stat.err_read_ios++; ++ } ++ } else if (req->type == VIRTIO_BLK_T_OUT) { /* write */ ++ bsmsession->stat.write_latency_ticks += total_tsc; ++ bsmsession->stat.bytes_written += task->payload_size; ++ bsmsession->stat.num_write_ops++; ++ if (status == VIRTIO_BLK_S_OK) { ++ bsmsession->blk_stat.complete_write_ios++; ++ } else { ++ bsmsession->blk_stat.err_write_ios++; ++ } ++ } else if (req->type == VIRTIO_BLK_T_FLUSH) { /* flush */ ++ bsmsession->blk_stat.flush_ios++; ++ if (status == VIRTIO_BLK_S_OK) { ++ bsmsession->blk_stat.complete_flush_ios++; ++ } else { ++ bsmsession->blk_stat.err_flush_ios++; ++ } ++ } else { ++ bsmsession->blk_stat.other_ios++; ++ if (status == VIRTIO_BLK_S_OK) { ++ bsmsession->blk_stat.complete_other_ios++; ++ } else { ++ bsmsession->blk_stat.err_other_ios++; ++ } ++ } ++ ++ bsmsession->blk_stat.dma_tsc += dma_tsc; ++ bsmsession->blk_stat.bdev_tsc += bdev_tsc; ++ bsmsession->blk_stat.bdev_submit_tsc += bdev_submit_tsc; ++ bsmsession->blk_stat.complete_tsc += complete_tsc; ++ bsmsession->blk_stat.total_tsc += total_tsc; ++ bsmsession->blk_stat.internel_tsc += total_tsc - complete_tsc - bdev_tsc - dma_tsc; ++ bsmsession->blk_stat.count += 1; ++#endif ++} ++ ++static void ++ssam_blk_configs(uint8_t *config, struct virtio_blk_config *blkcfg, ++ uint32_t len, struct spdk_bdev *bdev) ++{ ++ uint32_t cfg_len; ++ ++ /* minimum I/O size in blocks */ ++ blkcfg->min_io_size = 1; ++ ++ if (bdev && spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_UNMAP)) { ++ /* 32768 sectors is 16MiB, expressed in 512 Bytes */ ++ blkcfg->max_discard_sectors = 32768; ++ blkcfg->max_discard_seg = 1; ++ /* expressed in 512 Bytes sectors */ ++ blkcfg->discard_sector_alignment = blkcfg->blk_size / SECTOR_SIZE; ++ } ++ if (bdev && spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES)) { 
++ /* 32768 sectors is 16MiB, expressed in 512 Bytes */ ++ blkcfg->max_write_zeroes_sectors = 32768; ++ blkcfg->max_write_zeroes_seg = 1; ++ } ++ ++ cfg_len = sizeof(struct virtio_blk_config); ++ memcpy(config, blkcfg, (unsigned long)spdk_min(len, cfg_len)); ++ if (len < cfg_len) { ++ SPDK_NOTICELOG("Out config len %u < total config len %u\n", len, cfg_len); ++ } ++ ++ return; ++} ++ ++static int ++ssam_blk_get_config(struct spdk_ssam_session *smsession, uint8_t *config, ++ uint32_t len, uint16_t queues) ++{ ++ struct virtio_blk_config blkcfg; ++ struct spdk_ssam_blk_session *bsmsession = NULL; ++ struct spdk_bdev *bdev = NULL; ++ uint32_t blk_size; ++ uint64_t blkcnt; ++ ++ memset(&blkcfg, 0, sizeof(blkcfg)); ++ bsmsession = ssam_to_blk_session(smsession); ++ if (bsmsession == NULL) { ++ SPDK_ERRLOG("session is null.\n"); ++ return -1; ++ } ++ bdev = bsmsession->bdev; ++ if (bdev == NULL) { ++ return -1; ++ } ++ blk_size = spdk_bdev_get_block_size(bdev); ++ blkcnt = spdk_bdev_get_num_blocks(bdev); ++ /* ssam will use this configuration, this is the max capability of ++ * the ssam, configurations will be obtained through negotiation ++ * in the future. ++ */ ++ blkcfg.size_max = SPDK_SSAM_MAX_SEG_SIZE; ++ blkcfg.seg_max = SPDK_SSAM_IOVS_MAX; ++ ++ if (blk_size == 0) { ++ SPDK_ERRLOG("bdev's blk_size %u error.\n", blk_size); ++ return -1; ++ } ++ if (blkcnt > (UINT64_MAX / blk_size)) { ++ SPDK_ERRLOG("bdev's blkcnt %lu or blk_size %u out of range.\n", ++ blkcnt, blk_size); ++ return -1; ++ } ++ blkcfg.blk_size = blk_size; ++ /* expressed in 512 Bytes sectors */ ++ blkcfg.capacity = (blkcnt * blk_size) / 512; ++ blkcfg.num_queues = 1; /* TODO: 1 change to queues after the VBS problem is fixed */ ++ ssam_blk_configs(config, &blkcfg, len, bdev); ++ ++ return 0; ++} ++ ++static void ++ssam_blk_write_config_json(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ ++ if (bsmsession == NULL || bsmsession->bdev == NULL) { ++ return; ++ } ++ ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "method", "create_blk_controller"); ++ ++ spdk_json_write_named_object_begin(w, "params"); ++ spdk_json_write_named_string(w, "dev_name", spdk_bdev_get_name(bsmsession->bdev)); ++ char *gfunc_id = spdk_sprintf_alloc("%u", bsmsession->smsession.gfunc_id); ++ if (gfunc_id == NULL) { ++ SPDK_ERRLOG("alloc for gfunc_id failed\n"); ++ } else { ++ spdk_json_write_named_string(w, "index", gfunc_id); ++ free(gfunc_id); ++ } ++ spdk_json_write_named_bool(w, "readonly", bsmsession->readonly); ++ if (bsmsession->serial != NULL) { ++ spdk_json_write_named_string(w, "serial", bsmsession->serial); ++ } ++ spdk_json_write_object_end(w); ++ ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_blk_show_iostat_json(struct spdk_ssam_session *smsession, uint32_t id, ++ struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ struct spdk_bdev *bdev = ssam_get_session_bdev(smsession); ++ struct spdk_bdev_io_stat stat = {0}; ++ struct ssam_blk_stat blk_stat; ++ uint64_t ticks_hz = spdk_get_ticks_hz(); ++ uint64_t poll_count = smsession->smdev->stat.poll_count; ++ ++ memcpy(&stat, &bsmsession->stat, sizeof(struct spdk_bdev_io_stat)); /* a little question, mutex */ ++ memcpy(&blk_stat, &bsmsession->blk_stat, sizeof(struct ssam_blk_stat)); ++ spdk_json_write_object_begin(w); ++ ++ spdk_json_write_named_uint32(w, "function_id", smsession->gfunc_id); 
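++	/* poll_tsc/poll_count are accumulated in ssam_dev_response_worker(), so poll_lat
++	 * below is the average poll interval in seconds (poll_tsc / poll_count / ticks_hz);
++	 * poll_count is clamped to 1 to avoid dividing by zero.
++	 */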
++ if (smsession->smdev->stat.poll_count == 0) { ++ poll_count = 1; ++ } ++ spdk_json_write_named_string_fmt(w, "poll_lat", "%.9f", ++ (float)smsession->smdev->stat.poll_tsc / poll_count / ticks_hz); ++ spdk_json_write_named_string(w, "bdev_name", (bdev == NULL) ? "" : spdk_bdev_get_name(bdev)); ++ spdk_json_write_named_uint64(w, "bytes_read", stat.bytes_read); ++ spdk_json_write_named_uint64(w, "num_read_ops", stat.num_read_ops); ++ spdk_json_write_named_uint64(w, "bytes_written", stat.bytes_written); ++ spdk_json_write_named_uint64(w, "num_write_ops", stat.num_write_ops); ++ spdk_json_write_named_uint64(w, "read_latency_ticks", stat.read_latency_ticks); ++ spdk_json_write_named_uint64(w, "write_latency_ticks", stat.write_latency_ticks); ++ spdk_json_write_named_uint64(w, "complete_read_ios", blk_stat.complete_read_ios); ++ spdk_json_write_named_uint64(w, "err_read_ios", blk_stat.err_read_ios); ++ spdk_json_write_named_uint64(w, "complete_write_ios", blk_stat.complete_write_ios); ++ spdk_json_write_named_uint64(w, "err_write_ios", blk_stat.err_write_ios); ++ spdk_json_write_named_uint64(w, "flush_ios", blk_stat.flush_ios); ++ spdk_json_write_named_uint64(w, "complete_flush_ios", blk_stat.complete_flush_ios); ++ spdk_json_write_named_uint64(w, "err_flush_ios", blk_stat.err_flush_ios); ++ spdk_json_write_named_uint64(w, "other_ios", blk_stat.other_ios); ++ spdk_json_write_named_uint64(w, "complete_other_ios", blk_stat.complete_other_ios); ++ spdk_json_write_named_uint64(w, "err_other_ios", blk_stat.err_other_ios); ++ ++ spdk_json_write_named_uint64(w, "fatal_ios", blk_stat.fatal_ios); ++ spdk_json_write_named_uint64(w, "io_retry", blk_stat.io_retry); ++ spdk_json_write_named_object_begin(w, "counters"); ++ spdk_json_write_named_uint64(w, "start_count", blk_stat.start_count); ++ spdk_json_write_named_uint64(w, "dma_count", blk_stat.dma_count); ++ spdk_json_write_named_uint64(w, "dma_complete_count", blk_stat.dma_complete_count); ++ spdk_json_write_named_uint64(w, "bdev_count", blk_stat.bdev_count); ++ spdk_json_write_named_uint64(w, "bdev_complete_count", blk_stat.bdev_complete_count); ++ spdk_json_write_object_end(w); ++ spdk_json_write_named_object_begin(w, "details"); ++ spdk_json_write_named_uint64(w, "count", blk_stat.count); ++ if (blk_stat.count == 0) { ++ blk_stat.count = 1; ++ } ++ spdk_json_write_named_string_fmt(w, "total_lat", "%.9f", ++ (float)blk_stat.total_tsc / blk_stat.count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "dma_lat", "%.9f", ++ (float)blk_stat.dma_tsc / blk_stat.count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "bdev_lat", "%.9f", ++ (float)blk_stat.bdev_tsc / blk_stat.count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "bdev_submit_lat", "%.9f", ++ (float)blk_stat.bdev_submit_tsc / blk_stat.count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "complete_lat", "%.9f", ++ (float)blk_stat.complete_tsc / blk_stat.count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "internal_lat", "%.9f", ++ (float)blk_stat.internel_tsc / blk_stat.count / ticks_hz); ++ spdk_json_write_object_end(w); ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_blk_clear_iostat_json(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ memset(&bsmsession->stat, 0, sizeof(struct spdk_bdev_io_stat) - sizeof( ++ uint64_t)); /* exclude ticks_rate */ ++ memset(&bsmsession->blk_stat, 0, sizeof(struct ssam_blk_stat)); ++} ++ ++static struct spdk_bdev *ssam_blk_get_bdev(struct spdk_ssam_session 
*smsession, uint32_t id) ++{ ++ struct spdk_bdev *bdev = ssam_get_session_bdev(smsession); ++ ++ return bdev; ++} ++ ++static const struct spdk_ssam_session_backend g_ssam_blk_session_backend = { ++ .type = VIRTIO_TYPE_BLK, ++ .remove_session = ssam_blk_remove_session, ++ .request_worker = ssam_blk_request_worker, ++ .destroy_bdev_device = ssam_blk_destroy_bdev_device, ++ .response_worker = ssam_blk_response_worker, ++ .no_data_req_worker = ssam_blk_no_data_request_worker, ++ .ssam_get_config = ssam_blk_get_config, ++ .print_stuck_io_info = ssam_blk_print_stuck_io_info, ++ .dump_info_json = ssam_blk_dump_info_json, ++ .write_config_json = ssam_blk_write_config_json, ++ .show_iostat_json = ssam_blk_show_iostat_json, ++ .clear_iostat_json = ssam_blk_clear_iostat_json, ++ .get_bdev = ssam_blk_get_bdev, ++ .remove_self = NULL, ++}; ++ ++/* Clean Smsession */ ++static int ++ssam_destroy_poller_cb(void *arg) ++{ ++ struct spdk_ssam_blk_session *bsmsession = (struct spdk_ssam_blk_session *)arg; ++ struct spdk_ssam_session *smsession = &bsmsession->smsession; ++ struct spdk_ssam_dev *smdev = smsession->smdev; ++ ++ SPDK_NOTICELOG("%s: remaining %u tasks\n", smsession->name, smsession->task_cnt); ++ ++ /* stop poller */ ++ spdk_poller_unregister(&bsmsession->stop_bdev_poller); ++ ++ /* remove session */ ++ ssam_sessions_remove(smdev->smsessions, smsession); ++ smdev->active_session_num--; ++ smsession->smdev = NULL; ++ ++ /* put ioChannle */ ++ if (bsmsession->io_channel != NULL) { ++ spdk_put_io_channel(bsmsession->io_channel); ++ bsmsession->io_channel = NULL; ++ } ++ ++ /* close bdev device, last step, async */ ++ ssam_send_dev_destroy_msg(smsession, NULL); ++ ++ /* free smsession not here, but after close bdev device; */ ++ /* see ssam_blk_destroy_bdev_device() */ ++ ++ return SPDK_POLLER_BUSY; ++} ++ ++static int ++ssam_session_bdev_remove_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ int rc = 0; ++ ++ /* smsession already removed */ ++ if (!smsession->started) { ++ return 0; ++ } else { ++ smsession->started = false; ++ } ++ ++ bsmsession->stop_bdev_poller = SPDK_POLLER_REGISTER(ssam_destroy_poller_cb, ++ bsmsession, 0); ++ ++ rc = ssam_virtio_blk_resize(smsession->gfunc_id, 0); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: virtio blk resize failed when remove session.\n", smsession->name); ++ } ++ ++ ssam_set_session_be_freed(ctx); ++ ssam_send_event_async_done(ctx); ++ ++ return 0; ++} ++ ++static void ++ssam_bdev_remove_cb(void *remove_ctx) ++{ ++ struct spdk_ssam_session *smsession = remove_ctx; ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = false, ++ .need_rsp = true, ++ }; ++ ++ SPDK_WARNLOG("%s: hot-removing bdev - all further requests will be stucked.\n", ++ smsession->name); ++ ++ ssam_send_event_to_session(smsession, ssam_session_bdev_remove_cb, ++ NULL, send_event_flag, NULL); ++} ++ ++static void ++ssam_session_bdev_resize_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ int rc; ++ ++ rc = ssam_virtio_blk_resize(smsession->gfunc_id, bsmsession->bdev->blockcnt); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: virtio blk resize failed.\n", smsession->name); ++ } ++} ++ ++static void ++ssam_blk_resize_cb(void *resize_ctx) ++{ ++ struct spdk_ssam_session *smsession = resize_ctx; ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = false, ++ .need_rsp = true, ++ }; ++ ++ 
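++	/* need_rsp = true routes the result through smsession->rsp_fn; need_async = false
++	 * means ssam_send_event_finish() will not wait for an asynchronous done flag
++	 * (see ssam_send_event_to_session()).
++	 */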
ssam_send_event_to_session(smsession, NULL, ssam_session_bdev_resize_cb, send_event_flag, NULL); ++} ++ ++static void ++ssam_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, ++ void *event_ctx) ++{ ++ SPDK_DEBUGLOG(ssam_blk, "Bdev event: type %d, name %s\n", ++ type, bdev->name); ++ ++ switch (type) { ++ case SPDK_BDEV_EVENT_REMOVE: ++ SPDK_NOTICELOG("bdev name (%s) received event(SPDK_BDEV_EVENT_REMOVE)\n", ++ bdev->name); ++ ssam_bdev_remove_cb(event_ctx); ++ break; ++ case SPDK_BDEV_EVENT_RESIZE: ++ SPDK_NOTICELOG("bdev name (%s) received event(SPDK_BDEV_EVENT_RESIZE)\n", ++ bdev->name); ++ ssam_blk_resize_cb(event_ctx); ++ break; ++ default: ++ SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type); ++ break; ++ } ++} ++ ++static void ++ssam_free_task_pool(struct spdk_ssam_blk_session *bsmsession) ++{ ++ struct spdk_ssam_session *smsession = &bsmsession->smsession; ++ struct spdk_ssam_virtqueue *vq = NULL; ++ uint16_t max_queues = smsession->max_queues; ++ uint16_t i; ++ ++ if (max_queues > SPDK_SSAM_MAX_VQUEUES) { ++ return; ++ } ++ ++ for (i = 0; i < max_queues; i++) { ++ vq = &smsession->virtqueue[i]; ++ if (vq->tasks != NULL) { ++ spdk_free(vq->tasks); ++ vq->tasks = NULL; ++ } ++ ++ if (vq->index != NULL) { ++ spdk_free(vq->index); ++ vq->index = NULL; ++ } ++ } ++} ++ ++static int ++ssam_alloc_task_pool(struct spdk_ssam_blk_session *bsmsession) ++{ ++ struct spdk_ssam_session *smsession = &bsmsession->smsession; ++ struct spdk_ssam_virtqueue *vq = NULL; ++ struct spdk_ssam_blk_task *task = NULL; ++ uint16_t max_queues = smsession->max_queues; ++ uint32_t task_cnt = smsession->queue_size; ++ uint16_t i; ++ uint32_t j; ++ ++ if ((max_queues > SPDK_SSAM_MAX_VQUEUES) || (max_queues == 0)) { ++ SPDK_ERRLOG("%s: max_queues %u invalid\n", smsession->name, max_queues); ++ return -EINVAL; ++ } ++ ++ if ((task_cnt == 0) || (task_cnt > SPDK_SSAM_MAX_VQ_SIZE)) { ++ SPDK_ERRLOG("%s: virtuque size %u invalid\n", smsession->name, task_cnt); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < max_queues; i++) { ++ vq = &smsession->virtqueue[i]; ++ vq->smsession = smsession; ++ vq->num = task_cnt; ++ vq->use_num = 0; ++ vq->index_l = 0; ++ vq->index_r = 0; ++ vq->tasks = spdk_zmalloc(sizeof(struct spdk_ssam_blk_task) * task_cnt, ++ SPDK_CACHE_LINE_SIZE, NULL, ++ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); ++ vq->index = spdk_zmalloc(sizeof(uint32_t) * task_cnt, ++ SPDK_CACHE_LINE_SIZE, NULL, ++ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); ++ if (vq->tasks == NULL || vq->index == NULL) { ++ SPDK_ERRLOG("%s: failed to allocate %"PRIu32" tasks for virtqueue %"PRIu16"\n", ++ smsession->name, task_cnt, i); ++ ssam_free_task_pool(bsmsession); ++ return -ENOMEM; ++ } ++ for (j = 0; j < task_cnt; j++) { ++ task = &((struct spdk_ssam_blk_task *)vq->tasks)[j]; ++ task->bsmsession = bsmsession; ++ task->task_idx = j; ++ vq->index[j] = j; ++ } ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_blk_print_stuck_io_info(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_blk_task *tasks; ++ struct spdk_ssam_blk_task *task; ++ int i, j; ++ ++ for (i = 0; i < smsession->max_queues; i++) { ++ for (j = 0; j < smsession->queue_size; j++) { ++ tasks = (struct spdk_ssam_blk_task *)smsession->virtqueue[i].tasks; ++ task = &tasks[j]; ++ if (task == NULL) { ++ continue; ++ } ++ if (task->used) { ++ SPDK_INFOLOG(ssam_blk, "%s: stuck io payload_size %u, vq_idx %u, req_idx %u\n", ++ smsession->name, task->payload_size, task->vq_idx, task->req_idx); ++ } ++ } ++ } ++} ++ ++static uint16_t 
++get_req_idx(struct spdk_ssam_blk_task *task) ++{ ++ return task->io_req->req.cmd.virtio.req_idx; ++} ++ ++static void ++ssam_blk_task_init(struct spdk_ssam_blk_task *task) ++{ ++ task->used = true; ++ task->iovcnt = 0; ++ task->io_req = NULL; ++ task->payload_size = 0; ++ memset(&task->task_stat, 0, sizeof(task->task_stat)); ++ ssam_task_stat_tick(&task->task_stat.start_tsc); ++} ++ ++static void ++ssam_blk_task_finish(struct spdk_ssam_blk_task *task) ++{ ++ struct spdk_ssam_session *smsession = &task->bsmsession->smsession; ++ struct spdk_ssam_virtqueue *vq = &smsession->virtqueue[task->vq_idx]; ++ ++ if (smsession->task_cnt == 0) { ++ SPDK_ERRLOG("smsession %s: task internel error\n", smsession->name); ++ return; ++ } ++ ++ task->io_req = NULL; ++ task->payload_size = 0; ++ ++ if (task->iovs.virt.sges[0].iov_base != NULL) { ++ ssam_mempool_free(smsession->mp, task->iovs.virt.sges[0].iov_base); ++ task->iovs.virt.sges[0].iov_base = NULL; ++ } ++ ++ memset(&task->iovs, 0, sizeof(task->iovs)); ++ ++ task->iovcnt = 0; ++ smsession->task_cnt--; ++ task->used = false; ++ vq->index[vq->index_l] = task->task_idx; ++ vq->index_l = (vq->index_l + 1) & 0xFF; ++ vq->use_num--; ++} ++ ++static int ++ssam_blk_io_complete(struct spdk_ssam_dev *smdev, struct ssam_request *io_req, uint8_t status) ++{ ++ struct ssam_io_response io_resp; ++ struct ssam_virtio_res *virtio_res = (struct ssam_virtio_res *)&io_resp.data; ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ struct iovec io_vec; ++ uint8_t res_status = status; ++ int rc; ++ ++ if (status != VIRTIO_BLK_S_OK) { ++ SPDK_ERRLOG("ssam io complete return error tid=%u gfunc_id:%u.\n", smdev->tid, io_req->gfunc_id); ++ } ++ ++ memset(&io_resp, 0, sizeof(io_resp)); ++ io_resp.gfunc_id = io_req->gfunc_id; ++ io_resp.iocb_id = io_req->iocb_id; ++ io_resp.status = io_req->status; ++ io_resp.req = io_req; ++ io_resp.flr_seq = io_req->flr_seq; ++ ++ virtio_res->iovs = &io_vec; ++ virtio_res->iovs->iov_base = io_cmd->iovs[io_cmd->iovcnt - 1].iov_base; ++ virtio_res->iovs->iov_len = io_cmd->iovs[io_cmd->iovcnt - 1].iov_len; ++ virtio_res->iovcnt = 1; ++ virtio_res->rsp = &res_status; ++ virtio_res->rsp_len = sizeof(res_status); ++ ++ rc = ssam_io_complete(smdev->tid, &io_resp); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ ssam_dev_io_dec(smdev); ++ return 0; ++} ++ ++struct ssam_task_complete_arg { ++ struct spdk_ssam_blk_task *task; ++ uint8_t status; ++}; ++ ++static void ++ssam_task_complete_cb(void *arg) ++{ ++ struct ssam_task_complete_arg *cb_arg = (struct ssam_task_complete_arg *)arg; ++ struct spdk_ssam_session *smsession = &cb_arg->task->bsmsession->smsession; ++ struct spdk_ssam_blk_task *task = cb_arg->task; ++ int rc = ssam_blk_io_complete(smsession->smdev, task->io_req, cb_arg->status); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_task_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ ssam_task_stat_tick(&task->task_stat.complete_end_tsc); ++ ssam_blk_stat_statistics(task, cb_arg->status); ++ ssam_blk_task_finish(task); ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ++ssam_task_complete(struct spdk_ssam_blk_task *task, uint8_t status) ++{ ++ struct spdk_ssam_session *smsession = &task->bsmsession->smsession; ++ if (status != VIRTIO_BLK_S_OK) 
{ ++ SPDK_ERRLOG("ssam task return error tid=%u gfunc_id:%u.\n", ++ smsession->smdev->tid, task->io_req->gfunc_id); ++ } ++ SPDK_INFOLOG(ssam_blk_data, "handled io tid=%u gfunc_id=%u rw=%u vqid=%u reqid=%u status=%u.\n", ++ smsession->smdev->tid, smsession->gfunc_id, task->io_req->req.cmd.writable, ++ task->io_req->req.cmd.virtio.vq_idx, task->io_req->req.cmd.virtio.req_idx, status); ++ ssam_task_stat_tick(&task->task_stat.complete_start_tsc); ++ int rc = ssam_blk_io_complete(smsession->smdev, task->io_req, status); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_task_complete_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_task_complete_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->status = status; ++ cb_arg->task = task; ++ io_wait_r->cb_fn = ssam_task_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ ssam_task_stat_tick(&task->task_stat.complete_end_tsc); ++ ssam_blk_stat_statistics(task, status); ++ ssam_blk_task_finish(task); ++} ++ ++struct ssam_blk_dma_data_request_arg { ++ struct spdk_ssam_dev *smdev; ++ struct spdk_ssam_blk_task *task; ++ struct ssam_dma_request dma_req; ++}; ++ ++static void ++ssam_blk_dma_data_request_cb(void *arg) ++{ ++ struct ssam_blk_dma_data_request_arg *cb_arg = (struct ssam_blk_dma_data_request_arg *)arg; ++ int ret = ssam_dma_data_request(cb_arg->smdev->tid, &cb_arg->dma_req); ++ if (ret == -ENOMEM || ret == -EIO) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_blk_dma_data_request_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(cb_arg->smdev, io_wait_r); ++ return; ++ } ++ if (ret < 0) { ++ SPDK_ERRLOG("%s: ssam dma data request failed:%s\n", ++ cb_arg->task->bsmsession->smsession.name, spdk_strerror(-ret)); ++ ssam_task_complete(cb_arg->task, VIRTIO_BLK_S_IOERR); ++ } ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ++ssam_res_dma_process(struct spdk_ssam_session *smsession, ++ struct spdk_ssam_blk_task *task, uint32_t type, uint8_t status) ++{ ++ struct ssam_dma_request dma_req = {0}; ++ uint16_t tid = smsession->smdev->tid; ++ int ret; ++ ++ ssam_data_request_para(&dma_req, task, type, status); ++ ssam_task_stat_tick(&task->task_stat.dma_start_tsc); ++ task->bsmsession->blk_stat.dma_count++; ++ ret = ssam_dma_data_request(tid, &dma_req); ++ if (ret == -ENOMEM || ret == -EIO) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_blk_dma_data_request_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_blk_dma_data_request_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smsession->smdev; ++ cb_arg->dma_req = dma_req; ++ cb_arg->task = task; ++ io_wait_r->cb_fn = ssam_blk_dma_data_request_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ ++ if (ret < 0) { ++ SPDK_ERRLOG("%s: ssam dma data request failed:%s\n", smsession->name, spdk_strerror(-ret)); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ } ++} ++ ++static void 
++ssam_blk_request_finish(bool success, struct spdk_ssam_blk_task *task) ++{ ++ uint8_t res_status = success ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR; ++ const struct virtio_blk_outhdr *req = NULL; ++ struct spdk_ssam_session *smsession = &task->bsmsession->smsession; ++ if (res_status != VIRTIO_BLK_S_OK) { ++ SPDK_ERRLOG("request finish return error gfunc_id=%u.\n", smsession->gfunc_id); ++ } ++ ++ req = (struct virtio_blk_outhdr *)task->io_req->req.cmd.header; ++ switch (req->type) { ++ case VIRTIO_BLK_T_IN: ++ case VIRTIO_BLK_T_GET_ID: ++ ssam_res_dma_process(smsession, task, SSAM_REQUEST_DATA_STORE, res_status); ++ break; ++ ++ case VIRTIO_BLK_T_OUT: ++ case VIRTIO_BLK_T_DISCARD: ++ case VIRTIO_BLK_T_WRITE_ZEROES: ++ case VIRTIO_BLK_T_FLUSH: ++ ssam_task_complete(task, res_status); ++ break; ++ ++ default: ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ SPDK_ERRLOG("Not supported request type '%"PRIu32"'.\n", req->type); ++ break; ++ } ++} ++ ++static void ++ssam_blk_req_complete_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) ++{ ++ struct spdk_ssam_blk_task *task = cb_arg; ++ ++ if (spdk_unlikely(spdk_get_shutdown_sig_received())) { ++ /* ++ * In the hot restart process, when this callback is triggered, ++ * the task and bdev_io memory may have been released. ++ * Therefore, task and bdev_io are not released in this scenario. ++ */ ++ return; ++ } ++ ++ /* Second part start of read and write */ ++ SPDK_INFOLOG(ssam_blk_data, ++ "backend io finish tid=%u gfunc_id=%u rw=%u vqid=%u reqid=%u success=%d.\n", ++ task->bsmsession->smsession.smdev->tid, task->bsmsession->smsession.gfunc_id, ++ task->io_req->req.cmd.writable, task->io_req->req.cmd.virtio.vq_idx, ++ task->io_req->req.cmd.virtio.req_idx, ++ success); ++ task->bsmsession->bdev_count--; ++ task->bsmsession->blk_stat.bdev_complete_count++; ++ ssam_task_stat_tick(&task->task_stat.bdev_end_tsc); ++ ++ spdk_bdev_free_io(bdev_io); ++ ssam_blk_request_finish(success, task); ++} ++ ++static int ++ssam_request_rc_process(int rc, struct spdk_ssam_blk_task *task) ++{ ++ if (rc == 0) { ++ return rc; ++ } ++ ++ if (rc == -ENOMEM) { ++ SPDK_WARNLOG("No memory, start to queue io.\n"); ++ ssam_request_queue_io(task); ++ } else { ++ SPDK_ERRLOG("IO error, gfunc_id=%u.\n", task->bsmsession->smsession.gfunc_id); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ return rc; ++} ++ ++static bool ++ssam_is_req_sector_err(uint64_t sector) ++{ ++ if (sector > (UINT64_MAX / SECTOR_SIZE)) { ++ SPDK_ERRLOG("req sector out of range, need less or equal than %lu, actually %lu\n", ++ (UINT64_MAX / SECTOR_SIZE), sector); ++ return true; ++ } ++ ++ return false; ++} ++ ++static int ++ssam_virtio_read_write_process(struct spdk_ssam_blk_task *task, ++ const struct virtio_blk_outhdr *req) ++{ ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ struct ssam_io_message *io_cmd = NULL; ++ uint32_t payload_size = task->payload_size; ++ int rc; ++ ++ io_cmd = &task->io_req->req.cmd; ++ ++ if (ssam_is_req_sector_err(req->sector)) { ++ SPDK_ERRLOG("rw check sector error, gfunc_id=%u.\n", bsmsession->smsession.gfunc_id); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ if (spdk_unlikely(payload_size == 0 || (payload_size & (SECTOR_SIZE - 1)) != 0)) { ++ SPDK_ERRLOG("%s - passed IO buffer is not multiple of 512 Bytes (req_idx = %"PRIu16"), " ++ "payload_size = %u, iovcnt = %u.\n", req->type ? 
"WRITE" : "READ", ++ get_req_idx(task), payload_size, io_cmd->iovcnt); ++ ssam_task_complete(task, VIRTIO_BLK_S_UNSUPP); ++ return -1; ++ } ++ ++ if (req->type == VIRTIO_BLK_T_IN) { ++ bsmsession->bdev_count++; ++ ssam_task_stat_tick(&task->task_stat.bdev_start_tsc); ++ rc = spdk_bdev_readv(bsmsession->bdev_desc, bsmsession->io_channel, ++ task->iovs.virt.sges, task->iovcnt, req->sector * SECTOR_SIZE, ++ payload_size, ssam_blk_req_complete_cb, task); ++ ssam_task_stat_tick(&task->task_stat.bdev_func_tsc); ++ } else if (!bsmsession->readonly) { ++ bsmsession->bdev_count++; ++ ssam_task_stat_tick(&task->task_stat.bdev_start_tsc); ++ rc = spdk_bdev_writev(bsmsession->bdev_desc, bsmsession->io_channel, ++ task->iovs.virt.sges, task->iovcnt, req->sector * SECTOR_SIZE, ++ payload_size, ssam_blk_req_complete_cb, task); ++ ssam_task_stat_tick(&task->task_stat.bdev_func_tsc); ++ } else { ++ SPDK_DEBUGLOG(ssam_blk, "Device is in read-only mode!\n"); ++ rc = -1; ++ } ++ ++ return ssam_request_rc_process(rc, task); ++} ++ ++static int ++ssam_virtio_discard_process(struct spdk_ssam_blk_task *task) ++{ ++ int rc; ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ struct virtio_blk_discard_write_zeroes *desc = task->iovs.virt.sges[0].iov_base; ++ ++ if (ssam_is_req_sector_err(desc->sector)) { ++ SPDK_ERRLOG("discard check sector error, gfunc_id=%u.\n", bsmsession->smsession.gfunc_id); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ if (task->payload_size != sizeof(*desc)) { ++ SPDK_ERRLOG("Invalid discard payload size: %u\n", task->payload_size); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ if (desc->flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) { ++ SPDK_ERRLOG("UNMAP flag is only used for WRITE ZEROES command\n"); ++ ssam_task_complete(task, VIRTIO_BLK_S_UNSUPP); ++ return -1; ++ } ++ bsmsession->bdev_count++; ++ rc = spdk_bdev_unmap(bsmsession->bdev_desc, bsmsession->io_channel, ++ desc->sector * SECTOR_SIZE, desc->num_sectors * SECTOR_SIZE, ++ ssam_blk_req_complete_cb, task); ++ ++ return ssam_request_rc_process(rc, task); ++} ++ ++static int ++ssam_virtio_write_zeroes_process(struct spdk_ssam_blk_task *task) ++{ ++ int rc; ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ struct virtio_blk_discard_write_zeroes *desc = task->iovs.virt.sges[0].iov_base; ++ ++ if (ssam_is_req_sector_err(desc->sector)) { ++ SPDK_ERRLOG("write zeros check sector error, gfunc_id=%u.\n", bsmsession->smsession.gfunc_id); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ if (task->payload_size != sizeof(*desc)) { ++ SPDK_NOTICELOG("Invalid write zeroes payload size: %u\n", task->payload_size); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ if (desc->flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) { ++ SPDK_WARNLOG("Ignore the unmap flag for WRITE ZEROES from %"PRIx64", len %"PRIx64"\n", ++ (uint64_t)desc->sector * SECTOR_SIZE, (uint64_t)desc->num_sectors * SECTOR_SIZE); ++ } ++ bsmsession->bdev_count++; ++ rc = spdk_bdev_write_zeroes(bsmsession->bdev_desc, bsmsession->io_channel, ++ desc->sector * SECTOR_SIZE, desc->num_sectors * SECTOR_SIZE, ssam_blk_req_complete_cb, task); ++ ++ return ssam_request_rc_process(rc, task); ++} ++ ++static int ++ssam_virtio_flush_process(struct spdk_ssam_blk_task *task, ++ const struct virtio_blk_outhdr *req) ++{ ++ int rc; ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ uint64_t blockcnt = spdk_bdev_get_num_blocks(bsmsession->bdev); ++ 
uint32_t blocklen = spdk_bdev_get_block_size(bsmsession->bdev); ++ uint64_t flush_bytes; ++ ++ if (blocklen == 0) { ++ SPDK_ERRLOG("bdev's blocklen %u error.\n", blocklen); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ if (req->sector != 0) { ++ SPDK_ERRLOG("sector must be zero for flush command\n"); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ if (blockcnt > (UINT64_MAX / blocklen)) { ++ SPDK_ERRLOG("bdev's blockcnt %lu or blocklen %u out of range.\n", ++ blockcnt, blocklen); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ flush_bytes = blockcnt * blocklen; ++ bsmsession->bdev_count++; ++ rc = spdk_bdev_flush(bsmsession->bdev_desc, bsmsession->io_channel, ++ 0, flush_bytes, ssam_blk_req_complete_cb, task); ++ ++ return ssam_request_rc_process(rc, task); ++} ++ ++static int ++ssam_virtio_get_id_process(struct spdk_ssam_blk_task *task) ++{ ++ uint32_t used_length; ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ ++ if (task->iovcnt == 0 || task->payload_size == 0) { ++ SPDK_ERRLOG("check task param error, gfunc_id=%u.\n", bsmsession->smsession.gfunc_id); ++ ssam_task_complete(task, VIRTIO_BLK_S_UNSUPP); ++ return -1; ++ } ++ ++ used_length = spdk_min((size_t)VIRTIO_BLK_ID_BYTES, task->iovs.virt.sges[0].iov_len); ++ if (bsmsession->serial == NULL) { ++ spdk_strcpy_pad(task->iovs.virt.sges[0].iov_base, spdk_bdev_get_product_name(bsmsession->bdev), ++ used_length, ' '); ++ } else { ++ spdk_strcpy_pad(task->iovs.virt.sges[0].iov_base, bsmsession->serial, ++ used_length, ' '); ++ } ++ bsmsession->blk_stat.bdev_complete_count++; ++ ssam_blk_request_finish(true, task); ++ ++ return 0; ++} ++ ++static int ++ssam_io_process(struct spdk_ssam_blk_task *task, const struct virtio_blk_outhdr *req) ++{ ++ int rc; ++ SPDK_INFOLOG(ssam_blk_data, ++ "backend io start tid=%u gfunc_id=%u reqtype=%d rw=%u vqid=%u reqid=%u offset=%llu length=%u.\n", ++ task->bsmsession->smsession.smdev->tid, task->bsmsession->smsession.gfunc_id, req->type, ++ task->io_req->req.cmd.writable, task->io_req->req.cmd.virtio.vq_idx, ++ task->io_req->req.cmd.virtio.req_idx, ++ req->sector * SECTOR_SIZE, task->payload_size); ++ task->bsmsession->blk_stat.bdev_count++; ++ switch (req->type) { ++ case VIRTIO_BLK_T_IN: ++ case VIRTIO_BLK_T_OUT: ++ rc = ssam_virtio_read_write_process(task, req); ++ break; ++ case VIRTIO_BLK_T_DISCARD: ++ rc = ssam_virtio_discard_process(task); ++ break; ++ case VIRTIO_BLK_T_WRITE_ZEROES: ++ rc = ssam_virtio_write_zeroes_process(task); ++ break; ++ case VIRTIO_BLK_T_FLUSH: ++ rc = ssam_virtio_flush_process(task, req); ++ break; ++ case VIRTIO_BLK_T_GET_ID: ++ rc = ssam_virtio_get_id_process(task); ++ break; ++ default: ++ SPDK_ERRLOG("Not supported request type '%"PRIu32"'.\n", req->type); ++ ssam_task_complete(task, VIRTIO_BLK_S_UNSUPP); ++ return -1; ++ } ++ ++ return rc; ++} ++ ++static int ++ssam_process_blk_request(struct spdk_ssam_blk_task *task) ++{ ++ int ret; ++ struct iovec *iov = NULL; ++ const struct virtio_blk_outhdr *req = NULL; ++ struct ssam_io_message *io_cmd = NULL; ++ ++ io_cmd = &task->io_req->req.cmd; ++ /* get req header */ ++ if (spdk_unlikely(io_cmd->iovs[0].iov_len != sizeof(*req))) { ++ SPDK_ERRLOG("First descriptor size is %zu but expected %zu (req_idx = %"PRIu16").\n", ++ io_cmd->iovs[0].iov_len, sizeof(*req), get_req_idx(task)); ++ ssam_task_complete(task, VIRTIO_BLK_S_UNSUPP); ++ return -1; ++ } ++ ++ req = (struct virtio_blk_outhdr *)io_cmd->header; ++ /* get req tail */ ++ iov = 
&io_cmd->iovs[io_cmd->iovcnt - 1]; ++ if (spdk_unlikely(iov->iov_len != 1)) { ++ SPDK_ERRLOG("Last descriptor size is %zu but expected %d (req_idx = %"PRIu16").\n", ++ iov->iov_len, 1, get_req_idx(task)); ++ ssam_task_complete(task, VIRTIO_BLK_S_UNSUPP); ++ return -1; ++ } ++ ++ ret = ssam_io_process(task, req); ++ if (ret < 0) { ++ SPDK_ERRLOG("ssam io process failed(%d)\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_get_payload_size(struct ssam_request *io_req, uint32_t *payload_size) ++{ ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ uint32_t payload = 0; ++ uint32_t i; ++ ++ for (i = 1; i < io_cmd->iovcnt - 1; i++) { ++ if (spdk_unlikely((UINT32_MAX - io_cmd->iovs[i].iov_len) < payload)) { ++ SPDK_ERRLOG("payload size overflow\n"); ++ return -1; ++ } ++ payload += io_cmd->iovs[i].iov_len; ++ } ++ ++ if (spdk_unlikely(payload > PAYLOAD_SIZE_MAX)) { ++ SPDK_ERRLOG("payload size larger than %u, payload_size = %u\n", ++ PAYLOAD_SIZE_MAX, payload); ++ return -1; ++ } ++ ++ *payload_size = payload; ++ ++ return 0; ++} ++ ++static int ++ssam_task_iovs_memory_get(struct spdk_ssam_blk_task *task) ++{ ++ struct ssam_mempool *mp = task->bsmsession->smsession.mp; ++ void *buffer = NULL; ++ uint64_t phys_addr = 0; ++ ++ if (task->payload_size == 0) { ++ /* request type of VIRTIO_BLK_T_FLUSH does not have payload */ ++ task->iovs.virt.sges[0].iov_base = NULL; ++ return 0; ++ } ++ ++ task->iovs.virt.sges[0].iov_base = NULL; ++ task->iovs.phys.sges[0].iov_base = NULL; ++ task->iovs.virt.sges[0].iov_len = task->payload_size; ++ task->iovs.phys.sges[0].iov_len = task->payload_size; ++ task->iovcnt = 1; ++ ++ buffer = ssam_mempool_alloc(mp, task->payload_size, &phys_addr); ++ if (spdk_unlikely(buffer == NULL)) { ++ return -ENOMEM; ++ } ++ ++ /* ssam request max IO size is PAYLOAD_SIZE_MAX, only use one iov to save data */ ++ task->iovs.virt.sges[0].iov_base = buffer; ++ task->iovs.phys.sges[0].iov_base = (void *)phys_addr; ++ ++ return 0; ++} ++ ++static void ++ssam_data_request_para(struct ssam_dma_request *dma_req, struct spdk_ssam_blk_task *task, ++ uint32_t type, uint8_t status) ++{ ++ struct ssam_io_message *io_cmd = NULL; ++ struct spdk_ssam_dma_cb dma_cb = { ++ .status = status, ++ .req_dir = type, ++ .gfunc_id = task->io_req->gfunc_id, ++ .vq_idx = task->vq_idx, ++ .task_idx = task->task_idx ++ }; ++ ++ io_cmd = &task->io_req->req.cmd; ++ dma_req->cb = (void *) * (uint64_t *)&dma_cb; ++ dma_req->gfunc_id = task->io_req->gfunc_id; ++ dma_req->flr_seq = task->io_req->flr_seq; ++ dma_req->direction = type; ++ dma_req->data_len = task->payload_size; ++ if (type == SSAM_REQUEST_DATA_STORE) { ++ dma_req->src = task->iovs.phys.sges; ++ dma_req->src_num = task->iovcnt; ++ dma_req->dst = &io_cmd->iovs[1]; ++ /* dma data iovs does not contain header and tail */ ++ dma_req->dst_num = io_cmd->iovcnt - IOV_HEADER_TAIL_NUM; ++ } else if (type == SSAM_REQUEST_DATA_LOAD) { ++ dma_req->src = &io_cmd->iovs[1]; ++ /* dma data iovs does not contain header and tail */ ++ dma_req->src_num = io_cmd->iovcnt - IOV_HEADER_TAIL_NUM; ++ dma_req->dst = task->iovs.phys.sges; ++ dma_req->dst_num = task->iovcnt; ++ } ++} ++ ++static void ++ssam_request_dma_process(struct spdk_ssam_session *smsession, struct spdk_ssam_blk_task *task) ++{ ++ struct virtio_blk_outhdr *req = NULL; ++ int ret; ++ ++ req = (struct virtio_blk_outhdr *)task->io_req->req.cmd.header; ++ SPDK_INFOLOG(ssam_blk_data, ++ "request dma request io tid=%u gfunc_id=%u reqtype=%d rw=%u vqid=%u reqid=%u.\n", ++ 
smsession->smdev->tid, smsession->gfunc_id, req->type, task->io_req->req.cmd.writable, ++ task->io_req->req.cmd.virtio.vq_idx, task->io_req->req.cmd.virtio.req_idx); ++ ++ switch (req->type) { ++ case VIRTIO_BLK_T_IN: ++ case VIRTIO_BLK_T_GET_ID: ++ case VIRTIO_BLK_T_FLUSH: ++ ret = ssam_process_blk_request(task); ++ if (ret < 0) { ++ SPDK_ERRLOG("====== Task: req_idx %u failed ======\n", task->req_idx); ++ } ++ break; ++ ++ case VIRTIO_BLK_T_OUT: ++ case VIRTIO_BLK_T_DISCARD: ++ case VIRTIO_BLK_T_WRITE_ZEROES: ++ /* dma request: Host -> ipu */ ++ ssam_res_dma_process(smsession, task, SSAM_REQUEST_DATA_LOAD, 0); ++ break; ++ ++ default: ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ SPDK_ERRLOG("Not supported request type '%"PRIu32"'.\n", req->type); ++ } ++} ++ ++struct ssam_blk_io_complete_arg { ++ struct spdk_ssam_dev *smdev; ++ struct ssam_request *io_req; ++}; ++ ++static void ++ssam_blk_io_complete_cb(void *arg) ++{ ++ struct ssam_blk_io_complete_arg *cb_arg = (struct ssam_blk_io_complete_arg *)arg; ++ int rc = ssam_blk_io_complete(cb_arg->smdev, cb_arg->io_req, VIRTIO_BLK_S_IOERR); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_blk_io_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(cb_arg->smdev, io_wait_r); ++ return; ++ } ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ++ssam_process_blk_task(struct spdk_ssam_session *smsession, struct ssam_request *io_req, ++ uint16_t vq_idx, uint16_t req_idx, uint32_t payload_size) ++{ ++ int rc; ++ struct spdk_ssam_blk_task *task = NULL; ++ struct spdk_ssam_virtqueue *vq = &smsession->virtqueue[vq_idx]; ++ ++ if (spdk_unlikely(vq->use_num >= vq->num)) { ++ SPDK_ERRLOG("Session:%s vq(%hu) task_cnt(%u) limit(%u).\n", smsession->name, vq_idx, vq->use_num, ++ vq->num); ++ rc = ssam_blk_io_complete(smsession->smdev, io_req, VIRTIO_BLK_S_IOERR); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_blk_io_complete_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_blk_io_complete_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smsession->smdev; ++ cb_arg->io_req = io_req; ++ io_wait_r->cb_fn = ssam_blk_io_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ } ++ return; ++ } ++ ++ uint32_t index = vq->index[vq->index_r]; ++ task = &((struct spdk_ssam_blk_task *)vq->tasks)[index]; ++ if (spdk_unlikely(task->used)) { ++ SPDK_ERRLOG("%s: vq(%u) task with idx %u is already pending.\n", smsession->name, vq_idx, index); ++ rc = ssam_blk_io_complete(smsession->smdev, io_req, VIRTIO_BLK_S_IOERR); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_blk_io_complete_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_blk_io_complete_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smsession->smdev; ++ cb_arg->io_req = io_req; ++ io_wait_r->cb_fn = ssam_blk_io_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ } 
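++		/*
++		 * The newly received request has already been completed with
++		 * VIRTIO_BLK_S_IOERR above (or queued until the completion can be
++		 * delivered), so leave the task that is still pending untouched.
++		 */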
++ return; ++ } ++ ++ smsession->task_cnt++; ++ vq->index_r = (vq->index_r + 1) & 0xFF; ++ vq->use_num++; ++ ++ ssam_blk_task_init(task); ++ task->io_req = io_req; ++ task->vq_idx = vq_idx; ++ task->req_idx = req_idx; ++ task->payload_size = payload_size; ++ task->session_io_wait.cb_fn = ssam_session_io_resubmit; ++ task->session_io_wait.cb_arg = task; ++ ++ rc = ssam_task_iovs_memory_get(task); ++ if (rc != 0) { ++ ssam_session_insert_io_wait(smsession, &task->session_io_wait); ++ return; ++ } ++ ++ ssam_request_dma_process(smsession, task); ++ return; ++} ++ ++static void ++ssam_process_vq(struct spdk_ssam_session *smsession, struct ssam_request *io_req) ++{ ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ uint16_t vq_idx = io_cmd->virtio.vq_idx; ++ uint16_t req_idx = io_cmd->virtio.req_idx; ++ uint32_t payload_size = 0; ++ int rc; ++ ++ if (vq_idx >= smsession->max_queues) { ++ SPDK_ERRLOG("vq_idx out of range, need less than %u, actually %u\n", ++ smsession->max_queues, vq_idx); ++ goto err; ++ } ++ ++ if (io_req->status != SSAM_IO_STATUS_OK) { ++ SPDK_WARNLOG("%s: ssam request status invalid, but still process, status=%d\n", ++ smsession->name, io_req->status); ++ goto err; ++ } ++ ++ rc = ssam_get_payload_size(io_req, &payload_size); ++ if (rc != 0) { ++ goto err; ++ } ++ ++ ssam_process_blk_task(smsession, io_req, vq_idx, req_idx, payload_size); ++ return; ++ ++err: ++ rc = ssam_blk_io_complete(smsession->smdev, io_req, VIRTIO_BLK_S_IOERR); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_blk_io_complete_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_blk_io_complete_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smsession->smdev; ++ cb_arg->io_req = io_req; ++ io_wait_r->cb_fn = ssam_blk_io_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ } ++ return; ++} ++ ++static void ++ssam_no_bdev_put_io_channel(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ ++ if (smsession->task_cnt == 0 && (bsmsession->io_channel != NULL)) { ++ spdk_put_io_channel(bsmsession->io_channel); ++ bsmsession->io_channel = NULL; ++ } ++} ++ ++struct ssam_no_bdev_process_vq_arg { ++ struct spdk_ssam_session *smsession; ++ struct ssam_request *io_req; ++}; ++ ++static void ++ssam_no_bdev_process_vq_cb(void *arg) ++{ ++ struct ssam_no_bdev_process_vq_arg *cb_arg = (struct ssam_no_bdev_process_vq_arg *)arg; ++ int rc = ssam_blk_io_complete(cb_arg->smsession->smdev, cb_arg->io_req, VIRTIO_BLK_S_IOERR); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_no_bdev_process_vq_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(cb_arg->smsession->smdev, io_wait_r); ++ return; ++ } ++ ssam_no_bdev_put_io_channel(cb_arg->smsession); ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ++ssam_no_bdev_process_vq(struct spdk_ssam_session *smsession, struct ssam_request *io_req) ++{ ++ SPDK_ERRLOG("gfunc_id %u No bdev, aborting request, return EIO\n", io_req->gfunc_id); ++ int rc = ssam_blk_io_complete(smsession->smdev, io_req, VIRTIO_BLK_S_IOERR); ++ if (rc 
!= 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_no_bdev_process_vq_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_no_bdev_process_vq_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smsession = smsession; ++ cb_arg->io_req = io_req; ++ io_wait_r->cb_fn = ssam_no_bdev_process_vq_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ ++ ssam_no_bdev_put_io_channel(smsession); ++} ++ ++static void ++ssam_blk_response_worker(struct spdk_ssam_session *smsession, void *arg) ++{ ++ struct ssam_dma_rsp *dma_rsp = (struct ssam_dma_rsp *)arg; ++ struct spdk_ssam_dma_cb *dma_cb = (struct spdk_ssam_dma_cb *)&dma_rsp->cb; ++ struct spdk_ssam_blk_task *task = NULL; ++ uint16_t vq_idx = dma_cb->vq_idx; ++ uint16_t task_idx = dma_cb->task_idx; ++ uint8_t req_dir = dma_cb->req_dir; ++ ++ if (vq_idx >= smsession->max_queues) { ++ smsession->smdev->discard_io_num++; ++ SPDK_ERRLOG("vq_idx out of range, need less than %u, actually %u\n", ++ smsession->max_queues, vq_idx); ++ return; ++ } ++ ++ task = &((struct spdk_ssam_blk_task *)smsession->virtqueue[vq_idx].tasks)[task_idx]; ++ if (dma_rsp->status != 0) { ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ SPDK_ERRLOG("dma data process failed!\n"); ++ return; ++ } ++ if (dma_rsp->last_flag == 0) { ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ SPDK_ERRLOG("last_flag should not equal 0!\n"); ++ return; ++ } ++ ssam_task_stat_tick(&task->task_stat.dma_end_tsc); ++ task->bsmsession->blk_stat.dma_complete_count++; ++ if (req_dir == SSAM_REQUEST_DATA_LOAD) { ++ /* Write data ready, start a request to backend */ ++ ssam_process_blk_request(task); ++ } else if (req_dir == SSAM_REQUEST_DATA_STORE) { ++ /* Data have been read by user, complete the task */ ++ ssam_task_complete(task, dma_cb->status); ++ } ++} ++ ++static int ++ssam_blk_check_io_req(struct spdk_ssam_dev *smdev, struct ssam_request *io_req) ++{ ++ struct ssam_io_message *io_cmd = NULL; ++ uint16_t vq_idx; ++ uint16_t req_idx; ++ const struct virtio_blk_outhdr *req = NULL; ++ ++ if (io_req == NULL) { ++ SPDK_ERRLOG("%s: received a NULL IO message\n", smdev->name); ++ return -1; ++ } ++ ++ io_cmd = &io_req->req.cmd; ++ vq_idx = io_cmd->virtio.vq_idx; ++ req_idx = io_cmd->virtio.req_idx; ++ req = (struct virtio_blk_outhdr *)io_cmd->header; ++ ++ if (io_cmd->iovs == NULL) { ++ SPDK_ERRLOG("%s: received an empty IO, vq_idx:%u, req_idx:%u\n", ++ smdev->name, vq_idx, req_idx); ++ return -1; ++ } ++ ++ if (io_cmd->iovcnt < IOV_HEADER_TAIL_NUM) { ++ SPDK_ERRLOG("%s: iovcnt %u less than %d but expected not less than %d\n", ++ smdev->name, io_cmd->iovcnt, IOV_HEADER_TAIL_NUM, IOV_HEADER_TAIL_NUM); ++ return -1; ++ } ++ ++ if ((io_cmd->iovcnt == IOV_HEADER_TAIL_NUM) && (req->type != VIRTIO_BLK_T_FLUSH)) { ++ SPDK_ERRLOG("%s: received an IO not contain valid data, iovcnt:%u, vq_idx:%u, " ++ "req_idx:%u, req_type:%u, req_ioprio:%u, req_sector:%llu\n", ++ smdev->name, io_cmd->iovcnt, vq_idx, req_idx, req->type, req->ioprio, req->sector); ++ return -1; ++ } ++ ++ if (io_cmd->iovcnt > (SPDK_SSAM_IOVS_MAX + IOV_HEADER_TAIL_NUM)) { ++ SPDK_ERRLOG("%s: received too much IO, iovcnt:%u, vq_idx:%u, req_idx:%u\n", ++ smdev->name, io_cmd->iovcnt, vq_idx, req_idx); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_blk_request_worker(struct spdk_ssam_session *smsession, 
void *arg) ++{ ++ struct spdk_ssam_dev *smdev = smsession->smdev; ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ struct ssam_request *io_req = (struct ssam_request *)arg; ++ int ret; ++ ++ smdev->io_num++; ++ bsmsession->blk_stat.start_count++; ++ ++ ret = ssam_blk_check_io_req(smdev, io_req); ++ if (ret < 0) { ++ smdev->discard_io_num++; ++ return; ++ } ++ ++ if (bsmsession->no_bdev || bsmsession->io_channel == NULL) { ++ ssam_no_bdev_process_vq(smsession, io_req); ++ } else { ++ ssam_process_vq(smsession, io_req); ++ } ++} ++ ++static void ++ssam_blk_no_data_request_worker(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_blk_session *bsmsession = NULL; ++ ++ bsmsession = ssam_to_blk_session(smsession); ++ if (bsmsession->no_bdev) { ++ ssam_no_bdev_put_io_channel(smsession); ++ } ++} ++ ++static void ++ssam_blk_destroy_bdev_device(struct spdk_ssam_session *smsession, void *args) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ spdk_bdev_close(bsmsession->bdev_desc); ++ ++ /* free taskpool */ ++ ssam_free_task_pool(bsmsession); ++ ++ /* free */ ++ free(bsmsession); ++} ++ ++static void ++ssam_request_resubmit(void *arg) ++{ ++ struct spdk_ssam_blk_task *task = (struct spdk_ssam_blk_task *)arg; ++ int rc; ++ ++ rc = ssam_process_blk_request(task); ++ if (rc == 0) { ++ SPDK_DEBUGLOG(ssam_blk_data, "====== Task: req_idx = %"PRIu16" resubmitted ======\n", ++ get_req_idx(task)); ++ } else { ++ SPDK_WARNLOG("====== Task: req_idx = %"PRIu16" failed ======\n", get_req_idx(task)); ++ } ++} ++ ++static inline void ++ssam_request_queue_io(struct spdk_ssam_blk_task *task) ++{ ++ int rc; ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ ++ task->bdev_io_wait.bdev = bsmsession->bdev; ++ task->bdev_io_wait.cb_fn = ssam_request_resubmit; ++ task->bdev_io_wait.cb_arg = task; ++ ++ rc = spdk_bdev_queue_io_wait(bsmsession->bdev, bsmsession->io_channel, &task->bdev_io_wait); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: failed to queue I/O, rc=%d\n", bsmsession->smsession.name, rc); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ } ++} ++ ++static void ++ssam_session_io_resubmit(void *arg) ++{ ++ struct spdk_ssam_blk_task *task = (struct spdk_ssam_blk_task *)arg; ++ struct spdk_ssam_session *smsession = &task->bsmsession->smsession; ++ int rc; ++ ++ rc = ssam_task_iovs_memory_get(task); ++ if (rc != 0) { ++ ssam_session_insert_io_wait(smsession, &task->session_io_wait); ++ return; ++ } ++ ssam_request_dma_process(smsession, task); ++} ++ ++static void ++ssam_blk_start_post_cb(struct spdk_ssam_session *smsession, void **arg) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ int rc; ++ ++ smsession->started = true; ++ rc = ssam_virtio_blk_resize(smsession->gfunc_id, bsmsession->bdev->blockcnt); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: virtio blk resize failed.\n", smsession->name); ++ } ++ ++ rc = ssam_mount_normal(smsession, 0); ++ if (rc != SSAM_MOUNT_OK) { ++ SPDK_WARNLOG("%s: mount ssam volume failed\n", smsession->name); ++ } ++ ++ /* Smdev poller is not created here, but is created in the initialization process. 
*/
++	SPDK_NOTICELOG("BLK controller %s created with bdev %s, queues %u\n",
++		smsession->name, spdk_bdev_get_name(bsmsession->bdev), smsession->max_queues);
++}
++
++static int
++ssam_blk_start_cb(struct spdk_ssam_session *smsession, void **ctx)
++{
++	struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession);
++
++	if (bsmsession->bdev == NULL) {
++		SPDK_ERRLOG("%s: session does not have a bdev.\n", smsession->name);
++		return -ENODEV;
++	}
++
++	bsmsession->io_channel = spdk_bdev_get_io_channel(bsmsession->bdev_desc);
++	if (bsmsession->io_channel == NULL) {
++		ssam_free_task_pool(bsmsession);
++		SPDK_ERRLOG("%s: I/O channel allocation failed\n", smsession->name);
++		return -ENODEV;
++	}
++
++	ssam_session_start_done(smsession, 0);
++
++	ssam_send_event_async_done(ctx);
++
++	return 0;
++}
++
++static int
++ssam_blk_start(struct spdk_ssam_session *smsession)
++{
++	struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession);
++	struct spdk_ssam_send_event_flag send_event_flag = {
++		.need_async = true,
++		.need_rsp = true,
++	};
++	int rc = ssam_alloc_task_pool(bsmsession);
++	if (rc != 0) {
++		SPDK_ERRLOG("%s: failed to alloc task pool.\n", smsession->name);
++		return rc;
++	}
++	return ssam_send_event_to_session(smsession, ssam_blk_start_cb, ssam_blk_start_post_cb,
++			send_event_flag, NULL);
++}
++
++static void
++ssam_blk_destroy_session(struct ssam_blk_session_ctx *ctx)
++{
++	struct spdk_ssam_blk_session *bsmsession = ctx->bsmsession;
++	struct spdk_ssam_session *smsession = &bsmsession->smsession;
++
++	if (smsession->task_cnt > 0) {
++		return;
++	}
++
++	/* If the ssam subsystem is finishing, the session registered flag is
++	 * set to false first and the bdev is removed in the ssam_bdev_remove_cb()
++	 * callback; wait for that callback process to finish first.
++ */ ++ if ((smsession->registered == false) && (bsmsession->bdev != NULL)) { ++ return; ++ } ++ ++ SPDK_NOTICELOG("%s: removing on lcore %d\n", ++ smsession->name, spdk_env_get_current_core()); ++ ++ ssam_session_destroy(smsession); ++ ++ if (bsmsession->io_channel != NULL) { ++ spdk_put_io_channel(bsmsession->io_channel); ++ bsmsession->io_channel = NULL; ++ } ++ ssam_free_task_pool(bsmsession); ++ ++ if (bsmsession->serial != NULL) { ++ free(bsmsession->serial); ++ } ++ spdk_poller_unregister(&bsmsession->stop_poller); ++ ++ ssam_session_stop_done(smsession, 0, ctx->user_ctx); ++ free(ctx); ++ ++ return; ++} ++ ++static int ++ssam_destroy_session_poller_cb(void *arg) ++{ ++ struct ssam_blk_session_ctx *ctx = arg; ++ ++ if (ssam_trylock() != 0) { ++ return SPDK_POLLER_BUSY; ++ } ++ ++ ssam_blk_destroy_session(ctx); ++ ++ ssam_unlock(); ++ ++ return SPDK_POLLER_BUSY; ++} ++ ++static int ++ssam_blk_stop_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ smsession->started = false; ++ ++ struct ssam_blk_session_ctx *_ctx = ++ (struct ssam_blk_session_ctx *)calloc(1, sizeof(struct ssam_blk_session_ctx)); ++ ++ if (_ctx == NULL) { ++ SPDK_ERRLOG("%s: calloc blk session ctx error.\n", smsession->name); ++ return -ENOMEM; ++ } ++ ++ _ctx->bsmsession = bsmsession; ++ _ctx->user_ctx = ctx; ++ ++ bsmsession->stop_poller = SPDK_POLLER_REGISTER(ssam_destroy_session_poller_cb, ++ _ctx, SESSION_STOP_POLLER_PERIOD); ++ if (bsmsession->stop_poller == NULL) { ++ SPDK_WARNLOG("%s: ssam_destroy_session_poller_cb start failed.\n", smsession->name); ++ ssam_session_stop_done(smsession, -EBUSY, ctx); ++ free(_ctx); ++ return -EBUSY; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_blk_stop(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = true, ++ .need_rsp = true, ++ }; ++ return ssam_send_event_to_session(smsession, ssam_blk_stop_cb, ssam_blk_stop_cpl_cb, ++ send_event_flag, NULL); ++} ++ ++static int ++ssam_blk_remove_session(struct spdk_ssam_session *smsession) ++{ ++ SPDK_NOTICELOG("session gfunc_id=%u removing\n", smsession->gfunc_id); ++ int ret = ssam_blk_stop(smsession); ++ if ((ret != 0) && (smsession->registered == true)) { ++ (void)ssam_remount_normal(smsession, 0); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++const char * ++ssam_get_bdev_name_by_gfunc_id(uint16_t gfunc_id) ++{ ++ struct spdk_ssam_session *smsession; ++ struct spdk_ssam_blk_session *bsmsession = NULL; ++ ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ return NULL; ++ } ++ bsmsession = ssam_to_blk_session(smsession); ++ ++ return spdk_bdev_get_name(bsmsession->bdev); ++} ++ ++struct spdk_bdev * ++ssam_get_session_bdev(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ ++ return bsmsession->bdev; ++} ++ ++int ++ssam_blk_construct(struct spdk_ssam_session_reg_info *info, const char *dev_name, ++ bool readonly, char *serial) ++{ ++ struct spdk_ssam_session *smsession = NULL; ++ struct spdk_ssam_blk_session *bsmsession = NULL; ++ struct spdk_bdev *bdev = NULL; ++ uint32_t session_ctx_size = sizeof(struct spdk_ssam_blk_session) - ++ sizeof(struct spdk_ssam_session); ++ uint16_t tid; ++ int ret = 0; ++ int rc; ++ ++ ssam_lock(); ++ ++ tid = ssam_get_tid(); ++ if (tid == SPDK_INVALID_TID) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ info->tid = tid; ++ info->backend = &g_ssam_blk_session_backend; 
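++	/*
++	 * session_ctx_size (computed above as the size difference between the
++	 * blk session and the generic session) is the amount of blk-specific
++	 * context requested from session registration beyond
++	 * struct spdk_ssam_session.
++	 */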
++ info->session_ctx_size = session_ctx_size; ++ snprintf(info->type_name, SPDK_SESSION_TYPE_MAX_LEN, "%s", SPDK_SESSION_TYPE_BLK); ++ ret = ssam_session_register(info, &smsession); ++ if (ret != 0) { ++ goto out; ++ } ++ ++ bsmsession = ssam_to_blk_session(smsession); ++ ++ ret = spdk_bdev_open_ext(dev_name, true, ssam_bdev_event_cb, smsession, ++ &bsmsession->bdev_desc); ++ if (ret != 0) { ++ SPDK_ERRLOG("function id %d: could not open bdev, error:%s\n", info->gfunc_id, spdk_strerror(-ret)); ++ goto out; ++ } ++ bdev = spdk_bdev_desc_get_bdev(bsmsession->bdev_desc); ++ bsmsession->bdev = bdev; ++ bsmsession->readonly = readonly; ++ ++ if (serial == NULL) { ++ SPDK_INFOLOG(ssam_blk, "function id %d: not set volume id.\n", info->gfunc_id); ++ } else { ++ bsmsession->serial = calloc(SERIAL_STRING_LEN, sizeof(char)); ++ if (!bsmsession->serial) { ++ SPDK_ERRLOG("no memory for alloc.\n"); ++ goto out; ++ } ++ (void)snprintf(bsmsession->serial, SERIAL_STRING_LEN, "%s", serial); ++ } ++ ++ ret = ssam_blk_start(smsession); ++ if (ret != 0) { ++ SPDK_ERRLOG("%s: start failed\n", smsession->name); ++ goto out; ++ } ++ ++ SPDK_INFOLOG(ssam_blk, "function id %d: using bdev '%s'\n", info->gfunc_id, dev_name); ++out: ++ if ((ret != 0) && (smsession != NULL) && (smsession->smdev != NULL)) { ++ ssam_session_unreg_response_cb(smsession); ++ rc = ssam_session_unregister(smsession); ++ if (rc != 0) { ++ SPDK_ERRLOG("function id %d: blk construct failed and session remove failed, ret=%d\n", ++ info->gfunc_id, ret); ++ } ++ } ++ ssam_unlock(); ++ return ret; ++} ++ ++SPDK_LOG_REGISTER_COMPONENT(ssam_blk) ++SPDK_LOG_REGISTER_COMPONENT(ssam_blk_data) +diff --git a/lib/ssam/ssam_config.c b/lib/ssam/ssam_config.c +new file mode 100644 +index 0000000..06ac36d +--- /dev/null ++++ b/lib/ssam/ssam_config.c +@@ -0,0 +1,549 @@ ++/*- ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include ++#include ++#include ++ ++#include "spdk/string.h" ++#include "spdk/file.h" ++#include "ssam_internal.h" ++ ++/* dma queue must be 1 with 15 core */ ++#define SSAM_DEFAULT_DMA_QUEUE_NUM 1 ++#define SSAM_DEFAULT_MEMPOOL_SIZE 1024 ++#define SSAM_DEFAULT_FS_MEMPOOL_SIZE 256 ++#define SSAM_MAX_DMA_QUEUE_NUM 4 ++#define SPDK_SSAM_VIRTIO_BLK_DEFAULT_FEATURE 0x3f11001046 ++#define SPDK_SSAM_VIRTIO_SCSI_DEFAULT_FEATURE 0x3f11000007 ++#define SPDK_SSAM_VIRTIO_FS_DEFAULT_FEATURE 0x3f19000000 ++ ++struct ssam_user_config { ++ uint32_t mempool_size; ++ uint32_t queues; ++ uint32_t extra_size; ++ uint32_t dma_queue_num; ++ uint32_t fs_mempool_size; ++}; ++ ++struct ssam_config { ++ struct ssam_user_config user_config; ++ struct ssam_hostep_info ep_info; ++ uint32_t core_num; ++ bool shm_created; ++ bool virtio_fs_enable; ++}; ++ ++struct ssam_fs_config { ++ uint16_t queue_id; ++ ssam_mempool_t *mp; ++}; ++ ++static struct ssam_config g_ssam_config; ++ ++static int ++ssam_heap_malloc(const char *type, size_t size, int socket_arg, ++ unsigned int flags, size_t align, size_t bound, bool contig, struct ssam_melem *mem) ++{ ++ void *addr = NULL; ++ unsigned long long pg_size; ++ int socket_id; ++ int rc; ++ uint64_t iova; ++ ++ addr = rte_malloc_socket(type, size, align, socket_arg); ++ if (addr == NULL) { ++ return -ENOMEM; ++ } ++ ++ rc = ssam_malloc_elem_from_addr(addr, &pg_size, &socket_id); ++ if (rc != 0) { ++ ssam_free_ex(addr); ++ return -ENOMEM; ++ } ++ ++ iova = rte_malloc_virt2iova(addr); ++ if (iova == RTE_BAD_IOVA) { ++ ssam_free_ex(addr); ++ return -ENOMEM; ++ } ++ ++ mem->addr = addr; ++ mem->iova = iova; ++ mem->page_sz = pg_size; ++ mem->socket_id = socket_id; ++ return 0; ++} ++ ++static int ++ssam_heap_free(void *addr) ++{ ++ return ssam_free_ex(addr); ++} ++ ++static void ++ssam_get_ssam_lib_init_config(struct ssam_lib_args *cfg) ++{ ++ uint32_t core_num = g_ssam_config.core_num; ++ ++ cfg->role = 0; ++ cfg->dma_queue_num = g_ssam_config.user_config.dma_queue_num; ++ cfg->ssam_heap_malloc = ssam_heap_malloc; ++ cfg->ssam_heap_free = ssam_heap_free; ++ ++ /* The number of tid is 1 greater than the number of cores. 
*/ ++ cfg->core_num = core_num; ++} ++ ++void spdk_ssam_set_shm_created(bool shm_created) ++{ ++ g_ssam_config.shm_created = shm_created; ++} ++ ++bool spdk_ssam_get_shm_created(void) ++{ ++ return g_ssam_config.shm_created; ++} ++ ++int ++ssam_set_core_num(uint32_t core_num) ++{ ++ if (core_num > SSAM_MAX_CORE_NUM) { ++ SPDK_ERRLOG("Invalid coremask, total cores need less or equal than %d, " ++ "actually %u, please check startup item.\n", ++ SSAM_MAX_CORE_NUM, core_num); ++ return -EINVAL; ++ } ++ if (g_ssam_config.user_config.dma_queue_num == SSAM_MAX_DMA_QUEUE_NUM ++ && core_num > SSAM_MAX_CORE_NUM_WITH_LARGE_IO) { ++ SPDK_ERRLOG("Invalid coremask, total cores need less or equal than %d, " ++ "actually %u, please check startup item.\n", ++ SSAM_MAX_CORE_NUM_WITH_LARGE_IO, core_num); ++ return -EINVAL; ++ } ++ g_ssam_config.core_num = core_num; ++ return 0; ++} ++ ++uint16_t ++ssam_get_core_num(void) ++{ ++ return (uint16_t)g_ssam_config.core_num; ++} ++ ++uint32_t ++ssam_get_mempool_size(void) ++{ ++ return g_ssam_config.user_config.mempool_size; ++} ++ ++uint16_t ++ssam_get_queues(void) ++{ ++ uint16_t cfg_queues = (uint16_t)g_ssam_config.user_config.queues; ++ ++ if (cfg_queues == 0) { ++ SPDK_INFOLOG(ssam_config, "Use default queues number: %u.\n", SPDK_SSAM_DEFAULT_VQUEUES); ++ return SPDK_SSAM_DEFAULT_VQUEUES; ++ } ++ return cfg_queues; ++} ++ ++bool ++ssam_get_virtio_fs_enable(void) ++{ ++ return g_ssam_config.virtio_fs_enable; ++} ++ ++enum ssam_device_type ++ssam_get_virtio_type(uint16_t gfunc_id) { ++ uint16_t vf_start, vf_end; ++ struct ssam_pf_list *pf = g_ssam_config.ep_info.host_pf_list; ++ ++ for (uint32_t i = 0; i < SSAM_HOSTEP_NUM_MAX; i++) ++ { ++ if (pf[i].pf_funcid == UINT16_MAX) { ++ continue; ++ } ++ if (gfunc_id == pf[i].pf_funcid) { ++ return pf[i].pf_type; ++ } ++ ++ vf_start = pf[i].vf_funcid_start; ++ if (((uint32_t)vf_start + (uint32_t)pf[i].vf_num) > UINT16_MAX) { ++ SPDK_ERRLOG("vf_start %u + vf_num %u out of range, need less or equal than %u.\n", ++ vf_start, pf[i].vf_num, UINT16_MAX); ++ continue; ++ } ++ vf_end = vf_start + pf[i].vf_num; ++ if ((gfunc_id >= vf_start) && (gfunc_id < vf_end)) { ++ return pf[i].pf_type; ++ } ++ } ++ ++ return SSAM_DEVICE_VIRTIO_MAX; ++} ++ ++static void ++ssam_get_virtio_blk_config(struct ssam_virtio_config *cfg) ++{ ++ struct virtio_blk_config *dev_cfg = (struct virtio_blk_config *)cfg->device_config; ++ ++ cfg->device_feature = SPDK_SSAM_VIRTIO_BLK_DEFAULT_FEATURE; ++ cfg->queue_num = g_ssam_config.user_config.queues; ++ cfg->config_len = sizeof(struct virtio_blk_config); ++ ++ memset(dev_cfg, 0, cfg->config_len); ++ dev_cfg->blk_size = 0x200; ++ dev_cfg->min_io_size = 0; ++ dev_cfg->capacity = 0; ++ dev_cfg->num_queues = cfg->queue_num; ++ dev_cfg->seg_max = 0x7d; ++ dev_cfg->size_max = 0x200000; ++ cfg->queue_size = VIRITO_DEFAULT_QUEUE_SIZE; ++ ++ return; ++} ++ ++static void ++ssam_get_virtio_scsi_config(struct ssam_virtio_config *cfg) ++{ ++ struct virtio_scsi_config *dev_cfg = (struct virtio_scsi_config *)cfg->device_config; ++ ++ cfg->device_feature = SPDK_SSAM_VIRTIO_SCSI_DEFAULT_FEATURE; ++ cfg->queue_num = g_ssam_config.user_config.queues; ++ cfg->config_len = sizeof(struct virtio_scsi_config); ++ ++ memset(dev_cfg, 0, sizeof(struct virtio_scsi_config)); ++ dev_cfg->num_queues = 0x04; ++ dev_cfg->seg_max = 0x6f; ++ dev_cfg->max_sectors = 0x1ff; ++ dev_cfg->cmd_per_lun = 0x80; ++ dev_cfg->event_info_size = 0; ++ dev_cfg->sense_size = 0x60; ++ dev_cfg->cdb_size = 0x20; ++ dev_cfg->max_channel = 0; ++ 
dev_cfg->max_target = SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; ++ dev_cfg->max_lun = 0xff; ++ cfg->queue_size = VIRITO_DEFAULT_QUEUE_SIZE; ++ ++ return; ++} ++ ++static struct ssam_fs_config g_ssam_fs_config_map[SSAM_HOSTEP_NUM_MAX]; ++ ++uint16_t ++ssam_get_queue_id(uint32_t func_id) ++{ ++ if (func_id >= SSAM_HOSTEP_NUM_MAX) { ++ return 0; ++ } ++ return g_ssam_fs_config_map[func_id].queue_id; ++} ++ ++ssam_mempool_t * ++ssam_get_fs_mp(uint32_t func_id) ++{ ++ if (func_id >= SSAM_HOSTEP_NUM_MAX) { ++ return 0; ++ } ++ return g_ssam_fs_config_map[func_id].mp; ++} ++ ++static int ++ssam_virtio_fs_config_init(struct ssam_hostep_info *ep_info) ++{ ++ int rc = 0; ++ uint32_t i; ++ uint16_t queue_id; ++ struct ssam_pf_list *pf = ep_info->host_pf_list; ++ ++ for (i = 0; i < SSAM_HOSTEP_NUM_MAX; i++) { ++ if (pf[i].pf_funcid == UINT16_MAX || pf[i].pf_type != SSAM_DEVICE_VIRTIO_FS) { ++ continue; ++ } ++ rc = ssam_vmio_rxq_create(&queue_id); ++ if (rc < 0) { ++ SPDK_ERRLOG("Failed to create vmio rx queue: %d\n", rc); ++ return -1; ++ } ++ g_ssam_fs_config_map[pf[i].pf_funcid].queue_id = queue_id; ++ g_ssam_fs_config_map[pf[i].pf_funcid].mp = ++ ssam_mempool_create(g_ssam_config.user_config.fs_mempool_size * SSAM_MB, ++ SSAM_DEFAULT_MEMPOOL_EXTRA_SIZE); ++ if (g_ssam_fs_config_map[pf[i].pf_funcid].mp == NULL) { ++ SPDK_ERRLOG("ssam create fs mempool failed, mempool_size = %uMB.\n", ++ g_ssam_config.user_config.fs_mempool_size); ++ return -ENOMEM; ++ } ++ g_ssam_config.virtio_fs_enable = true; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_get_virtio_fs_config(struct ssam_virtio_config *cfg, uint32_t func_id) ++{ ++ int ret = 0; ++ uint32_t *buf = (uint32_t *)cfg->device_config; ++ ++ cfg->device_feature = SPDK_SSAM_VIRTIO_FS_DEFAULT_FEATURE; ++ cfg->queue_num = g_ssam_config.user_config.queues; ++ cfg->queue_size = VIRITO_FS_DEFAULT_QUEUE_SIZE; ++ cfg->rx_queue_id = ssam_get_queue_id(func_id); ++ cfg->config_len = VIRTIO_FS_DEFAULT_CONFIG_LEN; ++ ++ memset(buf, 0, sizeof(cfg->device_config)); ++ ret = snprintf((char *)buf, VIRTIO_FS_DEFAULT_TAG_LEN, "FS_%u", func_id); ++ if (ret < 0 || ret >= VIRTIO_FS_DEFAULT_TAG_LEN) { ++ SPDK_ERRLOG("Failed to init tag of func_id: %u\n", func_id); ++ return -EINVAL; ++ } ++ *(buf + VIRTIO_FS_DEFAULT_CONFIG_QUEUE_OFFSET) = g_ssam_config.user_config.queues; ++ ++ return 0; ++} ++ ++static int ++ssam_virtio_config_get(struct ssam_pf_list *pf, struct ssam_function_config *cfg) ++{ ++ int ret = 0; ++ ++ cfg->gfunc_id = pf->pf_funcid; ++ cfg->type = pf->pf_type; ++ switch (cfg->type) { ++ case SSAM_DEVICE_VIRTIO_BLK: ++ ssam_get_virtio_blk_config(&cfg->virtio_config); ++ break; ++ case SSAM_DEVICE_VIRTIO_SCSI: ++ ssam_get_virtio_scsi_config(&cfg->virtio_config); ++ break; ++ case SSAM_DEVICE_VIRTIO_FS: ++ ret = ssam_get_virtio_fs_config(&cfg->virtio_config, cfg->gfunc_id); ++ if (ret != 0) { ++ return ret; ++ } ++ break; ++ default: { ++ SPDK_ERRLOG("function config init fail (%d|%d)\n", cfg->gfunc_id, cfg->type); ++ ret = -EINVAL; ++ break; ++ } ++ } ++ ++ return ret; ++} ++ ++static int ++ssam_setup_pf(struct ssam_pf_list *pf, struct ssam_function_config *cfg) ++{ ++ int rc; ++ ++ rc = ssam_setup_function(pf->pf_funcid, pf->vf_num, pf->pf_type); ++ if (rc != 0) { ++ SPDK_ERRLOG("ssam init function(%u) failed:%s\n", pf->pf_funcid, spdk_strerror(-rc)); ++ return rc; ++ } ++ rc = ssam_write_function_config(cfg); ++ if (rc != 0) { ++ SPDK_ERRLOG("ssam write function(%d) config failed:%s\n", cfg->gfunc_id, spdk_strerror(-rc)); ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static 
int ++ssam_setup_vf(struct ssam_pf_list *pf, struct ssam_function_config *cfg) ++{ ++ struct ssam_function_config l_cfg; ++ uint16_t vf_funcid_start = pf->vf_funcid_start; ++ uint16_t vf_num = pf->vf_num; ++ int rc; ++ uint16_t i; ++ ++ if (((uint32_t)vf_funcid_start + (uint32_t)vf_num) > UINT16_MAX) { ++ SPDK_ERRLOG("vf_funcid_start %u or vf_num %u out of range.\n", ++ vf_funcid_start, vf_num); ++ return -1; ++ } ++ ++ memcpy(&l_cfg, cfg, sizeof(struct ssam_function_config)); ++ for (i = vf_funcid_start; i < vf_funcid_start + vf_num; i++) { ++ l_cfg.gfunc_id = i; ++ rc = ssam_write_function_config(&l_cfg); ++ if (rc != 0) { ++ SPDK_ERRLOG("ssam write function(%u) config failed:%s\n", i, spdk_strerror(-rc)); ++ return rc; ++ } ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_virtio_config_init(struct ssam_hostep_info *ep_info) ++{ ++ int rc = 0; ++ uint32_t i; ++ struct ssam_function_config cfg = {0}; ++ struct ssam_pf_list *pf = ep_info->host_pf_list; ++ ++ if (spdk_ssam_get_shm_created()) { ++ /* If server is crashed from last time, no need setup config this time */ ++ return 0; ++ } ++ ++ /** ++ * During chip initialization, the vq and msix resources are initialized. ++ * However, the ssam configuration may be different from the initialization configuration. ++ * In the scene of virtio-blk, resources will be alloced at the function `ssam_blk_controller_set_vqueue`. ++ * Therefore, the original resources need to be released before negotiation with the host end. ++ */ ++ for (i = 0; i < SSAM_HOSTEP_NUM_MAX; i++) { ++ if (pf[i].pf_funcid == UINT16_MAX || pf[i].pf_type != SSAM_DEVICE_VIRTIO_BLK) { ++ continue; ++ } ++ rc = ssam_virtio_blk_release_resource(i); ++ if (rc != 0) { ++ SPDK_WARNLOG("virtio blk release vq failed.\n"); ++ } ++ } ++ ++ for (i = 0; i < SSAM_HOSTEP_NUM_MAX; i++) { ++ if (pf[i].pf_funcid == UINT16_MAX) { ++ continue; ++ } ++ rc = ssam_virtio_config_get(&pf[i], &cfg); ++ if (rc != 0) { ++ return rc; ++ } ++ rc = ssam_setup_pf(&pf[i], &cfg); ++ if (rc != 0) { ++ return rc; ++ } ++ rc = ssam_setup_vf(&pf[i], &cfg); ++ if (rc != 0) { ++ return rc; ++ } ++ } ++ ++ return rc; ++} ++ ++static int ++ssam_virtio_init(void) ++{ ++ struct ssam_lib_args ssam_args = { 0 }; ++ struct ssam_hostep_info *ep_info = &g_ssam_config.ep_info; ++ int rc; ++ ++ ssam_get_ssam_lib_init_config(&ssam_args); ++ ++ rc = ssam_lib_init(&ssam_args, ep_info); ++ if (rc != 0) { ++ SPDK_ERRLOG("Failed to init ssam:%s\n", spdk_strerror(-rc)); ++ return rc; ++ } ++ ++ rc = ssam_virtio_fs_config_init(ep_info); ++ if (rc != 0) { ++ SPDK_ERRLOG("Failed to init virtio fs config:%s\n", spdk_strerror(-rc)); ++ return rc; ++ } ++ ++ rc = ssam_virtio_config_init(ep_info); ++ if (rc != 0) { ++ SPDK_ERRLOG("ssam virtio device init failed:%s\n", spdk_strerror(-rc)); ++ if (ssam_lib_exit() != 0) { ++ SPDK_WARNLOG("ssam lib exit failed\n"); ++ } ++ return rc; ++ } ++ ++ return 0; ++} ++ ++void ++spdk_ssam_user_config_init(void) ++{ ++ struct ssam_user_config *user_config = &g_ssam_config.user_config; ++ ++ user_config->queues = SPDK_SSAM_DEFAULT_VQUEUES; ++ user_config->dma_queue_num = SSAM_DEFAULT_DMA_QUEUE_NUM; ++ user_config->mempool_size = SSAM_DEFAULT_MEMPOOL_SIZE; ++ user_config->fs_mempool_size = SSAM_DEFAULT_FS_MEMPOOL_SIZE; ++ user_config->extra_size = SSAM_DEFAULT_MEMPOOL_EXTRA_SIZE; ++ g_ssam_config.virtio_fs_enable = false; ++} ++ ++static void ++ssam_virtio_exit(void) ++{ ++ int rc; ++ int i; ++ ++ rc = ssam_lib_exit(); ++ if (rc != 0) { ++ SPDK_WARNLOG("ssam lib exit failed\n"); ++ } ++ ++ for (i = 0; i < 
SSAM_HOSTEP_NUM_MAX; i++) { ++ if (g_ssam_fs_config_map[i].mp != NULL) { ++ ssam_mempool_destroy(g_ssam_fs_config_map[i].mp); ++ g_ssam_fs_config_map[i].mp = NULL; ++ ssam_update_virtio_device_used(i, 0); ++ } ++ } ++} ++ ++int ++ssam_config_init(void) ++{ ++ int rc; ++ ++ rc = ssam_virtio_init(); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ return 0; ++} ++ ++void ++ssam_config_exit(void) ++{ ++ ssam_virtio_exit(); ++} ++ ++SPDK_LOG_REGISTER_COMPONENT(ssam_config) +diff --git a/lib/ssam/ssam_config.h b/lib/ssam/ssam_config.h +new file mode 100644 +index 0000000..f49acca +--- /dev/null ++++ b/lib/ssam/ssam_config.h +@@ -0,0 +1,56 @@ ++/*- ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef SSAM_CONFIG_H ++#define SSAM_CONFIG_H ++ ++int ssam_set_core_num(uint32_t core_num); ++ ++uint16_t ssam_get_core_num(void); ++ ++uint32_t ssam_get_mempool_size(void); ++ ++uint16_t ssam_get_queues(void); ++ ++bool ssam_get_virtio_fs_enable(void); ++ ++enum ssam_device_type ssam_get_virtio_type(uint16_t gfunc_id); ++ ++int ssam_config_init(void); ++ ++void ssam_config_exit(void); ++ ++uint16_t ssam_get_queue_id(uint32_t func_id); ++ ++ssam_mempool_t *ssam_get_fs_mp(uint32_t func_id); ++ ++#endif /* SSAM_CONFIG_H */ +diff --git a/lib/ssam/ssam_device_pcie.c b/lib/ssam/ssam_device_pcie.c +new file mode 100644 +index 0000000..84625a1 +--- /dev/null ++++ b/lib/ssam/ssam_device_pcie.c +@@ -0,0 +1,250 @@ ++/*- ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "spdk/string.h" ++#include "spdk/file.h" ++#include "ssam_internal.h" ++ ++#define SSAM_KEY_MAX_LEN 16 ++#define SSAM_TYPE_MAX_LEN 12 ++#define SSAM_DBDF_MAX_LEN 16 ++ ++struct ssam_device_pcie_info { ++ uint32_t func_id; ++ char type[SSAM_TYPE_MAX_LEN]; ++ char dbdf[SSAM_DBDF_MAX_LEN]; ++}; ++ ++struct ssam_device_pcie_list { ++ uint32_t size; ++ struct ssam_device_pcie_info *device_pcie_list; ++}; ++ ++static struct ssam_device_pcie_list g_ssam_device_pcie_list = { ++ .size = 0, ++ .device_pcie_list = NULL, ++}; ++ ++void ++ssam_deinit_device_pcie_list(void) ++{ ++ if (g_ssam_device_pcie_list.device_pcie_list != NULL) { ++ free(g_ssam_device_pcie_list.device_pcie_list); ++ g_ssam_device_pcie_list.device_pcie_list = NULL; ++ } ++} ++ ++static int ++ssam_alloc_device_pcie_list(struct spdk_json_val *values, size_t num_values) ++{ ++ size_t i; ++ uint32_t size = 0; ++ ++ for (i = 0; i < num_values; i++) { ++ if (values[i].type == SPDK_JSON_VAL_OBJECT_END) { ++ size++; ++ } ++ } ++ ++ if (g_ssam_device_pcie_list.device_pcie_list == NULL) { ++ g_ssam_device_pcie_list.size = size; ++ g_ssam_device_pcie_list.device_pcie_list = calloc(size, sizeof(struct ssam_device_pcie_info)); ++ if (g_ssam_device_pcie_list.device_pcie_list == NULL) { ++ SPDK_ERRLOG("Unable to allocate enough memory for device_pcie_list\n"); ++ return -ENOMEM; ++ } ++ } ++ return 0; ++} ++ ++static void ++ssam_set_device_pcie_index(struct spdk_json_val *value, uint32_t cur_index) ++{ ++ char val[16]; ++ uint32_t gfunc_id; ++ if (value->type != SPDK_JSON_VAL_NUMBER || value->len > 5) { ++ SPDK_ERRLOG("device pcie gfunc id is invalid, type: %u, len: %u\n", value->type, value->len); ++ return; ++ } ++ ++ memset(val, 0, 16); ++ memcpy(val, value->start, value->len); ++ gfunc_id = spdk_strtol(val, 10); ++ if (gfunc_id >= SPDK_INVALID_GFUNC_ID) { ++ SPDK_ERRLOG("device pcie gfunc id(%u) is more than %u\n", gfunc_id, SPDK_INVALID_GFUNC_ID); ++ return; ++ } ++ g_ssam_device_pcie_list.device_pcie_list[cur_index].func_id = gfunc_id; ++} ++ ++static void ++ssam_set_device_pcie_dbdf(struct spdk_json_val *value, uint32_t cur_index) ++{ ++ if (value->type != SPDK_JSON_VAL_STRING || value->len >= SSAM_DBDF_MAX_LEN) { ++ SPDK_ERRLOG("device pcie dbdf is invalid, type: %u, len: %u\n", value->type, 
value->len); ++ return; ++ } ++ ++ memset(g_ssam_device_pcie_list.device_pcie_list[cur_index].dbdf, 0, SSAM_DBDF_MAX_LEN); ++ memcpy(g_ssam_device_pcie_list.device_pcie_list[cur_index].dbdf, value->start, value->len); ++} ++ ++static void ++ssam_set_device_pcie_type(struct spdk_json_val *value, uint32_t cur_index) ++{ ++ if (value->type != SPDK_JSON_VAL_STRING || value->len >= SSAM_TYPE_MAX_LEN) { ++ SPDK_ERRLOG("device pcie type is invalid, type: %u, len: %u\n", value->type, value->len); ++ return; ++ } ++ ++ memset(g_ssam_device_pcie_list.device_pcie_list[cur_index].type, 0, SSAM_TYPE_MAX_LEN); ++ memcpy(g_ssam_device_pcie_list.device_pcie_list[cur_index].type, value->start, value->len); ++} ++ ++static void ++ssam_init_device_pcie_list_by_values(struct spdk_json_val *values, size_t num_values) ++{ ++ char key[SSAM_KEY_MAX_LEN]; ++ uint32_t cur_index = 0; ++ size_t i; ++ ++ for (i = 0; i < num_values; i++) { ++ if (values[i].type == SPDK_JSON_VAL_OBJECT_END) { ++ cur_index++; ++ } ++ if (values[i].type != SPDK_JSON_VAL_NAME || values[i].len >= SSAM_KEY_MAX_LEN) { ++ continue; ++ } ++ ++ memset(key, 0, SSAM_KEY_MAX_LEN); ++ memcpy(key, values[i].start, values[i].len); ++ ++ /* point to val */ ++ i++; ++ ++ if (strcmp(key, "index") == 0) { ++ ssam_set_device_pcie_index(&values[i], cur_index); ++ } else if (strcmp(key, "dbdf") == 0) { ++ ssam_set_device_pcie_dbdf(&values[i], cur_index); ++ } else if (strcmp(key, "type") == 0) { ++ ssam_set_device_pcie_type(&values[i], cur_index); ++ } ++ } ++} ++ ++int ++ssam_init_device_pcie_list(void) ++{ ++ FILE *fp = NULL; ++ void *buf = NULL; ++ ssize_t rc = 0; ++ size_t size; ++ size_t num_values; ++ struct spdk_json_val *values = NULL; ++ ++ fp = popen("dpak-smi info -t device_pcie_list -f storage", "r"); ++ if (fp == NULL) { ++ SPDK_ERRLOG("execute dpak-smi failed\n"); ++ return -EINVAL; ++ } ++ ++ buf = spdk_posix_file_load(fp, &size); ++ if (buf == NULL) { ++ SPDK_ERRLOG("get size of json failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_parse(buf, size, NULL, 0, NULL, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS); ++ if (rc < 0) { ++ SPDK_ERRLOG("dpak-smi error: %s\n", (char *)buf); ++ goto invalid; ++ } ++ num_values = (size_t)rc; ++ values = calloc(num_values, sizeof(*values)); ++ if (values == NULL) { ++ SPDK_ERRLOG("Unable to allocate enough memory for values\n"); ++ rc = -ENOMEM; ++ goto invalid; ++ } ++ ++ rc = spdk_json_parse(buf, size, values, num_values, NULL, ++ SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS | SPDK_JSON_PARSE_FLAG_DECODE_IN_PLACE); ++ if (rc <= 0) { ++ SPDK_ERRLOG("parse json to values failed\n"); ++ goto invalid; ++ } ++ ++ rc = ssam_alloc_device_pcie_list(values, num_values); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_init_device_pcie_list_by_values(values, num_values); ++ rc = 0; ++ ++invalid: ++ if (values != NULL) { ++ free(values); ++ values = NULL; ++ } ++ if (buf != NULL) { ++ free(buf); ++ buf = NULL; ++ } ++ if (fp != NULL) { ++ pclose(fp); ++ fp = NULL; ++ } ++ return rc; ++} ++ ++void ++ssam_dump_device_pcie_list(struct spdk_json_write_ctx *w) ++{ ++ uint32_t i; ++ spdk_json_write_named_array_begin(w, "device_pcie_list"); ++ for (i = 0; i < g_ssam_device_pcie_list.size; i++) { ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_uint32(w, "index", g_ssam_device_pcie_list.device_pcie_list[i].func_id); ++ spdk_json_write_named_string(w, "dbdf", g_ssam_device_pcie_list.device_pcie_list[i].dbdf); ++ spdk_json_write_named_string(w, "type", g_ssam_device_pcie_list.device_pcie_list[i].type); ++ 
spdk_json_write_object_end(w); ++ } ++ spdk_json_write_array_end(w); ++} ++ ++uint32_t ++ssam_get_device_pcie_list_size(void) ++{ ++ return g_ssam_device_pcie_list.size; ++} +diff --git a/lib/ssam/ssam_driver/dpak_ssam.h b/lib/ssam/ssam_driver/dpak_ssam.h +new file mode 100644 +index 0000000..de5b3bb +--- /dev/null ++++ b/lib/ssam/ssam_driver/dpak_ssam.h +@@ -0,0 +1,601 @@ ++/*- ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */
++
++#ifndef DPAK_SSAM_H
++#define DPAK_SSAM_H
++
++#include "spdk/stdinc.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#define SSAM_HOSTEP_NUM_MAX 32
++#define SSAM_MAX_REQ_POLL_SIZE 16
++#define SSAM_MAX_RESP_POLL_SIZE 10
++#define SSAM_VIRTIO_HEAD_LEN 64
++#define SSAM_DEV_CFG_MAX_LEN 60
++#define SSAM_DBDF_STR_MAX_LEN 13
++#define SSAM_MB (uint64_t)(1 << 20)
++#define SSAM_SERVER_NAME "ssam"
++
++enum ssam_device_type {
++	SSAM_DEVICE_NVME = 0, /* NVMe device */
++	SSAM_DEVICE_VIRTIO_BLK = 2, /* virtio-blk device */
++	SSAM_DEVICE_VIRTIO_SCSI = 3, /* virtio-scsi device */
++	SSAM_DEVICE_VIRTIO_FS = 5, /* virtio-fs device */
++	SSAM_DEVICE_VIRTIO_MAX = 6 /* virtio device type upper boundary */
++};
++
++enum ssam_mount_type {
++	SSAM_MOUNT_DUMMY = 0, /* mount virtio to dummy function */
++	SSAM_MOUNT_NORMAL /* mount virtio to normal function */
++};
++
++enum ssam_function_mount_status {
++	SSAM_MOUNT_OK, /* mount ok */
++	SSAM_MOUNT_VOLUME_NOT_FOUND, /* mount volume not found */
++	SSAM_MOUNT_PARAMETERS_ERROR, /* mount parameter error */
++	SSAM_MOUNT_UNKNOWN_ERROR /* unknown error */
++};
++
++enum ssam_io_type {
++	SSAM_VIRTIO_BLK_IO = 2, /* virtio-blk IO */
++	SSAM_VIRTIO_SCSI_IO, /* virtio-scsi normal IO */
++	SSAM_VIRTIO_SCSI_CTRL, /* virtio-scsi control IO */
++	SSAM_VIRTIO_SCSI_EVT, /* virtio-scsi event IO */
++	SSAM_VIRTIO_VSOCK_IO, /* virtio-vsock IO */
++	SSAM_VIRTIO_VSOCK_EVT, /* virtio-vsock event */
++	SSAM_VIRTIO_FUNC_STATUS, /* virtio function status change */
++	SSAM_VIRTIO_FS_IO, /* virtio-fs normal IO */
++	SSAM_VIRTIO_FS_HIPRI, /* virtio-fs high priority IO */
++	SSAM_VIRTIO_TYPE_RSVD, /* virtio type rsvd */
++};
++
++enum ssam_io_status {
++	SSAM_IO_STATUS_OK, /* ok */
++	SSAM_IO_STATUS_EMPTY, /* poll return empty */
++	SSAM_IO_STATUS_ERROR /* error */
++};
++
++enum ssam_function_action {
++	SSAM_FUNCTION_ACTION_START, /* start */
++	SSAM_FUNCTION_ACTION_STOP, /* stop */
++	SSAM_FUNCTION_ACTION_RESET, /* reset */
++	SSAM_FUNCTION_ACTION_CONFIG_CHANGE, /* config change report */
++	SSAM_FUNCTION_ACTION_SCSI_EVENT, /* SCSI event report */
++	SSAM_FUNCTION_ACTION_MAX
++};
++
++enum ssam_function_status {
++	SSAM_FUNCTION_STATUS_START, /* start */
++	SSAM_FUNCTION_STATUS_STOP, /* stop */
++	SSAM_FUNCTION_EVENT_MIGRATE /* migrate */
++};
++
++enum data_request_dma_type {
++	SSAM_REQUEST_DATA_LOAD = 0, /* load data from host->CPU DDR */
++	SSAM_REQUEST_DATA_STORE = 1, /* store data from CPU DDR->host */
++	SSAM_REQUEST_DATA_MAX
++};
++
++struct ssam_melem {
++	void *addr; /* virtual address */
++	uint64_t iova; /* IO address */
++	uint64_t page_sz; /* page size of underlying memory */
++	int socket_id; /* NUMA socket ID */
++	int rsvd;
++};
++
++enum ssam_blk_hash_mode {
++	SSAM_PF_HASH_MODE = 0,
++	SSAM_VQ_HASH_MODE,
++	SSAM_IO_HASH_MODE,
++};
++
++struct ssam_lib_args {
++	uint8_t role; /* reserved */
++	uint8_t core_num; /* number of cores polled by SPDK threads */
++	uint8_t dma_queue_num; /* host dma queue num per channel */
++	uint8_t hash_mode; /* hash mode: BLK:0-1bits SCSI:2-3bits FS:4-5bits NVMe:6-7bits */
++	uint8_t rsvd[32]; /* for rsvd */
++	/* register DPDK function rte_malloc_heap_alloc */
++	int (*ssam_heap_malloc)(const char *type, size_t size,
++		int socket_arg, unsigned int flags, size_t align,
++		size_t bound, bool contig, struct ssam_melem *mem);
++	int (*ssam_heap_free)(void *addr); /* register DPDK function rte_malloc_heap_free */
++};
++
++struct ssam_pf_list {
++	uint16_t pf_funcid; /* pf_funcid = -1 means invalid */
++	uint16_t
pf_type; /* refer to enum ssam_device_type */ ++ uint16_t vf_funcid_start; /* the start function id of vf */ ++ uint16_t vf_num; /* the number of vf that have been configured */ ++ uint16_t vf_max; /* the max number of vf that can be configured */ ++}; ++ ++/* the host side all pf/vf end point info */ ++struct ssam_hostep_info { ++ struct ssam_pf_list host_pf_list[SSAM_HOSTEP_NUM_MAX]; ++}; ++ ++struct ssam_virtio_config { ++ uint64_t device_feature; /* the virtio device feature */ ++ uint16_t queue_num; /* the queue number of virtio device */ ++ uint16_t config_len; /* the actual length of device_config */ ++ uint8_t device_config[SSAM_DEV_CFG_MAX_LEN]; /* the virtio device configure */ ++ uint16_t queue_size; ++ uint16_t rx_queue_id; ++}; ++ ++/* ssam function config */ ++struct ssam_function_config { ++ int gfunc_id; /* pf or vf funcion id */ ++ enum ssam_device_type type; /* pf or vf type */ ++ struct ssam_virtio_config virtio_config; /* pf or vf configure */ ++}; ++ ++struct ssam_virt_request { ++ uint16_t vq_idx; ++ uint16_t req_idx; ++}; ++ ++struct ssam_nvme_request { ++ void *data; ++}; ++ ++struct ssam_io_message { ++ uint32_t header_len; /* io header length */ ++ uint8_t header[SSAM_VIRTIO_HEAD_LEN]; /* refer to struct virtio_blk_outhdr */ ++ uint32_t iovcnt; /* io vector count */ ++ struct iovec *iovs; /* io vectors, max 1MB IO */ ++ uint8_t writable; /* 0 : write io, 1 : read io */ ++ uint8_t rsvd[3]; /* for byte alignment */ ++ union { ++ struct ssam_virt_request virtio; ++ struct ssam_nvme_request nvme; ++ }; ++}; ++ ++/** ++ * @brief function event structure ++ */ ++struct ssam_func_event { ++ enum ssam_function_status status; /* function status */ ++ uint32_t data; /* virtio version: 0--v0.95 1--v1.0 2--v1.1 */ ++}; ++ ++struct ssam_request { ++ uint16_t gfunc_id; /* function id vf id number */ ++ uint16_t rsvd; ++ uint32_t iocb_id; /* response need */ ++ enum ssam_io_type type; ++ union { ++ struct ssam_io_message cmd; /* VMIO command structure */ ++ struct ssam_func_event event; /* report function event */ ++ } req; ++ enum ssam_io_status status; /* request status */ ++ uint32_t flr_seq; /* response need */ ++}; ++ ++struct ssam_request_poll_opt { ++ struct iovec ++ *sge1_iov; /**< output for req->req.cmd.iovs[1] (per VMIO req). 
Actual data length set in iov_len */ ++ uint16_t queue_id; /**< (optional) poll a queue id instead of using 'tid' parameter to calculate the queue */ ++ uint8_t rsvd[54]; ++}; ++ ++struct ssam_virtio_res { ++ struct iovec *iovs; /* rsp io vectors */ ++ void *rsp; /* data of rsp */ ++ uint32_t rsp_len; /* length of rsp */ ++ uint32_t iovcnt; /* rsp vector count */ ++}; ++ ++struct ssam_io_response { ++ uint16_t gfunc_id; /* global function id in chip */ ++ uint16_t rsvd; ++ uint32_t iocb_id; /* copy from struct ssam_request */ ++ struct ssam_virtio_res data; ++ struct ssam_request *req; /* corresponding to struct vmio_request */ ++ enum ssam_io_status status; /* IO status, copy from struct ssam_request */ ++ uint32_t flr_seq; /* copy from struct ssam_request */ ++}; ++ ++struct ssam_dma_request { ++ uint16_t gfunc_id; ++ uint16_t direction; ++ uint32_t flr_seq; ++ uint32_t src_num; /* source sge number */ ++ uint32_t dst_num; /* dest sge number */ ++ struct iovec *src; /* source buffer address, gpa mode */ ++ struct iovec *dst; /* dest buffer address, va mode */ ++ uint32_t data_len; ++ void *cb; ++}; ++ ++struct ssam_dma_rsp { ++ void *cb; ++ uint32_t status; /* process status, 0--OK, 1--ERR */ ++ uint32_t last_flag; /* data copy finish until receive this last flag */ ++}; ++ ++struct memory_info_stats { ++ size_t total_size; /* Total bytes of mempool */ ++ size_t free_size; /* Total free bytes of mempool */ ++ size_t greatest_free_size; /* Size in bytes of largest free block */ ++ unsigned free_count; /* Number of free elements of mempool */ ++ unsigned alloc_count; /* Number of allocated elements of mempool */ ++ size_t used_size; /* Total allocated bytes of mempool */ ++}; ++ ++/** ++ * Init ssam lib, set ssam work mode, set core num, set functions, get host pf/vf endpoint info. ++ * ++ * \param args_in input work mode, core num, functions. ++ * \param eps_out output host pf/vf endpoint info. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_lib_init(struct ssam_lib_args *args_in, struct ssam_hostep_info *eps_out); ++ ++/** ++ * Exit ssam lib when not use ssam any more. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_lib_exit(void); ++ ++typedef void ssam_mempool_t; ++ ++/** ++ * Create the memory pool, the memory is allocated by spdk_dma_malloc. ++ * ++ * \param size the memory pool size. ++ * \param extra_size_limit the memory size that can alloc in addition to the memory pool ++ * ++ * \return a pointer to memory pool when succeed or null when failed ++ */ ++ssam_mempool_t *ssam_mempool_create(uint64_t size, uint64_t extra_size_limit); ++ ++/** ++ * Allocate one piece of memory from the memory pool. ++ * ++ * \param mp the memory pool. ++ * \param size the memory size that want to allocate. ++ * \param phys_addr save the physical address of the allocated memory, ++ * if allocate failed, will not change the value. ++ * ++ * \return the allocated memory's start virtual address when succeed or null when failed ++ */ ++void *ssam_mempool_alloc(ssam_mempool_t *mp, uint64_t size, uint64_t *phys_addr); ++ ++/** ++ * Free the memory back to the memory pool. ++ * ++ * \param mp the memory pool. ++ * \param ptr the memory virtual address that return by ssam_mempool_alloc. ++ */ ++void ssam_mempool_free(ssam_mempool_t *mp, void *ptr); ++ ++/** ++ * Destroy the memory pool, when this done, the memory pool cannot be used again. ++ * ++ * \param mp the memory pool. 
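++ *
++ * A minimal lifecycle sketch (editorial illustration, not part of the original
++ * documentation; the sizes are arbitrary and error handling is omitted):
++ *
++ *   ssam_mempool_t *mp = ssam_mempool_create(64 * SSAM_MB, 0);
++ *   uint64_t phys = 0;
++ *   void *buf = ssam_mempool_alloc(mp, 4096, &phys);
++ *   ssam_mempool_free(mp, buf);
++ *   ssam_mempool_destroy(mp);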
++ */ ++void ssam_mempool_destroy(ssam_mempool_t *mp); ++ ++/** ++ * get the memory pool info status. ++ * ++ * \param mp the memory pool. ++ * \param info the mempool info status. ++ */ ++int ssam_get_mempool_info(ssam_mempool_t *mp, struct memory_info_stats *info); ++ ++/** ++ * ssam recover module preinit. ++ * ++ * \return 0 for succeed, 1 for config file exist, and less then 0 for failed. ++ */ ++int spdk_ssam_rc_preinit(void); ++ ++/** ++ * Get recover json file path. ++ * ++ * \return a file path string ++ */ ++char *ssam_rc_get_recover_json_file_path(void); ++ ++/** ++ * Get parameter json file path. ++ * ++ * \return a file path string ++ */ ++char *ssam_rc_get_param_json_file_path(void); ++ ++/** ++ * Initialize PF (include all VFs belong to this PF) to specific device type. ++ * The interface must be called with increasing pf_id. The function is not ++ * visible to host after init. ++ * ++ * \param pf_id PF function id. ++ * \param num_vf number of VFs of the PF. ++ * \param dev_type PF/VF type. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_setup_function(uint16_t pf_id, uint16_t num_vf, enum ssam_device_type dev_type); ++ ++/** ++ * Change specific device config. ++ * ++ * \param cfg new device configuration data. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_write_function_config(struct ssam_function_config *cfg); ++ ++/** ++ * send action to function. Invoked by SPDK. ++ * ++ * \param gfunc_id the global function index of the chip ++ * \param action the action to take on the function ++ * \param data extra action data if used ++ * \param data_len extra action data len ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_send_action(uint16_t gfunc_id, enum ssam_function_action action, const void *data, ++ uint16_t data_len); ++ ++/** ++ * Mount ssam volume, synchronous interface. ++ * ++ * \param gfunc_id the global function id of chip. ++ * \param lun_id the lun id of this volume. ++ * \param type mount type, refer to enum ssam_mount_type. ++ * \param tid it's used as the request queue id per CPU core. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_function_mount(uint16_t gfunc_id, uint32_t lun_id, enum ssam_mount_type type, ++ uint16_t tid); ++ ++/** ++ * Umount ssam volume, synchronous interface. ++ * ++ * \param gfunc_id the global function id of chip. ++ * \param lun_id the lun id of this volume. ++ * ++ * \return refer to enum ssam_function_mount_status ++ */ ++int ssam_function_umount(uint16_t gfunc_id, uint32_t lun_id); ++ ++/** ++ * Poll request queue for ssam request. ++ * ++ * \param tid it's used as the request queue id per CPU core. ++ * \param poll_num the number of ssam request that want to be polled. ++ * \param io_req output for received request, the buffer is allocated by ssam, ++ * and released when IO complete. ++ * ++ * \return the number of vmio has been polled, less than 0 or bigger than poll_num for failed ++ */ ++int ssam_request_poll(uint16_t tid, uint16_t poll_num, struct ssam_request **io_req); ++ ++/** ++ * Poll request queue for ssam request. ++ * ++ * \param tid it's used as the request queue id per CPU core. ++ * \param poll_num the number of ssam request that want to be polled. ++ * \param io_req output for received request, the buffer is allocated by ssam, ++ * and released when IO complete. ++ * \param poll_opt (optional) extra poll options. 
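++ *
++ * A minimal polling sketch (editorial illustration, not part of the original
++ * documentation; 'tid', 'qid', 'sge1' and handle_request() are placeholders):
++ *
++ *   struct ssam_request *reqs[SSAM_MAX_REQ_POLL_SIZE];
++ *   struct ssam_request_poll_opt opt = { .sge1_iov = sge1, .queue_id = qid };
++ *   int n = ssam_request_poll_ext(tid, SSAM_MAX_REQ_POLL_SIZE, reqs, &opt);
++ *   for (int i = 0; i < n && i < SSAM_MAX_REQ_POLL_SIZE; i++) {
++ *           handle_request(reqs[i]);
++ *   }
++ *
++ * Each polled request is later answered through ssam_io_complete().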
++ * ++ * \return the number of vmio has been polled, less than 0 or bigger than poll_num for failed ++ */ ++int ssam_request_poll_ext(uint16_t tid, uint16_t poll_num, struct ssam_request **io_req, ++ struct ssam_request_poll_opt *poll_opt); ++ ++/** ++ * Request ssam data. Hardware will load or store data betweent host and CPU. ++ * Asynchronous interface. ++ * ++ * \param tid it's used as the request queue id per CPU core. ++ * \param dma_req request data is here. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_dma_data_request(uint16_t tid, struct ssam_dma_request *dma_req); ++ ++/** ++ * Poll ssam request data. ++ * ++ * \param tid it's used as the request queue id per CPU core. ++ * \param poll_num the number of ssam request that want to be polled. ++ * \param dma_rsp response data is here. ++ * ++ * \return the number of msg rsp has been polled, less than 0 or bigger than poll_num for failed ++ */ ++int ssam_dma_rsp_poll(uint16_t tid, uint16_t poll_num, struct ssam_dma_rsp *dma_rsp); ++ ++/** ++ * Send IO complete info to ssam request queue. ++ * ++ * \param tid it's used as the request queue id per CPU core. ++ * \param resp response info is here. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_io_complete(uint16_t tid, struct ssam_io_response *resp); ++ ++/** ++ * Create vmio rx queue ++ * ++ * \param queue_id_out id of the queue create ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_vmio_rxq_create(uint16_t *queue_id_out); ++ ++/** ++ * Update virtio device used or not. ++ * ++ * \param glb_function_id the global function index of the chip ++ * \param device_used virtio device is used or not ++ * ++ * \return 0: success -1: fail, internal error, others: fail, refer to errno.h ++ */ ++int ssam_update_virtio_device_used(uint16_t glb_function_id, uint64_t device_used); ++ ++/** ++ * release virtio blk vq resource. ++ * ++ * \param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * ++ * \return 0: success -1: fail, internal error, others: fail, refer to errno.h ++ */ ++int ssam_virtio_blk_release_resource(uint16_t glb_function_id); ++ ++/** ++ * alloc virtio blk vq resource. ++ * ++ * \param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * \param queue_num number of vq ++ * ++ * \return 0: success -1: fail, internal error, others: fail, refer to errno.h ++ */ ++int ssam_virtio_blk_alloc_resource(uint16_t glb_function_id, uint16_t queue_num); ++ ++/** ++ * Update virtio blk capacity. ++ * ++ * \param gfunc_id the global function index of the chip. ++ * \param capacity the new capacity. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_virtio_blk_resize(uint16_t gfunc_id, uint64_t capacity); ++ ++/** ++ * Get global function id by dbdf. ++ * ++ * \param dbdf the combine of domain bus device function. ++ * \param gfunc_id the global function index of the chip. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_get_funcid_by_dbdf(uint32_t dbdf, uint16_t *gfunc_id); ++ ++/** ++ * Convert dbdf from string format to number. ++ * ++ * \param str source dbdf string. ++ * \param dbdf store result. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_dbdf_str2num(char *str, uint32_t *dbdf); ++ ++/** ++ * Convert dbdf from number format to string. ++ * ++ * \param dbdf source dbdf number. ++ * \param str store result. ++ * \param len the str buffer length. 
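++ *
++ * Editorial round-trip sketch (not part of the original documentation; the
++ * address is an arbitrary example):
++ *
++ *   char in[] = "0000:03:00.0";
++ *   char out[32] = {0};
++ *   uint32_t dbdf = 0;
++ *   if (ssam_dbdf_str2num(in, &dbdf) == 0) {
++ *           (void)ssam_dbdf_num2str(dbdf, out, sizeof(out));
++ *   }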
++ *
++ * \return 0 for succeed or not 0 for failed
++ */
++int ssam_dbdf_num2str(uint32_t dbdf, char *str, size_t len);
++
++/**
++ * @brief check device ready
++ * @param role 0--old process; 1--new process
++ * @param proc_type enum proc_type, support PROC_TYPE_VBS and PROC_TYPE_BOOT
++ * @param ready output parameter, 0--not ready, 1--ready
++ * @return
++ * - 0: success
++ * - <0: fail, refer to errno.h
++ */
++int ssam_check_device_ready(uint8_t role, uint32_t proc_type, uint8_t *ready);
++
++/**
++ * @brief get hot upgrade state
++ * @param void
++ * @return
++ * - 0: success
++ * - -1: fail, internal error
++ * - others: fail, refer to errno.h
++ */
++int ssam_get_hot_upgrade_state(void);
++
++/**
++ * @brief sync PF/VF config info to the hpd process in host
++ * @param void
++ * @return void
++ */
++void ssam_hotplug_cfg(void);
++
++/**
++ * @brief hot insert device interface
++ * @param port_id the number of PF to add
++ * @return
++ * - 0: success
++ * - others: fail, refer to errno.h
++ */
++int ssam_hotplug_add(uint16_t port_id);
++
++/**
++ * @brief hot remove device interface
++ * @param port_id the number of PF to remove
++ * @return
++ * - 0: success
++ * - others: fail, refer to errno.h
++ */
++int ssam_hotplug_del(uint16_t port_id);
++
++/**
++ * @brief check whether hotplug (HPD) is enabled
++ * @param void
++ * @return
++ * - true: HPD enabled
++ * - false: HPD disabled
++ */
++bool ssam_hotplug_enable_check(void);
++
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* DPAK_SSAM_H */
+diff --git a/lib/ssam/ssam_driver/hivio_api.h b/lib/ssam/ssam_driver/hivio_api.h
+new file mode 100644
+index 0000000..155b25c
+--- /dev/null
++++ b/lib/ssam/ssam_driver/hivio_api.h
+@@ -0,0 +1,714 @@
++/*-
++ * BSD LICENSE
++ *
++ * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in
++ * the documentation and/or other materials provided with the
++ * distribution.
++ * * Neither the name of Intel Corporation nor the names of its
++ * contributors may be used to endorse or promote products derived
++ * from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef HIVIO_API_H
++#define HIVIO_API_H
++
++#include "spdk/stdinc.h"
++
++#define MEM_ALLOC_SGE_NUM_MAX 512
++
++/**
++ * @brief memory descriptor for hvio_mem_alloc.
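++ * (Editorial note: this descriptor is the output buffer handed to the
++ * dma_mem_alloc callback registered through hvio_callback_ops.hvio_mem_alloc;
++ * the scatter list holds at most MEM_ALLOC_SGE_NUM_MAX entries.)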
++ */ ++typedef struct mem_desc { ++ uint32_t size; /* *< mem array size */ ++ struct { ++ uint64_t virt; /* *< virtual address */ ++ uint64_t phys; /* *< physical address */ ++ uint32_t len; /* *< length */ ++ } mem[MEM_ALLOC_SGE_NUM_MAX]; ++} mem_desc_s; ++ ++/** ++ * @brief memory descriptor for hvio_heap_malloc. ++ */ ++struct hvio_melem { ++ void *addr; /**< virtual address */ ++ uint64_t iova; /**< IO address */ ++ uint64_t page_sz; /**< page size of underlying memory */ ++ int socket_id; /**< NUMA socket ID */ ++ int rsvd; ++}; ++ ++/** ++ * @brief memory-related callbacks. ++ */ ++typedef struct hvio_callback_ops { ++ int (*hvio_heap_malloc)(const char *type, size_t size, int socket_arg, unsigned int flags, ++ size_t align, size_t bound, bool contig, ++ struct hvio_melem *mem); /* register rte_malloc_heap_alloc */ ++ int (*hvio_heap_free)(void *addr); /* register rte_malloc_heap_free */ ++ int (*hvio_mem_alloc)(uint32_t size, int phy_contig, ++ mem_desc_s *mem_desc); /* register dma_mem_alloc function */ ++ int (*hvio_mem_free)(void *virt); /* register dma_mem_free function */ ++} hvio_callback_ops_s; ++ ++/** ++ * @brief proc type definition ++ */ ++enum proc_type { ++ PROC_TYPE_VBS = 0, ++ PROC_TYPE_BOOT, ++ PROC_TYPE_MIGTORBO, ++ PROC_TYPE_MAX ++}; ++ ++enum hivio_blk_hash_mode { ++ HVIO_PF_HASH_MODE = 0, ++ HVIO_VQ_HASH_MODE, ++ HVIO_IO_HASH_MODE, ++}; ++ ++/** ++ * @brief hivio_lib initialize parameters ++ */ ++typedef struct hvio_lib_args { ++ uint8_t role; /**< 0--old process; 1--new process */ ++ uint8_t core_num; /**< core num that polled by SPDK thread */ ++ hvio_callback_ops_s cb_ops; /**< memory-related callbacks */ ++ uint32_t proc_type; /**< enum proc_type */ ++ uint8_t host_dma_chnl_num; /**< host dma channel number, used for migtorbo multi chan process */ ++ uint8_t host_dma_mp_per_chnl; /**< host dma mempool per channel, 0: disable mp per channel, 1: enable */ ++ uint8_t host_dma_queue_per_chnl; /**< host dma queue num per channel, 0: disabled-defalt 1, max: 4 */ ++ uint8_t hash_mode; /**< HASH MODE: BLK:0-1bits SCSI:2-3bits FS:4-5bits NVMe:6-7bits */ ++ uint8_t rsvd[56]; /**< for rsvd */ ++} hvio_lib_args_s; ++ ++#define HVIO_HOSTEP_NUM_MAX 32 ++ ++/** ++ * @brief host side storage pf/vf end point info ++ */ ++typedef struct hvio_hostep_info { ++ struct { ++ uint16_t pf_funcid; /* *< pf_funcid = 0xffff means invalid */ ++ uint16_t pf_type; /* *< is config or not */ ++ uint16_t vf_funcid_start; ++ uint16_t vf_num; /* *< already config vf num */ ++ uint16_t vf_max; /* *< max num can be config */ ++ } host_pf_list[HVIO_HOSTEP_NUM_MAX]; ++} hvio_hostep_info_s; ++ ++/** ++ * @brief device type definition ++ */ ++enum device_type { ++ DEVICE_NVME, /* *< NVMe device */ ++ DEVICE_VIRTIO_NET, /* *< VirtIO-net device */ ++ DEVICE_VIRTIO_BLK, /* *< VirtIO-blk device */ ++ DEVICE_VIRTIO_SCSI, /* *< VirtIO-scsi device */ ++ DEVICE_VIRTIO_VSOCK, /* *< VirtIO-vsock device */ ++ DEVICE_VIRTIO_FS, /**< VirtIO-FS device */ ++ DEVICE_VIRTIO_MAX /* *< VirtIO-max device */ ++}; ++ ++/** ++ * @brief configration type definition ++ */ ++ ++struct function_config { ++ uint32_t function_id; ++ enum device_type type; ++ union { ++ struct { ++ uint64_t device_feature; ++ uint16_t queue_num; ++ uint16_t config_len; ++ uint8_t device_config[60]; ++ uint16_t queue_size; ++ uint16_t rx_queue_id; ++ } virtio; ++ } config; ++}; ++ ++/** ++ * @brief EP operation definition. 
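++ * (Editorial note: ssam_send_action() in ssam_driver.c maps the
++ * SSAM_FUNCTION_ACTION_* values from dpak_ssam.h onto this enum one to one
++ * before forwarding the call through the driver adapter.)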
++ */ ++enum function_action { ++ FUNCTION_ACTION_START, /* *< start */ ++ FUNCTION_ACTION_STOP, /* *< stop */ ++ FUNCTION_ACTION_RESET, /* *< reset */ ++ FUNCTION_ACTION_CONFIG_CHANGE, /* *< config change report */ ++ FUNCTION_ACTION_SCSI_EVENT, /* *< SCSI event report */ ++ FUNCTION_ACTION_MAX ++}; ++ ++/** ++ * @brief EP function status definition. ++ */ ++enum function_status { ++ FUNCTION_STATUS_START, /* *< start */ ++ FUNCTION_STATUS_STOP, /* *< stop */ ++ FUNCTION_EVENT_MIGRATE, /* *< migrate */ ++}; ++ ++/** ++ * @brief VMIO type definition, support nvme and virtio. ++ */ ++enum vmio_type { ++ VMIO_TYPE_NVME_IO, /* *< NVMe normal IO */ ++ VMIO_TYPE_NVME_ADMIN, /* *< NVMe admin IO */ ++ VMIO_TYPE_VIRTIO_BLK_IO, /* *< VirtIO blk IO */ ++ VMIO_TYPE_VIRTIO_SCSI_IO, /* *< VirtIO scsi normal IO */ ++ VMIO_TYPE_VIRTIO_SCSI_CTRL, /* *< VirtIO scsi IO */ ++ VMIO_TYPE_VIRTIO_SCSI_EVT, /* *< VirtIO scsi event */ ++ VMIO_TYPE_VIRTIO_VSOCK_IO, /* *< VirtIO vsock IO */ ++ VMIO_TYPE_VIRTIO_VSOCK_EVT, /* *< VirtIO vsock event */ ++ VMIO_TYPE_VIRTIO_FUNC_STATUS, /* *< VirtIO function status change */ ++ VMIO_TYPE_VIRTIO_FS_IO, /* *< VirtIO fs normal IO */ ++ VMIO_TYPE_VIRTIO_FS_HIPRI, /* *< VirtIO fs high priority IO */ ++ VMIO_TYPE_RSVD, /* *< VMIO type rsvd */ ++}; ++ ++struct virtio_req { ++ uint16_t vq_idx; /* *< vq idx */ ++ uint16_t req_idx; /* *< head desc idx of io */ ++}; ++ ++struct nvme_req { ++ void *data; /* *< nvme admin input data */ ++}; ++ ++/** ++ * @brief VMIO cmd structure. ++ */ ++struct vmio_cmd { ++ uint32_t cmd_len; /* *< length of VMIO command, fixed to 64B */ ++ uint8_t cmd[64]; /* *< the specific format according to vmio_type */ ++ ++ uint32_t iovcnt; /* *< io vector count */ ++ struct iovec *iovs; /* *< io vectors, max 1MB IO */ ++ uint8_t writable; /* *< 2nd desc->write_flag */ ++ uint8_t rsvd[3]; /* *< rsvd */ ++ union { ++ struct virtio_req virtio; ++ struct nvme_req nvme; ++ }; ++}; ++ ++/** ++ * @brief function event structure. ++ */ ++struct func_event { ++ enum function_status status; /* *< function status */ ++ uint32_t data; /* *< VirtIO version: 0--v0.95; 1--v1.0; 2--v1.1 */ ++}; ++ ++/** ++ * @brief VMIO status definition. ++ */ ++enum vmio_status { ++ VMIO_STATUS_OK, /* *< ok */ ++ VMIO_STATUS_VQ_EMPTY, /* *< VQ empty */ ++ VMIO_STATUS_ERROR, /* *< error */ ++ VMIO_STATUS_DRIVER_NOT_OK, /* *< frontend driver not ready */ ++ VMIO_STATUS_VQ_ENGN_NOT_EN, /* *< backend vq not ready */ ++ VMIO_STATUS_DMA_IO_ERROR, /* *< frontend dma access error */ ++ VMIO_STATUS_VQ_SOURCE_ERROR, /* *< VQ cache source error */ ++ VMIO_STATUS_VQ_ERROR /* *< frontend vq status error */ ++}; ++ ++/** ++ * @brief VMIO request structure. 
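++ * (Editorial note: ssam_driver.c casts struct ssam_request from dpak_ssam.h
++ * directly to this structure, e.g. in ssam_request_poll() and
++ * ssam_io_complete(), so the two layouts are expected to stay in sync.)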
++ */ ++struct vmio_request { ++ uint16_t glb_function_id; /* *< global function id in chip */ ++ uint16_t nvme_sq_id; /* *< sq_id in iocb for NVMe vmio */ ++ uint32_t iocb_id; /* *< io control block id for ucode */ ++ enum vmio_type type; /* *< VMIO type to parse the req format */ ++ union { ++ struct vmio_cmd cmd; /* *< VMIO command structure */ ++ struct func_event event; /* *< report function event */ ++ } req; ++ enum vmio_status status; /* *< when flr occurs, set status to error */ ++ uint32_t flr_seq; /* *< check whether VMIO is from VF which FLR occurs */ ++}; ++ ++typedef struct tag_nvme_cqe { ++ uint32_t cmd_spec; ++ uint32_t rsvd; ++ ++ uint32_t sq_hd : 16; ++ uint32_t sq_id : 16; ++ ++ uint32_t cmd_id : 16; ++ uint32_t p : 1; ++ uint32_t status : 15; ++} nvme_cqe_s; ++ ++/** ++ * @brief NVMe response structure ++ */ ++struct nvme_response { ++ nvme_cqe_s nvme_cqe; ++ ++ uint32_t rsp_len; /* *< rsp length */ ++ uint32_t iovcnt; /* *< rsp vector count */ ++ struct iovec *iovs; /* *< rsp io vectors */ ++ void *rsp; /* *< rsp data */ ++}; ++ ++/** ++ * @brief VirtIO response structure ++ */ ++struct virtio_response { ++ uint32_t used_len; /* *< length of data has been upload to VM */ ++ uint32_t rsp_len; /* *< length of rsp */ ++ uint32_t iovcnt; /* *< rsp vector count */ ++ struct iovec *iovs; /* *< rsp io vectors */ ++ void *rsp; /* *< data of rsp */ ++}; ++ ++/** ++ * @brief VMIO response structure ++ */ ++struct vmio_response { ++ uint16_t glb_function_id; /* *< global function id in chip */ ++ uint16_t rsvd0; /* *< make sure nvme and virtio offset is 16B aligned */ ++ uint32_t iocb_id; /* *< io control block id used by ucode */ ++ enum vmio_type type; /* *< VMIO type */ ++ uint32_t rsvd1; /* make sure nvme and virtio offset is 16B aligned */ ++ ++ union { ++ struct nvme_response nvme; /* *< nvme rsp structure */ ++ struct virtio_response virtio; /* *< virtio rsp structure */ ++ }; ++ ++ struct vmio_request *req; /* *< corresponding vmio_request */ ++ enum vmio_status status; /* *< VMIO status, copy from vmio_request */ ++ uint32_t flr_seq; /* *< copy from vmio_request */ ++}; ++ ++/** ++ * @brief data structrue for send action request. ++ */ ++typedef struct hvio_send_action_req { ++ uint16_t glb_function_id; /**< global function id in chip */ ++ uint16_t data_len; /**< length of request's payload */ ++ void *data; /**< request's payload */ ++ enum function_action action; /**< action type */ ++} hvio_send_action_req_s; ++ ++/** ++ * @brief data structrue for VMIO send request(destination is virtio RQ). ++ */ ++typedef struct hvio_vmio_send_req { ++ uint64_t cb; /**< callback info */ ++ uint16_t glb_function_id; /**< global function id in chip */ ++ uint16_t vqn; /**< function inner vq idx */ ++ uint32_t sge_num; /**< data sge number */ ++ struct iovec *data; /**< data buffer address, gpa mode, including virtio_hdr and payload */ ++ uint32_t data_len; /**< data len, including virtio_hdr len and payload len. */ ++ enum vmio_type type; /**< vmio type */ ++} hvio_vmio_send_req_s; ++ ++/** ++ * @brief data structrue for ACK of VMIO send request(destination is virtio RQ). ++ */ ++typedef struct hvio_vmio_send_rsp { ++ uint64_t cb; /**< callback info */ ++ uint32_t status; /**< refer to enum vmio_status */ ++} hvio_vmio_send_rsp_s; ++ ++/** ++ * @brief data structrue for rsp of vsock recovery. 
++ */ ++typedef struct hvio_vsock_recovery_rsp { ++ uint16_t tx_used_idx; /* *< virtio vsock txq used idx */ ++ uint16_t rx_used_idx; /* *< virtio vsock rxq used idx */ ++} hvio_vsock_recovery_rsp_s; ++ ++/** ++ * @brief host_dma direction. ++ */ ++enum hvio_host_dma_mode { ++ READ_HOST_MODE = 0, /**< read host data and write to SPU */ ++ WRITE_HOST_MODE = 1, /**< write data to host */ ++ HOST_DMA_MODE_MAX ++}; ++ ++/** ++ * @brief data structrue for host dma request. ++ */ ++typedef struct hvio_host_dma_req { ++ uint16_t glb_function_id; /**< VM global function id */ ++ uint16_t direction; /**< host dma direction, format is enum hvio_host_dma_mode */ ++ uint32_t flr_seq; /**< check whether the vmio copy request is a leaked request when flr occurs */ ++ uint32_t ssge_num; /**< source sge number */ ++ uint32_t dsge_num; /**< dest sge number */ ++ struct iovec *src; /**< source buffer address, gpa. host buf for read, ddr for write. */ ++ struct iovec *dst; /**< dest buffer address, gpa. ddr for read, host buf for write */ ++ uint32_t data_len; /**< length for load or store */ ++ void *cb; /**< callback info */ ++} hvio_host_dma_req_s; ++ ++/** ++ * @brief data structrue for ACK of host dma request. ++ */ ++typedef struct hvio_host_dma_rsp { ++ void *cb; /**< SPDK callback info */ ++ uint32_t status; /**< 0 OK, 1 ERROR */ ++ uint32_t last_flag; ++} hvio_host_dma_rsp_s; ++ ++/** ++ * @brief data structrue for hivio stats. ++ */ ++ ++typedef struct hvio_info_stats { ++ uint64_t vmio_req; ++ uint64_t vmio_rsp; ++ ++ uint64_t vsock_tx_req; ++ uint64_t vscok_tx_rsp; ++ uint64_t vsock_rx_req; ++ uint64_t vsock_rx_rsp; ++ ++ uint64_t host_dma_req; ++ uint64_t host_dma_sub_req; ++ uint64_t host_dma_rsp; ++ ++ uint64_t update_blk_cap; ++ uint64_t send_action; ++ ++ uint64_t rsvd[16]; ++} hvio_info_stats_s; ++ ++typedef struct hvio_warn_stats { ++ uint64_t invalid_vmio; ++ uint64_t vsock_rx_rsp_status_abnormal; ++ uint64_t host_dma_rsp_status_abnormal; ++ ++ uint64_t rsvd[16]; ++} hvio_warn_stats_s; ++ ++typedef struct hvio_error_stats { ++ uint64_t update_blk_cap_fail; ++ uint64_t send_action_fail; ++ uint64_t vmio_rsp_fail; ++ uint64_t vsock_tx_fail; ++ uint64_t vsock_rx_fail; ++ uint64_t host_dma_req_fail; ++ ++ uint64_t rsvd[16]; ++} hvio_error_stats_s; ++ ++typedef struct hivio_func_ctx_read_rsp { ++ uint8_t device_type; ++ uint8_t device_status; ++ uint16_t num_queues; ++ uint8_t flr_status; ++ uint8_t rsvd0[3]; ++ uint32_t device_feature_l; ++ uint32_t device_feature_h; ++ uint32_t driver_feature_l; ++ uint32_t driver_feature_h; ++ uint32_t rsvd1[26]; ++} hivio_func_ctx_read_rsp_s; ++ ++struct hvio_mount_para { ++ uint32_t algo_type; /* *< VBS:algorithm 0 or 1; IPU:0--dummy; 1--normal */ ++ uint32_t key[3]; /* *< 0 for rsvd. VBS:key[0] tree_id, key[1] pt_num, key[2] blk_size */ ++}; ++ ++/** ++ * @brief hivio initialization function ++ * @param args_in initialization parameters input ++ * @param eps_out host side ep info ouput ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_lib_init(hvio_lib_args_s *args_in, hvio_hostep_info_s *eps_out); ++ ++/** ++ * @brief hivbs de-initialize function. ++ * @param void ++ * @return ++ * - 0: success ++ */ ++int hvio_lib_deinit(void); ++ ++/** ++ * @brief update virtio blk capacity. 
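++ * (Editorial note: exposed to SPDK as ssam_virtio_blk_resize() in
++ * ssam_driver.c, which forwards here through the dlopen'ed libhivio.so
++ * driver ops.)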
++ * @param glb_function_id the global function index of the chip ++ * @param capacity new capacity ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_update_virtio_blk_capacity(uint16_t glb_function_id, uint64_t capacity); ++ ++/** ++ * @brief poll RQ for VMIO request. ++ * @param tid It's used as the L2NIC RQ id per SPU core. ++ * @param poll_num the number of msg rsp want to be polled ++ * @param req output for received request. The buffer is allocated by hivbs, and used by SPDK. Release when IO complete. ++ * @return ++ * - >=0: the number of vmio_request has been polled ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_vmio_req_poll_batch(uint16_t tid, uint16_t poll_num, struct vmio_request **req); ++ ++/** ++ * @brief hvio_vmio_req_poll_batch_ext extra poll options ++ */ ++typedef struct hvio_vmio_req_poll_opt { ++ struct iovec ++ *sge1_iov; /**< output for req->req.cmd.iovs[1] (per VMIO req). Actual data length set in iov_len */ ++ uint16_t queue_id; /**< (optional) poll a queue id instead of using 'tid' parameter to calculate the queue */ ++ uint8_t rsvd[54]; ++} hvio_vmio_req_poll_opt_s; ++ ++/** ++ * @brief poll RQ for VMIO request, together with the contents of req->req.cmd.iovs[1]. ++ * @param tid It's used as the L2NIC RQ id per SPU core. ++ * @param poll_num the number of msg rsp want to be polled, if the poll_num > 16, the actual poll num is 16. ++ * @param req output for received request. The buffer is allocated by hivbs, and used by SPDK. Release when IO complete. ++ * @param poll_opt (optional) extra poll options. ++ * @return ++ * - >=0: the number of vmio_request has been polled ++ * - <0: fail, refer to errno.h ++ * @note req->req.cmd.writable will be used to specify the first writable index in req->req.cmd.iovs. ++ */ ++int hvio_vmio_req_poll_batch_ext(uint16_t tid, uint16_t poll_num, struct vmio_request **req, ++ hvio_vmio_req_poll_opt_s *poll_opt); ++ ++/** ++ * @brief send VMIO complete to SQ. ++ * @param tid It's used as the L2NIC SQ id per SPU core. ++ * @param resp VMIO response ++ * @return ++ * - 0: success ++ * - others: fail, refer to errno.h ++ */ ++int hvio_vmio_complete(uint16_t tid, struct vmio_response *resp); ++ ++/** ++ * @brief create vmio rx queue ++ * @param queue_id_out id of the queue create ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_vmio_rxq_create(uint16_t *queue_id_out); ++ ++/** ++ * @brief initialize PF (include all VFs belong to this PF) to specific device type. For virtio device of the PF and VF ++ * can be set to different virtio device_type. The interface must be called with increasing pf_id. The function is not ++ * visible to host after init. ++ * @param pf_id PF id ++ * @param num_vf number of VFs of the PF, they use the same type ++ * @param pf_type pf device type ++ * @param vf_type vf device type ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_setup_function(uint16_t pf_id, uint16_t num_vf, enum device_type pf_type, ++ enum device_type vf_type); ++ ++/** ++ * @brief change specific device config. ++ * @param cfg new device configuration data ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_write_function_config(struct function_config *cfg); ++ ++/** ++ * @brief get global function index by pcie device dbdf info. 
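++ * (Editorial note: the dbdf value handed down from ssam_get_funcid_by_dbdf()
++ * is the packed form built by ssam_dbdf_str2num(), i.e.
++ * domain << 16 | bus << 8 | device << 3 | function.)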
++ * @param dbdf pcie device dbdf info(input para) ++ * @param glb_function_id the global function index of the chip(output para) ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_get_glb_function_id_by_dbdf(uint32_t dbdf, uint16_t *glb_function_id); ++ ++/** ++ * @brief send action to function, synchronous interface. ++ * @param req send action request ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_send_action(uint16_t glb_function_id, enum function_action action, const void *data, ++ uint16_t data_len); ++ ++/** ++ * @brief DMA request. hw will load or store data between X86 host and spu ddr, asynchronous interface. ++ * @param chnl_id is associated with L2NIC SQ ID. ++ * @param req host dma request ++ * @return ++ * - 0: success ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_host_dma_request(uint16_t chnl_id, hvio_host_dma_req_s *req); ++ ++/** ++ * @brief poll RQ for dma response status. device provides DMA response in the same order with DMA request. ++ * @param chnl_id is associated with L2NIC RQ ID. ++ * @param poll_num the number of rsp want to be polled. ++ * @param[out] rsp output for received response. ++ * @return ++ * - >=0: the number of host dma rsp has been polled ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_host_dma_rsp_poll(uint16_t chnl_id, uint16_t poll_num, hvio_host_dma_rsp_s *rsp); ++ ++union hvio_nvme_config_cmd_info { ++ uint32_t cmd[5]; ++}; ++ ++/** ++ * @brief get hot upgrade state ++ * @param void ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_get_hot_upgrade_state(void); ++ ++/** ++ * @brief check device ready ++ * @param role 0--old process; 1--new process ++ * @param proc_type enum proc_type, supoort PROC_TYPE_VBS and PROC_TYPE_BOOT ++ * @param ready output_para 0--not ready, 1--ready ++ * @return ++ * - 0: success ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_check_device_ready(uint8_t role, uint32_t proc_type, uint8_t *ready); ++ ++/** ++ * @brief mount VIO volume, synchronous interface. Invoked by VIO. ++ * @param glb_function_id the global function id of chip ++ * @param lun_id the lun id of this volume ++ * @param hash_paras hash item paras ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_volume_mount(uint16_t glb_function_id, uint32_t lun_id, ++ struct hvio_mount_para *hash_paras); ++ ++/** ++ * @brief umount VIO volume, synchronous interface. Invoked by VIO. ++ * @param glb_function_id the global function id of chip ++ * @param lun_id the lun id of this volume ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_volume_umount(uint16_t glb_function_id, uint32_t lun_id); ++ ++/** ++ * @brief update virtio device used or not. ++ * @param glb_function_id the global function index of the chip ++ * @param device_used virtio device is used or not ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_update_virtio_device_used(uint16_t glb_function_id, uint64_t device_used); ++ ++/** ++ * @brief release virtio blk vq resource. 
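++ * (Editorial note: wrapped by ssam_virtio_blk_release_resource() in
++ * ssam_driver.c; the matching allocation call is
++ * hvio_virtio_blk_alloc_resource().)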
++ * @param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * @return ++ * - 0: success ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_virtio_blk_release_resource(uint16_t glb_function_id); ++ ++/** ++ * @brief query slave host cfg require and cfg global func to it ++ * @param null ++ * @return ++ */ ++void hvio_hotplug_cfg(void); ++ ++/** ++ * @brief port_id hot plug add ++ * @param port_id the global function index of the chip ++ * @return ++ * - 0: success ++ * - -1: invalid port_id ++ * - -2: repeat hot plug ++ * - others: fail, refer to errno.h ++ */ ++int hvio_hotplug_add(uint16_t port_id); ++ ++/** ++ * @brief port_id hot plug del ++ * @param port_id the global function index of the chip ++ * @return ++ * - 0: success ++ * - -1: invalid port_id ++ * - -2: repeat hot del ++ * - others: fail, refer to errno.h ++ */ ++int hvio_hotplug_del(uint16_t port_id); ++ ++/** ++ * @brief check hotplug if enable ++ * @param null ++ * @return ++ * - true: enable ++ * - false: disable ++ */ ++bool hvio_hotplug_enable_check(void); ++ ++/** ++ * @brief alloc virtio blk vq resource. ++ * @param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * @return ++ * - 0: success ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_virtio_blk_alloc_resource(uint16_t glb_function_id, uint16_t queue_num); ++ ++#endif /* HIVIO_API_H */ +diff --git a/lib/ssam/ssam_driver/ssam_dbdf.c b/lib/ssam/ssam_driver/ssam_dbdf.c +new file mode 100644 +index 0000000..9b368da +--- /dev/null ++++ b/lib/ssam/ssam_driver/ssam_dbdf.c +@@ -0,0 +1,342 @@ ++/*- ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include "spdk/stdinc.h" ++ ++#include "spdk/log.h" ++#include "dpak_ssam.h" ++ ++#define SSAM_DBDF_DOMAIN_OFFSET 16 ++#define SSAM_DBDF_BUS_OFFSET 8 ++#define SSAM_DBDF_DEVICE_OFFSET 3 ++#define SSAM_DBDF_FUNC_OFFSET 0x7 ++#define SSAM_DBDF_DOMAIN_MAX 0xffff ++#define SSAM_DBDF_BUS_MAX 0xff ++#define SSAM_DBDF_DEVICE_MAX 0x1f ++#define SSAM_DBDF_FUNCTION_MAX 0x7 ++#define SSAM_DBDF_DOMAIN_MAX_LEN 4 ++#define SSAM_DBDF_BD_MAX_LEN 2 ++#define SSAM_DBDF_FUNCTION_MAX_LEN 1 ++#define SSAM_DBDF_MAX_STR_LEN 20 ++#define SSAM_STR_CONVERT_HEX 16 ++ ++ ++struct ssam_dbdf { ++ uint32_t domain; ++ uint32_t bus; ++ uint32_t device; ++ uint32_t function; ++}; ++ ++static int ++ssam_dbdf_cvt_str2num(char *input, uint16_t val_limit, uint32_t len_limit, ++ uint32_t *num_resolved) ++{ ++ char *end_ptr = NULL; ++ long int val = strtol(input, &end_ptr, SSAM_STR_CONVERT_HEX); ++ ++ if (strlen(input) > len_limit) { ++ return -EINVAL; ++ } ++ ++ if (end_ptr == NULL || end_ptr == input || *end_ptr != '\0') { ++ return -EINVAL; ++ } ++ if (val < 0 || val > val_limit) { ++ return -EINVAL; ++ } ++ ++ *num_resolved = (uint32_t)val; ++ return 0; ++} ++ ++/* resolve dbdf's domain */ ++static int ++ssam_dbdf_cvt_dom(char *str, struct ssam_dbdf *dbdf, ++ char **bus) ++{ ++ char *colon2 = NULL; ++ int rc; ++ ++ /* find second ":" from dbdf string */ ++ colon2 = strchr(str, ':'); ++ if (colon2 != NULL) { ++ *colon2++ = 0; ++ *bus = colon2; ++ if (str[0] != 0) { ++ /* convert domain number */ ++ rc = ssam_dbdf_cvt_str2num(str, SSAM_DBDF_DOMAIN_MAX, ++ SSAM_DBDF_DOMAIN_MAX_LEN, &dbdf->domain); ++ if (rc != 0) { ++ SPDK_ERRLOG("Invalid characters of domain number!\n"); ++ return rc; ++ } ++ } else { ++ SPDK_ERRLOG("domain number is blank!\n"); ++ return -EINVAL; ++ } ++ } else { ++ /* dbdf string does not contain domain number */ ++ *bus = str; ++ } ++ ++ return 0; ++} ++ ++/* resolve dbdf's bus */ ++static int ++ssam_dbdf_cvt_b(struct ssam_dbdf *dbdf, char *bus) ++{ ++ int rc; ++ ++ if (bus[0] != 0) { ++ /* convert bus number */ ++ rc = ssam_dbdf_cvt_str2num(bus, SSAM_DBDF_BUS_MAX, ++ SSAM_DBDF_BD_MAX_LEN, &dbdf->bus); ++ if (rc != 0) { ++ SPDK_ERRLOG("Invalid characters of bus number!\n"); ++ return rc; ++ } ++ } else { ++ SPDK_ERRLOG("bus number is blank!\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/* resolve dbdf's domain and bus part */ ++static int ++ssam_dbdf_cvt_domb(char *str, struct ssam_dbdf *dbdf, ++ char **colon_input, char **mid_input) ++{ ++ char *bus = NULL; ++ char *colon = *colon_input; ++ int rc; ++ ++ *colon++ = 0; ++ *mid_input = colon; ++ rc = ssam_dbdf_cvt_dom(str, dbdf, &bus); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ return ssam_dbdf_cvt_b(dbdf, bus); ++} ++ ++/* resolve dbdf's device */ ++static int ++ssam_dbdf_cvt_dev(struct ssam_dbdf *dbdf, char *mid) ++{ ++ int rc; ++ ++ if (mid[0] != 0) { ++ /* convert device number */ ++ rc = ssam_dbdf_cvt_str2num(mid, SSAM_DBDF_DEVICE_MAX, ++ SSAM_DBDF_BD_MAX_LEN, &dbdf->device); ++ if (rc != 0) { ++ SPDK_ERRLOG("Invalid characters of device number!\n"); ++ return rc; ++ } ++ } else { ++ SPDK_ERRLOG("device number is blank!\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_dbdf_cvt_f(struct ssam_dbdf *dbdf, char *dot) ++{ ++ int rc; ++ ++ if (dot != NULL && dot[0] != 0) { ++ /* convert function number */ ++ rc = ssam_dbdf_cvt_str2num(dot, SSAM_DBDF_FUNCTION_MAX, ++ SSAM_DBDF_FUNCTION_MAX_LEN, &dbdf->function); ++ if (rc != 0) { ++ SPDK_ERRLOG("Invalid characters of function number!\n"); ++ return rc; ++ } ++ } else { 
++ SPDK_ERRLOG("function number is blank!\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/* resolve dbdf's device and function part */ ++static int ++ssam_dbdf_cvt_devf(struct ssam_dbdf *dbdf, char **dot_input, char **mid_input) ++{ ++ char *dot = *dot_input; ++ int rc; ++ ++ if (dot != NULL) { ++ *dot++ = 0; ++ } else { ++ /* Input dbdf string does not contain "." */ ++ SPDK_ERRLOG("Invalid DBDF format\n"); ++ return -1; ++ } ++ ++ rc = ssam_dbdf_cvt_dev(dbdf, *mid_input); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ return ssam_dbdf_cvt_f(dbdf, dot); ++} ++ ++static uint32_t ++ssam_dbdf_assemble(const struct ssam_dbdf *dbdf) ++{ ++ return ((dbdf->domain << SSAM_DBDF_DOMAIN_OFFSET) | ++ (dbdf->bus << SSAM_DBDF_BUS_OFFSET) | ++ (dbdf->device << SSAM_DBDF_DEVICE_OFFSET) | ++ (dbdf->function & SSAM_DBDF_FUNC_OFFSET)); ++} ++ ++static int ++ssam_dbdf_cvt_dbdf(char *str, size_t len, uint32_t *dbdf) ++{ ++ if (dbdf == NULL) { ++ SPDK_ERRLOG("dbdf is null\n"); ++ return -1; ++ } ++ /* find ":" from dbdf string */ ++ char *colon = strrchr(str, ':'); ++ /* find "." from dbdf string */ ++ char *dot = NULL; ++ char *mid = str; ++ int rc; ++ struct ssam_dbdf st_dbdf = {0}; ++ ++ if (colon != NULL) { ++ rc = ssam_dbdf_cvt_domb(str, &st_dbdf, &colon, &mid); ++ if (rc != 0) { ++ return rc; ++ } ++ } else { ++ /* Input dbdf string does not contain ":" */ ++ SPDK_ERRLOG("Invalid DBDF format\n"); ++ return -EINVAL; ++ } ++ ++ dot = strchr((colon ? (colon + 1) : str), '.'); ++ rc = ssam_dbdf_cvt_devf(&st_dbdf, &dot, &mid); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ *dbdf = ssam_dbdf_assemble(&st_dbdf); ++ ++ return 0; ++} ++ ++/* convert dbdf from string to number */ ++int ++ssam_dbdf_str2num(char *str, uint32_t *dbdf) ++{ ++ int len; ++ char *dbdf_str = NULL; ++ int ret; ++ ++ if (str == NULL) { ++ SPDK_ERRLOG("dbdf str2num input str null!\n"); ++ return -EINVAL; ++ } ++ ++ if (dbdf == NULL) { ++ SPDK_ERRLOG("dbdf str2num output dbdf null!\n"); ++ return -EINVAL; ++ } ++ ++ len = strlen(str); ++ if (len == 0 || len > SSAM_DBDF_MAX_STR_LEN) { ++ SPDK_ERRLOG("dbdf str2num len %u error!\n", len); ++ return -ERANGE; ++ } ++ ++ dbdf_str = (char *)malloc(len + 1); ++ if (dbdf_str == NULL) { ++ return -ENOMEM; ++ } ++ ++ ret = snprintf(dbdf_str, len + 1, "%s", str); ++ if ((ret > len) || (ret <= 0)) { ++ SPDK_ERRLOG("dbdf str2num snprintf_s error\n"); ++ free(dbdf_str); ++ return -EINVAL; ++ } ++ ++ ret = ssam_dbdf_cvt_dbdf(dbdf_str, len, dbdf); ++ free(dbdf_str); ++ dbdf_str = NULL; ++ ++ return ret; ++} ++ ++static void ++ssam_dbdf_num2struct(uint32_t dbdf, struct ssam_dbdf *st_dbdf) ++{ ++ st_dbdf->domain = (dbdf >> SSAM_DBDF_DOMAIN_OFFSET) & SSAM_DBDF_DOMAIN_MAX; ++ st_dbdf->bus = (dbdf >> SSAM_DBDF_BUS_OFFSET) & SSAM_DBDF_BUS_MAX; ++ st_dbdf->device = (dbdf >> SSAM_DBDF_DEVICE_OFFSET) & SSAM_DBDF_DEVICE_MAX; ++ st_dbdf->function = dbdf & SSAM_DBDF_FUNCTION_MAX; ++ return; ++} ++ ++int ++ssam_dbdf_num2str(uint32_t dbdf, char *str, size_t len) ++{ ++ int ret; ++ struct ssam_dbdf st_dbdf = {0}; ++ ++ if (str == NULL) { ++ SPDK_ERRLOG("dbdf num2str output str null!\n"); ++ return -EINVAL; ++ } ++ ++ ssam_dbdf_num2struct(dbdf, &st_dbdf); ++ ++ ret = snprintf(str, len - 1, "%04x:%02x:%02x.%x", ++ st_dbdf.domain, st_dbdf.bus, st_dbdf.device, st_dbdf.function); ++ if ((ret >= (int)(len - 1)) || (ret <= 0)) { ++ SPDK_ERRLOG("dbdf num2str error\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} +diff --git a/lib/ssam/ssam_driver/ssam_driver.c b/lib/ssam/ssam_driver/ssam_driver.c +new file mode 100644 +index 
0000000..b14f77e +--- /dev/null ++++ b/lib/ssam/ssam_driver/ssam_driver.c +@@ -0,0 +1,483 @@ ++/*- ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "spdk/stdinc.h" ++#include "spdk/log.h" ++#include "ssam_driver_adapter.h" ++#include "dpak_ssam.h" ++ ++#define SSAM_DRV_PRIORITY_LAST 65535 ++#define VIRTIO_F_NOTIFICATION_DATA (1UL << 38) ++#define SSAM_DPAK_DIR "/etc/dpak/" ++#define SSAM_CFG_DIR SSAM_DPAK_DIR SSAM_SERVER_NAME "/" ++#define SSAM_RECOVER_CFG_JSON SSAM_CFG_DIR "recover.json" ++#define SSAM_PARAM_CFG_JSON SSAM_CFG_DIR "parameter.json" ++#define SSAM_CONFIG_DIR_PERMIT 0750 ++ ++__attribute__((constructor(SSAM_DRV_PRIORITY_LAST))) int ssam_construct(void); ++ ++__attribute__((destructor(SSAM_DRV_PRIORITY_LAST))) void ssam_destruct(void); ++ ++int ++ssam_lib_init(struct ssam_lib_args *args_in, struct ssam_hostep_info *eps_out) ++{ ++ hvio_lib_args_s hvio_args_in; ++ hvio_hostep_info_s *hostep_info = NULL; ++ ++ if (args_in == NULL || eps_out == NULL) { ++ SPDK_ERRLOG("input paramter error, null pointer.\n"); ++ return -EINVAL; ++ } ++ ++ memset(&hvio_args_in, 0, sizeof(hvio_lib_args_s)); ++ hvio_args_in.role = args_in->role; ++ hvio_args_in.core_num = args_in->core_num; ++ hvio_args_in.cb_ops.hvio_heap_malloc = (__typeof__(hvio_args_in.cb_ops.hvio_heap_malloc)) ++ args_in->ssam_heap_malloc; ++ hvio_args_in.cb_ops.hvio_heap_free = args_in->ssam_heap_free; ++ hvio_args_in.host_dma_queue_per_chnl = args_in->dma_queue_num; ++ hvio_args_in.hash_mode = args_in->hash_mode; ++ ++ hostep_info = (hvio_hostep_info_s *)(void *)eps_out; ++ ++ return ssam_drv_lib_init(&hvio_args_in, hostep_info); ++} ++ ++int ++ssam_lib_exit(void) ++{ ++ return ssam_drv_lib_deinit(); ++} ++ ++int ++ssam_setup_function(uint16_t pf_id, uint16_t num_vf, enum ssam_device_type dev_type) ++{ ++ enum device_type type; ++ switch (dev_type) { ++ case SSAM_DEVICE_VIRTIO_BLK: ++ type = DEVICE_VIRTIO_BLK; ++ break; ++ case 
SSAM_DEVICE_VIRTIO_SCSI: ++ type = DEVICE_VIRTIO_SCSI; ++ break; ++ case SSAM_DEVICE_VIRTIO_FS: ++ type = DEVICE_VIRTIO_FS; ++ break; ++ default: ++ type = DEVICE_VIRTIO_MAX; ++ break; ++ } ++ ++ return ssam_drv_setup_function(pf_id, num_vf, type, type); ++} ++ ++int ++ssam_write_function_config(struct ssam_function_config *cfg) ++{ ++ struct function_config hvio_function_cfg; ++ ++ if (cfg == NULL) { ++ SPDK_ERRLOG("libssam input paramter error, null pointer.\n"); ++ return -EINVAL; ++ } ++ ++ if ((cfg->virtio_config.device_feature & VIRTIO_F_NOTIFICATION_DATA) != 0) { ++ SPDK_ERRLOG("Virtio feature is error.\n"); ++ return -EINVAL; ++ } ++ ++ memset(&hvio_function_cfg, 0x0, sizeof(struct function_config)); ++ ++ hvio_function_cfg.function_id = (uint32_t)cfg->gfunc_id; ++ switch (cfg->type) { ++ case SSAM_DEVICE_VIRTIO_BLK: ++ hvio_function_cfg.type = DEVICE_VIRTIO_BLK; ++ break; ++ case SSAM_DEVICE_VIRTIO_SCSI: ++ hvio_function_cfg.type = DEVICE_VIRTIO_SCSI; ++ break; ++ case SSAM_DEVICE_VIRTIO_FS: ++ hvio_function_cfg.type = DEVICE_VIRTIO_FS; ++ break; ++ default: ++ hvio_function_cfg.type = DEVICE_VIRTIO_MAX; ++ break; ++ } ++ ++ memcpy(&hvio_function_cfg.config.virtio, &cfg->virtio_config, sizeof(struct ssam_virtio_config)); ++ return ssam_drv_write_function_config(&hvio_function_cfg); ++} ++ ++int ++ssam_send_action(uint16_t gfunc_id, enum ssam_function_action action, const void *data, ++ uint16_t data_len) ++{ ++ enum function_action func_act; ++ ++ if (data == NULL || data_len == 0) { ++ SPDK_ERRLOG("libssam input paramter error.\n"); ++ return -EINVAL; ++ } ++ ++ switch (action) { ++ case SSAM_FUNCTION_ACTION_START: ++ func_act = FUNCTION_ACTION_START; ++ break; ++ ++ case SSAM_FUNCTION_ACTION_STOP: ++ func_act = FUNCTION_ACTION_STOP; ++ break; ++ ++ case SSAM_FUNCTION_ACTION_RESET: ++ func_act = FUNCTION_ACTION_RESET; ++ break; ++ ++ case SSAM_FUNCTION_ACTION_CONFIG_CHANGE: ++ func_act = FUNCTION_ACTION_CONFIG_CHANGE; ++ break; ++ ++ case SSAM_FUNCTION_ACTION_SCSI_EVENT: ++ func_act = FUNCTION_ACTION_SCSI_EVENT; ++ break; ++ ++ default: ++ func_act = FUNCTION_ACTION_MAX; ++ break; ++ } ++ ++ return ssam_drv_send_action(gfunc_id, func_act, data, data_len); ++} ++ ++int ++ssam_function_mount(uint16_t gfunc_id, uint32_t lun_id, enum ssam_mount_type type, uint16_t tid) ++{ ++ struct hvio_mount_para hash_paras; ++ ++ memset(&hash_paras, 0x0, sizeof(struct hvio_mount_para)); ++ ++ hash_paras.algo_type = type; ++ hash_paras.key[0] = tid; ++ ++ return ssam_drv_volume_mount(gfunc_id, lun_id, &hash_paras); ++} ++ ++int ++ssam_function_umount(uint16_t gfunc_id, uint32_t lun_id) ++{ ++ return ssam_drv_volume_umount(gfunc_id, lun_id); ++} ++ ++int ++ssam_request_poll(uint16_t tid, uint16_t poll_num, struct ssam_request **io_req) ++{ ++ if (io_req == NULL || poll_num > SSAM_MAX_REQ_POLL_SIZE) { ++ SPDK_ERRLOG("ssam request poll input paramter error.\n"); ++ return -EINVAL; ++ } ++ ++ return ssam_drv_vmio_req_poll_batch(tid, poll_num, (struct vmio_request **)io_req); ++} ++ ++int ++ssam_request_poll_ext(uint16_t tid, uint16_t poll_num, struct ssam_request **io_req, ++ struct ssam_request_poll_opt *poll_opt) ++{ ++ if (io_req == NULL || poll_num > SSAM_MAX_REQ_POLL_SIZE || poll_opt == NULL) { ++ SPDK_ERRLOG("ssam request poll ext input paramter error.\n"); ++ return -EINVAL; ++ } ++ ++ hvio_vmio_req_poll_opt_s hvio_poll_opt = { ++ .sge1_iov = poll_opt->sge1_iov, ++ .queue_id = poll_opt->queue_id, ++ }; ++ ++ return ssam_drv_vmio_req_poll_batch_ext(tid, poll_num, (struct vmio_request **)io_req, 
++ &hvio_poll_opt); ++} ++ ++int ++ssam_dma_data_request(uint16_t tid, struct ssam_dma_request *dma_req) ++{ ++ if (dma_req == NULL || dma_req->direction >= SSAM_REQUEST_DATA_MAX) { ++ SPDK_ERRLOG("ssam dma request input paramter error.\n"); ++ return -EINVAL; ++ } ++ ++ hvio_host_dma_req_s *mode_para = (hvio_host_dma_req_s *)dma_req; ++ ++ return ssam_drv_host_dma_request(tid, mode_para); ++} ++ ++int ++ssam_dma_rsp_poll(uint16_t tid, uint16_t poll_num, struct ssam_dma_rsp *dma_rsp) ++{ ++ if (dma_rsp == NULL || poll_num > SSAM_MAX_RESP_POLL_SIZE) { ++ SPDK_ERRLOG("resp poll input paramter error.\n"); ++ return -EINVAL; ++ } ++ ++ return ssam_drv_host_dma_rsp_poll(tid, poll_num, (hvio_host_dma_rsp_s *)dma_rsp); ++} ++ ++static enum vmio_type ++ssam_io_type_to_vmio(enum ssam_io_type io_type) { ++ enum vmio_type vmio_type; ++ ++ switch (io_type) ++ { ++ case SSAM_VIRTIO_BLK_IO: ++ vmio_type = VMIO_TYPE_VIRTIO_BLK_IO; ++ break; ++ ++ case SSAM_VIRTIO_SCSI_IO: ++ vmio_type = VMIO_TYPE_VIRTIO_SCSI_IO; ++ break; ++ ++ case SSAM_VIRTIO_SCSI_CTRL: ++ vmio_type = VMIO_TYPE_VIRTIO_SCSI_CTRL; ++ break; ++ ++ case SSAM_VIRTIO_SCSI_EVT: ++ vmio_type = VMIO_TYPE_VIRTIO_SCSI_EVT; ++ break; ++ ++ case SSAM_VIRTIO_FUNC_STATUS: ++ vmio_type = VMIO_TYPE_VIRTIO_FUNC_STATUS; ++ break; ++ ++ case SSAM_VIRTIO_FS_IO: ++ vmio_type = VMIO_TYPE_VIRTIO_FS_IO; ++ break; ++ ++ case SSAM_VIRTIO_FS_HIPRI: ++ vmio_type = VMIO_TYPE_VIRTIO_FS_HIPRI; ++ break; ++ ++ default: ++ vmio_type = VMIO_TYPE_RSVD; ++ } ++ ++ return vmio_type; ++} ++ ++int ++ssam_io_complete(uint16_t tid, struct ssam_io_response *resp) ++{ ++ struct vmio_response vmio_res; ++ struct virtio_response *virtio_res = NULL; ++ ++ if (resp == NULL) { ++ SPDK_ERRLOG("ssam io complete input paramter error, null pointer.\n"); ++ return -EINVAL; ++ } ++ ++ memset(&vmio_res, 0x0, sizeof(vmio_res)); ++ vmio_res.glb_function_id = resp->gfunc_id; ++ vmio_res.iocb_id = resp->iocb_id; ++ vmio_res.type = ssam_io_type_to_vmio(resp->req->type); ++ ++ switch (resp->status) { ++ case SSAM_IO_STATUS_OK: ++ vmio_res.status = VMIO_STATUS_OK; ++ break; ++ case SSAM_IO_STATUS_EMPTY: ++ vmio_res.status = VMIO_STATUS_VQ_EMPTY; ++ break; ++ default: ++ vmio_res.status = VMIO_STATUS_ERROR; ++ break; ++ } ++ ++ vmio_res.req = (struct vmio_request *)(void *)resp->req; ++ vmio_res.flr_seq = resp->flr_seq; ++ ++ virtio_res = (struct virtio_response *)&vmio_res.virtio; ++ virtio_res->used_len = 0; /* virtio-blk insensitive of this value, set 0 */ ++ virtio_res->rsp_len = resp->data.rsp_len; ++ virtio_res->iovcnt = resp->data.iovcnt; ++ virtio_res->iovs = resp->data.iovs; ++ virtio_res->rsp = resp->data.rsp; ++ ++ return ssam_drv_vmio_complete(tid, &vmio_res); ++} ++ ++int ++ssam_vmio_rxq_create(uint16_t *queue_id_out) ++{ ++ if (queue_id_out == NULL) { ++ return -EINVAL; ++ } ++ return ssam_drv_vmio_rxq_create(queue_id_out); ++} ++ ++int ++ssam_update_virtio_device_used(uint16_t glb_function_id, uint64_t device_used) ++{ ++ return ssam_drv_update_virtio_device_used(glb_function_id, device_used); ++} ++ ++int ++ssam_virtio_blk_resize(uint16_t gfunc_id, uint64_t capacity) ++{ ++ return ssam_drv_update_virtio_blk_capacity(gfunc_id, capacity); ++} ++ ++int ++ssam_get_funcid_by_dbdf(uint32_t dbdf, uint16_t *gfunc_id) ++{ ++ if (gfunc_id == NULL) { ++ SPDK_ERRLOG("libssam input paramter error, null pointer.\n"); ++ return -EINVAL; ++ } ++ ++ return ssam_drv_get_glb_function_id_by_dbdf(dbdf, gfunc_id); ++} ++ ++int ++ssam_check_device_ready(uint8_t role, uint32_t proc_type, uint8_t *ready) 
++{ ++ if (ready == NULL) { ++ SPDK_ERRLOG("libssam input paramter error, null pointer.\n"); ++ return -EINVAL; ++ } ++ ++ return ssam_drv_check_device_ready(role, proc_type, ready); ++} ++ ++int ++ssam_get_hot_upgrade_state(void) ++{ ++ return ssam_drv_get_hot_upgrade_state(); ++} ++ ++void ++ssam_hotplug_cfg(void) ++{ ++ ssam_drv_hotplug_cfg(); ++} ++ ++int ++ssam_hotplug_add(uint16_t port_id) ++{ ++ return ssam_drv_hotplug_add(port_id); ++} ++ ++int ++ssam_hotplug_del(uint16_t port_id) ++{ ++ return ssam_drv_hotplug_del(port_id); ++} ++ ++bool ++ssam_hotplug_enable_check(void) ++{ ++ return ssam_drv_hotplug_enable_check(); ++} ++ ++int ++ssam_virtio_blk_release_resource(uint16_t glb_function_id) ++{ ++ return ssam_drv_virtio_blk_release_resource(glb_function_id); ++} ++ ++int ++ssam_virtio_blk_alloc_resource(uint16_t glb_function_id, uint16_t queue_num) ++{ ++ return ssam_drv_virtio_blk_alloc_resource(glb_function_id, queue_num); ++} ++ ++static int ++ssam_try_mkdir(const char *dir, mode_t mode) ++{ ++ int rc; ++ ++ rc = mkdir(dir, mode); ++ if (rc < 0 && errno != EEXIST) { ++ SPDK_ERRLOG("ssam try mkdir error, dir: '%s': %s\n", dir, strerror(errno)); ++ return -errno; ++ } ++ return 0; ++} ++ ++int ++spdk_ssam_rc_preinit(void) ++{ ++ int rc; ++ ++ rc = ssam_try_mkdir(SSAM_DPAK_DIR, SSAM_CONFIG_DIR_PERMIT); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ rc = ssam_try_mkdir(SSAM_CFG_DIR, SSAM_CONFIG_DIR_PERMIT); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ if (access(SSAM_RECOVER_CFG_JSON, F_OK) != 0) { ++ return 0; ++ } ++ ++ return 1; ++} ++ ++char * ++ssam_rc_get_recover_json_file_path(void) ++{ ++ return (char *)SSAM_RECOVER_CFG_JSON; ++} ++ ++char * ++ssam_rc_get_param_json_file_path(void) ++{ ++ return (char *)SSAM_PARAM_CFG_JSON; ++} ++ ++__attribute__((constructor(SSAM_DRV_PRIORITY_LAST))) int ++ssam_construct(void) ++{ ++ int ret = ssam_drv_ops_init(); ++ if (ret != 0) { ++ SPDK_ERRLOG("ssam drv ops init failed"); ++ return -1; ++ } ++ ++ SPDK_NOTICELOG("ssam construct finish"); ++ return 0; ++} ++ ++__attribute__((destructor(SSAM_DRV_PRIORITY_LAST))) void ++ssam_destruct(void) ++{ ++ ssam_drv_ops_uninit(); ++} +diff --git a/lib/ssam/ssam_driver/ssam_driver_adapter.c b/lib/ssam/ssam_driver/ssam_driver_adapter.c +new file mode 100644 +index 0000000..66aa390 +--- /dev/null ++++ b/lib/ssam/ssam_driver/ssam_driver_adapter.c +@@ -0,0 +1,579 @@ ++/*- ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include ++#include "spdk/stdinc.h" ++#include "spdk/log.h" ++#include "ssam_driver_adapter.h" ++ ++#define SSAM_DRV_SHARD_LIBRARY "/usr/lib64/libhivio.so" ++#define SSAM_DRV_FUNC_NO_PTR (-1) ++#define SSAM_DRV_ADD_FUNC(class, name) {#name, (void**)&(class).name} ++#define SSAM_FUNC_PTR_OR_ERR_RET(func, retval) do { \ ++ if ((func) == NULL) \ ++ return retval; \ ++} while (0) ++ ++struct ssam_drv_ops_map { ++ char *name; ++ void **func; ++}; ++ ++static void *g_ssam_drv_handler = NULL; ++static struct ssam_drv_ops g_ssam_drv_ops = { 0 }; ++typedef void (*lib_dlsym_uninit_cb_t)(void); ++static lib_dlsym_uninit_cb_t g_lib_dlsym_uninit_cb = NULL; ++ ++static struct ++ ssam_drv_ops_map g_ssam_drv_ops_map[] = { ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_host_dma_request), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_vmio_req_poll_batch), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_vmio_req_poll_batch_ext), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_lib_deinit), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_volume_umount), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_lib_init), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_volume_mount), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_host_dma_rsp_poll), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_get_glb_function_id_by_dbdf), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_send_action), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_update_virtio_blk_capacity), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_setup_function), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_check_device_ready), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_write_function_config), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_get_hot_upgrade_state), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_vmio_complete), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_hotplug_cfg), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_hotplug_add), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_hotplug_del), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_hotplug_enable_check), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_vmio_rxq_create), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_update_virtio_device_used), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_virtio_blk_alloc_resource), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_virtio_blk_release_resource), ++}; ++ ++void ++ssam_lib_dlsym_uninit_cb_register(lib_dlsym_uninit_cb_t cb); ++ ++ ++struct ++ssam_drv_ops *ssam_get_drv_ops(void) ++{ ++ return &g_ssam_drv_ops; ++} ++ ++static void ++ssam_drv_ops_cb_uninit(void) ++{ ++ if (g_ssam_drv_handler != NULL) { ++ memset(&g_ssam_drv_ops, 0, sizeof(struct ssam_drv_ops)); ++ dlclose(g_ssam_drv_handler); ++ } ++} ++ ++static int ++ssam_drv_ops_init_sub(void *handler, struct ssam_drv_ops_map driver_map[], int size) ++{ ++ for (int index = 0; index < size; index++) { ++ if (*driver_map[index].func != NULL) { ++ continue; ++ } ++ ++ *driver_map[index].func = dlsym(handler, driver_map[index].name); ++ if (*driver_map[index].func == NULL) { ++ SPDK_ERRLOG("%s load func %s fail: %s", SSAM_DRV_SHARD_LIBRARY, driver_map[index].name, 
dlerror()); ++ return -1; ++ } ++ } ++ return 0; ++} ++ ++void ++ssam_lib_dlsym_uninit_cb_register(lib_dlsym_uninit_cb_t cb) ++{ ++ g_lib_dlsym_uninit_cb = cb; ++} ++ ++int ++ssam_drv_ops_init(void) ++{ ++ int ret = 0; ++ void *handler = dlopen(SSAM_DRV_SHARD_LIBRARY, RTLD_NOW); ++ if (handler == NULL) { ++ SPDK_ERRLOG("%s load err %s\n", SSAM_DRV_SHARD_LIBRARY, dlerror()); ++ return -1; ++ } ++ ++ ret = ssam_drv_ops_init_sub(handler, g_ssam_drv_ops_map, ++ sizeof(g_ssam_drv_ops_map) / sizeof(g_ssam_drv_ops_map[0])); ++ if (ret != 0) { ++ SPDK_ERRLOG("hwoff drv ops init: common api load failed"); ++ dlclose(handler); ++ return -1; ++ } ++ ++ g_ssam_drv_handler = handler; ++ ssam_lib_dlsym_uninit_cb_register(ssam_drv_ops_cb_uninit); ++ ++ return 0; ++} ++ ++void ++ssam_drv_ops_uninit(void) ++{ ++ if (g_lib_dlsym_uninit_cb != NULL) { ++ g_lib_dlsym_uninit_cb(); ++ g_lib_dlsym_uninit_cb = NULL; ++ } ++} ++ ++int ++ssam_drv_host_dma_request(uint16_t chnl_id, hvio_host_dma_req_s *req) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_host_dma_request, SSAM_DRV_FUNC_NO_PTR); ++ ret = ops->hvio_host_dma_request(chnl_id, req); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_vmio_req_poll_batch(uint16_t tid, uint16_t poll_num, struct vmio_request **req) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_vmio_req_poll_batch, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_vmio_req_poll_batch(tid, poll_num, req); ++ if (ret < 0) { ++ SPDK_ERRLOG("hvio_vmio_req_poll_batch exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return ret; ++} ++ ++int ++ssam_drv_vmio_req_poll_batch_ext(uint16_t tid, uint16_t poll_num, struct vmio_request **req, ++ hvio_vmio_req_poll_opt_s *poll_opt) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_vmio_req_poll_batch_ext, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_vmio_req_poll_batch_ext(tid, poll_num, req, poll_opt); ++ if (ret < 0) { ++ SPDK_ERRLOG("hvio_vmio_req_poll_batch_ext exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return ret; ++} ++ ++int ++ssam_drv_lib_deinit(void) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_lib_deinit, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_lib_deinit(); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_lib_deinit exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_volume_umount(uint16_t glb_function_id, uint32_t lun_id) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_volume_umount, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_volume_umount(glb_function_id, lun_id); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_volume_umount exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_lib_init(hvio_lib_args_s *args_in, hvio_hostep_info_s *eps_out) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_lib_init, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_lib_init(args_in, eps_out); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_lib_init exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_volume_mount(uint16_t glb_function_id, uint32_t lun_id, struct hvio_mount_para *hash_paras) ++{ ++ int ret; ++ 
struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_volume_mount, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_volume_mount(glb_function_id, lun_id, hash_paras); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_volume_mount exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_host_dma_rsp_poll(uint16_t chnl_id, uint16_t poll_num, hvio_host_dma_rsp_s *rsp) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_host_dma_rsp_poll, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_host_dma_rsp_poll(chnl_id, poll_num, rsp); ++ if (ret < 0) { ++ SPDK_ERRLOG("hvio_host_dma_rsp_poll exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return ret; ++} ++ ++int ++ssam_drv_get_glb_function_id_by_dbdf(uint32_t dbdf, uint16_t *glb_function_id) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_get_glb_function_id_by_dbdf, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_get_glb_function_id_by_dbdf(dbdf, glb_function_id); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_get_glb_function_id_by_dbdf exec fail, ret=%d", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_send_action(uint16_t glb_function_id, enum function_action action, const void *data, ++ uint16_t data_len) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_send_action, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_send_action(glb_function_id, action, data, data_len); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_send_action exec fail, ret=%d", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_update_virtio_blk_capacity(uint16_t glb_function_id, uint64_t capacity) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_update_virtio_blk_capacity, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_update_virtio_blk_capacity(glb_function_id, capacity); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_update_virtio_blk_capacity exec fail, ret=%d", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_setup_function(uint16_t pf_id, uint16_t num_vf, enum device_type pf_type, ++ enum device_type vf_type) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_setup_function, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_setup_function(pf_id, num_vf, pf_type, vf_type); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_setup_function exec fail, ret=%d", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_check_device_ready(uint8_t role, uint32_t proc_type, uint8_t *ready) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_check_device_ready, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_check_device_ready(role, proc_type, ready); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_check_device_ready exec fail, ret=%d", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_write_function_config(struct function_config *cfg) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_write_function_config, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_write_function_config(cfg); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_write_function_config exec fail, ret=%d", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ 
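The adapter above resolves every hvio_* entry point out of /usr/lib64/libhivio.so at runtime and guards each wrapper with SSAM_FUNC_PTR_OR_ERR_RET, returning SSAM_DRV_FUNC_NO_PTR (-1) when a symbol is missing. Purely as an illustration, and not part of the patch, the sketch below shows one way a caller might drive this API directly; in the patch itself ssam_drv_ops_init() is invoked from the ssam_construct() constructor in app/ssam/ssam.c, and the function name example_query_upgrade_state() is invented for this sketch.

/* Illustrative sketch only: drives the dlopen-backed adapter shown above.
 * Assumes libhivio.so is installed at /usr/lib64/libhivio.so and that
 * hivio_api.h provides the hvio_* types, as the adapter header requires. */
#include <stdio.h>
#include "ssam_driver_adapter.h"

int
example_query_upgrade_state(void)
{
        int rc;

        /* Resolve every hvio_* symbol into the global ops table. */
        rc = ssam_drv_ops_init();
        if (rc != 0) {
                fprintf(stderr, "libhivio.so could not be loaded\n");
                return rc;
        }

        /* Each wrapper first checks its function pointer and returns
         * SSAM_DRV_FUNC_NO_PTR (-1) if the symbol was not resolved. */
        rc = ssam_drv_get_hot_upgrade_state();
        printf("hot upgrade state rc: %d\n", rc);

        /* Drops the ops table and dlclose()s the shared library. */
        ssam_drv_ops_uninit();
        return 0;
}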
++int ++ssam_drv_get_hot_upgrade_state(void) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_get_hot_upgrade_state, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_get_hot_upgrade_state(); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_get_hot_upgrade_state exec fail, ret=%d", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_vmio_complete(uint16_t tid, struct vmio_response *resp) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_vmio_complete, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_vmio_complete(tid, resp); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_hotplug_cfg(void) ++{ ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_hotplug_cfg, SSAM_DRV_FUNC_NO_PTR); ++ ++ ops->hvio_hotplug_cfg(); ++ return 0; ++} ++ ++int ++ssam_drv_hotplug_add(uint16_t port_id) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_hotplug_add, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_hotplug_add(port_id); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_hotplug_add exec fail, ret=%d", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_hotplug_del(uint16_t port_id) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_hotplug_del, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_hotplug_del(port_id); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_hotplug_del exec fail, ret=%d", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++bool ++ssam_drv_hotplug_enable_check(void) ++{ ++ bool ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_hotplug_enable_check, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_hotplug_enable_check(); ++ if (ret == false) { ++ SPDK_ERRLOG("hvio_hotplug_enable_check exec fail, ret=%d", ret); ++ return ret; ++ } ++ ++ return true; ++} ++ ++int ++ssam_drv_vmio_rxq_create(uint16_t *queue_id_out) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_vmio_rxq_create, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_vmio_rxq_create(queue_id_out); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_update_virtio_device_used(uint16_t glb_function_id, uint64_t device_used) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_update_virtio_device_used, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_update_virtio_device_used(glb_function_id, device_used); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_virtio_blk_release_resource(uint16_t glb_function_id) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_virtio_blk_release_resource, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_virtio_blk_release_resource(glb_function_id); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_virtio_blk_alloc_resource(uint16_t glb_function_id, uint16_t queue_num) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_virtio_blk_alloc_resource, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_virtio_blk_alloc_resource(glb_function_id, queue_num); ++ 
if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} +diff --git a/lib/ssam/ssam_driver/ssam_driver_adapter.h b/lib/ssam/ssam_driver/ssam_driver_adapter.h +new file mode 100644 +index 0000000..4b74524 +--- /dev/null ++++ b/lib/ssam/ssam_driver/ssam_driver_adapter.h +@@ -0,0 +1,100 @@ ++/*- ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#ifndef SSAM_DRIVER_ADAPTER_H ++#define SSAM_DRIVER_ADAPTER_H ++ ++#include "hivio_api.h" ++ ++struct ssam_drv_ops { ++ int (*hvio_host_dma_request)(uint16_t chnl_id, hvio_host_dma_req_s *req); ++ int (*hvio_vmio_req_poll_batch)(uint16_t tid, uint16_t poll_num, struct vmio_request **req); ++ int (*hvio_vmio_req_poll_batch_ext)(uint16_t tid, uint16_t poll_num, struct vmio_request **req, ++ hvio_vmio_req_poll_opt_s *poll_opt); ++ int (*hvio_lib_deinit)(void); ++ int (*hvio_volume_umount)(uint16_t glb_function_id, uint32_t lun_id); ++ int (*hvio_lib_init)(hvio_lib_args_s *args_in, hvio_hostep_info_s *eps_out); ++ int (*hvio_volume_mount)(uint16_t glb_function_id, uint32_t lun_id, ++ struct hvio_mount_para *hash_paras); ++ int (*hvio_host_dma_rsp_poll)(uint16_t chnl_id, uint16_t poll_num, hvio_host_dma_rsp_s *rsp); ++ int (*hvio_get_glb_function_id_by_dbdf)(uint32_t dbdf, uint16_t *glb_function_id); ++ int (*hvio_send_action)(uint16_t glb_function_id, enum function_action action, const void *data, ++ uint16_t data_len); ++ int (*hvio_update_virtio_blk_capacity)(uint16_t glb_function_id, uint64_t capacity); ++ int (*hvio_setup_function)(uint16_t pf_id, uint16_t num_vf, enum device_type pf_type, ++ enum device_type vf_type); ++ int (*hvio_check_device_ready)(uint8_t role, uint32_t proc_type, uint8_t *ready); ++ int (*hvio_write_function_config)(struct function_config *cfg); ++ int (*hvio_get_hot_upgrade_state)(void); ++ int (*hvio_vmio_complete)(uint16_t tid, struct vmio_response *resp); ++ int (*hvio_vmio_rxq_create)(uint16_t *queue_id_out); ++ int (*hvio_update_virtio_device_used)(uint16_t glb_function_id, uint64_t device_used); ++ int (*hvio_virtio_blk_release_resource)(uint16_t glb_function_id); ++ int (*hvio_virtio_blk_alloc_resource)(uint16_t glb_function_id, uint16_t queue_num); ++ void (*hvio_hotplug_cfg)(void); ++ int (*hvio_hotplug_add)(uint16_t port_id); ++ int (*hvio_hotplug_del)(uint16_t port_id); ++ bool (*hvio_hotplug_enable_check)(void); ++}; ++ ++int ssam_drv_ops_init(void); ++void ssam_drv_ops_uninit(void); ++struct ssam_drv_ops *ssam_get_drv_ops(void); ++int ssam_drv_host_dma_request(uint16_t chnl_id, hvio_host_dma_req_s *req); ++int ssam_drv_vmio_req_poll_batch(uint16_t tid, uint16_t poll_num, struct vmio_request **req); ++int ssam_drv_vmio_req_poll_batch_ext(uint16_t tid, uint16_t poll_num, struct vmio_request **req, ++ hvio_vmio_req_poll_opt_s *poll_opt); ++int ssam_drv_lib_deinit(void); ++int ssam_drv_volume_umount(uint16_t glb_function_id, uint32_t lun_id); ++int ssam_drv_lib_init(hvio_lib_args_s *args_in, hvio_hostep_info_s *eps_out); ++int ssam_drv_volume_mount(uint16_t glb_function_id, uint32_t lun_id, ++ struct hvio_mount_para *hash_paras); ++int ssam_drv_host_dma_rsp_poll(uint16_t chnl_id, uint16_t poll_num, hvio_host_dma_rsp_s *rsp); ++int ssam_drv_get_glb_function_id_by_dbdf(uint32_t dbdf, uint16_t *glb_function_id); ++int ssam_drv_send_action(uint16_t glb_function_id, enum function_action action, const void *data, ++ uint16_t data_len); ++int ssam_drv_update_virtio_blk_capacity(uint16_t glb_function_id, uint64_t capacity); ++int ssam_drv_setup_function(uint16_t pf_id, uint16_t num_vf, enum device_type pf_type, ++ enum device_type vf_type); ++int ssam_drv_check_device_ready(uint8_t role, uint32_t proc_type, uint8_t *ready); ++int ssam_drv_write_function_config(struct function_config *cfg); ++int ssam_drv_get_hot_upgrade_state(void); ++int ssam_drv_vmio_complete(uint16_t tid, struct vmio_response *resp); ++int ssam_drv_vmio_rxq_create(uint16_t 
*queue_id_out); ++int ssam_drv_update_virtio_device_used(uint16_t glb_function_id, uint64_t device_used); ++int ssam_drv_virtio_blk_release_resource(uint16_t glb_function_id); ++int ssam_drv_virtio_blk_alloc_resource(uint16_t glb_function_id, uint16_t queue_num); ++int ssam_drv_hotplug_cfg(void); ++int ssam_drv_hotplug_add(uint16_t port_id); ++int ssam_drv_hotplug_del(uint16_t port_id); ++bool ssam_drv_hotplug_enable_check(void); ++#endif +diff --git a/lib/ssam/ssam_driver/ssam_mempool.c b/lib/ssam/ssam_driver/ssam_mempool.c +new file mode 100644 +index 0000000..a32ffec +--- /dev/null ++++ b/lib/ssam/ssam_driver/ssam_mempool.c +@@ -0,0 +1,799 @@ ++/*- ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include "spdk/stdinc.h" ++ ++#include "spdk/log.h" ++#include "spdk/env.h" ++#include "dpak_ssam.h" ++ ++#define MP_CK_HEADER_LEN sizeof(struct ssam_mp_chunk) ++#define MP_CK_END_LEN sizeof(struct ssam_mp_chunk*) ++#define MP_CK_CB_LEN (MP_CK_HEADER_LEN + MP_CK_END_LEN) ++ ++#define SHIFT_2MB 21 /* (1 << 21) == 2MB */ ++#define VALUE_2MB (1ULL << SHIFT_2MB) ++#define SHIFT_1GB 30 /* (1 << 30) == 1G */ ++#define VALUE_1GB (1ULL << SHIFT_1GB) ++#define SSAM_SPDK_VTOPHYS_ERROR (0xFFFFFFFFFFFFFFFFULL) ++#define SSAM_DMA_MEM_MAGIC (0xBABEFACEBABEFACE) ++ ++struct ssam_mp_dma_mem { ++ uint64_t magic; ++ uint64_t size; ++ char mem[0]; ++}; ++ ++struct ssam_mp_chunk { ++ struct ssam_mp_chunk *prev; ++ struct ssam_mp_chunk *next; ++ ++ /* Total size of the memory pool chunk, the chunk is in the memory block */ ++ uint64_t size; ++ ++ /* The chunk is free when true or in use when false */ ++ bool is_free; ++}; ++ ++struct ssam_mp_block { ++ struct ssam_mp_chunk *free_list; ++ struct ssam_mp_chunk *alloc_list; ++ struct ssam_mp_block *next; ++ ++ /* The memory pool block's start virtual address */ ++ char *virt_start; ++ ++ /* The memory pool block's start physical address */ ++ char *phys_start; ++ ++ /* Total size of the memory pool block */ ++ uint64_t size; ++ ++ /* Total size of the memory pool block that be allocated */ ++ uint64_t alloc_size; ++ ++ /* Total size of the memory pool block be allocated that program can be use */ ++ uint64_t alloc_prog_size; ++}; ++ ++struct ssam_mempool { ++ /* Total size of the memory pool */ ++ uint64_t size; ++ uint64_t extra_size; ++ uint64_t extra_size_limit; ++ struct ssam_mp_block *blk_list; ++ ++ /* The memory pool's start virtual address */ ++ char *virt; ++ pthread_mutex_t lock; ++}; ++ ++ ++static uint64_t ++ssam_mp_align_up(uint64_t size) ++{ ++ /* Aligin to sizeof long */ ++ return (size + sizeof(long) - 1) & (~(sizeof(long) - 1)); ++} ++ ++static inline void ++ssam_mp_lock(struct ssam_mempool *mp) ++{ ++ pthread_mutex_lock(&mp->lock); ++} ++ ++static inline void ++ssam_mp_unlock(struct ssam_mempool *mp) ++{ ++ pthread_mutex_unlock(&mp->lock); ++} ++ ++static void ++ssam_mp_init_block(struct ssam_mp_block *blk, uint64_t size) ++{ ++ blk->size = size; ++ blk->alloc_size = 0; ++ blk->alloc_prog_size = 0; ++ blk->free_list = (struct ssam_mp_chunk *)blk->virt_start; ++ blk->free_list->is_free = true; ++ blk->free_list->size = size; ++ blk->free_list->prev = NULL; ++ blk->free_list->next = NULL; ++ blk->alloc_list = NULL; ++} ++ ++static inline void ++ssam_mp_list_insert(struct ssam_mp_chunk **head, struct ssam_mp_chunk *ck) ++{ ++ struct ssam_mp_chunk *hd = *head; ++ ++ ck->prev = NULL; ++ ck->next = hd; ++ if (hd != NULL) { ++ hd->prev = ck; ++ } ++ *head = ck; ++} ++ ++static void ++ssam_mp_list_delete(struct ssam_mp_chunk **head, struct ssam_mp_chunk *ck) ++{ ++ if (ck->prev == NULL) { ++ *head = ck->next; ++ if (ck->next != NULL) { ++ ck->next->prev = NULL; ++ } ++ } else { ++ ck->prev->next = ck->next; ++ if (ck->next != NULL) { ++ ck->next->prev = ck->prev; ++ } ++ } ++} ++ ++static struct ++ssam_mp_block *ssam_mp_find_block(struct ssam_mempool *mp, void *p) ++{ ++ struct ssam_mp_block *blk = mp->blk_list; ++ ++ while (blk != NULL) { ++ if ((blk->virt_start <= (char *)p) && ++ ((blk->virt_start + blk->size) > (char *)p)) { ++ break; ++ } ++ blk = blk->next; ++ } ++ ++ return blk; ++} ++ ++static void ++ssam_mp_merge_chunk(struct ssam_mp_block *blk, struct ssam_mp_chunk *ck) ++{ ++ struct ssam_mp_chunk *free_mem = ck; ++ struct 
ssam_mp_chunk *next = ck; ++ ++ /* Traversal free memory backward */ ++ while (next->is_free) { ++ free_mem = next; ++ if (((char *)next - MP_CK_CB_LEN) <= blk->virt_start) { ++ break; ++ } ++ next = *(struct ssam_mp_chunk **)((char *)next - MP_CK_END_LEN); ++ } ++ ++ /* Traverse free memory forward */ ++ next = (struct ssam_mp_chunk *)((char *)free_mem + free_mem->size); ++ while (((char *)next <= blk->virt_start + blk->size - MP_CK_HEADER_LEN) && next->is_free) { ++ ssam_mp_list_delete(&blk->free_list, next); ++ free_mem->size += next->size; ++ next = (struct ssam_mp_chunk *)((char *)next + next->size); ++ } ++ ++ /* Merge free memory */ ++ *(struct ssam_mp_chunk **)((char *)free_mem + free_mem->size - MP_CK_END_LEN) = free_mem; ++ ++ return; ++} ++ ++static int ++ssam_mp_get_mem_block(uint64_t start_virt_addr, uint64_t len, uint64_t *phys_addr, ++ uint64_t *blk_size) ++{ ++ uint64_t virt0, virt1, phys0, phys1; ++ uint64_t phys_len; ++ ++ if ((len % VALUE_2MB) != 0) { ++ SPDK_ERRLOG("Memory len %lu not align to %llu\n", len, VALUE_2MB); ++ return -EINVAL; ++ } ++ ++ virt0 = start_virt_addr; ++ virt1 = start_virt_addr; ++ phys0 = spdk_vtophys((void *)virt0, NULL); ++ if (phys0 == SSAM_SPDK_VTOPHYS_ERROR) { ++ SPDK_ERRLOG("Error translating virt0 address %lu\n", virt0); ++ return -EINVAL; ++ } ++ ++ /* ++ * Find a piece of memory with consecutive physical address, ++ * the memory got by spdk_dma_malloc is aligned by VALUE_2MB, ++ * this ensures that the physical addresses are consecutive ++ * within the VALUE_2MB length range. ++ */ ++ for (phys_len = VALUE_2MB; phys_len < len; phys_len += VALUE_2MB) { ++ virt1 += VALUE_2MB; ++ phys1 = spdk_vtophys((void *)virt1, NULL); ++ if (phys1 == SSAM_SPDK_VTOPHYS_ERROR) { ++ SPDK_ERRLOG("Error translating virt1 address %lu\n", virt1); ++ break; ++ } ++ if ((long)(phys1 - phys0) != (long)(virt1 - virt0)) { ++ SPDK_DEBUGLOG(ssam_mempool, "End of consecutive physical addresses\n"); ++ break; ++ } ++ } ++ ++ *phys_addr = spdk_vtophys((void *)virt0, NULL); ++ *blk_size = phys_len; ++ ++ return 0; ++} ++ ++static void ++ssam_mp_free_blk_heads(struct ssam_mp_block *blk) ++{ ++ struct ssam_mp_block *blk_head = blk; ++ struct ssam_mp_block *l_mp = NULL; ++ ++ while (blk_head != NULL) { ++ l_mp = blk_head; ++ blk_head = blk_head->next; ++ free(l_mp); ++ l_mp = NULL; ++ } ++} ++ ++static int ++ssam_mp_insert_blocks(struct ssam_mempool *mp, uint64_t size) ++{ ++ struct ssam_mp_block *blk_head = NULL; ++ uint64_t blk_size = 0; ++ uint64_t remain_size = size; ++ uint64_t phys = 0; ++ char *virt_addr = mp->virt; ++ int rc; ++ ++ /* Find memory blocks and insert them to memory pool list */ ++ while (remain_size > 0) { ++ rc = ssam_mp_get_mem_block((uint64_t)virt_addr, remain_size, &phys, &blk_size); ++ if (rc != 0) { ++ ssam_mp_free_blk_heads(mp->blk_list); ++ return -ENOMEM; ++ } ++ blk_head = (struct ssam_mp_block *)malloc(sizeof(struct ssam_mp_block)); ++ if (blk_head == NULL) { ++ SPDK_ERRLOG("mempool block head malloc failed, mempool create failed\n"); ++ ssam_mp_free_blk_heads(mp->blk_list); ++ return -ENOMEM; ++ } ++ blk_head->virt_start = virt_addr; ++ blk_head->phys_start = (char *)phys; ++ ssam_mp_init_block(blk_head, blk_size); ++ blk_head->next = mp->blk_list; ++ mp->blk_list = blk_head; ++ mp->size += blk_size; ++ virt_addr += blk_size; ++ remain_size -= blk_size; ++ } ++ ++ if (mp->size != size) { ++ SPDK_ERRLOG("mempool size lost, mempool create failed\n"); ++ ssam_mp_free_blk_heads(mp->blk_list); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ 
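The mempool above carves 2 MB-aligned, physically consecutive blocks out of a single spdk_dma_malloc() region and falls back to per-allocation DMA memory once the blocks are exhausted, bounded by the extra-size limit. The sketch below is not part of the patch; it shows one plausible round trip through the public API. example_mempool_roundtrip() and the EXAMPLE_* constants are invented for this sketch, and ssam_mempool_t plus struct memory_info_stats are assumed to be declared in dpak_ssam.h, which this file already includes.

/* Illustrative sketch only: create, allocate from, inspect, and destroy a
 * pool. Assumes the SPDK env (hugepages) has already been initialized so
 * that spdk_dma_malloc() inside ssam_mempool_create() can succeed. */
#include "spdk/stdinc.h"
#include "spdk/log.h"
#include "dpak_ssam.h"

#define EXAMPLE_POOL_SIZE   (64ULL << 20)  /* 64 MB: >= 2 MB and 2 MB-aligned */
#define EXAMPLE_EXTRA_LIMIT (8ULL << 20)   /* budget for fallback DMA allocations */

int
example_mempool_roundtrip(void)
{
        ssam_mempool_t *mp;
        struct memory_info_stats stats;
        uint64_t phys = 0;
        void *buf;

        mp = ssam_mempool_create(EXAMPLE_POOL_SIZE, EXAMPLE_EXTRA_LIMIT);
        if (mp == NULL) {
                return -ENOMEM;
        }

        /* Returns a virtual address and fills in the physical address;
         * falls back to spdk_dma_malloc() once the pool blocks are full. */
        buf = ssam_mempool_alloc(mp, 4096, &phys);
        if (buf == NULL) {
                ssam_mempool_destroy(mp);
                return -ENOMEM;
        }

        if (ssam_get_mempool_info(mp, &stats) == 0) {
                SPDK_NOTICELOG("used %lu of %lu bytes\n", stats.used_size, stats.total_size);
        }

        ssam_mempool_free(mp, buf);
        ssam_mempool_destroy(mp);
        return 0;
}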
++static int ++ssam_check_mempool_size(uint64_t size, uint64_t extra_size_limit) ++{ ++ if (size == 0) { ++ SPDK_ERRLOG("Memory pool size can not be %lu, mempool create failed\n", size); ++ return -EINVAL; ++ } ++ ++ if (size < VALUE_2MB) { ++ SPDK_ERRLOG("Memory pool size can not less than %llu, actually %lu, mempool create failed\n", ++ VALUE_2MB, size); ++ return -EINVAL; ++ } ++ ++ if (extra_size_limit > VALUE_1GB) { ++ SPDK_ERRLOG("Memory pool extra size can not greater than %llu, actually %lu, mempool create failed\n", ++ VALUE_1GB, extra_size_limit); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++ssam_mempool_t * ++ssam_mempool_create(uint64_t size, uint64_t extra_size_limit) ++{ ++ struct ssam_mempool *mp = NULL; ++ uint64_t mp_size = size; ++ uint64_t mp_extra_size_limit = extra_size_limit; ++ void *virt = NULL; ++ int rc; ++ ++ rc = ssam_check_mempool_size(mp_size, mp_extra_size_limit); ++ if (rc != 0) { ++ return NULL; ++ } ++ ++ if ((mp_size % VALUE_2MB) != 0) { ++ SPDK_NOTICELOG("Memory pool size %lu not align to %llu, Align down memory pool size to %llu\n", ++ mp_size, ++ VALUE_2MB, mp_size & ~(VALUE_2MB - 1)); ++ mp_size = mp_size & ~(VALUE_2MB - 1); ++ } ++ ++ if ((mp_extra_size_limit % VALUE_2MB) != 0) { ++ SPDK_NOTICELOG("Memory pool extra size %lu not align to %llu, Align down memory pool size to %llu\n", ++ mp_extra_size_limit, VALUE_2MB, mp_extra_size_limit & ~(VALUE_2MB - 1)); ++ mp_extra_size_limit = mp_extra_size_limit & ~(VALUE_2MB - 1); ++ } ++ ++ mp = (struct ssam_mempool *)calloc(1, sizeof(struct ssam_mempool)); ++ if (mp == NULL) { ++ SPDK_ERRLOG("mempool head malloc failed, mempool create failed\n"); ++ return NULL; ++ } ++ ++ virt = spdk_dma_malloc(mp_size, VALUE_2MB, NULL); ++ if (virt == NULL) { ++ SPDK_ERRLOG("spdk_dma_malloc failed, mempool create failed\n"); ++ free(mp); ++ mp = NULL; ++ return NULL; ++ } ++ mp->virt = (char *)virt; ++ ++ rc = ssam_mp_insert_blocks(mp, mp_size); ++ if (rc != 0) { ++ free(mp); ++ mp = NULL; ++ spdk_dma_free(virt); ++ return NULL; ++ } ++ ++ mp->extra_size = 0; ++ mp->extra_size_limit = mp_extra_size_limit; ++ pthread_mutex_init(&mp->lock, NULL); ++ ++ return (ssam_mempool_t *)mp; ++} ++ ++static void ssam_mp_split_block(struct ssam_mp_block *blk, struct ssam_mp_chunk *free_mem, ++ struct ssam_mp_chunk *allocated, uint64_t size) ++{ ++ *free_mem = *allocated; ++ free_mem->size -= size; ++ *(struct ssam_mp_chunk **)((char *)free_mem + free_mem->size - MP_CK_END_LEN) = free_mem; ++ ++ if (free_mem->prev == NULL) { ++ blk->free_list = free_mem; ++ } else { ++ free_mem->prev->next = free_mem; ++ } ++ ++ if (free_mem->next != NULL) { ++ free_mem->next->prev = free_mem; ++ } ++ ++ allocated->is_free = false; ++ allocated->size = size; ++ ++ *(struct ssam_mp_chunk **)((char *)allocated + size - MP_CK_END_LEN) = allocated; ++} ++ ++static void * ++ssam_mp_alloc_mem_from_block(struct ssam_mp_block *blk, uint64_t size, ++ uint64_t *phys_addr) ++{ ++ struct ssam_mp_chunk *free_mem = NULL; ++ struct ssam_mp_chunk *allocated = NULL; ++ char *alloc = NULL; ++ ++ free_mem = blk->free_list; ++ while (free_mem != NULL) { ++ if (free_mem->size < size) { ++ free_mem = free_mem->next; ++ continue; ++ } ++ ++ allocated = free_mem; ++ if ((free_mem->size - size) > MP_CK_CB_LEN) { ++ /* If enough mem in free chunk, split it */ ++ free_mem = (struct ssam_mp_chunk *)((char *)allocated + size); ++ ssam_mp_split_block(blk, free_mem, allocated, size); ++ } else { ++ /* If no enough mem in free chunk, all will be allocated */ ++ 
ssam_mp_list_delete(&blk->free_list, allocated); ++ allocated->is_free = false; ++ } ++ ssam_mp_list_insert(&blk->alloc_list, allocated); ++ ++ blk->alloc_size += allocated->size; ++ blk->alloc_prog_size += allocated->size - (uint64_t)MP_CK_CB_LEN; ++ alloc = (char *)allocated + MP_CK_HEADER_LEN; ++ if (phys_addr != NULL) { ++ *phys_addr = (uint64_t)blk->phys_start + (uint64_t)(alloc - blk->virt_start); ++ } ++ ++ return (void *)alloc; ++ } ++ ++ return NULL; ++} ++ ++static bool ++ssam_mp_check_consecutive_mem(void *start_addr, uint64_t len) ++{ ++ uint64_t phys_start; ++ uint64_t phys_end; ++ ++ phys_start = spdk_vtophys(start_addr, NULL); ++ phys_end = spdk_vtophys((void *)((uint64_t)start_addr + len - 1), NULL); ++ if ((phys_end - phys_start) == (len - 1)) { ++ return true; ++ } ++ ++ return false; ++} ++ ++/* alloc dma memory from hugepage directly */ ++static void * ++ssam_mp_dma_alloc(struct ssam_mempool *mp, uint64_t size, uint64_t *phys) ++{ ++ struct ssam_mp_dma_mem *alloc; ++ size_t len = size + sizeof(struct ssam_mp_dma_mem); ++ uint64_t phys_addr = 0; ++ ++ if (mp->extra_size + len > mp->extra_size_limit) { ++ SPDK_INFOLOG(ssam_mempool, "spdk_dma_malloc alloc failed, extra_size(%lu) size(%zu) limit(%lu).\n", ++ mp->extra_size, len, mp->extra_size_limit); ++ return NULL; ++ } ++ ++ alloc = (struct ssam_mp_dma_mem *)spdk_dma_malloc(len, 0, NULL); ++ if (alloc == NULL) { ++ SPDK_INFOLOG(ssam_mempool, "spdk_dma_malloc alloc failed, len %zu.\n", len); ++ return NULL; ++ } ++ if (!ssam_mp_check_consecutive_mem((void *)alloc->mem, size)) { ++ SPDK_ERRLOG("spdk_dma_malloc alloc failed, no consecutive mem, len %lu.\n", size); ++ spdk_dma_free(alloc); ++ return NULL; ++ } ++ phys_addr = spdk_vtophys((const void *)alloc->mem, NULL); ++ if (phys_addr == SSAM_SPDK_VTOPHYS_ERROR) { ++ SPDK_ERRLOG("Error translating spdk_dma_malloc address %lu\n", phys_addr); ++ spdk_dma_free(alloc); ++ return NULL; ++ } ++ *phys = phys_addr; ++ alloc->magic = SSAM_DMA_MEM_MAGIC; ++ alloc->size = len; ++ mp->extra_size += len; ++ ++ return (void *)alloc->mem; ++} ++ ++static void ++ssam_mp_dma_free(struct ssam_mempool *mp, const void *ptr) ++{ ++ struct ssam_mp_dma_mem *free_mem; ++ uint64_t addr = (uint64_t)ptr; ++ ++ if (addr <= sizeof(struct ssam_mp_dma_mem)) { ++ SPDK_ERRLOG("ssam_mp_dma_free mem address err\n"); ++ return; ++ } ++ ++ free_mem = (struct ssam_mp_dma_mem *)(addr - sizeof(struct ssam_mp_dma_mem)); ++ if (free_mem->magic == SSAM_DMA_MEM_MAGIC) { ++ mp->extra_size -= free_mem->size; ++ spdk_dma_free(free_mem); ++ } else { ++ SPDK_ERRLOG("ssam_mp_dma_free magic err, magic is %lx\n", free_mem->magic); ++ } ++ return; ++} ++ ++static void * ++ssam_mp_alloc_mem_from_blocks(struct ssam_mempool *mp, uint64_t size, ++ uint64_t *phys_addr) ++{ ++ struct ssam_mp_block *blk = mp->blk_list; ++ void *alloc = NULL; ++ ++ while (blk != NULL) { ++ if (size > (blk->size - blk->alloc_size)) { ++ blk = blk->next; ++ continue; ++ } ++ ++ alloc = ssam_mp_alloc_mem_from_block(blk, size, phys_addr); ++ if (alloc != NULL) { ++ return alloc; ++ } ++ ++ blk = blk->next; ++ } ++ SPDK_INFOLOG(ssam_mempool, "ssam mempool no enough memory, alloc size %lu\n", size); ++ alloc = ssam_mp_dma_alloc(mp, size, phys_addr); ++ ++ return alloc; ++} ++ ++void * ++ssam_mempool_alloc(ssam_mempool_t *mp, uint64_t size, uint64_t *phys_addr) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ void *alloc = NULL; ++ uint64_t need_size; ++ ++ if (phys_addr == NULL) { ++ SPDK_ERRLOG("alloc phys_addr pointer is NULL\n"); ++ 
return NULL; ++ } ++ ++ if (l_mp == NULL) { ++ SPDK_ERRLOG("alloc mp pointer is NULL\n"); ++ return NULL; ++ } ++ ++ if (size == 0) { ++ SPDK_ERRLOG("Memory pool size can not be %lu, mempool alloc failed\n", size); ++ return NULL; ++ } ++ ++ need_size = ssam_mp_align_up(size + MP_CK_CB_LEN); ++ ++ ssam_mp_lock(l_mp); ++ if (need_size > l_mp->size) { ++ SPDK_INFOLOG(ssam_mempool, "No enough memory in mempool, need %lu, actually %lu\n", ++ need_size, l_mp->size); ++ alloc = ssam_mp_dma_alloc(l_mp, size, phys_addr); ++ ssam_mp_unlock(l_mp); ++ return alloc; ++ } ++ ++ alloc = ssam_mp_alloc_mem_from_blocks(l_mp, need_size, phys_addr); ++ ++ ssam_mp_unlock(l_mp); ++ ++ return alloc; ++} ++ ++void ++ssam_mempool_free(ssam_mempool_t *mp, void *ptr) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ struct ssam_mp_block *blk = NULL; ++ struct ssam_mp_chunk *ck = NULL; ++ ++ if (l_mp == NULL) { ++ SPDK_ERRLOG("free mp pointer is NULL\n"); ++ return; ++ } ++ ++ if (ptr == NULL) { ++ SPDK_ERRLOG("free ptr pointer is NULL\n"); ++ return; ++ } ++ ++ ssam_mp_lock(l_mp); ++ ++ blk = ssam_mp_find_block(l_mp, ptr); ++ if (blk == NULL) { ++ ssam_mp_dma_free(l_mp, ptr); ++ ssam_mp_unlock(l_mp); ++ return; ++ } ++ ++ ck = (struct ssam_mp_chunk *)((char *)ptr - MP_CK_HEADER_LEN); ++ ++ ssam_mp_list_delete(&blk->alloc_list, ck); ++ ssam_mp_list_insert(&blk->free_list, ck); ++ ck->is_free = true; ++ ++ blk->alloc_size -= ck->size; ++ blk->alloc_prog_size -= ck->size - (uint64_t)MP_CK_CB_LEN; ++ ++ ssam_mp_merge_chunk(blk, ck); ++ ++ ssam_mp_unlock(l_mp); ++ ++ return; ++} ++ ++void ++ssam_mempool_destroy(ssam_mempool_t *mp) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ ++ if (l_mp == NULL) { ++ SPDK_ERRLOG("destroy mp pointer is NULL\n"); ++ return; ++ } ++ ++ if (l_mp->virt == NULL) { ++ SPDK_ERRLOG("destroy mp->virt pointer is NULL\n"); ++ return; ++ } ++ ++ ssam_mp_lock(l_mp); ++ ssam_mp_free_blk_heads(l_mp->blk_list); ++ spdk_dma_free(l_mp->virt); ++ ssam_mp_unlock(l_mp); ++ pthread_mutex_destroy(&l_mp->lock); ++ free(l_mp); ++ l_mp = NULL; ++ ++ return; ++} ++ ++static uint64_t ++ssam_mp_total_memory(ssam_mempool_t *mp) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ uint64_t size; ++ ++ ssam_mp_lock(l_mp); ++ size = l_mp->size; ++ ssam_mp_unlock(l_mp); ++ ++ return size; ++} ++ ++static uint64_t ++ssam_mp_total_used_memory(ssam_mempool_t *mp) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ struct ssam_mp_block *blk = NULL; ++ uint64_t total = 0; ++ ++ ssam_mp_lock(l_mp); ++ ++ blk = l_mp->blk_list; ++ while (blk != NULL) { ++ total += blk->alloc_size; ++ blk = blk->next; ++ } ++ ++ ssam_mp_unlock(l_mp); ++ ++ return total; ++} ++ ++static uint32_t ++ssam_mp_alloc_num(ssam_mempool_t *mp) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ struct ssam_mp_block *blk = NULL; ++ struct ssam_mp_chunk *alloc = NULL; ++ uint32_t total = 0; ++ ++ ssam_mp_lock(l_mp); ++ ++ blk = l_mp->blk_list; ++ while (blk != NULL) { ++ alloc = blk->alloc_list; ++ while (alloc != NULL) { ++ if (total == UINT32_MAX) { ++ SPDK_ERRLOG("mp alloc num out of bound\n"); ++ ssam_mp_unlock(l_mp); ++ return total; ++ } ++ total++; ++ alloc = alloc->next; ++ } ++ blk = blk->next; ++ } ++ ++ ssam_mp_unlock(l_mp); ++ ++ return total; ++} ++ ++static uint32_t ++ssam_mp_free_num(ssam_mempool_t *mp) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ struct ssam_mp_block *blk = NULL; ++ struct ssam_mp_chunk *free_mem = NULL; ++ uint32_t total = 0; ++ ++ 
ssam_mp_lock(l_mp); ++ ++ blk = l_mp->blk_list; ++ while (blk != NULL) { ++ free_mem = blk->free_list; ++ while (free_mem != NULL) { ++ if (total == UINT32_MAX) { ++ SPDK_ERRLOG("mp free num out of bound\n"); ++ ssam_mp_unlock(l_mp); ++ return total; ++ } ++ total++; ++ free_mem = free_mem->next; ++ } ++ blk = blk->next; ++ } ++ ++ ssam_mp_unlock(l_mp); ++ ++ return total; ++} ++ ++static uint64_t ++ssam_mp_get_greatest_free_size(ssam_mempool_t *mp) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ struct ssam_mp_block *blk = NULL; ++ struct ssam_mp_chunk *free_mem = NULL; ++ uint64_t max_size = 0; ++ ++ ssam_mp_lock(l_mp); ++ ++ blk = l_mp->blk_list; ++ while (blk != NULL) { ++ free_mem = blk->free_list; ++ while (free_mem != NULL) { ++ if (max_size < free_mem->size) { ++ max_size = free_mem->size; ++ } ++ free_mem = free_mem->next; ++ } ++ blk = blk->next; ++ } ++ ++ ssam_mp_unlock(l_mp); ++ ++ return max_size; ++} ++ ++int ++ssam_get_mempool_info(ssam_mempool_t *mp, struct memory_info_stats *info) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ ++ if (l_mp == NULL || info == NULL) { ++ SPDK_ERRLOG("ssam get mempool info mp or info pointer is NULL\n"); ++ return -EINVAL; ++ } ++ ++ info->total_size = ssam_mp_total_memory(l_mp); ++ info->used_size = ssam_mp_total_used_memory(l_mp); ++ info->free_size = info->total_size - info->used_size; ++ info->greatest_free_size = ssam_mp_get_greatest_free_size(l_mp); ++ info->alloc_count = ssam_mp_alloc_num(l_mp); ++ info->free_count = ssam_mp_free_num(l_mp); ++ ++ return 0; ++} ++SPDK_LOG_REGISTER_COMPONENT(ssam_mempool) +diff --git a/lib/ssam/ssam_fs.c b/lib/ssam/ssam_fs.c +new file mode 100644 +index 0000000..27c37ef +--- /dev/null ++++ b/lib/ssam/ssam_fs.c +@@ -0,0 +1,2720 @@ ++/* - ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2021-2022. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include "spdk/env.h" ++#include "spdk/string.h" ++#include "spdk/thread.h" ++#include "spdk/ssam.h" ++#include "spdk/likely.h" ++ ++#include "ssam_config.h" ++#include "ssam_fs_internal.h" ++ ++static void ssam_fs_dump_info_json(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w); ++static void ssam_fs_response_worker(struct spdk_ssam_session *smsession, void *arg); ++static void ssam_fs_write_config_json(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w); ++static void ssam_fs_remove_self(struct spdk_ssam_session *smsession); ++static void ssam_fs_show_iostat_json(struct spdk_ssam_session *smsession, uint32_t id, ++ struct spdk_json_write_ctx *w); ++static void ssam_fs_clear_iostat_json(struct spdk_ssam_session *smsession); ++ ++static void ssam_free_fs_session(struct spdk_ssam_fs_session *fsmsession); ++ ++static const struct spdk_ssam_session_backend g_ssam_fs_session_backend = { ++ .type = VIRTIO_TYPE_FS, ++ .remove_session = NULL, ++ .request_worker = NULL, ++ .destroy_bdev_device = NULL, ++ .response_worker = ssam_fs_response_worker, ++ .no_data_req_worker = NULL, ++ .ssam_get_config = NULL, ++ .print_stuck_io_info = NULL, ++ .dump_info_json = ssam_fs_dump_info_json, ++ .write_config_json = ssam_fs_write_config_json, ++ .show_iostat_json = ssam_fs_show_iostat_json, ++ .clear_iostat_json = ssam_fs_clear_iostat_json, ++ .get_bdev = NULL, ++ .remove_self = ssam_fs_remove_self, ++}; ++ ++static struct spdk_ssam_fs_poller_ctx g_ssam_fs_poller_ctx = { 0 }; ++ ++static struct lo_data lo_map[SSAM_HOSTEP_NUM_MAX] = { 0 }; ++ ++static struct lo_data *lo_data(fuse_req_t req) ++{ ++ return (struct lo_data *)fuse_req_userdata(req); ++} ++ ++static struct lo_inode *lo_inode(fuse_req_t req, fuse_ino_t ino) ++{ ++ if (ino == FUSE_ROOT_ID) { ++ return &lo_data(req)->root; ++ } ++ ++ return (struct lo_inode *)(uintptr_t)ino; ++} ++ ++static int lo_fd(fuse_req_t req, fuse_ino_t ino) ++{ ++ return lo_inode(req, ino)->fd; ++} ++ ++static void lo_init(void *userdata, struct fuse_conn_info *conn) ++{ ++ struct lo_data *lo = (struct lo_data *)userdata; ++ struct fuse_session *se = lo->se; ++ ++ if (conn->capable & FUSE_CAP_EXPORT_SUPPORT) { ++ conn->want |= FUSE_CAP_EXPORT_SUPPORT; ++ } ++ ++ if (lo->writeback && conn->capable & FUSE_CAP_WRITEBACK_CACHE) { ++ SPDK_INFOLOG(ssam_fs, "lo_init: activating writeback\n"); ++ conn->want |= FUSE_CAP_WRITEBACK_CACHE; ++ } ++ if (lo->flock && conn->capable & FUSE_CAP_FLOCK_LOCKS) { ++ SPDK_INFOLOG(ssam_fs, "lo_init: activating flock locks\n"); ++ conn->want |= FUSE_CAP_FLOCK_LOCKS; ++ } ++ conn->want &= ~FUSE_CAP_SPLICE_READ; ++ conn->max_write = SSAM_FS_MAX_PAGES * getpagesize(); ++ se->got_destroy = 0; ++ lo->mounted = true; ++} ++ ++static void lo_destroy(void *userdata) ++{ ++ struct lo_data *lo = (struct lo_data *)userdata; ++ struct fuse_session *se = lo->se; ++ ++ while (lo->root.next != &lo->root) { ++ struct lo_inode *next = lo->root.next; ++ lo->root.next = next->next; ++ if (next->fd > 0) { ++ close(next->fd); ++ } ++ free(next); ++ } ++ se->got_init = 0; ++ lo->mounted = false; ++} ++ ++static void lo_getattr(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) ++{ ++ int res; ++ struct stat buf; ++ struct lo_data *lo = lo_data(req); ++ unsigned lcore_id = rte_lcore_id(); ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)lo->smsession[lcore_id]; ++ ++ (void)fi; ++ ++ res = fstatat(lo_fd(req, ino), "", &buf, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW); ++ if (res == -1) { ++ return 
(void)fuse_reply_err(req, errno); ++ } ++ ++ ssam_fuse_reply_attr(req, &buf, lo->timeout, fsmsession); ++} ++ ++static void lo_setattr(fuse_req_t req, fuse_ino_t ino, struct stat *attr, int valid, ++ struct fuse_file_info *fi) ++{ ++ int saverr; ++ char procname[64]; ++ struct lo_inode *inode = lo_inode(req, ino); ++ int ifd = inode->fd; ++ int res; ++ ++ if (valid & FUSE_SET_ATTR_MODE) { ++ if (fi) { ++ res = fchmod(fi->fh, attr->st_mode); ++ } else { ++ snprintf(procname, sizeof(procname), "/proc/self/fd/%i", ifd); ++ res = chmod(procname, attr->st_mode); ++ } ++ if (res == -1) { ++ goto out_err; ++ } ++ } ++ if (valid & (FUSE_SET_ATTR_UID | FUSE_SET_ATTR_GID)) { ++ uid_t uid = (valid & FUSE_SET_ATTR_UID) ? attr->st_uid : (uid_t) -1; ++ gid_t gid = (valid & FUSE_SET_ATTR_GID) ? attr->st_gid : (gid_t) -1; ++ ++ res = fchownat(ifd, "", uid, gid, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW); ++ if (res == -1) { ++ goto out_err; ++ } ++ } ++ if (valid & FUSE_SET_ATTR_SIZE) { ++ if (fi) { ++ res = ftruncate(fi->fh, attr->st_size); ++ } else { ++ snprintf(procname, sizeof(procname), "/proc/self/fd/%i", ifd); ++ res = truncate(procname, attr->st_size); ++ } ++ if (res == -1) { ++ goto out_err; ++ } ++ } ++ if (valid & (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME)) { ++ struct timespec tv[2]; ++ ++ tv[0].tv_sec = 0; ++ tv[1].tv_sec = 0; ++ tv[0].tv_nsec = UTIME_OMIT; ++ tv[1].tv_nsec = UTIME_OMIT; ++ ++ if (valid & FUSE_SET_ATTR_ATIME_NOW) { ++ tv[0].tv_nsec = UTIME_NOW; ++ } else if (valid & FUSE_SET_ATTR_ATIME) { ++ tv[0] = attr->st_atim; ++ } ++ ++ if (valid & FUSE_SET_ATTR_MTIME_NOW) { ++ tv[1].tv_nsec = UTIME_NOW; ++ } else if (valid & FUSE_SET_ATTR_MTIME) { ++ tv[1] = attr->st_mtim; ++ } ++ ++ if (fi) { ++ res = futimens(fi->fh, tv); ++ } else { ++ snprintf(procname, sizeof(procname), "/proc/self/fd/%i", ifd); ++ res = utimensat(AT_FDCWD, procname, tv, 0); ++ } ++ if (res == -1) { ++ goto out_err; ++ } ++ } ++ ++ return lo_getattr(req, ino, fi); ++ ++out_err: ++ saverr = errno; ++ fuse_reply_err(req, saverr); ++} ++ ++static struct lo_inode *lo_find(struct lo_data *lo, struct stat *st) ++{ ++ struct lo_inode *p; ++ struct lo_inode *ret = NULL; ++ ++ pthread_mutex_lock(&lo->mutex); ++ for (p = lo->root.next; p != &lo->root; p = p->next) { ++ if (p->ino == st->st_ino && p->dev == st->st_dev) { ++ assert(p->refcount > 0); ++ ret = p; ++ ret->refcount++; ++ break; ++ } ++ } ++ pthread_mutex_unlock(&lo->mutex); ++ return ret; ++} ++ ++static int lo_do_lookup(fuse_req_t req, fuse_ino_t parent, const char *name, ++ struct fuse_entry_param *e) ++{ ++ int newfd; ++ int res; ++ int saverr; ++ struct lo_data *lo = lo_data(req); ++ struct lo_inode *inode; ++ ++ memset(e, 0, sizeof(*e)); ++ e->attr_timeout = lo->timeout; ++ e->entry_timeout = lo->timeout; ++ ++ newfd = openat(lo_fd(req, parent), name, O_PATH | O_NOFOLLOW); ++ if (newfd == -1) { ++ goto out_err; ++ } ++ ++ res = fstatat(newfd, "", &e->attr, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW); ++ if (res == -1) { ++ goto out_err; ++ } ++ ++ inode = lo_find(lo_data(req), &e->attr); ++ if (inode) { ++ close(newfd); ++ newfd = -1; ++ } else { ++ struct lo_inode *prev, *next; ++ ++ saverr = ENOMEM; ++ inode = calloc(1, sizeof(struct lo_inode)); ++ if (!inode) { ++ goto out_err; ++ } ++ ++ inode->refcount = 1; ++ inode->fd = newfd; ++ inode->ino = e->attr.st_ino; ++ inode->dev = e->attr.st_dev; ++ ++ pthread_mutex_lock(&lo->mutex); ++ prev = &lo->root; ++ next = prev->next; ++ next->prev = inode; ++ inode->next = next; ++ inode->prev = prev; ++ prev->next = inode; ++ 
pthread_mutex_unlock(&lo->mutex); ++ } ++ e->ino = (uintptr_t)inode; ++ ++ SPDK_INFOLOG(ssam_fs, "%lli/%s -> %lli\n", (unsigned long long)parent, name, ++ (unsigned long long)e->ino); ++ ++ return 0; ++ ++out_err: ++ saverr = errno; ++ if (newfd != -1) { ++ close(newfd); ++ } ++ return saverr; ++} ++ ++static void lo_lookup(fuse_req_t req, fuse_ino_t parent, const char *name) ++{ ++ struct fuse_entry_param e; ++ int err; ++ struct lo_data *lo = lo_data(req); ++ unsigned lcore_id = rte_lcore_id(); ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)lo->smsession[lcore_id]; ++ ++ SPDK_INFOLOG(ssam_fs, "lo_lookup(parent=%" PRIu64 ", name=%s)\n", parent, name); ++ ++ err = lo_do_lookup(req, parent, name, &e); ++ if (err) { ++ fuse_reply_err(req, err); ++ } else { ++ ssam_fuse_reply_entry(req, &e, fsmsession); ++ } ++} ++ ++static int mknod_wrapper(int dirfd, const char *path, const char *link, int mode, dev_t rdev) ++{ ++ int res; ++ ++ if (S_ISREG(mode)) { ++ res = openat(dirfd, path, O_CREAT | O_EXCL | O_WRONLY, mode); ++ if (res >= 0) { ++ res = close(res); ++ } ++ } else if (S_ISDIR(mode)) { ++ res = mkdirat(dirfd, path, mode); ++ } else if (S_ISLNK(mode) && link != NULL) { ++ res = symlinkat(link, dirfd, path); ++ } else if (S_ISFIFO(mode)) { ++ res = mkfifoat(dirfd, path, mode); ++#ifdef __FreeBSD__ ++ } else if (S_ISSOCK(mode)) { ++ struct sockaddr_un su; ++ int fd; ++ ++ if (strlen(path) >= sizeof(su.sun_path)) { ++ errno = ENAMETOOLONG; ++ return -1; ++ } ++ fd = socket(AF_UNIX, SOCK_STREAM, 0); ++ if (fd >= 0) { ++ /* ++ * We must bind the socket to the underlying file ++ * system to create the socket file, even though ++ * we'll never listen on this socket. ++ */ ++ su.sun_family = AF_UNIX; ++ snprintf(su.sun_path, sizeof(su.sun_path), "%s", path); ++ res = bindat(dirfd, fd, (struct sockaddr *)&su, sizeof(su)); ++ if (res == 0) { ++ close(fd); ++ } ++ } else { ++ res = -1; ++ } ++#endif ++ } else { ++ res = mknodat(dirfd, path, mode, rdev); ++ } ++ ++ return res; ++} ++ ++static void lo_mknod_symlink(fuse_req_t req, fuse_ino_t parent, const char *name, mode_t mode, ++ dev_t rdev, ++ const char *link) ++{ ++ int res; ++ int saverr; ++ struct lo_data *lo = lo_data(req); ++ struct lo_inode *dir = lo_inode(req, parent); ++ struct fuse_entry_param e; ++ unsigned lcore_id = rte_lcore_id(); ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)lo->smsession[lcore_id]; ++ ++ res = mknod_wrapper(dir->fd, name, link, mode, rdev); ++ ++ saverr = errno; ++ if (res == -1) { ++ goto out; ++ } ++ ++ saverr = lo_do_lookup(req, parent, name, &e); ++ if (saverr) { ++ goto out; ++ } ++ ++ SPDK_INFOLOG(ssam_fs, "%lli/%s -> %lli\n", (unsigned long long)parent, name, ++ (unsigned long long)e.ino); ++ ++ ssam_fuse_reply_entry(req, &e, fsmsession); ++ return; ++ ++out: ++ fuse_reply_err(req, saverr); ++} ++ ++static void lo_mknod(fuse_req_t req, fuse_ino_t parent, const char *name, mode_t mode, dev_t rdev) ++{ ++ lo_mknod_symlink(req, parent, name, mode, rdev, NULL); ++} ++ ++static void lo_mkdir(fuse_req_t req, fuse_ino_t parent, const char *name, mode_t mode) ++{ ++ lo_mknod_symlink(req, parent, name, S_IFDIR | mode, 0, NULL); ++} ++ ++static void lo_symlink(fuse_req_t req, const char *link, fuse_ino_t parent, const char *name) ++{ ++ lo_mknod_symlink(req, parent, name, S_IFLNK, 0, link); ++} ++ ++static void lo_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t parent, const char *name) ++{ ++ int res; ++ struct lo_data *lo = lo_data(req); ++ struct lo_inode 
*inode = lo_inode(req, ino); ++ struct fuse_entry_param e; ++ char procname[64]; ++ int saverr; ++ unsigned lcore_id = rte_lcore_id(); ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)lo->smsession[lcore_id]; ++ ++ memset(&e, 0, sizeof(struct fuse_entry_param)); ++ e.attr_timeout = lo->timeout; ++ e.entry_timeout = lo->timeout; ++ ++ snprintf(procname, sizeof(procname), "/proc/self/fd/%i", inode->fd); ++ res = linkat(AT_FDCWD, procname, lo_fd(req, parent), name, AT_SYMLINK_FOLLOW); ++ if (res == -1) { ++ goto out_err; ++ } ++ ++ res = fstatat(inode->fd, "", &e.attr, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW); ++ if (res == -1) { ++ goto out_err; ++ } ++ ++ pthread_mutex_lock(&lo->mutex); ++ inode->refcount++; ++ pthread_mutex_unlock(&lo->mutex); ++ e.ino = (uintptr_t)inode; ++ ++ SPDK_INFOLOG(ssam_fs, "%lli/%s -> %lli\n", (unsigned long long)parent, name, ++ (unsigned long long)e.ino); ++ ++ ssam_fuse_reply_entry(req, &e, fsmsession); ++ return; ++ ++out_err: ++ saverr = errno; ++ fuse_reply_err(req, saverr); ++} ++ ++static void lo_rmdir(fuse_req_t req, fuse_ino_t parent, const char *name) ++{ ++ int res; ++ ++ res = unlinkat(lo_fd(req, parent), name, AT_REMOVEDIR); ++ ++ fuse_reply_err(req, res == -1 ? errno : 0); ++} ++ ++static void lo_rename(fuse_req_t req, fuse_ino_t parent, const char *name, fuse_ino_t newparent, ++ const char *newname, ++ unsigned int flags) ++{ ++ int res; ++ ++ if (flags) { ++ fuse_reply_err(req, EINVAL); ++ return; ++ } ++ ++ res = renameat(lo_fd(req, parent), name, lo_fd(req, newparent), newname); ++ ++ fuse_reply_err(req, res == -1 ? errno : 0); ++} ++ ++static void lo_unlink(fuse_req_t req, fuse_ino_t parent, const char *name) ++{ ++ int res; ++ ++ res = unlinkat(lo_fd(req, parent), name, 0); ++ ++ fuse_reply_err(req, res == -1 ? 
errno : 0); ++} ++ ++static void unref_inode(struct lo_data *lo, struct lo_inode *inode, uint64_t n) ++{ ++ if (!inode) { ++ return; ++ } ++ ++ pthread_mutex_lock(&lo->mutex); ++ assert(inode->refcount >= n); ++ inode->refcount -= n; ++ if (!inode->refcount) { ++ struct lo_inode *prev, *next; ++ ++ prev = inode->prev; ++ next = inode->next; ++ next->prev = prev; ++ prev->next = next; ++ ++ pthread_mutex_unlock(&lo->mutex); ++ close(inode->fd); ++ free(inode); ++ } else { ++ pthread_mutex_unlock(&lo->mutex); ++ } ++} ++ ++static void lo_forget_one(fuse_req_t req, fuse_ino_t ino, uint64_t nlookup) ++{ ++ struct lo_data *lo = lo_data(req); ++ struct lo_inode *inode = lo_inode(req, ino); ++ struct lo_inode *p; ++ ++ pthread_mutex_lock(&lo->mutex); ++ for (p = lo->root.next; p != &lo->root; p = p->next) { ++ if (p == inode) { ++ break; ++ } ++ } ++ pthread_mutex_unlock(&lo->mutex); ++ if (p == &lo->root) { ++ SPDK_INFOLOG(ssam_fs, "forget %lli-%lli\n", (unsigned long long)ino, (unsigned long long)nlookup); ++ return; ++ } ++ ++ SPDK_INFOLOG(ssam_fs, "forget %lli %lli -%lli\n", (unsigned long long)ino, ++ (unsigned long long)inode->refcount, ++ (unsigned long long)nlookup); ++ ++ unref_inode(lo, inode, nlookup); ++} ++ ++static udaa_error_t udaa_eml_queue_empty_complete(int depth_idx, struct udaa_emlq *emlq, ++ uint16_t tid, ++ struct spdk_ssam_fs_session *fsmsession); ++ ++static void lo_forget(fuse_req_t req, fuse_ino_t ino, uint64_t nlookup) ++{ ++ struct lo_data *lo = lo_data(req); ++ unsigned lcore_id = rte_lcore_id(); ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)lo->smsession[lcore_id]; ++ unsigned tid = fsmsession->smsession.smdev->tid; ++ ++ lo_forget_one(req, ino, nlookup); ++ udaa_eml_queue_empty_complete(0, lo->udaa_fs_queues[lcore_id], tid, fsmsession); ++ fuse_reply_none(req); ++} ++ ++static void lo_forget_multi(fuse_req_t req, size_t count, struct fuse_forget_data *forgets) ++{ ++ size_t i; ++ struct lo_data *lo = lo_data(req); ++ unsigned lcore_id = rte_lcore_id(); ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)lo->smsession[lcore_id]; ++ unsigned tid = fsmsession->smsession.smdev->tid; ++ ++ for (i = 0; i < count; i++) { ++ lo_forget_one(req, forgets[i].ino, forgets[i].nlookup); ++ } ++ udaa_eml_queue_empty_complete(0, lo->udaa_fs_queues[lcore_id], tid, fsmsession); ++ fuse_reply_none(req); ++} ++ ++static void lo_readlink(fuse_req_t req, fuse_ino_t ino) ++{ ++ int res; ++ struct lo_data *lo = lo_data(req); ++ unsigned lcore_id = rte_lcore_id(); ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)lo->smsession[lcore_id]; ++ ++ memset(fsmsession->static_buf, 0, SSAM_FS_STATIC_BUF_SIZE); ++ res = readlinkat(lo_fd(req, ino), "", fsmsession->static_buf, SSAM_FS_STATIC_BUF_SIZE); ++ if (res == -1) { ++ return (void)fuse_reply_err(req, errno); ++ } ++ ++ if (res == sizeof(fsmsession->static_buf)) { ++ return (void)fuse_reply_err(req, ENAMETOOLONG); ++ } ++ ++ fsmsession->static_buf[res] = '\0'; ++ ++ fuse_reply_readlink(req, fsmsession->static_buf); ++} ++ ++struct lo_dirp { ++ DIR *dp; ++ struct dirent *entry; ++ off_t offset; ++}; ++ ++static struct lo_dirp *lo_dirp(struct fuse_file_info *fi) ++{ ++ return (struct lo_dirp *)(uintptr_t)fi->fh; ++} ++ ++static void lo_opendir(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) ++{ ++ int error = ENOMEM; ++ struct lo_data *lo = lo_data(req); ++ struct lo_dirp *d = NULL; ++ int fd; ++ unsigned lcore_id = rte_lcore_id(); ++ struct 
spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)lo->smsession[lcore_id]; ++ ++ d = calloc(1, sizeof(struct lo_dirp)); ++ if (d == NULL) { ++ goto out_err; ++ } ++ ++ fd = openat(lo_fd(req, ino), ".", O_RDONLY); ++ if (fd == -1) { ++ goto out_errno; ++ } ++ ++ d->dp = fdopendir(fd); ++ if (d->dp == NULL) { ++ goto out_errno; ++ } ++ ++ d->offset = 0; ++ d->entry = NULL; ++ ++ fi->fh = (uintptr_t)d; ++ if (lo->cache == CACHE_ALWAYS) { ++ fi->cache_readdir = 1; ++ } ++ ssam_fuse_reply_open(req, fi, fsmsession); ++ return; ++ ++out_errno: ++ error = errno; ++out_err: ++ if (d) { ++ if (fd != -1) { ++ close(fd); ++ } ++ free(d); ++ } ++ fuse_reply_err(req, error); ++} ++ ++static int is_dot_or_dotdot(const char *name) ++{ ++ return name[0] == '.' && (name[1] == '\0' || (name[1] == '.' && name[2] == '\0')); ++} ++ ++static void lo_do_readdir(fuse_req_t req, fuse_ino_t ino, size_t size, off_t offset, ++ struct fuse_file_info *fi, ++ int plus) ++{ ++ struct lo_dirp *d = lo_dirp(fi); ++ char *p = NULL; ++ size_t rem = size; ++ int err; ++ struct lo_data *lo = lo_data(req); ++ unsigned lcore_id = rte_lcore_id(); ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)lo->smsession[lcore_id]; ++ uint64_t phys_addr; ++ ++ (void)ino; ++ ++ fsmsession->dynamic_buf = ssam_mempool_alloc(fsmsession->smsession.mp, size, &phys_addr); ++ if (!fsmsession->dynamic_buf) { ++ err = ENOMEM; ++ goto error; ++ } ++ p = fsmsession->dynamic_buf; ++ ++ if (offset != d->offset) { ++ seekdir(d->dp, offset); ++ d->entry = NULL; ++ d->offset = offset; ++ } ++ while (1) { ++ size_t entsize; ++ off_t nextoff; ++ const char *name; ++ ++ if (!d->entry) { ++ errno = 0; ++ d->entry = readdir(d->dp); ++ if (!d->entry) { ++ if (errno) { /* Error */ ++ err = errno; ++ goto error; ++ } else { /* End of stream */ ++ break; ++ } ++ } ++ } ++ nextoff = d->entry->d_off; ++ name = d->entry->d_name; ++ fuse_ino_t entry_ino = 0; ++ if (plus) { ++ struct fuse_entry_param e; ++ if (is_dot_or_dotdot(name)) { ++ e = (struct fuse_entry_param) { ++ .attr.st_ino = d->entry->d_ino, ++ .attr.st_mode = d->entry->d_type << 12, ++ }; ++ } else { ++ err = lo_do_lookup(req, ino, name, &e); ++ if (err) { ++ goto error; ++ } ++ entry_ino = e.ino; ++ } ++ ++ entsize = fuse_add_direntry_plus(req, p, rem, name, &e, nextoff); ++ } else { ++ struct stat st = { ++ .st_ino = d->entry->d_ino, ++ .st_mode = d->entry->d_type << 12, ++ }; ++ entsize = fuse_add_direntry(req, p, rem, name, &st, nextoff); ++ } ++ if (entsize > rem) { ++ if (entry_ino != 0) { ++ lo_forget_one(req, entry_ino, 1); ++ } ++ break; ++ } ++ ++ p += entsize; ++ rem -= entsize; ++ ++ d->entry = NULL; ++ d->offset = nextoff; ++ } ++ ++ err = 0; ++error: ++ /* If there's an error, we can only signal it if we haven't stored ++ * any entries yet - otherwise we'd end up with wrong lookup ++ * counts for the entries that are already in the buffer. So we ++ * return what we've collected until that point. 
++ */ ++ if (err && rem == size) { ++ fuse_reply_err(req, err); ++ ssam_mempool_free(fsmsession->smsession.mp, fsmsession->dynamic_buf); ++ fsmsession->dynamic_buf = NULL; ++ } else { ++ fuse_reply_buf(req, fsmsession->dynamic_buf, size - rem); ++ } ++} ++ ++static void lo_readdir(fuse_req_t req, fuse_ino_t ino, size_t size, off_t offset, ++ struct fuse_file_info *fi) ++{ ++ lo_do_readdir(req, ino, size, offset, fi, 0); ++} ++ ++static void lo_readdirplus(fuse_req_t req, fuse_ino_t ino, size_t size, off_t offset, ++ struct fuse_file_info *fi) ++{ ++ lo_do_readdir(req, ino, size, offset, fi, 1); ++} ++ ++static void lo_releasedir(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) ++{ ++ struct lo_dirp *d = lo_dirp(fi); ++ (void)ino; ++ closedir(d->dp); ++ free(d); ++ fuse_reply_err(req, 0); ++} ++ ++static void lo_create(fuse_req_t req, fuse_ino_t parent, const char *name, mode_t mode, ++ struct fuse_file_info *fi) ++{ ++ int fd; ++ struct lo_data *lo = lo_data(req); ++ struct fuse_entry_param e; ++ int err; ++ unsigned lcore_id = rte_lcore_id(); ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)lo->smsession[lcore_id]; ++ ++ SPDK_INFOLOG(ssam_fs, "lo_create(parent=%" PRIu64 ", name=%s)\n", parent, name); ++ ++ fd = openat(lo_fd(req, parent), name, (fi->flags | O_CREAT) & ~O_NOFOLLOW, mode); ++ if (fd == -1) { ++ return (void)fuse_reply_err(req, errno); ++ } ++ ++ fi->fh = fd; ++ if (lo->cache == CACHE_NEVER) { ++ fi->direct_io = 1; ++ } else if (lo->cache == CACHE_ALWAYS) { ++ fi->keep_cache = 1; ++ } ++ ++ err = lo_do_lookup(req, parent, name, &e); ++ if (err) { ++ fuse_reply_err(req, err); ++ } else { ++ ssam_fuse_reply_create(req, &e, fi, fsmsession); ++ } ++} ++ ++static void lo_fsyncdir(fuse_req_t req, fuse_ino_t ino, int datasync, struct fuse_file_info *fi) ++{ ++ int res; ++ int fd = dirfd(lo_dirp(fi)->dp); ++ (void)ino; ++ if (datasync) { ++ res = fdatasync(fd); ++ } else { ++ res = fsync(fd); ++ } ++ fuse_reply_err(req, res == -1 ? errno : 0); ++} ++ ++static void lo_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) ++{ ++ int fd; ++ char buf[64]; ++ struct lo_data *lo = lo_data(req); ++ unsigned lcore_id = rte_lcore_id(); ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)lo->smsession[lcore_id]; ++ ++ SPDK_INFOLOG(ssam_fs, "lo_open(ino=%" PRIu64 ", flags=%d)\n", ino, fi->flags); ++ ++ /* With writeback cache, kernel may send read requests even ++ when userspace opened write-only */ ++ if (lo->writeback && (fi->flags & O_ACCMODE) == O_WRONLY) { ++ fi->flags &= ~O_ACCMODE; ++ fi->flags |= O_RDWR; ++ } ++ ++ /* With writeback cache, O_APPEND is handled by the kernel. ++ * This breaks atomicity (since the file may change in the ++ * underlying filesystem, so that the kernel's idea of the ++ * end of the file isn't accurate anymore). In this example, ++ * we just accept that. 
A more rigorous filesystem may want ++ * to return an error here ++ */ ++ if (lo->writeback && (fi->flags & O_APPEND)) { ++ fi->flags &= ~O_APPEND; ++ } ++ ++ snprintf(buf, sizeof(buf), "/proc/self/fd/%i", lo_fd(req, ino)); ++ fd = open(buf, fi->flags & ~O_NOFOLLOW); ++ if (fd == -1) { ++ return (void)fuse_reply_err(req, errno); ++ } ++ ++ fi->fh = fd; ++ if (lo->cache == CACHE_NEVER) { ++ fi->direct_io = 1; ++ } else if (lo->cache == CACHE_ALWAYS) { ++ fi->keep_cache = 1; ++ } ++ ssam_fuse_reply_open(req, fi, fsmsession); ++} ++ ++static void lo_release(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) ++{ ++ (void)ino; ++ ++ close(fi->fh); ++ fuse_reply_err(req, 0); ++} ++ ++static void lo_flush(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) ++{ ++ int res; ++ (void)ino; ++ res = close(dup(fi->fh)); ++ fuse_reply_err(req, res == -1 ? errno : 0); ++} ++ ++static void lo_fsync(fuse_req_t req, fuse_ino_t ino, int datasync, struct fuse_file_info *fi) ++{ ++ int res; ++ (void)ino; ++ if (datasync) { ++ res = fdatasync(fi->fh); ++ } else { ++ res = fsync(fi->fh); ++ } ++ fuse_reply_err(req, res == -1 ? errno : 0); ++} ++ ++static void lo_read(fuse_req_t req, fuse_ino_t ino, size_t size, off_t offset, ++ struct fuse_file_info *fi) ++{ ++ struct fuse_bufvec buf = FUSE_BUFVEC_INIT(size); ++ struct lo_data *lo = lo_data(req); ++ unsigned lcore_id = rte_lcore_id(); ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)lo->smsession[lcore_id]; ++ ++ SPDK_INFOLOG(ssam_fs, "lo_read(ino=%" PRIu64 ", size=%zd, off=%lu)\n", ino, size, ++ (unsigned long)offset); ++ fsmsession->fs_stat.payload_size = size; ++ buf.buf[0].flags = FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK; ++ buf.buf[0].fd = fi->fh; ++ buf.buf[0].pos = offset; ++ ++ ssam_fuse_reply_data(req, &buf, FUSE_BUF_SPLICE_MOVE, fsmsession); ++} ++ ++static void lo_write_buf(fuse_req_t req, fuse_ino_t ino, struct fuse_bufvec *in_buf, off_t off, ++ struct fuse_file_info *fi) ++{ ++ (void)ino; ++ ssize_t res; ++ struct fuse_bufvec out_buf = FUSE_BUFVEC_INIT(fuse_buf_size(in_buf)); ++ struct lo_data *lo = lo_data(req); ++ unsigned lcore_id = rte_lcore_id(); ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)lo->smsession[lcore_id]; ++ ++ out_buf.buf[0].flags = FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK; ++ out_buf.buf[0].fd = fi->fh; ++ out_buf.buf[0].pos = off; ++ ++ SPDK_INFOLOG(ssam_fs, "lo_write(ino=%" PRIu64 ", size=%zd, off=%lu)\n", ino, out_buf.buf[0].size, ++ (unsigned long)off); ++ fsmsession->fs_stat.payload_size = out_buf.buf[0].size; ++ res = fuse_buf_copy(&out_buf, in_buf, 0); ++ if (res < 0) { ++ fuse_reply_err(req, -res); ++ } else { ++ ssam_fuse_reply_write(req, (size_t)res, fsmsession); ++ } ++} ++ ++static void lo_statfs(fuse_req_t req, fuse_ino_t ino) ++{ ++ int res; ++ struct statvfs stbuf; ++ struct lo_data *lo = lo_data(req); ++ unsigned lcore_id = rte_lcore_id(); ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)lo->smsession[lcore_id]; ++ ++ res = fstatvfs(lo_fd(req, ino), &stbuf); ++ if (res == -1) { ++ fuse_reply_err(req, errno); ++ } else { ++ ssam_fuse_reply_statfs(req, &stbuf, fsmsession); ++ } ++} ++ ++static void lo_fallocate(fuse_req_t req, fuse_ino_t ino, int mode, off_t offset, off_t length, ++ struct fuse_file_info *fi) ++{ ++ int err = EOPNOTSUPP; ++ (void)ino; ++ ++#ifdef HAVE_FALLOCATE ++ err = fallocate(fi->fh, mode, offset, length); ++ if (err < 0) { ++ err = errno; ++ } ++ ++#elif defined(HAVE_POSIX_FALLOCATE) ++ if (mode) { ++ 
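++ /* posix_fallocate() has no mode argument, so non-zero fallocate modes (e.g. hole punching) cannot be emulated here and are rejected. */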
fuse_reply_err(req, EOPNOTSUPP); ++ return; ++ } ++ ++ err = posix_fallocate(fi->fh, offset, length); ++#endif ++ ++ fuse_reply_err(req, err); ++} ++ ++static void lo_flock(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi, int op) ++{ ++ int res; ++ (void)ino; ++ ++ res = flock(fi->fh, op); ++ ++ fuse_reply_err(req, res == -1 ? errno : 0); ++} ++ ++static void lo_getxattr(fuse_req_t req, fuse_ino_t ino, const char *name, size_t size) ++{ ++ char procname[64]; ++ struct lo_inode *inode = lo_inode(req, ino); ++ ssize_t ret; ++ int saverr; ++ struct lo_data *lo = lo_data(req); ++ unsigned lcore_id = rte_lcore_id(); ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)lo->smsession[lcore_id]; ++ uint64_t phys_addr; ++ ++ saverr = ENOSYS; ++ if (!lo_data(req)->xattr) { ++ goto out; ++ } ++ ++ SPDK_INFOLOG(ssam_fs, "lo_getxattr(ino=%" PRIu64 ", name=%s size=%zd)\n", ino, name, size); ++ ++ snprintf(procname, sizeof(procname), "/proc/self/fd/%i", inode->fd); ++ ++ if (size) { ++ fsmsession->dynamic_buf = ssam_mempool_alloc(fsmsession->smsession.mp, size, &phys_addr); ++ if (!fsmsession->dynamic_buf) { ++ goto out_err; ++ } ++ ++ ret = getxattr(procname, name, fsmsession->dynamic_buf, size); ++ if (ret == -1) { ++ goto out_err; ++ } ++ saverr = 0; ++ if (ret == 0) { ++ goto out; ++ } ++ ++ fuse_reply_buf(req, fsmsession->dynamic_buf, ret); ++ return; ++ } else { ++ ret = getxattr(procname, name, NULL, 0); ++ if (ret == -1) { ++ goto out_err; ++ } ++ ++ ssam_fuse_reply_xattr(req, ret, fsmsession); ++ } ++out_free: ++ if (fsmsession->dynamic_buf != NULL) { ++ ssam_mempool_free(fsmsession->smsession.mp, fsmsession->dynamic_buf); ++ fsmsession->dynamic_buf = NULL; ++ } ++ return; ++ ++out_err: ++ saverr = errno; ++out: ++ fuse_reply_err(req, saverr); ++ goto out_free; ++} ++ ++static void lo_listxattr(fuse_req_t req, fuse_ino_t ino, size_t size) ++{ ++ char procname[64]; ++ struct lo_inode *inode = lo_inode(req, ino); ++ ssize_t ret; ++ int saverr; ++ struct lo_data *lo = lo_data(req); ++ unsigned lcore_id = rte_lcore_id(); ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)lo->smsession[lcore_id]; ++ uint64_t phys_addr; ++ ++ saverr = ENOSYS; ++ if (!lo_data(req)->xattr) { ++ goto out; ++ } ++ ++ SPDK_INFOLOG(ssam_fs, "lo_listxattr(ino=%" PRIu64 ", size=%zd)\n", ino, size); ++ ++ snprintf(procname, sizeof(procname), "/proc/self/fd/%i", inode->fd); ++ ++ if (size) { ++ fsmsession->dynamic_buf = ssam_mempool_alloc(fsmsession->smsession.mp, size, &phys_addr); ++ if (!fsmsession->dynamic_buf) { ++ goto out_err; ++ } ++ ++ ret = listxattr(procname, fsmsession->dynamic_buf, size); ++ if (ret == -1) { ++ goto out_err; ++ } ++ saverr = 0; ++ if (ret == 0) { ++ goto out; ++ } ++ ++ fuse_reply_buf(req, fsmsession->dynamic_buf, ret); ++ return; ++ } else { ++ ret = listxattr(procname, NULL, 0); ++ if (ret == -1) { ++ goto out_err; ++ } ++ ++ ssam_fuse_reply_xattr(req, ret, fsmsession); ++ } ++out_free: ++ if (fsmsession->dynamic_buf != NULL) { ++ ssam_mempool_free(fsmsession->smsession.mp, fsmsession->dynamic_buf); ++ fsmsession->dynamic_buf = NULL; ++ } ++ return; ++ ++out_err: ++ saverr = errno; ++out: ++ fuse_reply_err(req, saverr); ++ goto out_free; ++} ++ ++static void lo_setxattr(fuse_req_t req, fuse_ino_t ino, const char *name, const char *value, ++ size_t size, int flags) ++{ ++ char procname[64]; ++ struct lo_inode *inode = lo_inode(req, ino); ++ ssize_t ret; ++ int saverr; ++ ++ saverr = ENOSYS; ++ if (!lo_data(req)->xattr) { ++ goto out; ++ } 
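++ /* Like the other xattr handlers, operate through the /proc/self/fd/ magic symlink rather than on the inode fd directly. */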
++ ++ SPDK_INFOLOG(ssam_fs, "lo_setxattr(ino=%" PRIu64 ", name=%s value=%s size=%zd)\n", ino, name, value, ++ size); ++ ++ snprintf(procname, sizeof(procname), "/proc/self/fd/%i", inode->fd); ++ ++ ret = setxattr(procname, name, value, size, flags); ++ saverr = ret == -1 ? errno : 0; ++ ++out: ++ fuse_reply_err(req, saverr); ++} ++ ++static void lo_removexattr(fuse_req_t req, fuse_ino_t ino, const char *name) ++{ ++ char procname[64]; ++ struct lo_inode *inode = lo_inode(req, ino); ++ ssize_t ret; ++ int saverr; ++ ++ saverr = ENOSYS; ++ if (!lo_data(req)->xattr) { ++ goto out; ++ } ++ ++ SPDK_INFOLOG(ssam_fs, "lo_removexattr(ino=%" PRIu64 ", name=%s)\n", ino, name); ++ ++ snprintf(procname, sizeof(procname), "/proc/self/fd/%i", inode->fd); ++ ++ ret = removexattr(procname, name); ++ saverr = ret == -1 ? errno : 0; ++ ++out: ++ fuse_reply_err(req, saverr); ++} ++ ++#ifdef HAVE_COPY_FILE_RANGE ++static void lo_copy_file_range(fuse_req_t req, fuse_ino_t ino_in, off_t off_in, ++ struct fuse_file_info *fi_in, ++ fuse_ino_t ino_out, off_t off_out, struct fuse_file_info *fi_out, size_t len, int flags) ++{ ++ ssize_t res; ++ struct lo_data *lo = lo_data(req); ++ unsigned lcore_id = rte_lcore_id(); ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)lo->smsession[lcore_id]; ++ ++ SPDK_INFOLOG(ssam_fs, ++ "lo_copy_file_range(ino=%" PRIu64 "/fd=%lu, off=%lu, ino=%" PRIu64 "/fd=%lu, " ++ "off=%lu, size=%zd, flags=0x%x)\n", ++ ino_in, fi_in->fh, off_in, ino_out, fi_out->fh, off_out, len, flags); ++ ++ res = copy_file_range(fi_in->fh, &off_in, fi_out->fh, &off_out, len, flags); ++ if (res < 0) { ++ fuse_reply_err(req, errno); ++ } else { ++ ssam_fuse_reply_write(req, res, fsmsession); ++ } ++} ++#endif ++ ++static void lo_lseek(fuse_req_t req, fuse_ino_t ino, off_t off, int whence, ++ struct fuse_file_info *fi) ++{ ++ off_t res; ++ ++ (void)ino; ++ res = lseek(fi->fh, off, whence); ++ if (res != -1) { ++ fuse_reply_lseek(req, res); ++ } else { ++ fuse_reply_err(req, errno); ++ } ++} ++ ++static const struct fuse_lowlevel_ops lo_oper = { ++ .init = lo_init, ++ .destroy = lo_destroy, ++ .lookup = lo_lookup, ++ .mkdir = lo_mkdir, ++ .mknod = lo_mknod, ++ .symlink = lo_symlink, ++ .link = lo_link, ++ .unlink = lo_unlink, ++ .rmdir = lo_rmdir, ++ .rename = lo_rename, ++ .forget = lo_forget, ++ .forget_multi = lo_forget_multi, ++ .getattr = lo_getattr, ++ .setattr = lo_setattr, ++ .readlink = lo_readlink, ++ .opendir = lo_opendir, ++ .readdir = lo_readdir, ++ .readdirplus = lo_readdirplus, ++ .releasedir = lo_releasedir, ++ .fsyncdir = lo_fsyncdir, ++ .create = lo_create, ++ .open = lo_open, ++ .release = lo_release, ++ .flush = lo_flush, ++ .fsync = lo_fsync, ++ .read = lo_read, ++ .write_buf = lo_write_buf, ++ .statfs = lo_statfs, ++ .fallocate = lo_fallocate, ++ .flock = lo_flock, ++ .getxattr = lo_getxattr, ++ .listxattr = lo_listxattr, ++ .setxattr = lo_setxattr, ++ .removexattr = lo_removexattr, ++#ifdef HAVE_COPY_FILE_RANGE ++ .copy_file_range = lo_copy_file_range, ++#endif ++ .lseek = lo_lseek, ++}; ++ ++static void ++ssam_task_stat_tick(uint64_t *tsc) ++{ ++#ifdef PERF_STAT ++ *tsc = spdk_get_ticks(); ++#endif ++ return; ++} ++ ++static void ++ssam_fs_stat_statistics(struct spdk_ssam_session *smsession, uint8_t status) ++{ ++#ifdef PERF_STAT ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)smsession; ++ uint64_t total_tsc = fsmsession->fs_stat.complete_end_tsc - fsmsession->fs_stat.start_tsc; ++ ++ if (fsmsession->fs_stat.op_type == 
SSAM_FUSE_OPCODE_READ) { /* read */ ++ fsmsession->fs_stat.read_latency_ticks += total_tsc; ++ fsmsession->fs_stat.bytes_read += fsmsession->fs_stat.payload_size; ++ fsmsession->fs_stat.num_read_ops++; ++ if (status == 0) { ++ fsmsession->fs_stat.complete_read_ios++; ++ } else { ++ fsmsession->fs_stat.err_read_ios++; ++ } ++ } else if (fsmsession->fs_stat.op_type == SSAM_FUSE_OPCODE_WRITE) { /* write */ ++ fsmsession->fs_stat.write_latency_ticks += total_tsc; ++ fsmsession->fs_stat.bytes_written += fsmsession->fs_stat.payload_size; ++ fsmsession->fs_stat.num_write_ops++; ++ if (status == 0) { ++ fsmsession->fs_stat.complete_write_ios++; ++ } else { ++ fsmsession->fs_stat.err_write_ios++; ++ } ++ } else if (fsmsession->fs_stat.op_type == SSAM_FUSE_OPCODE_FLUSH) { /* flush */ ++ fsmsession->fs_stat.flush_ios++; ++ if (status == 0) { ++ fsmsession->fs_stat.complete_flush_ios++; ++ } else { ++ fsmsession->fs_stat.err_flush_ios++; ++ } ++ } else { ++ fsmsession->fs_stat.other_ios++; ++ } ++ ++ fsmsession->fs_stat.payload_size = 0; ++#endif ++} ++ ++static udaa_error_t udaa_eml_queue_create(uint32_t depth, struct udaa_emlq **emlq, ++ uint16_t queue_id) ++{ ++ struct udaa_emlq *eml_queue = NULL; ++ ++ if (depth <= 0) { ++ SPDK_ERRLOG("Invalid depth: %d\n", depth); ++ return UDAA_ERROR_INVALID_VALUE; ++ } ++ ++ eml_queue = (struct udaa_emlq *)malloc(sizeof(struct udaa_emlq)); ++ if (eml_queue == NULL) { ++ return UDAA_ERROR_NO_MEMORY; ++ } ++ ++ eml_queue->vmio_req = (struct ssam_request **)malloc(depth * sizeof(struct ssam_request *)); ++ if (eml_queue->vmio_req == NULL) { ++ return UDAA_ERROR_NO_MEMORY; ++ } ++ ++ eml_queue->queue_id = queue_id; ++ eml_queue->eml_type = UDAA_PCI_FUNC_VIRTIO_FS; ++ *emlq = eml_queue; ++ ++ return UDAA_SUCCESS; ++} ++ ++static udaa_error_t vio_do_dma_async(uint16_t queue_id, struct ssam_dma_request *dma_req) ++{ ++ int res; ++ ++ res = ssam_dma_data_request(queue_id, dma_req); ++ if (res != 0) { ++ SPDK_ERRLOG("ssam_dma_data_request failed: %d\n", res); ++ return UDAA_ERROR_IO_FAILED; ++ } ++ ++ return UDAA_SUCCESS; ++} ++ ++static udaa_error_t vio_build_request(uint16_t queue_id, struct ssam_request *vmio_req, void *buf, ++ int num_sge, ++ int len, int skip_sges, struct spdk_ssam_fs_session *fsmsession) ++{ ++ struct ssam_dma_request dma_req = { 0 }; ++ udaa_error_t result; ++ struct spdk_ssam_dma_cb dma_cb = { ++ .status = 0, ++ .req_dir = 0, /* read */ ++ .gfunc_id = vmio_req->gfunc_id, ++ .vq_idx = 0, ++ .task_idx = 0 ++ }; ++ ++ /* DMA sges from host */ ++ dma_req.src = &vmio_req->req.cmd.iovs[skip_sges]; ++ dma_req.src_num = num_sge; ++ dma_req.cb = (void *) * (uint64_t *)&dma_cb; ++ dma_req.direction = READ_HOST_MODE; ++ fsmsession->dst_iov.iov_len = len; ++ fsmsession->dst_iov.iov_base = (void *)spdk_vtophys((void *)buf, NULL); ++ dma_req.data_len = len; ++ dma_req.dst = &fsmsession->dst_iov; ++ dma_req.dst_num = 1; ++ dma_req.flr_seq = vmio_req->flr_seq; ++ dma_req.gfunc_id = vmio_req->gfunc_id; ++ ++ result = vio_do_dma_async(queue_id, &dma_req); ++ if (result != UDAA_SUCCESS) { ++ SPDK_ERRLOG("vio_do_dma_async failed: %d\n", result); ++ return result; ++ } ++ ++ return UDAA_ERROR_AGAIN; ++} ++ ++static void ssam_fuse_share_memory(struct spdk_ssam_fs_session *fsmsession) ++{ ++ struct fuse_buf *fbuf = &fsmsession->fbuf; ++ struct fuse_in_header *in = fbuf->mem; ++ char name[SHM_NAME] = {0}; ++ int shm_fd = 0; ++ struct mount_info *info = NULL; ++ ++ if (in->opcode == SSAM_FUSE_OPCODE_INIT) { ++ snprintf(name, sizeof(name), "shm_name%d", 
fsmsession->p_lo->gfunc_id); ++ shm_fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, 0600); ++ if (shm_fd == -1) { ++ SPDK_NOTICELOG("could not open %s\n", name); ++ return; ++ } ++ ++ if (ftruncate(shm_fd, SHM_SIZE) != 0) { ++ SPDK_ERRLOG("could not truncate %s\n", name); ++ close(shm_fd); ++ return; ++ } ++ ++ info = mmap(NULL, SHM_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0); ++ if (info == MAP_FAILED) { ++ close(shm_fd); ++ SPDK_ERRLOG("failed to mmap shared memory\n"); ++ return; ++ } ++ ++ info->fd = fsmsession->fbuf.fd; ++ info->flags = fsmsession->fbuf.flags; ++ info->pos = fsmsession->fbuf.pos; ++ info->size = fsmsession->fbuf.size; ++ info->opcode = in->opcode; ++ info->gid = in->gid; ++ info->len = in->len; ++ info->nodeid = in->nodeid; ++ info->padding = in->padding; ++ info->pid = in->pid; ++ info->total_extlen = in->total_extlen; ++ info->uid = in->uid; ++ info->unique = in->unique; ++ munmap(info, SHM_SIZE); ++ close(shm_fd); ++ SPDK_NOTICELOG("successfully stored to shm_mount\n"); ++ } else if (in->opcode == SSAM_FUSE_OPCODE_DESTROY) { ++ snprintf(name, sizeof(name), "shm_name%d", fsmsession->p_lo->gfunc_id); ++ shm_unlink(name); ++ SPDK_NOTICELOG("successfully closed shm_mount\n"); ++ } ++ ++ return; ++} ++ ++static int vio_vmio_complete(uint16_t queue_id, struct ssam_request *req, void *buf, int sge_index, ++ struct spdk_ssam_fs_session *fsmsession) ++{ ++ struct ssam_io_response resp; ++ struct ssam_virtio_res *virtio_res = (struct ssam_virtio_res *)&resp.data; ++ struct iovec iov; ++ ++ memset(&resp, 0, sizeof(resp)); ++ resp.gfunc_id = req->gfunc_id; ++ resp.iocb_id = req->iocb_id; ++ resp.flr_seq = req->flr_seq; ++ resp.status = req->status; ++ resp.req = req; ++ ++ memcpy(&iov, &req->req.cmd.iovs[sge_index], sizeof(req->req.cmd.iovs[sge_index])); ++ virtio_res->iovs = &iov; ++ virtio_res->iovcnt = 1; ++ virtio_res->rsp = buf; ++ virtio_res->rsp_len = req->req.cmd.iovs[sge_index].iov_len; ++ ++ fsmsession->fbuf_used = false; ++ ssam_dev_io_dec(fsmsession->smsession.smdev); ++ ++ struct spdk_ssam_session *smsession = (struct spdk_ssam_session *)fsmsession; ++ ssam_fs_stat_statistics(smsession, resp.status); ++ return ssam_io_complete(queue_id, &resp); ++} ++ ++static int vio_vmio_empty_complete(uint16_t queue_id, struct ssam_request *req, ++ struct spdk_ssam_fs_session *fsmsession) ++{ ++ struct ssam_io_response resp; ++ struct ssam_virtio_res *virtio_res = (struct ssam_virtio_res *)&resp.data; ++ ++ memset(&resp, 0, sizeof(resp)); ++ resp.gfunc_id = req->gfunc_id; ++ resp.iocb_id = req->iocb_id; ++ resp.flr_seq = req->flr_seq; ++ resp.status = req->status; ++ resp.req = req; ++ ++ virtio_res->rsp_len = 0; ++ virtio_res->iovcnt = 0; ++ ++ fsmsession->fbuf_used = false; ++ ssam_dev_io_dec(fsmsession->smsession.smdev); ++ ++ struct spdk_ssam_session *smsession = (struct spdk_ssam_session *)fsmsession; ++ ssam_fs_stat_statistics(smsession, resp.status); ++ return ssam_io_complete(queue_id, &resp); ++} ++ ++static size_t iov_length(const struct iovec *iov, size_t count) ++{ ++ size_t seg; ++ size_t ret = 0; ++ ++ for (seg = 0; seg < count; seg++) { ++ ret += iov[seg].iov_len; ++ } ++ return ret; ++} ++ ++static udaa_error_t udaa_get_hdr_len(udaa_eml_type_t func_eml_type, int *hdr_len) ++{ ++ switch (func_eml_type) { ++ case UDAA_PCI_FUNC_VIRTIO_FS: ++ *hdr_len = 40; ++ break; ++ case UDAA_PCI_FUNC_NVME: ++ case UDAA_PCI_FUNC_VIRTIO_NET: ++ case UDAA_PCI_FUNC_VIRTIO_BLK: ++ case UDAA_PCI_FUNC_VIRTIO_SCSI: ++ case UDAA_PCI_FUNC_VIRTIO_VSOCK: ++ default: ++ SPDK_ERRLOG("Not supported
func_eml_type:%d\n", func_eml_type); ++ return UDAA_ERROR_NOT_SUPPORTED; ++ } ++ return UDAA_SUCCESS; ++} ++ ++static udaa_error_t udaa_poll_batch_blocking(int *polled_num, uint16_t tid, uint16_t poll_num, ++ struct ssam_request **vmio_req, struct iovec *ext, uint16_t queue_id, ssize_t *in_len, ++ struct lo_data *p_lo) ++{ ++ struct ssam_request_poll_opt poll_opt = { ++ .sge1_iov = ext, ++ .queue_id = queue_id, ++ }; ++ pthread_mutex_lock(&g_ssam_fs_poller_ctx.poll_mutex[p_lo->gfunc_id]); ++ *polled_num = ssam_request_poll_ext(tid, poll_num, vmio_req, &poll_opt); ++ pthread_mutex_unlock(&g_ssam_fs_poller_ctx.poll_mutex[p_lo->gfunc_id]); ++ if ((*polled_num) < 0) { ++ *in_len = *polled_num; ++ return UDAA_ERROR_AGAIN; ++ } ++ if ((*polled_num) == 0) { ++ return UDAA_ERROR_AGAIN; ++ } ++ ++ return UDAA_SUCCESS; ++} ++ ++static udaa_error_t udaa_poll_batch_non_blocking(int *polled_num, uint16_t tid, uint16_t poll_num, ++ struct ssam_request **vmio_req, struct iovec *ext, uint16_t queue_id, ssize_t *in_len, ++ struct lo_data *p_lo) ++{ ++ struct ssam_request_poll_opt poll_opt = { ++ .sge1_iov = ext, ++ .queue_id = queue_id, ++ }; ++ pthread_mutex_lock(&g_ssam_fs_poller_ctx.poll_mutex[p_lo->gfunc_id]); ++ *polled_num = ssam_request_poll_ext(tid, poll_num, vmio_req, &poll_opt); ++ pthread_mutex_unlock(&g_ssam_fs_poller_ctx.poll_mutex[p_lo->gfunc_id]); ++ if ((*polled_num) < 0) { ++ *in_len = *polled_num; ++ return UDAA_ERROR_IO_FAILED; ++ } ++ if ((*polled_num) == 0) { ++ return UDAA_ERROR_AGAIN; ++ } ++ ++ return UDAA_SUCCESS; ++} ++ ++static udaa_error_t udaa_eml_queue_progress_retrieve(struct udaa_emlq *emlq, ++ struct udaa_eml_req *eml_req, ++ int depth_idx, ssize_t *in_len, int is_blocking, unsigned tid, ++ struct spdk_ssam_fs_session *fsmsession) ++{ ++ int polled_num = 0; ++ uint16_t queue_id = emlq->queue_id, poll_num = 1; ++ size_t iov0_len; ++ struct iovec ext; /* For sge1 pre-fetch (extension) */ ++ struct ssam_request **vmio_req = &(emlq->vmio_req[depth_idx]); ++ udaa_error_t result; ++ void *buf = eml_req->buf; ++ size_t buf_len = eml_req->buf_len; ++ int skip_sges = 0; ++ int hdr_len; ++ udaa_eml_type_t func_eml_type = emlq->eml_type; ++ ++ result = udaa_get_hdr_len(func_eml_type, &hdr_len); ++ if (result != UDAA_SUCCESS) { ++ SPDK_ERRLOG("Failed to get hdr_len. udaa_error value: %d\n", result); ++ return result; ++ } ++ ++ ext.iov_base = (uint8_t *)buf + hdr_len; ++ ext.iov_len = buf_len - hdr_len; ++ ++ if (is_blocking) { ++ result = udaa_poll_batch_blocking(&polled_num, tid, poll_num, vmio_req, &ext, queue_id, in_len, ++ fsmsession->p_lo); ++ if (result != UDAA_SUCCESS) { ++ if (result != UDAA_ERROR_AGAIN) { ++ SPDK_ERRLOG("Failed to poll request polled_num = %d. udaa_error value: %d\n", polled_num, result); ++ } ++ return result; ++ } ++ } else { ++ result = udaa_poll_batch_non_blocking(&polled_num, tid, poll_num, vmio_req, &ext, queue_id, in_len, ++ fsmsession->p_lo); ++ if (result != UDAA_SUCCESS) { ++ if (result != UDAA_ERROR_AGAIN) { ++ SPDK_ERRLOG("Failed to poll request polled_num = %d. 
udaa_error value: %d\n", polled_num, result); ++ } ++ return result; ++ } ++ } ++ fsmsession->fbuf_used = true; ++ fsmsession->smsession.smdev->io_num++; ++ ++ /* user can mount tag again when reboot host without umount */ ++ if (fsmsession->p_lo->flr_seq != UINT32_MAX && fsmsession->p_lo->flr_seq != vmio_req[0]->flr_seq && ++ fsmsession->p_lo->mounted == true) { ++ lo_destroy((void *)fsmsession->p_lo); ++ } ++ fsmsession->p_lo->flr_seq = vmio_req[0]->flr_seq; ++ ++ ssam_task_stat_tick(&fsmsession->fs_stat.start_tsc); ++ /* An element contains one request and the space to send our response ++ * They're spread over multiple descriptors in a scatter/gather set ++ * and we can't trust the guest to keep them still; so copy in/out. ++ */ ++ unsigned int in_num = vmio_req[0]->req.cmd.writable; ++ struct iovec *in_sges = vmio_req[0]->req.cmd.iovs; ++ *in_len = iov_length(in_sges, in_num); ++ ++ if (*in_len > (ssize_t)buf_len) { ++ SPDK_ERRLOG("in_len exceed buf_len. in_len=%zd, buf_len=%zd\n", *in_len, buf_len); ++ *in_len = -1; ++ return UDAA_ERROR_IO_FAILED; ++ } ++ ++ iov0_len = vmio_req[0]->req.cmd.iovs[0].iov_len; ++ ++ /* Copy fuse_in_header from buffer */ ++ memcpy(buf, vmio_req[0]->req.cmd.header, hdr_len); ++ buf = (uint8_t *)buf + iov0_len + ext.iov_len; ++ ++ /* Fill Emulation request's type and buffers */ ++ eml_req->type = (int)func_eml_type; ++ ++ /* Get the rest of the request */ ++ if (in_num > 1) { ++ if (ext.iov_len == 0) { ++ /* Couldn't prefetch sge1, need to get it with DMA */ ++ skip_sges = 1; ++ } else { ++ skip_sges = 2; ++ } ++ in_num -= skip_sges; ++ if (in_num > 0) { ++ result = vio_build_request(tid, vmio_req[0], buf, in_num, *in_len - iov0_len - ext.iov_len, ++ skip_sges, ++ fsmsession); ++ if (result != UDAA_ERROR_AGAIN) { ++ SPDK_ERRLOG("vio_build_request failed: %d\n", result); ++ *in_len = -1; ++ return UDAA_ERROR_IO_FAILED; ++ } ++ fsmsession->in_len = *in_len; ++ } ++ } ++ ++ return result; ++} ++ ++static udaa_error_t udaa_eml_queue_progress_response(struct udaa_emlq *emlq, ++ struct udaa_eml_req *eml_req, ++ struct iovec *iov, int count, int depth_idx, ssize_t *out_len, unsigned tid, ++ struct spdk_ssam_fs_session *fsmsession) ++{ ++ struct ssam_dma_request dma_req; ++ int i; ++ int len; ++ size_t dst_len; ++ int sge_index; ++ struct ssam_request **vmio_req = &(emlq->vmio_req[depth_idx]); ++ udaa_error_t result = UDAA_SUCCESS; ++ struct spdk_ssam_dma_cb dma_cb = { ++ .status = 0, ++ .req_dir = 1, /* write */ ++ .gfunc_id = vmio_req[0]->gfunc_id, ++ .vq_idx = 0, ++ .task_idx = 0 ++ }; ++ struct fuse_in_header *in = fsmsession->fbuf.mem; ++ ++ dma_req.cb = (void *) * (uint64_t *)&dma_cb; ++ dma_req.direction = WRITE_HOST_MODE; ++ dma_req.data_len = 0; ++ ++ if (count <= 0) { ++ SPDK_ERRLOG("Invalid count: %d\n", count); ++ *out_len = -1; ++ return UDAA_ERROR_IO_FAILED; ++ } ++ ++ sge_index = vmio_req[0]->req.cmd.writable; ++ fsmsession->fs_stat.op_type = in->opcode; ++ ++ if (count == 1) { ++ goto complete; ++ } ++ ++ fsmsession->src_iov = calloc(count - 1, sizeof(*fsmsession->src_iov)); ++ if (fsmsession->src_iov == NULL) { ++ SPDK_ERRLOG("Failed to alloc src_iov\n"); ++ *out_len = -1; ++ result = UDAA_ERROR_NO_MEMORY; ++ goto complete; ++ } ++ ++ for (i = 1; i < count; i++) { ++ if (in->opcode == SSAM_FUSE_OPCODE_INIT) { ++ memcpy(fsmsession->static_buf, iov[i].iov_base, sizeof(struct fuse_init_out)); ++ fsmsession->src_iov[i - 1].iov_base = (void *)spdk_vtophys(fsmsession->static_buf, NULL); ++ len = sizeof(struct fuse_init_out); ++ } else { ++ 
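++ /* For all other opcodes the reply segment is assumed to already live in DMA-able memory, so it is handed to the DMA engine by its physical address as-is. */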
fsmsession->src_iov[i - 1].iov_base = (void *)spdk_vtophys(iov[i].iov_base, NULL); ++ len = iov[i].iov_len; ++ } ++ fsmsession->src_iov[i - 1].iov_len = len; ++ dma_req.data_len += len; ++ } ++ ++ if (dma_req.data_len == 0) { ++ goto complete; ++ } ++ ++ dma_req.src = fsmsession->src_iov; ++ dma_req.src_num = count - 1; ++ dma_req.dst = vmio_req[0]->req.cmd.iovs + sge_index + 1; ++ /* Set destination len */ ++ dst_len = dma_req.data_len; ++ for (i = 0; dst_len > 0; i++) { ++ if (dma_req.dst[i].iov_len < dst_len) { ++ dst_len -= dma_req.dst[i].iov_len; ++ } else { ++ dma_req.dst[i].iov_len = dst_len; ++ dma_req.dst_num = i + 1; ++ break; ++ } ++ } ++ dma_req.flr_seq = vmio_req[0]->flr_seq; ++ dma_req.gfunc_id = vmio_req[0]->gfunc_id; ++ ++ result = vio_do_dma_async(tid, &dma_req); ++ if (result != UDAA_SUCCESS) { ++ SPDK_ERRLOG("vio_do_dma_sync failed: %d\n", result); ++ *out_len = -1; ++ result = UDAA_ERROR_IO_FAILED; ++ goto complete; ++ } ++ memcpy(&fsmsession->iov_header, iov[0].iov_base, sizeof(fsmsession->iov_header)); ++ return result; ++ ++ ++complete: ++ if (fsmsession->src_iov) { ++ free(fsmsession->src_iov); ++ fsmsession->src_iov = NULL; ++ } ++ ssam_task_stat_tick(&fsmsession->fs_stat.complete_end_tsc); ++ vio_vmio_complete(tid, vmio_req[0], iov[0].iov_base, sge_index, fsmsession); ++ ++ if (*out_len != -1) { ++ *out_len = dma_req.data_len + iov[0].iov_len; ++ } ++ return result; ++} ++ ++static udaa_error_t udaa_eml_queue_empty_complete(int depth_idx, struct udaa_emlq *emlq, ++ uint16_t tid, ++ struct spdk_ssam_fs_session *fsmsession) ++{ ++ int res; ++ struct ssam_request **vmio_req = &(emlq->vmio_req[depth_idx]); ++ ++ ssam_task_stat_tick(&fsmsession->fs_stat.complete_end_tsc); ++ res = vio_vmio_empty_complete(tid, vmio_req[0], fsmsession); ++ if (res != 0) { ++ SPDK_ERRLOG("queue_id %d, vio_vmio_empty_complete failed: %d\n", tid, res); ++ return UDAA_ERROR_IO_FAILED; ++ } ++ return UDAA_SUCCESS; ++} ++ ++static ssize_t fuse_udaa_read(int fd, void *buf, size_t buf_len, void *userdata) ++{ ++ udaa_error_t result; ++ struct lo_data *p_lo = (struct lo_data *)userdata; ++ ssize_t in_len = 0; ++ int depth_idx = 0; ++ struct udaa_eml_req *eml_req = NULL; ++ struct udaa_emlq *emlq = NULL; ++ int is_blocking = 1; ++ unsigned lcore_id = rte_lcore_id(); ++ if (lcore_id >= SSAM_FS_LCORE_ID_MAX) { ++ SPDK_ERRLOG("lcore_id is out of range. lcore_id: %d\n", lcore_id); ++ return -1; ++ } ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)p_lo->smsession[lcore_id]; ++ ++ eml_req = &p_lo->fs_reqs[lcore_id]; ++ eml_req->buf = buf; ++ eml_req->buf_len = buf_len; ++ emlq = p_lo->udaa_fs_queues[lcore_id]; ++ ++ result = udaa_eml_queue_progress_retrieve(emlq, eml_req, depth_idx, &in_len, is_blocking, ++ fsmsession->smsession.smdev->tid, fsmsession); ++ if (result != UDAA_SUCCESS) { ++ if (result == UDAA_ERROR_AGAIN) { ++ return INT_MAX; ++ } ++ SPDK_ERRLOG("Failed to retrieve data from emulation queue. udaa_error value: %d\n", result); ++ } ++ ++ return in_len; ++} ++ ++static ssize_t fuse_udaa_writev(int fd, struct iovec *iov, int count, void *userdata) ++{ ++ udaa_error_t result = UDAA_SUCCESS; ++ int depth_idx = 0; ++ ssize_t out_len = 0; ++ struct lo_data *p_lo = (struct lo_data *)userdata; ++ struct udaa_eml_req *eml_req = NULL; ++ struct udaa_emlq *emlq = NULL; ++ unsigned lcore_id; ++ lcore_id = rte_lcore_id(); ++ if (lcore_id >= SSAM_FS_LCORE_ID_MAX) { ++ SPDK_ERRLOG("lcore_id is out of range. 
lcore_id: %d\n", lcore_id); ++ return -1; ++ } ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)p_lo->smsession[lcore_id]; ++ ++ eml_req = &p_lo->fs_reqs[lcore_id]; ++ emlq = p_lo->udaa_fs_queues[lcore_id]; ++ ++ if (spdk_likely(p_lo->have_shm == false)) { ++ result = udaa_eml_queue_progress_response(emlq, eml_req, iov, count, depth_idx, &out_len, ++ fsmsession->smsession.smdev->tid, fsmsession); ++ } else { ++ p_lo->have_shm = false; ++ } ++ ++ if (result != UDAA_SUCCESS) { ++ SPDK_ERRLOG("Failed to respond to the emulation queue. udaa_error value: %d\n", result); ++ } ++ ++ return out_len; ++} ++ ++static udaa_error_t create_fs_device(struct udaa_emlq **emlq, uint32_t num_queues, ++ uint16_t gfunc_id) ++{ ++ udaa_error_t result; ++ uint16_t queue_id = ssam_get_queue_id(gfunc_id); ++ ++ /* Create the EmlQ */ ++ for (uint32_t i = 0; i < num_queues; i++) { ++ result = udaa_eml_queue_create(1, &emlq[i], queue_id); ++ if (result != UDAA_SUCCESS) { ++ SPDK_ERRLOG("Unable to create emulation queue: %d\n", result); ++ return result; ++ } ++ } ++ ++ return UDAA_SUCCESS; ++} ++ ++static udaa_error_t udaa_eml_queues_destroy(struct udaa_emlq **emlqs, int num_queues) ++{ ++ if (emlqs == NULL) { ++ return UDAA_SUCCESS; ++ } ++ ++ for (int i = 0; i < num_queues; i++) { ++ if (emlqs[i]) { ++ if (emlqs[i]->vmio_req) { ++ free(emlqs[i]->vmio_req); ++ } ++ free(emlqs[i]); ++ } ++ } ++ free(emlqs); ++ ++ return UDAA_SUCCESS; ++} ++ ++static void ++ssam_fs_remove_self(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)smsession; ++ struct lo_data *p_lo = fsmsession->p_lo; ++ ++ pthread_mutex_lock(&p_lo->exit_mutex); ++ p_lo->exit_num++; ++ if (p_lo->exit_num == p_lo->num_queues) { ++ fuse_session_reset(p_lo->se); ++ fuse_session_destroy(p_lo->se); ++ udaa_eml_queues_destroy(p_lo->udaa_fs_queues, SSAM_FS_LCORE_ID_MAX); ++ if (p_lo->fs_reqs != NULL) { ++ free(p_lo->fs_reqs); ++ } ++ if (p_lo->source != NULL) { ++ free(p_lo->source); ++ } ++ if (p_lo->name != NULL) { ++ free(p_lo->name); ++ } ++ memset(p_lo, 0, sizeof(*p_lo)); ++ } ++ pthread_mutex_unlock(&p_lo->exit_mutex); ++ ++ if (fsmsession->fs_poller != NULL) { ++ spdk_poller_unregister(&fsmsession->fs_poller); ++ fsmsession->fs_poller = NULL; ++ } ++ ++ if (smsession->name != NULL) { ++ free(smsession->name); ++ smsession->name = NULL; ++ } ++ ++ ssam_free_fs_session(fsmsession); ++ return; ++} ++ ++static void ++ssam_fs_dump_info_json(struct spdk_ssam_session *smsession, struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)smsession; ++ ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "name", ssam_session_get_name(smsession)); ++ spdk_json_write_named_uint32(w, "function_id", (uint32_t)smsession->gfunc_id); ++ spdk_json_write_named_uint32(w, "queues", (uint32_t)smsession->max_queues); ++ spdk_json_write_named_string(w, "dbdf", fsmsession->p_lo->dbdf); ++ spdk_json_write_named_uint32(w, "max_threads", fsmsession->p_lo->num_queues); ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_fs_clear_iostat_json(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)smsession; ++ memset(&fsmsession->fs_stat, 0, sizeof(struct ssam_fs_stat)); ++} ++ ++static void ++ssam_fs_show_iostat_json(struct spdk_ssam_session *smsession, uint32_t id, ++ struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_fs_session *fsmsession = 
(struct spdk_ssam_fs_session *)smsession; ++ ++ struct ssam_fs_stat fs_stat; ++ ++ memcpy(&fs_stat, &fsmsession->fs_stat, sizeof(struct ssam_fs_stat)); ++ ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_uint32(w, "function_id", (uint32_t)smsession->gfunc_id); ++ spdk_json_write_named_string(w, "name", ssam_session_get_name(smsession)); ++ spdk_json_write_named_string(w, "ctrlr", ssam_dev_get_name(smsession->smdev)); ++ spdk_json_write_named_string_fmt(w, "cpumask", "0x%s", ++ spdk_cpuset_fmt(spdk_thread_get_cpumask(smsession->smdev->thread))); ++ spdk_json_write_named_string(w, "dbdf", fsmsession->p_lo->dbdf); ++ spdk_json_write_named_uint64(w, "complete_read_ios", fs_stat.complete_read_ios); ++ spdk_json_write_named_uint64(w, "err_read_ios", fs_stat.err_read_ios); ++ spdk_json_write_named_uint64(w, "complete_write_ios", fs_stat.complete_write_ios); ++ spdk_json_write_named_uint64(w, "err_write_ios", fs_stat.err_write_ios); ++ spdk_json_write_named_uint64(w, "flush_ios", fs_stat.flush_ios); ++ spdk_json_write_named_uint64(w, "complete_flush_ios", fs_stat.complete_flush_ios); ++ spdk_json_write_named_uint64(w, "err_flush_ios", fs_stat.err_flush_ios); ++ ++ spdk_json_write_named_uint64(w, "other_ios", fs_stat.other_ios); ++ spdk_json_write_named_uint64(w, "bytes_read", fs_stat.bytes_read); ++ spdk_json_write_named_uint64(w, "num_read_ops", fs_stat.num_read_ops); ++ spdk_json_write_named_uint64(w, "bytes_written", fs_stat.bytes_written); ++ spdk_json_write_named_uint64(w, "num_write_ops", fs_stat.num_write_ops); ++ spdk_json_write_named_uint64(w, "read_latency_ticks", fs_stat.read_latency_ticks); ++ spdk_json_write_named_uint64(w, "write_latency_ticks", fs_stat.write_latency_ticks); ++ spdk_json_write_named_uint64(w, "fatal_ios", fs_stat.fatal_ios); ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_fs_write_config_json(struct spdk_ssam_session *smsession, struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)smsession; ++ ++ if (fsmsession == NULL || fsmsession->p_lo == NULL || fsmsession->need_write_config != true) { ++ return; ++ } ++ ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "method", "fs_controller_create"); ++ ++ spdk_json_write_named_object_begin(w, "params"); ++ spdk_json_write_named_string(w, "dbdf", fsmsession->p_lo->dbdf); ++ spdk_json_write_named_string(w, "fs_name", fsmsession->p_lo->source); ++ spdk_json_write_named_string(w, "name", fsmsession->p_lo->name); ++ spdk_json_write_named_uint32(w, "max_threads", fsmsession->p_lo->num_queues); ++ spdk_json_write_object_end(w); ++ ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_free_fs_session(struct spdk_ssam_fs_session *fsmsession) ++{ ++ if (fsmsession->fbuf.mem != NULL) { ++ spdk_free(fsmsession->fbuf.mem); ++ fsmsession->fbuf.mem = NULL; ++ } ++ ++ if (fsmsession->smsession.name != NULL) { ++ free(fsmsession->smsession.name); ++ fsmsession->smsession.name = NULL; ++ } ++ ++ if (fsmsession->dynamic_buf != NULL) { ++ ssam_mempool_free(fsmsession->smsession.mp, fsmsession->dynamic_buf); ++ fsmsession->dynamic_buf = NULL; ++ } ++ ++ if (fsmsession->static_buf != NULL) { ++ spdk_free(fsmsession->static_buf); ++ fsmsession->static_buf = NULL; ++ } ++ ++ ssam_sessions_remove(fsmsession->smsession.smdev->smsessions, &fsmsession->smsession); ++ fsmsession->smsession.smdev->active_session_num--; ++ fsmsession->smsession.smdev = NULL; ++ ++ memset(fsmsession, 0, sizeof(*fsmsession)); ++ free(fsmsession); ++ fsmsession = 
NULL; ++} ++ ++static void ++ssam_free_lo_data(struct lo_data *p_lo) ++{ ++ for (int i = 0; i < SSAM_FS_LCORE_ID_MAX; i++) { ++ if (p_lo->smsession[i] != NULL) { ++ ssam_free_fs_session((struct spdk_ssam_fs_session *)p_lo->smsession[i]); ++ p_lo->smsession[i] = NULL; ++ } ++ } ++ udaa_eml_queues_destroy(p_lo->udaa_fs_queues, SSAM_FS_LCORE_ID_MAX); ++ if (p_lo->fs_reqs != NULL) { ++ free(p_lo->fs_reqs); ++ } ++ if (p_lo->source != NULL) { ++ free(p_lo->source); ++ } ++ if (p_lo->name != NULL) { ++ free(p_lo->name); ++ } ++ if (p_lo->dbdf != NULL) { ++ free(p_lo->dbdf); ++ } ++ memset(p_lo, 0, sizeof(*p_lo)); ++} ++ ++static void ++ssam_free_fuse_session(struct lo_data *p_lo, struct spdk_ssam_fs_session *fsmsession) ++{ ++ pthread_mutex_lock(&p_lo->exit_mutex); ++ p_lo->exit_num++; ++ if (p_lo->exit_num < p_lo->num_queues) { ++ if (fsmsession->fs_poller != NULL) { ++ spdk_poller_unregister(&fsmsession->fs_poller); ++ fsmsession->fs_poller = NULL; ++ } ++ pthread_mutex_unlock(&p_lo->exit_mutex); ++ return; ++ } ++ pthread_mutex_unlock(&p_lo->exit_mutex); ++ SPDK_NOTICELOG("fs controller %u is removed\n", fsmsession->smsession.gfunc_id); ++ if (fsmsession->fs_poller != NULL) { ++ spdk_poller_unregister(&fsmsession->fs_poller); ++ fsmsession->fs_poller = NULL; ++ } ++ ++ fuse_session_reset(p_lo->se); ++ fuse_session_destroy(p_lo->se); ++ ++ if (p_lo->rsp_fn != NULL) { ++ p_lo->rsp_fn(p_lo->rsp_ctx, 0); ++ p_lo->rsp_fn = NULL; ++ } ++ ++ ssam_free_lo_data(p_lo); ++ ++ return; ++} ++ ++static void ++ssam_fs_response_worker(struct spdk_ssam_session *smsession, void *arg) ++{ ++ struct ssam_dma_rsp *dma_rsp = (struct ssam_dma_rsp *)arg; ++ struct spdk_ssam_dma_cb *dma_cb = (struct spdk_ssam_dma_cb *)&dma_rsp->cb; ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)smsession; ++ unsigned lcore_id = rte_lcore_id(); ++ struct udaa_emlq *emlq = fsmsession->p_lo->udaa_fs_queues[lcore_id]; ++ struct ssam_request **vmio_req = &(emlq->vmio_req[0]); ++ ++ if (dma_rsp->status != 0) { ++ SPDK_ERRLOG("Response error status %d\n", dma_rsp->status); ++ return; ++ } ++ ++ if (dma_cb->req_dir == 0) { /* read */ ++ fsmsession->fbuf.size = fsmsession->in_len; ++ fuse_session_process_buf(fsmsession->p_lo->se, &fsmsession->fbuf); ++ } else { /* write */ ++ if (fsmsession->src_iov) { ++ free(fsmsession->src_iov); ++ fsmsession->src_iov = NULL; ++ } ++ ssam_task_stat_tick(&fsmsession->fs_stat.complete_end_tsc); ++ vio_vmio_complete(smsession->smdev->tid, vmio_req[0], &fsmsession->iov_header, ++ vmio_req[0]->req.cmd.writable, ++ fsmsession); ++ if (fsmsession->dynamic_buf != NULL) { ++ ssam_mempool_free(fsmsession->smsession.mp, fsmsession->dynamic_buf); ++ fsmsession->dynamic_buf = NULL; ++ } ++ } ++ ++ return; ++} ++ ++static int ++ssam_fuse_session_loop(void *arg) ++{ ++ struct spdk_ssam_session *smsession = (struct spdk_ssam_session *)arg; ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)smsession; ++ struct fuse_session *se = fsmsession->p_lo->se; ++ struct lo_data *p_lo = (struct lo_data *)se->userdata; ++ int res = 0; ++ struct fuse_buf *fbuf = &fsmsession->fbuf; ++ ++ if (fsmsession->fbuf_used == true) { ++ return SPDK_POLLER_BUSY; ++ } ++ ++ if (!p_lo->delete_flag) { ++ res = fuse_session_receive_buf(se, fbuf); ++ if (res == -EINTR || res == INT_MAX) { ++ return SPDK_POLLER_BUSY; ++ } ++ ssam_fuse_share_memory(fsmsession); ++ if (res > 0) { ++ fuse_session_process_buf(se, fbuf); ++ return SPDK_POLLER_BUSY; ++ } ++ } ++ ++ ssam_free_fuse_session(p_lo, 
fsmsession); ++ return SPDK_POLLER_BUSY; ++} ++ ++static void ++ssam_fs_read_shm_mem(struct spdk_ssam_fs_session *fsmsession) ++{ ++ struct lo_data *p_lo = (struct lo_data *)fsmsession->p_lo; ++ struct fuse_session *se = p_lo->se; ++ char name[SHM_NAME] = {0}; ++ int shm_fd = 0; ++ struct mount_info *info = NULL; ++ struct fuse_buf *fb = NULL; ++ struct fuse_in_header *in = NULL; ++ void *inarg = NULL; ++ struct fuse_init_in *arg = NULL; ++ ++ snprintf(name, sizeof(name), "shm_name%d", fsmsession->smsession.gfunc_id); ++ ++ shm_fd = shm_open(name, O_RDWR, 0600); ++ if (shm_fd < 0 || p_lo->have_shm == true) { ++ close(shm_fd); ++ return; ++ } ++ ++ if (ftruncate(shm_fd, SHM_SIZE) != 0) { ++ close(shm_fd); ++ SPDK_ERRLOG("could not truncate %s\n", name); ++ return; ++ } ++ ++ info = mmap(NULL, SHM_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0); ++ if (info == MAP_FAILED) { ++ close(shm_fd); ++ SPDK_ERRLOG("can not read mmap"); ++ return; ++ } ++ ++ fb = (struct fuse_buf *)malloc(sizeof(struct fuse_buf)); ++ if (fb == NULL) { ++ munmap(info, SHM_SIZE); ++ close(shm_fd); ++ SPDK_ERRLOG("can not alloc memory"); ++ return; ++ } ++ ++ fb->mem = malloc(se->bufsize); ++ if (fb->mem == NULL) { ++ munmap(info, SHM_SIZE); ++ close(shm_fd); ++ SPDK_ERRLOG("can not alloc memory"); ++ return; ++ } ++ ++ in = fb->mem; ++ inarg = (void *) &in[1]; ++ arg = (struct fuse_init_in *) inarg; ++ ++ fb->fd = info->fd; ++ fb->flags = info->flags; ++ fb->pos = info->pos; ++ fb->size = info->size; ++ in->opcode = info->opcode; ++ in->padding = info->padding; ++ in->gid = info->gid; ++ in->len = info->len; ++ in->pid = info->pid; ++ in->total_extlen = info->total_extlen; ++ in->uid = info->uid; ++ in->unique = info->unique; ++ in->nodeid = info->nodeid; ++ munmap(info, SHM_SIZE); ++ close(shm_fd); ++ arg->major = MEM_MAJOR; ++ arg->minor = MEM_MINOR; ++ p_lo->have_shm = true; ++ ++ fuse_session_process_buf(se, fb); ++ SPDK_NOTICELOG("success reset mount\n"); ++ ++ free(fb->mem); ++ free(fb); ++} ++ ++static int ++ssam_fs_reactor_loop_start(struct spdk_ssam_session *smsession, void **unused) ++{ ++ struct spdk_ssam_fs_session *fsmsession = (struct spdk_ssam_fs_session *)smsession; ++ struct lo_data *p_lo = (struct lo_data *)fsmsession->p_lo; ++ ++ pthread_mutex_lock(&p_lo->mutex); ++ if (p_lo->load_shm_flag == false) { ++ ssam_fs_read_shm_mem(fsmsession); ++ p_lo->load_shm_flag = true; ++ } ++ pthread_mutex_unlock(&p_lo->mutex); ++ ++ if (fsmsession->fs_poller == NULL) { ++ fsmsession->fs_poller = SPDK_POLLER_REGISTER(ssam_fuse_session_loop, smsession, 0); ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_init_fs_session_reg_info(struct spdk_ssam_session_reg_info *reg_info, ++ struct ssam_fs_construct_info *info) ++{ ++ snprintf(reg_info->type_name, SPDK_SESSION_TYPE_MAX_LEN, "%s", SPDK_SESSION_TYPE_FS); ++ reg_info->backend = &g_ssam_fs_session_backend; ++ reg_info->session_ctx_size = sizeof(struct spdk_ssam_fs_session) - sizeof(struct spdk_ssam_session); ++ reg_info->gfunc_id = info->gfunc_id; ++ reg_info->queues = ssam_get_queues(); ++ if (reg_info->queues > SPDK_SSAM_MAX_VQUEUES) { ++ SPDK_ERRLOG("Queue number out of range, need less or equal than %u, actually %u.\n", ++ SPDK_SSAM_MAX_VQUEUES, reg_info->queues); ++ return -ERANGE; ++ } ++ reg_info->name = info->name; ++ return 0; ++} ++ ++static int ++ssam_create_fs_session(struct ssam_fs_construct_info *info, struct lo_data *lo) ++{ ++ int ret; ++ struct spdk_ssam_session_reg_info reg_info; ++ struct spdk_ssam_session *smsession = NULL; ++ struct 
spdk_ssam_fs_session *fsmsession = NULL; ++ uint16_t max_threads = 0; ++ uint32_t thread_mask; ++ ++ ret = ssam_init_fs_session_reg_info(&reg_info, info); ++ if (ret != 0) { ++ SPDK_ERRLOG("failed to init fs session reg info\n"); ++ return ret; ++ } ++ ++ thread_mask = ssam_get_tids(info->max_threads); ++ for (int i = 0; i < SSAM_FS_LCORE_ID_MAX; i++) { ++ if (thread_mask & (1 << i)) { ++ reg_info.tid = i; ++ ret = ssam_session_register(&reg_info, &smsession); ++ if (ret != 0) { ++ SPDK_ERRLOG("failed to register session\n"); ++ return ret; ++ } ++ fsmsession = (struct spdk_ssam_fs_session *)smsession; ++ fsmsession->p_lo = lo; ++ fsmsession->fbuf.mem = NULL; ++ fsmsession->fbuf_used = false; ++ fsmsession->dynamic_buf = NULL; ++ fsmsession->need_write_config = (max_threads == 0); ++ fsmsession->static_buf = spdk_zmalloc(SSAM_FS_STATIC_BUF_SIZE, SSAM_FS_DEFAULT_ALIGN, NULL, ++ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); ++ if (!fsmsession->static_buf) { ++ SPDK_ERRLOG("malloc_buf spdk_zmalloc() failed\n"); ++ return -ENOMEM; ++ } ++ ssam_session_start_done(smsession, 0); ++ lo->smsession[smsession->smdev->lcore_id] = smsession; ++ max_threads++; ++ } ++ } ++ ++ info->max_threads = max_threads; ++ return 0; ++} ++ ++static int ++ssam_init_lo_data(struct ssam_fs_construct_info *info, struct lo_data *lo) ++{ ++ struct stat stat; ++ char source[PATH_MAX] = {0}; ++ int ret; ++ ++ pthread_mutex_init(&lo->mutex, NULL); ++ pthread_mutex_init(&lo->exit_mutex, NULL); ++ lo->root.next = lo->root.prev = &lo->root; ++ lo->root.fd = -1; ++ lo->cache = CACHE_NORMAL; ++ lo->gfunc_id = info->gfunc_id; ++ lo->exit_num = 0; ++ lo->mounted = false; ++ lo->root.refcount = SSAM_FS_REF_COUNT; ++ lo->num_queues = info->max_threads; ++ lo->used = true; ++ lo->flr_seq = UINT32_MAX; ++ lo->have_shm = false; ++ lo->load_shm_flag = false; ++ lo->delete_flag = false; ++ lo->rsp_fn = NULL; ++ lo->rsp_ctx = NULL; ++ ++ lo->name = spdk_sprintf_alloc("%s", info->name); ++ if (lo->name == NULL) { ++ SPDK_ERRLOG("failed to allocate controller name\n"); ++ return -ENOMEM; ++ } ++ ++ lo->dbdf = spdk_sprintf_alloc("%s", info->dbdf); ++ if (lo->dbdf == NULL) { ++ SPDK_ERRLOG("failed to allocate controller dbdf\n"); ++ return -ENOMEM; ++ } ++ ++ if (realpath(info->fs_name, source) == NULL) { ++ SPDK_ERRLOG("Failed to execute the realpath function.\n"); ++ return -EINVAL; ++ } ++ ++ lo->source = spdk_sprintf_alloc("%s", source); ++ if (lo->source == NULL) { ++ SPDK_ERRLOG("failed to allocate controller source\n"); ++ return -ENOMEM; ++ } ++ ++ if (lo->source) { ++ ret = lstat(lo->source, &stat); ++ if (ret == -1) { ++ SPDK_ERRLOG("failed to stat source (\"%s\")\n", lo->source); ++ return ret; ++ } ++ if (!S_ISDIR(stat.st_mode)) { ++ SPDK_ERRLOG("source is not a directory\n"); ++ return -ENOTDIR; ++ } ++ } else { ++ lo->source = strdup("/"); ++ if (!lo->source) { ++ SPDK_ERRLOG("fuse: memory allocation failed\n"); ++ return -ENOMEM; ++ } ++ } ++ ++ if (!lo->timeout_set) { ++ switch (lo->cache) { ++ case CACHE_NEVER: ++ lo->timeout = 0.0; ++ break; ++ case CACHE_NORMAL: ++ lo->timeout = 1.0; ++ break; ++ case CACHE_ALWAYS: ++ lo->timeout = 86400.0; ++ break; ++ } ++ } else if (lo->timeout < 0) { ++ SPDK_ERRLOG("timeout is negative (%lf)\n", lo->timeout); ++ return -EINVAL; ++ } ++ ++ lo->root.fd = open(lo->source, O_PATH); ++ if (lo->root.fd == -1) { ++ SPDK_ERRLOG("open fs path failed\n"); ++ return -EINVAL; ++ } ++ ++ lo->udaa_fs_queues = (struct udaa_emlq **)calloc(SSAM_FS_LCORE_ID_MAX, sizeof(struct udaa_emlq *)); ++ if
(lo->udaa_fs_queues == NULL) { ++ SPDK_ERRLOG("failed to alloc udaa_fs_queues\n"); ++ return -ENOMEM; ++ } ++ ++ lo->fs_reqs = (struct udaa_eml_req *)calloc(SSAM_FS_LCORE_ID_MAX, sizeof(struct udaa_eml_req)); ++ if (lo->fs_reqs == NULL) { ++ SPDK_ERRLOG("failed to alloc fs_reqs\n"); ++ return -ENOMEM; ++ } ++ ++ if (create_fs_device(lo->udaa_fs_queues, SSAM_FS_LCORE_ID_MAX, info->gfunc_id) != UDAA_SUCCESS) { ++ return -ENOMEM; ++ } ++ return 0; ++} ++ ++static int ++ssam_check_contrller_info(struct ssam_fs_construct_info *info) ++{ ++ if (info->gfunc_id >= SSAM_HOSTEP_NUM_MAX) { ++ SPDK_ERRLOG("gfunc_id %u out of range\n", info->gfunc_id); ++ return -ERANGE; ++ } ++ if (lo_map[info->gfunc_id].used == true) { ++ SPDK_ERRLOG("fs controller %u already exists\n", info->gfunc_id); ++ return -EEXIST; ++ } ++ return 0; ++} ++ ++static int ++ssam_dev_io_scan_poller(void *pf_poller_ctx) ++{ ++ int polled_num = 0; ++ int tid = 0; ++ int opcode = 0; ++ uint8_t func_id = ((struct spdk_ssam_dev_io_scan_poller_ctx *)pf_poller_ctx)->func_id; ++ bool restart_flag = ((struct spdk_ssam_dev_io_scan_poller_ctx *)pf_poller_ctx)->restart_flag; ++ int queue_id = ssam_get_queue_id(func_id); ++ struct ssam_io_response resp; ++ struct ssam_virtio_res *virtio_res = (struct ssam_virtio_res *)&resp.data; ++ char buffer[SSAM_FS_BUF_LEN]; ++ struct iovec ext; ++ ext.iov_base = buffer; ++ ext.iov_len = SSAM_FS_BUF_LEN; ++ struct iovec iov; ++ struct ssam_request *io_req[1] = { 0 }; ++ struct ssam_request_poll_opt poll_opt = { ++ .sge1_iov = &ext, ++ .queue_id = queue_id, ++ }; ++ struct fuse_release_in *arg = (struct fuse_release_in *)buffer; ++ struct lo_dirp *d = NULL; ++ int fd; ++ ++ pthread_mutex_lock(&g_ssam_fs_poller_ctx.poll_mutex[func_id]); ++ polled_num = ssam_request_poll_ext(tid, 1, io_req, &poll_opt); ++ pthread_mutex_unlock(&g_ssam_fs_poller_ctx.poll_mutex[func_id]); ++ if (io_req[0] != NULL && io_req[0]->type != SSAM_VIRTIO_FS_IO) { ++ SPDK_ERRLOG(" get illegal type, io_req[0]->type is %d, io_req[0]->gfunc_id is %d, io_req[0]->status is %d\n", ++ io_req[0]->type, io_req[0]->gfunc_id, io_req[0]->status); ++ return SPDK_POLLER_BUSY; ++ } ++ if (polled_num <= 0) { ++ return SPDK_POLLER_BUSY; ++ } ++ opcode = ((struct fuse_in_header *)io_req[0]->req.cmd.header)->opcode; ++ struct fuse_out_header rsp = { ++ .len = sizeof(struct fuse_out_header), ++ .error = opcode != SSAM_FUSE_OPCODE_DESTROY ? 
-ENODEV : 0, ++ .unique = 0, ++ }; ++ ++ if (restart_flag != true) { ++ if (opcode == SSAM_FUSE_OPCODE_RELEASE) { ++ fd = arg->fh; ++ close(fd); ++ } else if (opcode == SSAM_FUSE_OPCODE_RELEASEDIR) { ++ d = (struct lo_dirp *)(uintptr_t)arg->fh; ++ closedir(d->dp); ++ free(d); ++ } ++ } ++ ++ memset(&resp, 0, sizeof(resp)); ++ resp.gfunc_id = io_req[0]->gfunc_id; ++ resp.iocb_id = io_req[0]->iocb_id; ++ resp.flr_seq = io_req[0]->flr_seq; ++ resp.status = io_req[0]->status; ++ resp.req = io_req[0]; ++ ++ memcpy(&iov, &io_req[0]->req.cmd.iovs[io_req[0]->req.cmd.writable], ++ sizeof(io_req[0]->req.cmd.iovs[io_req[0]->req.cmd.writable])); ++ virtio_res->iovs = &iov; ++ virtio_res->iovcnt = 1; ++ virtio_res->rsp = &rsp; ++ virtio_res->rsp_len = sizeof(struct fuse_out_header); ++ ssam_io_complete(0, &resp); ++ ++ return SPDK_POLLER_BUSY; ++} ++ ++int ++ssam_fs_construct(struct ssam_fs_construct_info *info) ++{ ++ struct fuse_session *se = NULL; ++ struct fuse_cmdline_opts opts = { 0 }; ++ int ret = -1; ++ struct fuse_custom_io vio_io = { ++ .read = fuse_udaa_read, ++ .writev = fuse_udaa_writev ++ }; ++ struct lo_data *lo = &lo_map[info->gfunc_id]; ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = false, ++ .need_rsp = false, ++ }; ++ struct spdk_ssam_fs_session *fsmsession = NULL; ++ ++ ssam_lock(); ++ ++ ret = ssam_check_contrller_info(info); ++ if (ret != 0) { ++ ssam_unlock(); ++ return ret; ++ } ++ ++ ret = ssam_create_fs_session(info, lo); ++ if (ret != 0) { ++ goto err_out1; ++ } ++ ++ /* ssam -s -f */ ++ char *argv[SSAM_FUSE_ARGS_NUM] = { "ssam", "-s", "-f" }; ++ struct fuse_args args = FUSE_ARGS_INIT(SSAM_FUSE_ARGS_NUM, argv); ++ ++ ret = fuse_parse_cmdline(&args, &opts); ++ if (ret != 0) { ++ goto err_out1; ++ } ++ ++ ret = ssam_init_lo_data(info, lo); ++ if (ret != 0) { ++ goto err_out1; ++ } ++ ++ se = fuse_session_new(&args, &lo_oper, sizeof(lo_oper), lo); ++ if (se == NULL) { ++ SPDK_ERRLOG("fuse_session_new failed\n"); ++ ret = -1; ++ goto err_out1; ++ } ++ ++ ret = fuse_session_custom_io(se, &vio_io, lo->root.fd); ++ if (ret != 0) { ++ goto err_out2; ++ } ++ ++ ssam_update_virtio_device_used(info->gfunc_id, 1); ++ ++ if (g_ssam_fs_poller_ctx.pf_poller[info->gfunc_id] != NULL) { ++ spdk_poller_unregister(&g_ssam_fs_poller_ctx.pf_poller[info->gfunc_id]); ++ g_ssam_fs_poller_ctx.pf_poller[info->gfunc_id] = NULL; ++ } ++ fuse_daemonize(opts.foreground); ++ se->mountpoint = lo->source; ++ lo->se = se; ++ ++ for (int i = 0; i < SSAM_FS_LCORE_ID_MAX; i++) { ++ if (lo->smsession[i] != NULL) { ++ fsmsession = (struct spdk_ssam_fs_session *)lo->smsession[i]; ++ fsmsession->fbuf.mem = spdk_zmalloc(se->bufsize, SSAM_FS_DEFAULT_ALIGN, NULL, SPDK_ENV_LCORE_ID_ANY, ++ SPDK_MALLOC_DMA); ++ if (fsmsession->fbuf.mem == NULL) { ++ SPDK_ERRLOG("fbuf malloc failed\n"); ++ ret = -ENOMEM; ++ goto err_out2; ++ } ++ ssam_send_event_to_session(lo->smsession[i], ssam_fs_reactor_loop_start, NULL, send_event_flag, ++ NULL); ++ } ++ } ++ ++ SPDK_NOTICELOG("fs controller %u is created by %u threads\n", info->gfunc_id, info->max_threads); ++ ssam_unlock(); ++ return 0; ++ ++err_out2: ++ fuse_session_destroy(se); ++err_out1: ++ ssam_free_lo_data(lo); ++ ssam_unlock(); ++ return ret; ++} ++ ++int ++ssam_fs_destory(char *name, bool force, void *request, ++ spdk_ssam_session_rsp_fn rpc_ssam_send_response_cb) ++{ ++ char shm_name[SHM_NAME] = {0}; ++ ++ for (int i = 0; i < SSAM_HOSTEP_NUM_MAX; i++) { ++ if (lo_map[i].name != NULL && strcmp(lo_map[i].name, name) == 0) { ++ if (lo_map[i].mounted == 
true && force != true) { ++ SPDK_ERRLOG("fs controller %u is busy\n", lo_map[i].gfunc_id); ++ return -EBUSY; ++ } ++ SPDK_NOTICELOG("fs controller %u removing, the force flag is %d\n", lo_map[i].gfunc_id, force); ++ snprintf(shm_name, sizeof(shm_name), "shm_name%d", lo_map[i].gfunc_id); ++ shm_unlink(shm_name); ++ lo_map[i].delete_flag = true; ++ lo_map[i].rsp_ctx = request; ++ lo_map[i].rsp_fn = rpc_ssam_send_response_cb; ++ g_ssam_fs_poller_ctx.pf_poller_ctx[lo_map[i].gfunc_id].restart_flag = false; ++ g_ssam_fs_poller_ctx.pf_poller[lo_map[i].gfunc_id] = SPDK_POLLER_REGISTER(ssam_dev_io_scan_poller, ++ &g_ssam_fs_poller_ctx.pf_poller_ctx[lo_map[i].gfunc_id], 0); ++ ssam_update_virtio_device_used(lo_map[i].gfunc_id, 0); ++ return 0; ++ } ++ } ++ ++ return -ENODEV; ++} ++ ++static int ssam_fs_flr_poller(void *flr_map) ++{ ++ int i; ++ uint8_t *flr_map_p = NULL; ++ ++ flr_map_p = (uint8_t *)flr_map; ++ for (i = 0; i < SSAM_HOSTEP_NUM_MAX; i++) { ++ if (lo_map[i].used == true && lo_map[i].flr_seq != *flr_map_p && lo_map[i].flr_seq != UINT32_MAX) { ++ if (lo_map[i].mounted == true && lo_map[i].se != NULL) { ++ lo_destroy((void *)&lo_map[i]); ++ } ++ lo_map[i].flr_seq = *flr_map_p; ++ } ++ flr_map_p++; ++ } ++ ++ return SPDK_POLLER_BUSY; ++} ++ ++int spdk_ssam_fs_poller_init(void) ++{ ++ int i; ++ ++ if (ssam_get_virtio_fs_enable() == false) { ++ return 0; ++ } ++ ++ for (i = 0; i < SSAM_HOSTEP_NUM_MAX; i++) { ++ pthread_mutex_init(&g_ssam_fs_poller_ctx.poll_mutex[i], NULL); ++ } ++ ++ g_ssam_fs_poller_ctx.flr_fd = open(SSAM_FS_FLR_SEQ_PATH, O_RDONLY); ++ if (g_ssam_fs_poller_ctx.flr_fd == -1) { ++ return -1; ++ } ++ ++ g_ssam_fs_poller_ctx.flr_map = mmap(NULL, SSAM_HOSTEP_NUM_MAX * sizeof(uint8_t), PROT_READ, ++ MAP_SHARED, ++ g_ssam_fs_poller_ctx.flr_fd, 0); ++ if (g_ssam_fs_poller_ctx.flr_map == MAP_FAILED) { ++ close(g_ssam_fs_poller_ctx.flr_fd); ++ return -1; ++ } ++ ++ if (g_ssam_fs_poller_ctx.flr_seq_poller == NULL) { ++ g_ssam_fs_poller_ctx.flr_seq_poller = SPDK_POLLER_REGISTER(ssam_fs_flr_poller, ++ g_ssam_fs_poller_ctx.flr_map, ++ SSAM_FS_FLR_POLLER_PERIOD); ++ } ++ ++ for (int i = 0; i < SSAM_HOSTEP_NUM_MAX; i++) { ++ if (ssam_get_virtio_type(i) != SSAM_DEVICE_VIRTIO_FS) { ++ continue; ++ } ++ g_ssam_fs_poller_ctx.pf_poller_ctx[i].func_id = i; ++ g_ssam_fs_poller_ctx.pf_poller_ctx[i].restart_flag = true; ++ g_ssam_fs_poller_ctx.pf_poller[i] = SPDK_POLLER_REGISTER(ssam_dev_io_scan_poller, ++ &g_ssam_fs_poller_ctx.pf_poller_ctx[i], 0); ++ } ++ ++ return 0; ++} ++ ++void spdk_ssam_fs_poller_destroy(void) ++{ ++ if (g_ssam_fs_poller_ctx.flr_seq_poller != NULL) { ++ spdk_poller_unregister(&g_ssam_fs_poller_ctx.flr_seq_poller); ++ g_ssam_fs_poller_ctx.flr_seq_poller = NULL; ++ } ++ ++ if (g_ssam_fs_poller_ctx.flr_map != MAP_FAILED) { ++ munmap(g_ssam_fs_poller_ctx.flr_map, SSAM_HOSTEP_NUM_MAX * sizeof(uint8_t)); ++ } ++ ++ if (g_ssam_fs_poller_ctx.flr_fd >= 0) { ++ close(g_ssam_fs_poller_ctx.flr_fd); ++ } ++ ++ for (int i = 0; i < SSAM_HOSTEP_NUM_MAX; i++) { ++ if (ssam_get_virtio_type(i) != SSAM_DEVICE_VIRTIO_FS) { ++ continue; ++ } ++ if (g_ssam_fs_poller_ctx.pf_poller[i] != NULL) { ++ spdk_poller_unregister(&g_ssam_fs_poller_ctx.pf_poller[i]); ++ g_ssam_fs_poller_ctx.pf_poller[i] = NULL; ++ } ++ } ++} ++ ++SPDK_LOG_REGISTER_COMPONENT(ssam_fs) +diff --git a/lib/ssam/ssam_fs_internal.h b/lib/ssam/ssam_fs_internal.h +new file mode 100644 +index 0000000..869454d +--- /dev/null ++++ b/lib/ssam/ssam_fs_internal.h +@@ -0,0 +1,530 @@ ++#ifndef SSAM_FS_INTERNAL_H ++#define SSAM_FS_INTERNAL_H ++ 
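++/* Note: FUSE_USE_VERSION selects the libfuse 3.x API level and must be defined before any libfuse header (fuse3/fuse.h, fuse3/fuse_lowlevel.h) is included below. */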
++#define FUSE_USE_VERSION 34 ++ ++#include "spdk/stdinc.h" ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "fuse3/fuse.h" ++#include "fuse3/fuse_lowlevel.h" ++ ++#include "ssam_driver/dpak_ssam.h" ++#include "ssam_internal.h" ++ ++/* We are re-using pointers to our `struct lo_inode` and `struct ++ lo_dirp` elements as inodes. This means that we must be able to ++ store uintptr_t values in a fuse_ino_t variable. The following ++ incantation checks this condition at compile time. */ ++#if defined(__GNUC__) && (__GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ >= 6) && !defined __cplusplus ++_Static_assert(sizeof(fuse_ino_t) >= sizeof(uintptr_t), ++ "fuse_ino_t too small to hold uintptr_t values!"); ++#else ++struct _uintptr_to_must_hold_fuse_ino_t_dummy_struct { ++unsigned _uintptr_to_must_hold_fuse_ino_t : ++ ((sizeof(fuse_ino_t) >= sizeof(uintptr_t)) ? 1 : -1); ++}; ++#endif ++ ++#define SSAM_FS_DEFAULT_THREADS 4 ++#define SSAM_FS_LCORE_ID_MAX 24 ++#define SSAM_FUSE_ARGS_NUM 3 ++#define SSAM_FS_FLR_POLLER_PERIOD (10 * 1000 * 1000) /* 10s */ ++#define SSAM_FS_FLR_SEQ_PATH "/proc/vbs/flr_seq" ++#define SSAM_FS_BUF_LEN 256 ++#define SSAM_FS_MAX_PAGES 104 ++#define SSAM_FS_STATIC_BUF_SIZE (PATH_MAX + 1) ++#define SSAM_FS_DEFAULT_ALIGN 0x1000 ++#define SSAM_FS_REF_COUNT 2 ++ ++#define ST_ATIM_NSEC(stbuf) 0 ++#define ST_CTIM_NSEC(stbuf) 0 ++#define ST_MTIM_NSEC(stbuf) 0 ++ ++#define FOPEN_DIRECT_IO (1 << 0) ++#define FOPEN_KEEP_CACHE (1 << 1) ++#define FOPEN_NONSEEKABLE (1 << 2) ++#define FOPEN_CACHE_DIR (1 << 3) ++#define FOPEN_STREAM (1 << 4) ++#define FOPEN_NOFLUSH (1 << 5) ++#define FOPEN_PARALLEL_DIRECT_WRITES (1 << 6) ++ ++#define SHM_SIZE 4096 ++#define SHM_NAME 32 ++#define MEM_MAJOR 7 ++#define MEM_MINOR 40 ++ ++/* UDAA Error provides information regarding different errors caused while using the UDAA libraries. 
*/ ++ ++typedef enum udaa_error { ++ UDAA_SUCCESS, ++ UDAA_ERROR_UNKNOWN, ++ UDAA_ERROR_NOT_PERMITTED, /**< Operation not permitted */ ++ UDAA_ERROR_IN_USE, /**< Resource already in use */ ++ UDAA_ERROR_NOT_SUPPORTED, /**< Operation not supported */ ++ UDAA_ERROR_WAIT, /**< Resource temporarily unavailable, try again */ ++ UDAA_ERROR_INVALID_VALUE, /**< Invalid input */ ++ UDAA_ERROR_NO_MEMORY, /**< Memory allocation failure */ ++ UDAA_ERROR_INITIALIZATION, /**< Resource initialization failure */ ++ UDAA_ERROR_TIME_OUT, /**< Timer expired waiting for resource */ ++ UDAA_ERROR_NOT_FOUND, /**< Resource Not Found */ ++ UDAA_ERROR_IO_FAILED, /**< Input/Output Operation Failed */ ++ UDAA_ERROR_BAD_STATE, /**< Bad State */ ++ UDAA_ERROR_AGAIN, /**< No element is available, try again later */ ++} udaa_error_t; ++ ++/** ++ * @brief Specifies the PCI function type for udaa representor device ++ * ++ */ ++enum udaa_pci_func_type { ++ UDAA_PCI_FUNC_PF = 0, /* physical function */ ++ UDAA_PCI_FUNC_VF, /* virtual function */ ++ UDAA_PCI_FUNC_SF, /* sub function */ ++}; ++ ++/** ++ * @brief Specifies the function emulation type for udaa representor device ++ * ++ */ ++typedef enum udaa_func_emulation_type { ++ UDAA_PCI_FUNC_NVME, /* nvme emulation function */ ++ UDAA_PCI_FUNC_VIRTIO_NET, /* virtio-net emulation function */ ++ UDAA_PCI_FUNC_VIRTIO_BLK, /* virtio-blk emulation function */ ++ UDAA_PCI_FUNC_VIRTIO_SCSI, /* virtio-scsi emulation function */ ++ UDAA_PCI_FUNC_VIRTIO_VSOCK, /* virtio-vsock emulation function */ ++ UDAA_PCI_FUNC_VIRTIO_FS, /* virtio-fs emulation function */ ++} udaa_eml_type_t; ++ ++struct udaa_emlq { ++ uint16_t queue_id; ++ udaa_eml_type_t eml_type; ++ struct ssam_request **vmio_req; ++}; ++ ++#define VIRTIO_FS_CFG_TAG_SIZE 36 ++ ++struct udaa_dev_fs_cfg { ++ /* Name associated with FS */ ++ uint8_t tag[VIRTIO_FS_CFG_TAG_SIZE]; ++ ++ /* Total number of VQs exposed by the device */ ++ uint32_t num_request_queues; ++ ++ /* Minimum byte size of each buffer in the notification queue, if such is supported */ ++ uint32_t notify_buf_min_size_bytes; ++}; ++ ++struct udaa_dev_info { ++ char sn[16]; ++ ++ uint16_t dev_id; ++ uint16_t dpdk_tid; ++ uint16_t modern; /* not used */ ++ ++ uint16_t vq_size; ++ uint16_t num_vqs; ++ ++ uint16_t func_id; /* global function id */ ++ uint16_t pf_id; /* pf id */ ++ uint16_t vf_num; /* if is pf */ ++ ++ uint16_t pf_idx; ++ uint16_t pf_configured; ++ ++ enum udaa_pci_func_type pci_func_type; ++ uint16_t func_eml_type; ++ ++ struct udaa_dev_fs_cfg fs_cfg; ++}; ++ ++struct udaa_eml_dev { ++ uint16_t pf_idx; ++ struct udaa_dev_info dev_info; ++}; ++ ++/** ++ * @brief Emulation request structure: describes a request created by the emulation device consumer and delivered by the corresponding context. ++ * ++ * UDAA Job layout ++ * ++ * SDK job --> +--------------------------+ ++ * | UDAA Job (base) | ++ * | type | ++ * | ctx | ++ * | | ++ * +------------+-------------+ <-- request arguments ++ * | | variable size ++ * | arguments | library specific ++ * | . | structure ++ * | . | ++ * | . | ++ * | . | ++ * | . | ++ * | . 
| ++ * | | ++ * +------------+-------------+ ++ */ ++struct mount_info { ++ size_t size; ++ enum fuse_buf_flags flags; ++ int fd; ++ off_t pos; ++ uint32_t len; ++ uint32_t opcode; ++ uint64_t unique; ++ uint64_t nodeid; ++ uint32_t uid; ++ uint32_t gid; ++ uint32_t pid; ++ uint16_t total_extlen; /* length of extensions in 8byte units */ ++ uint16_t padding; ++}; ++ ++struct fuse_init_in { ++ uint32_t major; ++ uint32_t minor; ++ uint32_t max_readahead; ++ uint32_t flags; ++ uint32_t flags2; ++ uint32_t unused[11]; ++}; ++ ++struct udaa_eml_req { ++ int type; /* < Defines the type of the request. */ ++ struct udaa_eml_dev *eml_dev; /* < UDAA Emulation device that executes the request. */ ++ struct udaa_buf **in_buf; /* < Data_in buffers > */ ++ uint32_t in_buf_num; /* < Number of in_buf > */ ++ struct udaa_buf **out_buf; /* < Data_out buffers(the first one is for the fuse_out_header) > */ ++ uint32_t out_buf_num; /* < Number of out_buf > */ ++ void *buf; /* < Consecutive buffer for application usage > */ ++ size_t buf_len; /* < Consecutive buffer size > */ ++}; ++ ++enum host_dma_mode { ++ READ_HOST_MODE = 0, /* *< read host data and write to SPU */ ++ WRITE_HOST_MODE = 1, /* *< write data to host */ ++ HOST_DMA_MODE_MAX ++}; ++ ++struct lo_inode { ++ struct lo_inode *next; /* protected by lo->mutex */ ++ struct lo_inode *prev; /* protected by lo->mutex */ ++ int fd; ++ ino_t ino; ++ dev_t dev; ++ uint64_t refcount; /* protected by lo->mutex */ ++}; ++ ++enum { ++ CACHE_NEVER, ++ CACHE_NORMAL, ++ CACHE_ALWAYS, ++}; ++ ++struct lo_data { ++ pthread_mutex_t mutex; ++ pthread_mutex_t exit_mutex; ++ int writeback; ++ int flock; ++ int xattr; ++ char *source; ++ double timeout; ++ int cache; ++ int timeout_set; ++ struct lo_inode root; /* protected by lo->mutex */ ++ ++ struct udaa_emlq **udaa_fs_queues; ++ uint32_t num_queues; ++ struct udaa_eml_req *fs_reqs; ++ struct fuse_session *se; ++ struct spdk_ssam_session *smsession[SSAM_FS_LCORE_ID_MAX]; ++ uint16_t gfunc_id; ++ uint16_t exit_num; ++ bool mounted; ++ bool used; ++ char *name; ++ char *dbdf; ++ uint32_t flr_seq; ++ bool have_shm; ++ bool load_shm_flag; ++ bool delete_flag; ++ ++ spdk_ssam_session_rsp_fn rsp_fn; ++ void *rsp_ctx; ++}; ++ ++struct fuse_out_header { ++ uint32_t len; ++ int32_t error; ++ uint64_t unique; ++}; ++ ++struct fuse_attr { ++ uint64_t ino; ++ uint64_t size; ++ uint64_t blocks; ++ uint64_t atime; ++ uint64_t mtime; ++ uint64_t ctime; ++ uint32_t atimensec; ++ uint32_t mtimensec; ++ uint32_t ctimensec; ++ uint32_t mode; ++ uint32_t nlink; ++ uint32_t uid; ++ uint32_t gid; ++ uint32_t rdev; ++ uint32_t blksize; ++ uint32_t flags; ++}; ++ ++#define SSAM_FUSE_COMPAT_ATTR_OUT_SIZE 96 ++ ++struct fuse_attr_out { ++ uint64_t attr_valid; /* Cache timeout for the attributes */ ++ uint32_t attr_valid_nsec; ++ uint32_t dummy; ++ struct fuse_attr attr; ++}; ++ ++#define SSAM_FUSE_COMPAT_ENTRY_OUT_SIZE 120 ++ ++struct fuse_entry_out { ++ uint64_t nodeid; /* Inode ID */ ++ uint64_t generation; /* Inode generation: nodeid:gen must ++ be unique for the fs's lifetime */ ++ uint64_t entry_valid; /* Cache timeout for the name */ ++ uint64_t attr_valid; /* Cache timeout for the attributes */ ++ uint32_t entry_valid_nsec; ++ uint32_t attr_valid_nsec; ++ struct fuse_attr attr; ++}; ++ ++struct fuse_open_out { ++ uint64_t fh; ++ uint32_t open_flags; ++ uint32_t padding; ++}; ++ ++struct fuse_write_out { ++ uint32_t size; ++ uint32_t padding; ++}; ++ ++struct fuse_kstatfs { ++ uint64_t blocks; ++ uint64_t bfree; ++ uint64_t bavail; ++ 
uint64_t files; ++ uint64_t ffree; ++ uint32_t bsize; ++ uint32_t namelen; ++ uint32_t frsize; ++ uint32_t padding; ++ uint32_t spare[6]; ++}; ++ ++#define SSAM_FUSE_COMPAT_STATFS_SIZE 48 ++ ++struct fuse_statfs_out { ++ struct fuse_kstatfs st; ++}; ++ ++struct fuse_getxattr_out { ++ uint32_t size; ++ uint32_t padding; ++}; ++ ++struct fuse_lseek_out { ++ uint64_t offset; ++}; ++ ++struct fuse_req { ++ struct fuse_session *se; ++ uint64_t unique; ++ int ctr; ++ pthread_mutex_t lock; ++ struct fuse_ctx ctx; ++ struct fuse_chan *ch; ++ int interrupted; ++ unsigned int ioctl_64bit : 1; ++ union { ++ struct { ++ uint64_t unique; ++ } i; ++ struct { ++ fuse_interrupt_func_t func; ++ void *data; ++ } ni; ++ } u; ++ struct fuse_req *next; ++ struct fuse_req *prev; ++}; ++ ++struct fuse_notify_req { ++ uint64_t unique; ++ void (*reply)(struct fuse_notify_req *, fuse_req_t, fuse_ino_t, const void *, ++ const struct fuse_buf *); ++ struct fuse_notify_req *next; ++ struct fuse_notify_req *prev; ++}; ++ ++struct fuse_session { ++ char *mountpoint; ++ volatile int exited; ++ int fd; ++ struct fuse_custom_io *io; ++ struct mount_opts *mo; ++ int debug; ++ int deny_others; ++ struct fuse_lowlevel_ops op; ++ int got_init; ++ struct cuse_data *cuse_data; ++ void *userdata; ++ uid_t owner; ++ struct fuse_conn_info conn; ++ struct fuse_req list; ++ struct fuse_req interrupts; ++ pthread_mutex_t lock; ++ int got_destroy; ++ pthread_key_t pipe_key; ++ int broken_splice_nonblock; ++ uint64_t notify_ctr; ++ struct fuse_notify_req notify_list; ++ size_t bufsize; ++ int error; ++}; ++ ++struct fuse_chan { ++ pthread_mutex_t lock; ++ int ctr; ++ int fd; ++}; ++ ++struct fuse_release_in { ++ uint64_t fh; ++ uint32_t flags; ++ uint32_t release_flags; ++ uint64_t lock_owner; ++}; ++ ++#define SSAM_FUSE_OPCODE_READ 15 ++#define SSAM_FUSE_OPCODE_WRITE 16 ++#define SSAM_FUSE_OPCODE_RELEASE 18 ++#define SSAM_FUSE_OPCODE_FLUSH 25 ++#define SSAM_FUSE_OPCODE_INIT 26 ++#define SSAM_FUSE_OPCODE_RELEASEDIR 29 ++#define SSAM_FUSE_OPCODE_DESTROY 38 ++ ++struct fuse_in_header { ++ uint32_t len; ++ uint32_t opcode; ++ uint64_t unique; ++ uint64_t nodeid; ++ uint32_t uid; ++ uint32_t gid; ++ uint32_t pid; ++ uint16_t total_extlen; /* length of extensions in 8byte units */ ++ uint16_t padding; ++}; ++ ++struct fuse_init_out { ++ uint32_t major; ++ uint32_t minor; ++ uint32_t max_readahead; ++ uint32_t flags; ++ uint16_t max_background; ++ uint16_t congestion_threshold; ++ uint32_t max_write; ++ uint32_t time_gran; ++ uint16_t max_pages; ++ uint16_t map_alignment; ++ uint32_t flags2; ++ uint32_t unused[7]; ++}; ++ ++struct ssam_fs_stat { ++ uint64_t bytes_read; /* Read Bytes */ ++ uint64_t num_read_ops; /* Read IO */ ++ uint64_t bytes_written; /* Write Bytes */ ++ uint64_t num_write_ops; /* Write IO */ ++ uint64_t read_latency_ticks; ++ uint64_t write_latency_ticks; ++ ++ uint64_t complete_read_ios; /* Number of successfully completed read requests, */ ++ uint64_t err_read_ios; /* Number of failed completed read requests, */ ++ uint64_t complete_write_ios; /* Number of successfully completed write requests, */ ++ uint64_t err_write_ios; /* Number of failed completed write requests, */ ++ uint64_t flush_ios; /* Total number of flush requests, */ ++ uint64_t complete_flush_ios; /* Number of successfully completed flush requests, */ ++ uint64_t err_flush_ios; /* Number of failed completed flush requests, */ ++ uint64_t fatal_ios; ++ uint64_t other_ios; ++ ++ uint64_t start_tsc; ++ uint64_t complete_start_tsc; ++ uint64_t 
complete_end_tsc; ++ ++ uint32_t op_type; ++ uint32_t payload_size; ++ __virtio32 type; ++}; ++ ++struct spdk_ssam_fs_session { ++ struct spdk_ssam_session smsession; ++ struct spdk_poller *fs_poller; ++ struct fuse_buf fbuf; ++ struct fuse_out_header iov_header; ++ ++ char *dynamic_buf; /* for read/write IO, which size is variable. */ ++ char *static_buf; /* for most control IO, which size is 4096. */ ++ ssize_t in_len; ++ ++ struct iovec ++ *src_iov; /* for asynchronous dma in fuse_udaa_writev, which iovcnt is variable. */ ++ struct iovec dst_iov; /* for asynchronous dma in fuse_udaa_read, which iovcnt is 1. */ ++ bool fbuf_used; ++ bool need_write_config; ++ struct lo_data *p_lo; ++ struct ssam_fs_stat fs_stat; ++}; ++ ++struct spdk_ssam_dev_io_scan_poller_ctx { ++ uint8_t func_id; ++ bool restart_flag; ++}; ++ ++struct spdk_ssam_fs_poller_ctx { ++ struct spdk_poller *flr_seq_poller; ++ int flr_fd; ++ void *flr_map; ++ struct spdk_poller *pf_poller[SSAM_HOSTEP_NUM_MAX]; ++ pthread_mutex_t poll_mutex[SSAM_HOSTEP_NUM_MAX]; ++ struct spdk_ssam_dev_io_scan_poller_ctx pf_poller_ctx[SSAM_HOSTEP_NUM_MAX]; ++}; ++ ++int ssam_fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e, ++ struct spdk_ssam_fs_session *fsmsession); ++int ssam_fuse_reply_open(fuse_req_t req, const struct fuse_file_info *f, ++ struct spdk_ssam_fs_session *fsmsession); ++int ssam_fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e, ++ const struct fuse_file_info *f, ++ struct spdk_ssam_fs_session *fsmsession); ++int ssam_fuse_reply_attr(fuse_req_t req, const struct stat *attr, double attr_timeout, ++ struct spdk_ssam_fs_session *fsmsession); ++int ssam_fuse_reply_write(fuse_req_t req, size_t count, struct spdk_ssam_fs_session *fsmsession); ++int ssam_fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf, ++ struct spdk_ssam_fs_session *fsmsession); ++int ssam_fuse_reply_xattr(fuse_req_t req, size_t count, struct spdk_ssam_fs_session *fsmsession); ++int ssam_fuse_reply_lseek(fuse_req_t req, off_t off, struct spdk_ssam_fs_session *fsmsession); ++int ssam_fuse_reply_data(fuse_req_t req, struct fuse_bufvec *bufv, enum fuse_buf_copy_flags flags, ++ struct spdk_ssam_fs_session *fsmsession); ++ ++#endif ++/* SSAM_FS_INTERNAL_H */ +diff --git a/lib/ssam/ssam_fuse_adapter.c b/lib/ssam/ssam_fuse_adapter.c +new file mode 100644 +index 0000000..5a9191d +--- /dev/null ++++ b/lib/ssam/ssam_fuse_adapter.c +@@ -0,0 +1,455 @@ ++/* - ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2021-2022. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "spdk/env.h" ++ ++#include "ssam_fs_internal.h" ++ ++static size_t pagesize; ++ ++static __attribute__((constructor)) void ssam_fuse_ll_init_pagesize(void) ++{ ++ pagesize = getpagesize(); ++} ++ ++static size_t iov_length(const struct iovec *iov, size_t count) ++{ ++ size_t seg; ++ size_t ret = 0; ++ ++ for (seg = 0; seg < count; seg++) { ++ ret += iov[seg].iov_len; ++ } ++ return ret; ++} ++ ++static void convert_stat(const struct stat *stbuf, struct fuse_attr *attr) ++{ ++ attr->ino = stbuf->st_ino; ++ attr->mode = stbuf->st_mode; ++ attr->nlink = stbuf->st_nlink; ++ attr->uid = stbuf->st_uid; ++ attr->gid = stbuf->st_gid; ++ attr->rdev = stbuf->st_rdev; ++ attr->size = stbuf->st_size; ++ attr->blksize = stbuf->st_blksize; ++ attr->blocks = stbuf->st_blocks; ++ attr->atime = stbuf->st_atime; ++ attr->mtime = stbuf->st_mtime; ++ attr->ctime = stbuf->st_ctime; ++ attr->atimensec = ST_ATIM_NSEC(stbuf); ++ attr->mtimensec = ST_MTIM_NSEC(stbuf); ++ attr->ctimensec = ST_CTIM_NSEC(stbuf); ++} ++ ++static unsigned long calc_timeout_sec(double t) ++{ ++ if (t > (double)ULONG_MAX) { ++ return ULONG_MAX; ++ } else if (t < 0.0) { ++ return 0; ++ } else { ++ return (unsigned long)t; ++ } ++} ++ ++static unsigned int calc_timeout_nsec(double t) ++{ ++ double f = t - (double)calc_timeout_sec(t); ++ if (f < 0.0) { ++ return 0; ++ } else if (f >= 0.999999999) { ++ return 999999999; ++ } else { ++ return (unsigned int)(f * 1.0e9); ++ } ++} ++ ++static void fill_entry(struct fuse_entry_out *arg, const struct fuse_entry_param *e) ++{ ++ arg->nodeid = e->ino; ++ arg->generation = e->generation; ++ arg->entry_valid = calc_timeout_sec(e->entry_timeout); ++ arg->entry_valid_nsec = calc_timeout_nsec(e->entry_timeout); ++ arg->attr_valid = calc_timeout_sec(e->attr_timeout); ++ arg->attr_valid_nsec = calc_timeout_nsec(e->attr_timeout); ++ convert_stat(&e->attr, &arg->attr); ++} ++ ++static void fill_open(struct fuse_open_out *arg, const struct fuse_file_info *f) ++{ ++ arg->fh = f->fh; ++ if (f->direct_io) { ++ arg->open_flags |= FOPEN_DIRECT_IO; ++ } ++ if (f->keep_cache) { ++ arg->open_flags |= FOPEN_KEEP_CACHE; ++ } ++ if (f->cache_readdir) { ++ arg->open_flags |= FOPEN_CACHE_DIR; ++ } ++ if (f->nonseekable) { ++ arg->open_flags |= FOPEN_NONSEEKABLE; ++ } ++ if (f->noflush) { ++ arg->open_flags |= FOPEN_NOFLUSH; ++ } ++ if (f->parallel_direct_writes) { ++ arg->open_flags |= FOPEN_PARALLEL_DIRECT_WRITES; ++ } ++} ++ ++static void list_del_req(struct fuse_req *req) ++{ ++ struct fuse_req *prev = req->prev; ++ struct fuse_req *next = req->next; ++ prev->next = next; ++ next->prev = prev; ++} ++ ++static void ssam_fuse_chan_put(struct fuse_chan *ch) ++{ ++ if (ch == NULL) { ++ return; ++ } ++ 
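/* Drop one channel reference; the last put closes the channel fd and frees it. */ ++ 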
pthread_mutex_lock(&ch->lock); ++ ch->ctr--; ++ if (!ch->ctr) { ++ pthread_mutex_unlock(&ch->lock); ++ close(ch->fd); ++ pthread_mutex_destroy(&ch->lock); ++ free(ch); ++ } else { ++ pthread_mutex_unlock(&ch->lock); ++ } ++} ++ ++static void destroy_req(fuse_req_t req) ++{ ++ assert(req->ch == NULL); ++ pthread_mutex_destroy(&req->lock); ++ free(req); ++} ++ ++static void ssam_fuse_free_req(fuse_req_t req) ++{ ++ int ctr; ++ struct fuse_session *se = req->se; ++ ++ pthread_mutex_lock(&se->lock); ++ req->u.ni.func = NULL; ++ req->u.ni.data = NULL; ++ list_del_req(req); ++ ctr = --req->ctr; ++ ssam_fuse_chan_put(req->ch); ++ req->ch = NULL; ++ pthread_mutex_unlock(&se->lock); ++ if (!ctr) { ++ destroy_req(req); ++ } ++} ++ ++/* Send data. If *ch* is NULL, send via session master fd */ ++static int ssam_fuse_send_msg(struct fuse_session *se, struct fuse_chan *ch, struct iovec *iov, ++ int count) ++{ ++ struct fuse_out_header *out = iov[0].iov_base; ++ ++ assert(se != NULL); ++ out->len = iov_length(iov, count); ++ if (se->debug) { ++ if (out->unique == 0) { ++ SPDK_INFOLOG(ssam_fs, "NOTIFY: code=%d length=%u\n", out->error, out->len); ++ } else if (out->error) { ++ SPDK_INFOLOG(ssam_fs, "unique: %llu, error: %i (%s), outsize: %i\n", ++ (unsigned long long)out->unique, ++ out->error, strerror(-out->error), out->len); ++ } else { ++ SPDK_INFOLOG(ssam_fs, "unique: %llu, success, outsize: %i\n", (unsigned long long)out->unique, ++ out->len); ++ } ++ } ++ ++ ssize_t res; ++ if (se->io != NULL) { ++ /* se->io->writev is never NULL if se->io is not NULL as ++ specified by fuse_session_custom_io() */ ++ res = se->io->writev(ch ? ch->fd : se->fd, iov, count, se->userdata); ++ } else { ++ res = writev(ch ? ch->fd : se->fd, iov, count); ++ } ++ ++ int err = errno; ++ ++ if (res == -1) { ++ /* ENOENT means the operation was interrupted */ ++ if (!fuse_session_exited(se) && err != ENOENT) { ++ SPDK_ERRLOG("fuse: writing device"); ++ } ++ return -err; ++ } ++ ++ return 0; ++} ++ ++static int ssam_fuse_send_reply_iov_nofree(fuse_req_t req, int error, struct iovec *iov, int count) ++{ ++ struct fuse_out_header out; ++ ++#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 32 ++ const char *str = strerrordesc_np(error * -1); ++ if ((str == NULL && error != 0) || error > 0) { ++#else ++ if (error <= -1000 || error > 0) { ++#endif ++ SPDK_ERRLOG("fuse: bad error value: %i\n", error); ++ error = -ERANGE; ++ } ++ ++ out.unique = req->unique; ++ out.error = error; ++ ++ iov[0].iov_base = &out; ++ iov[0].iov_len = sizeof(struct fuse_out_header); ++ ++ return ssam_fuse_send_msg(req->se, req->ch, iov, count); ++} ++ ++static int ssam_fuse_send_reply_iov(fuse_req_t req, int error, struct iovec *iov, int count) ++{ ++ int res; ++ ++ res = ssam_fuse_send_reply_iov_nofree(req, error, iov, count); ++ ssam_fuse_free_req(req); ++ return res; ++} ++ ++static int ssam_fuse_send_reply(fuse_req_t req, int error, const void *arg, size_t argsize) ++{ ++ struct iovec iov[2]; ++ int count = 1; ++ if (argsize) { ++ iov[1].iov_base = (void *)arg; ++ iov[1].iov_len = argsize; ++ count++; ++ } ++ return ssam_fuse_send_reply_iov(req, error, iov, count); ++} ++ ++static int ssam_fuse_send_reply_ok(fuse_req_t req, const void *arg, size_t argsize) ++{ ++ return ssam_fuse_send_reply(req, 0, arg, argsize); ++} ++ ++int ssam_fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e, ++ struct spdk_ssam_fs_session *fsmsession) ++{ ++ struct fuse_entry_out *arg = (struct fuse_entry_out *)fsmsession->static_buf; ++ size_t size = 
req->se->conn.proto_minor < 9 ? SSAM_FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(*arg); ++ ++ /* before ABI 7.4 e->ino == 0 was invalid, only ENOENT meant ++ negative entry */ ++ if (!e->ino && req->se->conn.proto_minor < 4) { ++ return fuse_reply_err(req, ENOENT); ++ } ++ ++ memset(arg, 0, sizeof(*arg)); ++ fill_entry(arg, e); ++ return ssam_fuse_send_reply_ok(req, arg, size); ++} ++ ++int ssam_fuse_reply_open(fuse_req_t req, const struct fuse_file_info *f, ++ struct spdk_ssam_fs_session *fsmsession) ++{ ++ struct fuse_open_out *arg = (struct fuse_open_out *)fsmsession->static_buf; ++ ++ memset(arg, 0, sizeof(*arg)); ++ fill_open(arg, f); ++ return ssam_fuse_send_reply_ok(req, arg, sizeof(*arg)); ++} ++ ++int ssam_fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e, ++ const struct fuse_file_info *f, ++ struct spdk_ssam_fs_session *fsmsession) ++{ ++ size_t entrysize = req->se->conn.proto_minor < 9 ? SSAM_FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof( ++ struct fuse_entry_out); ++ struct fuse_entry_out *earg = (struct fuse_entry_out *) fsmsession->static_buf; ++ struct fuse_open_out *oarg = (struct fuse_open_out *)(fsmsession->static_buf + entrysize); ++ ++ memset(fsmsession->static_buf, 0, sizeof(struct fuse_entry_out) + sizeof(struct fuse_open_out)); ++ fill_entry(earg, e); ++ fill_open(oarg, f); ++ return ssam_fuse_send_reply_ok(req, fsmsession->static_buf, ++ entrysize + sizeof(struct fuse_open_out)); ++} ++ ++int ssam_fuse_reply_attr(fuse_req_t req, const struct stat *attr, double attr_timeout, ++ struct spdk_ssam_fs_session *fsmsession) ++{ ++ struct fuse_attr_out *arg = (struct fuse_attr_out *)fsmsession->static_buf; ++ size_t size = req->se->conn.proto_minor < 9 ? SSAM_FUSE_COMPAT_ATTR_OUT_SIZE : sizeof(*arg); ++ ++ memset(arg, 0, sizeof(*arg)); ++ arg->attr_valid = calc_timeout_sec(attr_timeout); ++ arg->attr_valid_nsec = calc_timeout_nsec(attr_timeout); ++ convert_stat(attr, &arg->attr); ++ ++ return ssam_fuse_send_reply_ok(req, arg, size); ++} ++ ++int ssam_fuse_reply_write(fuse_req_t req, size_t count, struct spdk_ssam_fs_session *fsmsession) ++{ ++ struct fuse_write_out *arg = (struct fuse_write_out *)fsmsession->static_buf; ++ ++ memset(arg, 0, sizeof(*arg)); ++ arg->size = count; ++ ++ return ssam_fuse_send_reply_ok(req, arg, sizeof(*arg)); ++} ++ ++static void convert_statfs(const struct statvfs *stbuf, struct fuse_kstatfs *kstatfs) ++{ ++ kstatfs->bsize = stbuf->f_bsize; ++ kstatfs->frsize = stbuf->f_frsize; ++ kstatfs->blocks = stbuf->f_blocks; ++ kstatfs->bfree = stbuf->f_bfree; ++ kstatfs->bavail = stbuf->f_bavail; ++ kstatfs->files = stbuf->f_files; ++ kstatfs->ffree = stbuf->f_ffree; ++ kstatfs->namelen = stbuf->f_namemax; ++} ++ ++int ssam_fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf, ++ struct spdk_ssam_fs_session *fsmsession) ++{ ++ struct fuse_statfs_out *arg = (struct fuse_statfs_out *)fsmsession->static_buf; ++ size_t size = req->se->conn.proto_minor < 4 ? 
SSAM_FUSE_COMPAT_STATFS_SIZE : sizeof(*arg); ++ ++ memset(arg, 0, sizeof(*arg)); ++ convert_statfs(stbuf, &arg->st); ++ ++ return ssam_fuse_send_reply_ok(req, arg, size); ++} ++ ++int ssam_fuse_reply_xattr(fuse_req_t req, size_t count, struct spdk_ssam_fs_session *fsmsession) ++{ ++ struct fuse_getxattr_out *arg = (struct fuse_getxattr_out *)fsmsession->static_buf; ++ ++ memset(arg, 0, sizeof(*arg)); ++ arg->size = count; ++ ++ return ssam_fuse_send_reply_ok(req, arg, sizeof(*arg)); ++} ++ ++int ssam_fuse_reply_lseek(fuse_req_t req, off_t off, struct spdk_ssam_fs_session *fsmsession) ++{ ++ struct fuse_lseek_out *arg = (struct fuse_lseek_out *)fsmsession->static_buf; ++ ++ memset(arg, 0, sizeof(*arg)); ++ arg->offset = off; ++ ++ return ssam_fuse_send_reply_ok(req, arg, sizeof(*arg)); ++} ++ ++static int ssam_fuse_send_data_iov_fallback(struct fuse_session *se, struct fuse_chan *ch, ++ struct iovec *iov, ++ int iov_count, struct fuse_bufvec *buf, size_t len, struct spdk_ssam_fs_session *fsmsession) ++{ ++ struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len); ++ int res; ++ uint64_t phys_addr; ++ ++ /* Optimize common case */ ++ if (buf->count == 1 && buf->idx == 0 && buf->off == 0 && !(buf->buf[0].flags & (1 << 1))) { ++ /* FIXME: also avoid memory copy if there are multiple buffers ++ but none of them contain an fd */ ++ ++ iov[iov_count].iov_base = buf->buf[0].mem; ++ iov[iov_count].iov_len = len; ++ iov_count++; ++ return ssam_fuse_send_msg(se, ch, iov, iov_count); ++ } ++ ++ fsmsession->dynamic_buf = ssam_mempool_alloc(fsmsession->smsession.mp, len, &phys_addr); ++ if (!fsmsession->dynamic_buf) { ++ return -ENOMEM; ++ } ++ ++ mem_buf.buf[0].mem = fsmsession->dynamic_buf; ++ res = fuse_buf_copy(&mem_buf, buf, 0); ++ if (res < 0) { ++ ssam_mempool_free(fsmsession->smsession.mp, fsmsession->dynamic_buf); ++ fsmsession->dynamic_buf = NULL; ++ return -res; ++ } ++ len = res; ++ ++ iov[iov_count].iov_base = fsmsession->dynamic_buf; ++ iov[iov_count].iov_len = len; ++ iov_count++; ++ res = ssam_fuse_send_msg(se, ch, iov, iov_count); ++ ++ return res; ++} ++ ++static int ssam_fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch, struct iovec *iov, ++ int iov_count, ++ struct fuse_bufvec *buf, unsigned int flags, struct spdk_ssam_fs_session *fsmsession) ++{ ++ size_t len = fuse_buf_size(buf); ++ (void)flags; ++ ++ return ssam_fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len, fsmsession); ++} ++ ++int ssam_fuse_reply_data(fuse_req_t req, struct fuse_bufvec *bufv, enum fuse_buf_copy_flags flags, ++ struct spdk_ssam_fs_session *fsmsession) ++{ ++ struct iovec iov[2]; ++ struct fuse_out_header out; ++ int res; ++ ++ iov[0].iov_base = &out; ++ iov[0].iov_len = sizeof(struct fuse_out_header); ++ ++ out.unique = req->unique; ++ out.error = 0; ++ ++ res = ssam_fuse_send_data_iov(req->se, req->ch, iov, 1, bufv, flags, fsmsession); ++ if (res <= 0) { ++ ssam_fuse_free_req(req); ++ return res; ++ } else { ++ return fuse_reply_err(req, res); ++ } ++} +diff --git a/lib/ssam/ssam_internal.h b/lib/ssam/ssam_internal.h +new file mode 100644 +index 0000000..62b7a81 +--- /dev/null ++++ b/lib/ssam/ssam_internal.h +@@ -0,0 +1,538 @@ ++/*- ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef SSAM_INTERNAL_H ++#define SSAM_INTERNAL_H ++ ++#include "stdint.h" ++ ++#include ++#include "ssam_driver/dpak_ssam.h" ++ ++#include "spdk_internal/thread.h" ++#include "spdk/log.h" ++#include "spdk/util.h" ++#include "spdk/rpc.h" ++#include "spdk/bdev.h" ++#include "spdk/ssam.h" ++#include "ssam_config.h" ++ ++#define SPDK_SSAM_FEATURES ((1ULL << VHOST_F_LOG_ALL) | \ ++ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \ ++ (1ULL << VIRTIO_F_VERSION_1) | \ ++ (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \ ++ (1ULL << VIRTIO_RING_F_EVENT_IDX) | \ ++ (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \ ++ (1ULL << VIRTIO_F_RING_PACKED)) ++ ++#define VIRITO_DEFAULT_QUEUE_SIZE 256 ++#define VIRITO_FS_DEFAULT_QUEUE_SIZE 1024 ++#define VIRTIO_FS_DEFAULT_CONFIG_LEN 44 ++#define VIRTIO_FS_DEFAULT_TAG_LEN 36 ++#define VIRTIO_FS_DEFAULT_CONFIG_QUEUE_OFFSET 9 ++ ++#define SPDK_SSAM_VQ_MAX_SUBMISSIONS 16 ++#define SPDK_SSAM_MAX_VQUEUES 256 ++#define SPDK_SSAM_MAX_VQ_SIZE 256 ++#define SSAM_JSON_DEFAULT_QUEUES_NUM 16 ++ ++/* ssam not support config vq size so far */ ++#define SPDK_SSAM_DEFAULT_VQ_SIZE SPDK_SSAM_MAX_VQ_SIZE ++#define SPDK_SSAM_DEFAULT_VQUEUES 16 ++#define SPDK_SSAM_IOVS_MAX 32 ++#define SPDK_SSAM_MAX_SEG_SIZE (32 * 1024) ++ ++#define SPDK_INVALID_GFUNC_ID UINT16_MAX ++#define SPDK_INVALID_CORE_ID UINT16_MAX ++#define SPDK_INVALID_MAX_THREADS UINT16_MAX ++ ++#define SSAM_PF_MAX_NUM 32 ++#define SPDK_SSAM_SCSI_CTRLR_MAX_DEVS 255 ++#define SSAM_VIRTIO_SCSI_LUN_ID 0x400001 ++#define SPDK_SSAM_SCSI_DEFAULT_VQUEUES 128 ++#define SSAM_MAX_SESSION_PER_DEV UINT16_MAX ++#define SSAM_DEFAULT_MEMPOOL_EXTRA_SIZE 0 ++#define SSAM_MAX_CORE_NUM 16 ++#define SSAM_MAX_CORE_NUM_WITH_LARGE_IO 10 ++ ++#define SPDK_LIMIT_LOG_MAX_INTERNEL_IN_MS 3000 ++#define SPDK_CONVERT_MS_TO_US 1000 ++ ++#define PERF_STAT ++ ++typedef void (*spdk_ssam_session_io_wait_cb)(void *cb_arg); ++ ++struct spdk_ssam_session_io_wait { ++ spdk_ssam_session_io_wait_cb cb_fn; ++ void *cb_arg; ++ 
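/* List linkage; entries of this type are queued on the dev's io_wait_queue. */ ++ 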
TAILQ_ENTRY(spdk_ssam_session_io_wait) link; ++}; ++ ++typedef void (*spdk_ssam_session_io_wait_r_cb)(void *cb_arg); ++ ++struct spdk_ssam_session_io_wait_r { ++ spdk_ssam_session_io_wait_r_cb cb_fn; ++ void *cb_arg; ++ TAILQ_ENTRY(spdk_ssam_session_io_wait_r) link; ++}; ++ ++struct spdk_ssam_virtqueue { ++ void *tasks; ++ struct spdk_ssam_session *smsession; ++ uint32_t *index; ++ int num; ++ int use_num; ++ int index_l; ++ int index_r; ++}; ++ ++struct spdk_ssam_session_backend { ++ enum virtio_type type; ++ int (*remove_session)(struct spdk_ssam_session *smsession); ++ void (*remove_self)(struct spdk_ssam_session *smsession); ++ void (*request_worker)(struct spdk_ssam_session *smsession, void *arg); ++ void (*destroy_bdev_device)(struct spdk_ssam_session *smsession, void *args); ++ void (*response_worker)(struct spdk_ssam_session *smsession, void *arg); ++ void (*no_data_req_worker)(struct spdk_ssam_session *smsession); ++ ++ int (*ssam_get_config)(struct spdk_ssam_session *smsession, ++ uint8_t *config, uint32_t len, uint16_t queues); ++ int (*ssam_set_config)(struct spdk_ssam_session *smsession, ++ uint8_t *config, uint32_t offset, uint32_t size, uint32_t flags); ++ ++ void (*print_stuck_io_info)(struct spdk_ssam_session *smsession); ++ ++ void (*dump_info_json)(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w); ++ void (*write_config_json)(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w); ++ void (*show_iostat_json)(struct spdk_ssam_session *smsession, uint32_t id, ++ struct spdk_json_write_ctx *w); ++ void (*clear_iostat_json)(struct spdk_ssam_session *smsession); ++ struct spdk_bdev *(*get_bdev)(struct spdk_ssam_session *smsession, uint32_t id); ++}; ++ ++struct spdk_ssam_session { ++ /* Unique session name, format as ssam.tid.gfunc_id. 
*/ ++ char *name; ++ ++ struct spdk_ssam_dev *smdev; ++ ++ /* Session poller thread, same as ssam dev poller thread */ ++ struct spdk_thread *thread; ++ struct ssam_mempool *mp; ++ const struct spdk_ssam_session_backend *backend; ++ spdk_ssam_session_rsp_fn rsp_fn; ++ void *rsp_ctx; ++ struct spdk_ssam_virtqueue virtqueue[SPDK_SSAM_MAX_VQUEUES]; ++ ++ /* Number of processing tasks, cannot remove the session when task_cnt > 0 */ ++ int task_cnt; ++ ++ /* Number of pending asynchronous operations */ ++ uint32_t pending_async_op_num; ++ ++ /* ssam global virtual function id */ ++ uint16_t gfunc_id; ++ ++ /* Depth of virtio-blk virtqueue */ ++ uint16_t queue_size; ++ ++ /* Number of virtio-blk virtqueues */ ++ uint16_t max_queues; ++ bool started; ++ bool initialized; ++ ++ /* spdk_ssam_session_fn process finish flag */ ++ bool async_done; ++ ++ bool registered; ++ ++ TAILQ_ENTRY(spdk_ssam_session) tailq; ++}; ++ ++struct ssam_iovs { ++ struct iovec sges[SPDK_SSAM_IOVS_MAX]; ++}; ++ ++struct ssam_iovec { ++ struct ssam_iovs virt; /* virt's iov_base is virtual address */ ++ struct ssam_iovs phys; /* phys's iov_base is physical address */ ++}; ++ ++struct ssam_stat { ++ uint64_t poll_cur_tsc; ++ uint64_t poll_tsc; ++ uint64_t poll_count; ++}; ++ ++struct spdk_ssam_dev { ++ /* ssam device name, format as ssam.tid */ ++ char *name; ++ /* virtio type */ ++ enum virtio_type type; ++ ++ /* ssam device poller thread, same as session poller thread */ ++ struct spdk_thread *thread; ++ struct spdk_poller *requestq_poller; ++ struct spdk_poller *responseq_poller; ++ struct spdk_poller *stop_poller; ++ ++ /* Store sessions of this dev, max number is SSAM_MAX_SESSION_PER_DEV */ ++ struct spdk_ssam_session **smsessions; ++ ++ TAILQ_ENTRY(spdk_ssam_dev) tailq; ++ ++ /* Number of IOs that are in flight */ ++ uint64_t io_num; ++ ++ uint64_t discard_io_num; ++ ++ /* IO stuck ticks in dma process */ ++ uint64_t io_stuck_tsc; ++ struct ssam_stat stat; ++ ++ uint64_t io_wait_cnt; ++ uint64_t io_wait_r_cnt; ++ ++ /* Number of started and actively polled sessions */ ++ uint32_t active_session_num; ++ ++ /* Information of tid, indicates from which ssam queue to receive or send data */ ++ uint16_t tid; ++ uint16_t lcore_id; ++ TAILQ_HEAD(, spdk_ssam_session_io_wait) io_wait_queue; ++ TAILQ_HEAD(, spdk_ssam_session_io_wait_r) io_wait_queue_r; ++}; ++ ++struct spdk_ssam_dma_cb { ++ uint8_t status; ++ uint8_t req_dir; ++ uint16_t vq_idx; ++ uint16_t task_idx; ++ uint16_t gfunc_id; ++}; ++ ++struct spdk_ssam_send_event_flag { ++ bool need_async; ++ bool need_rsp; ++}; ++ ++/** ++ * Remove a session from the sessions array. ++ * ++ * \param smsessions sessions array. ++ * \param smsession the session to be removed. ++ */ ++void ssam_sessions_remove(struct spdk_ssam_session **smsessions, ++ struct spdk_ssam_session *smsession); ++ ++/** ++ * Check whether the sessions array is empty or not. ++ * ++ * \param smsessions sessions array. ++ * \return true if the sessions array is empty, false otherwise. ++ */ ++bool ssam_sessions_empty(struct spdk_ssam_session **smsessions); ++ ++/** ++ * Get the next session in the sessions array, beginning with the current session. ++ * ++ * \param smsessions sessions array. ++ * \param smsession the session to begin from. ++ * \return the next session found, or NULL if none is found. ++ */ ++struct spdk_ssam_session *ssam_sessions_next(struct spdk_ssam_session **smsessions, ++ struct spdk_ssam_session *smsession); ++ ++/** ++ * Insert an io wait task into a session. ++ * ++ * \param smsession the session to insert the io wait into. 
++ * \param io_wait the io wait to be inserted. ++ */ ++void ssam_session_insert_io_wait(struct spdk_ssam_session *smsession, ++ struct spdk_ssam_session_io_wait *io_wait); ++ ++/** ++ * Insert an io wait complete or dma task into the smdev. ++ * ++ * \param smdev the smdev to insert the io wait into. ++ * \param io_wait_r the io wait to be inserted. ++ */ ++void ssam_session_insert_io_wait_r(struct spdk_ssam_dev *smdev, ++ struct spdk_ssam_session_io_wait_r *io_wait_r); ++ ++/** ++ * Remove session from sessions and then stop session dev poller. ++ * ++ * \param smsession the session to be removed. ++ */ ++void ssam_session_destroy(struct spdk_ssam_session *smsession); ++ ++/** ++ * Show a ssam device info in json format. ++ * ++ * \param smdev ssam device. ++ * \param gfunc_id ssam global vf id. ++ * \param w JSON write context. ++ */ ++void ssam_dump_info_json(struct spdk_ssam_dev *smdev, uint16_t gfunc_id, ++ struct spdk_json_write_ctx *w); ++ ++/** ++ * Get a ssam device name. ++ * ++ * \param smdev ssam device. ++ * \return ssam device name or NULL ++ */ ++const char *ssam_dev_get_name(const struct spdk_ssam_dev *smdev); ++ ++/** ++ * Get a ssam session name. ++ * ++ * \param smsession ssam session. ++ * \return ssam session name or NULL ++ */ ++const char *ssam_session_get_name(const struct spdk_ssam_session *smsession); ++ ++/** ++ * Call a function on the provided ssam session. ++ * The function will be called on this session's thread. ++ * ++ * \param smsession ssam session. ++ * \param fn function to call on the session's thread ++ * \param cpl_fn function to be called at the end of the ssam management thread. ++ * Optional, can be NULL. ++ * \param send_event_flag whether an asynchronous operation or response is required ++ * \param ctx additional argument to both callbacks ++ * \return error code ++ */ ++int ssam_send_event_to_session(struct spdk_ssam_session *smsession, spdk_ssam_session_fn fn, ++ spdk_ssam_session_cpl_fn cpl_fn, struct spdk_ssam_send_event_flag send_event_flag, void *ctx); ++ ++/** ++ * Finish a blocking ssam_send_event_to_session() call and finally ++ * start the session. This must be called on the target lcore, which ++ * will now receive all session-related messages (e.g. from ++ * ssam_send_event_to_session()). ++ * ++ * Must be called under the global ssam lock. ++ * ++ * \param smsession ssam session ++ * \param response return code ++ */ ++void ssam_session_start_done(struct spdk_ssam_session *smsession, int response); ++ ++/** ++ * Finish a blocking ssam_send_event_to_session() call and finally ++ * stop the session. This must be called on the session's lcore which ++ * used to receive all session-related messages (e.g. from ++ * ssam_send_event_to_session()). After this call, the session- ++ * related messages will be once again processed by any arbitrary thread. ++ * ++ * Must be called under the global ssam lock. ++ * ++ * \param smsession ssam session ++ * \param rsp return code ++ * \param ctx user context ++ */ ++void ssam_session_stop_done(struct spdk_ssam_session *smsession, int rsp, void **ctx); ++ ++/** ++ * Mark the session as freed, so that it is not accessed any more. 
++ * ++ * \param ctx user context ++ */ ++void ssam_set_session_be_freed(void **ctx); ++ ++/** ++ * Find a ssam device in the global g_ssam_devices list by gfunc_id. ++ * If the ssam device is found, register a session to the existing ssam device's ++ * sessions list; if it is not found, first create a ssam device in the global ++ * g_ssam_devices list, and then register a session to the new ssam device's ++ * sessions list. ++ * ++ * Must be called under the global ssam lock. ++ * ++ * \param info ssam session register info. ++ * \param smsession ssam session created. ++ * \return 0 on success or negative on failure. ++ */ ++int ssam_session_register(struct spdk_ssam_session_reg_info *info, ++ struct spdk_ssam_session **smsession); ++ ++/** ++ * Unregister the smsession response callback function. ++ * ++ * \param smsession ssam session ++ */ ++void ssam_session_unreg_response_cb(struct spdk_ssam_session *smsession); ++ ++void ssam_dev_unregister(struct spdk_ssam_dev **dev); ++ ++void ssam_send_event_async_done(void **ctx); ++ ++void ssam_send_dev_destroy_msg(struct spdk_ssam_session *smsession, void *args); ++ ++/** ++ * Get ssam config. ++ * ++ * \param smsession ssam session ++ * \param config a memory region to store the config. ++ * \param len the input config param memory region length. ++ * \return 0 on success or -1 on failure. ++ */ ++int ssam_get_config(struct spdk_ssam_session *smsession, uint8_t *config, ++ uint32_t len, uint16_t queues); ++ ++/** ++ * Mount gfunc_id volume to the ssam normal queue. ++ * ++ * \param smsession ssam session ++ * \param lun_id lun id of gfunc_id. ++ * ++ * \return 0 on success or non-zero on failure. ++ */ ++int ssam_mount_normal(struct spdk_ssam_session *smsession, uint32_t lun_id); ++ ++/** ++ * Unmount function. ++ * ++ * \param smsession ssam session ++ * \param lun_id lun id of gfunc_id. ++ * ++ * \return 0 on success or non-zero on failure. ++ */ ++int ssam_umount_normal(struct spdk_ssam_session *smsession, uint32_t lun_id); ++ ++/** ++ * Mount gfunc_id volume to the ssam normal queue again. ++ * ++ * \param smsession ssam session ++ * \param lun_id lun id of gfunc_id. ++ * ++ * \return 0 on success or non-zero on failure. ++ */ ++int ssam_remount_normal(struct spdk_ssam_session *smsession, uint32_t lun_id); ++ ++/** ++ * Register a worker poller to the dev. ++ * ++ * \param smdev the dev to register the worker poller for. ++ * \return 0 on success or non-zero on failure. ++ */ ++int ssam_dev_register_worker_poller(struct spdk_ssam_dev *smdev); ++ ++/** ++ * Unregister the worker poller for the dev. ++ * ++ * \param smdev the dev to unregister the worker poller for. ++ */ ++void ssam_dev_unregister_worker_poller(struct spdk_ssam_dev *smdev); ++ ++/** ++ * Get the differential value of the current tsc. ++ * ++ * \param tsc the current tsc. ++ * \return the differential value. ++ */ ++uint64_t ssam_get_diff_tsc(uint64_t tsc); ++ ++/** ++ * Get the bdev name of the specific gfunc_id. ++ * ++ * \param gfunc_id ssam global vf id. ++ * ++ * \return the bdev name of gfunc_id ++ */ ++const char *ssam_get_bdev_name_by_gfunc_id(uint16_t gfunc_id); ++ ++/** ++ * Remove a ssam session. Remove the session associated with the unique gfunc_id, ++ * then remove the ssam device if the device does not have any sessions any more. ++ * ++ * Notice that this interface is not reentrant, so ssam_lock must be called first. ++ * ++ * \param smsession ssam session ++ * ++ * \return 0 on success, negative errno on error. ++ */ ++int ssam_session_unregister(struct spdk_ssam_session *smsession); ++ ++/** ++ * Get ssam iostat. 
++ * ++ * \param smsession ssam session ++ * \param stat a memory region to store iostat. ++ */ ++void spdk_ssam_get_iostat(struct spdk_ssam_session *smsession, ++ struct spdk_bdev_io_stat *stat); ++ ++/** ++ * Decrease dev io num. ++ * ++ * \param smdev ssam device. ++ */ ++void ssam_dev_io_dec(struct spdk_ssam_dev *smdev); ++ ++/** ++ * Get ssam session bdev. ++ * ++ * \param smsession ssam session ++ * ++ * \return the session bdev. ++ */ ++struct spdk_bdev *ssam_get_session_bdev(struct spdk_ssam_session *smsession); ++ ++/** ++ * free memory with rte. ++ * ++ * \param smsession ssam session ++ * ++ * \return 0 on success. ++ */ ++int ssam_free_ex(void *addr); ++ ++/** ++ * Get elem info from memory addr. ++ * ++ * \param memory addr ++ * ++ */ ++int ssam_malloc_elem_from_addr(const void *data, unsigned long long *pg_size, int *socket_id); ++ ++int spdk_ssam_fs_poller_init(void); ++ ++void spdk_ssam_fs_poller_destroy(void); ++ ++#endif /* SSAM_INTERNAL_H */ +diff --git a/lib/ssam/ssam_malloc.c b/lib/ssam/ssam_malloc.c +new file mode 100644 +index 0000000..4b52d11 +--- /dev/null ++++ b/lib/ssam/ssam_malloc.c +@@ -0,0 +1,56 @@ ++/*- ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include ++#include "spdk/env.h" ++ ++#include "ssam_internal.h" ++ ++int ssam_free_ex(void *addr) ++{ ++ spdk_free(addr); ++ return 0; ++} ++ ++int ssam_malloc_elem_from_addr(const void *data, unsigned long long *pg_size, int *socket_id) ++{ ++ struct rte_memseg_list *msl = NULL; ++ ++ msl = rte_mem_virt2memseg_list(data); ++ if (msl == NULL) { ++ return -1; ++ } ++ ++ *socket_id = msl->socket_id; ++ *pg_size = msl->page_sz; ++ return 0; ++} +diff --git a/lib/ssam/ssam_rpc.c b/lib/ssam/ssam_rpc.c +new file mode 100644 +index 0000000..3bdfee2 +--- /dev/null ++++ b/lib/ssam/ssam_rpc.c +@@ -0,0 +1,2145 @@ ++/*- ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "spdk/string.h" ++#include "spdk/env.h" ++#include "spdk/bdev_module.h" ++#include "spdk/ssam.h" ++#include "spdk/bdev.h" ++ ++#include "ssam_internal.h" ++#include "ssam_config.h" ++#include "rte_malloc.h" ++ ++#include "ssam_fs_internal.h" ++ ++static int ssam_rpc_get_gfunc_id_by_dbdf(char *dbdf, uint16_t *gfunc_id); ++ ++struct rpc_ssam_blk_ctrlr { ++ char *dev_name; ++ char *index; ++ bool readonly; ++ char *serial; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_construct_ssam_blk_ctrlr[] = { ++ {"dev_name", offsetof(struct rpc_ssam_blk_ctrlr, dev_name), spdk_json_decode_string}, ++ {"index", offsetof(struct rpc_ssam_blk_ctrlr, index), spdk_json_decode_string}, ++ {"readonly", offsetof(struct rpc_ssam_blk_ctrlr, readonly), spdk_json_decode_bool, true}, ++ {"serial", offsetof(struct rpc_ssam_blk_ctrlr, serial), spdk_json_decode_string, true}, ++}; ++ ++static void ++free_rpc_ssam_blk_ctrlr(struct rpc_ssam_blk_ctrlr *req) ++{ ++ if (req->dev_name != NULL) { ++ free(req->dev_name); ++ req->dev_name = NULL; ++ } ++ ++ if (req->index != NULL) { ++ free(req->index); ++ req->index = NULL; ++ } ++ ++ if (req->serial != NULL) { ++ free(req->serial); ++ req->serial = NULL; ++ } ++} ++ ++static int ++ssam_rpc_para_check(uint16_t gfunc_id) ++{ ++ int rc; ++ ++ rc = ssam_check_gfunc_id(gfunc_id); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_rpc_para_check_type(uint16_t gfunc_id, enum ssam_device_type target_type) ++{ ++ int rc; ++ enum ssam_device_type type; ++ ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ type = ssam_get_virtio_type(gfunc_id); ++ if (type == target_type) { ++ return 0; ++ } ++ SPDK_ERRLOG("Invalid virtio type, need type %d, actually %d\n", target_type, type); ++ ++ return -EINVAL; ++} ++ ++static void ++rpc_ssam_send_response_cb(void *arg, int rsp) ++{ ++ struct spdk_jsonrpc_request *request = arg; ++ ++ if (rsp != 0) { ++ 
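/* rsp carries a negative errno from the backend; report it to the RPC client as an error string. */ ++ 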
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rsp)); ++ } else { ++ spdk_jsonrpc_send_bool_response(request, true); ++ } ++ return; ++} ++ ++struct ssam_log_command_info { ++ char *user_name; ++ char *event; ++ char *src_addr; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_construct_log_command_info[] = { ++ {"user_name", offsetof(struct ssam_log_command_info, user_name), spdk_json_decode_string}, ++ {"event", offsetof(struct ssam_log_command_info, event), spdk_json_decode_string}, ++ {"src_addr", offsetof(struct ssam_log_command_info, src_addr), spdk_json_decode_string}, ++}; ++ ++static void ++free_rpc_ssam_log_command_info(struct ssam_log_command_info *req) ++{ ++ if (req->user_name != NULL) { ++ free(req->user_name); ++ req->user_name = NULL; ++ } ++ if (req->event != NULL) { ++ free(req->event); ++ req->event = NULL; ++ } ++ if (req->src_addr != NULL) { ++ free(req->src_addr); ++ req->src_addr = NULL; ++ } ++} ++ ++static void ++rpc_ssam_log_command_info(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct ssam_log_command_info req = {0}; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("log info params error, skip\n"); ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_construct_log_command_info, ++ SPDK_COUNTOF(g_rpc_construct_log_command_info), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("decode cmd info failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ SPDK_NOTICELOG("log event: from %s user %s event %s\n", req.src_addr, req.user_name, req.event); ++ ++invalid: ++ free_rpc_ssam_log_command_info(&req); ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++} ++SPDK_RPC_REGISTER("log_command_info", rpc_ssam_log_command_info, ++ SPDK_RPC_RUNTIME) ++ ++static int ++rpc_ssam_session_reg_response_cb(struct spdk_ssam_session *smsession, ++ struct spdk_jsonrpc_request *request) ++{ ++ if (smsession->rsp_fn != NULL) { ++ return -1; ++ } ++ smsession->rsp_fn = rpc_ssam_send_response_cb; ++ smsession->rsp_ctx = request; ++ return 0; ++} ++ ++static void ++rpc_init_session_reg_info(struct spdk_ssam_session_reg_info *info, ++ uint16_t queues, uint16_t gfunc_id, struct spdk_jsonrpc_request *request) ++{ ++ info->queues = queues; ++ info->gfunc_id = gfunc_id; ++ info->rsp_ctx = (void *)request; ++ info->rsp_fn = rpc_ssam_send_response_cb; ++} ++ ++static void ++free_rpc_ssam_session_reg_info(struct spdk_ssam_session_reg_info *info) ++{ ++ if (info->name != NULL) { ++ free(info->name); ++ info->name = NULL; ++ } ++ if (info->dbdf != NULL) { ++ free(info->dbdf); ++ info->dbdf = NULL; ++ } ++} ++ ++static uint16_t ++rpc_ssam_get_gfunc_id_by_index(char *index) ++{ ++ uint16_t gfunc_id, i; ++ int rc; ++ if (strlen(index) <= 0x5) { ++ for (i = 0; i < strlen(index); i++) { ++ if (!isdigit(index[i])) { ++ return SPDK_INVALID_GFUNC_ID; ++ } ++ } ++ gfunc_id = spdk_strtol(index, 10) > SPDK_INVALID_GFUNC_ID ? 
SPDK_INVALID_GFUNC_ID : spdk_strtol(
++ index, 10);
++ } else {
++ rc = ssam_rpc_get_gfunc_id_by_dbdf(index, &gfunc_id);
++ if (rc != 0) {
++ return SPDK_INVALID_GFUNC_ID;
++ }
++ }
++ return gfunc_id;
++}
++
++static void
++rpc_ssam_create_blk_controller(struct spdk_jsonrpc_request *request,
++ const struct spdk_json_val *params)
++{
++ struct spdk_ssam_session_reg_info info = {0};
++ struct rpc_ssam_blk_ctrlr req = {0};
++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID;
++ uint16_t queues;
++ int rc;
++
++ if (params == NULL) {
++ SPDK_ERRLOG("rpc_ssam_create_blk_controller params null\n");
++ rc = -EINVAL;
++ goto invalid;
++ }
++
++ rc = spdk_json_decode_object(params, g_rpc_construct_ssam_blk_ctrlr,
++ SPDK_COUNTOF(g_rpc_construct_ssam_blk_ctrlr), &req);
++ if (rc != 0) {
++ SPDK_ERRLOG("spdk_json_decode_object failed\n");
++ rc = -EINVAL;
++ goto invalid;
++ }
++
++ gfunc_id = rpc_ssam_get_gfunc_id_by_index(req.index);
++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_BLK);
++ if (rc != 0) {
++ goto invalid;
++ }
++
++ if (req.dev_name == NULL) {
++ rc = -ENODEV;
++ goto invalid;
++ }
++
++ queues = ssam_get_queues();
++ if (queues > SPDK_SSAM_MAX_VQUEUES) {
++ SPDK_ERRLOG("Queue number out of range, must be less than or equal to %u, actually %u.\n",
++ SPDK_SSAM_MAX_VQUEUES, queues);
++ rc = -EINVAL;
++ goto invalid;
++ }
++
++ rpc_init_session_reg_info(&info, queues, gfunc_id, request);
++
++ rc = ssam_blk_construct(&info, req.dev_name, req.readonly, req.serial);
++ if (rc < 0) {
++ goto invalid;
++ }
++
++ free_rpc_ssam_blk_ctrlr(&req);
++ free_rpc_ssam_session_reg_info(&info);
++ return;
++
++invalid:
++ free_rpc_ssam_blk_ctrlr(&req);
++ free_rpc_ssam_session_reg_info(&info);
++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
++ spdk_strerror(-rc));
++ return;
++}
++SPDK_RPC_REGISTER("create_blk_controller", rpc_ssam_create_blk_controller,
++ SPDK_RPC_RUNTIME)
++
++struct rpc_delete_ssam_ctrlr {
++ char *index;
++};
++
++static const struct spdk_json_object_decoder g_rpc_delete_ssam_ctrlr_decoder[] = {
++ {"index", offsetof(struct rpc_delete_ssam_ctrlr, index), spdk_json_decode_string},
++};
++
++static void
++free_rpc_delete_ssam_ctrlr(struct rpc_delete_ssam_ctrlr *req)
++{
++ if (req->index != NULL) {
++ free(req->index);
++ req->index = NULL;
++ }
++}
++
++static void
++rpc_ssam_delete_controller(struct spdk_jsonrpc_request *request,
++ const struct spdk_json_val *params)
++{
++ struct rpc_delete_ssam_ctrlr req = {0};
++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID;
++ struct spdk_ssam_session *smsession;
++ int rc;
++ enum ssam_device_type type;
++
++ if (params == NULL) {
++ SPDK_ERRLOG("rpc_ssam_delete_controller params null\n");
++ rc = -EINVAL;
++ goto invalid;
++ }
++
++ rc = spdk_json_decode_object(params, g_rpc_delete_ssam_ctrlr_decoder,
++ SPDK_COUNTOF(g_rpc_delete_ssam_ctrlr_decoder), &req);
++ if (rc != 0) {
++ SPDK_ERRLOG("spdk_json_decode_object failed\n");
++ rc = -EINVAL;
++ goto invalid;
++ }
++
++ gfunc_id = rpc_ssam_get_gfunc_id_by_index(req.index);
++ rc = ssam_rpc_para_check(gfunc_id);
++ if (rc != 0) {
++ goto invalid;
++ }
++
++ type = ssam_get_virtio_type(gfunc_id);
++ if (type == SSAM_DEVICE_VIRTIO_FS) {
++ SPDK_ERRLOG("should use fs_controller_delete to delete fs_controller\n");
++ rc = -EINVAL;
++ goto invalid;
++ }
++
++ ssam_lock();
++ smsession = ssam_session_find(gfunc_id);
++ if (smsession == NULL) {
++ SPDK_ERRLOG("Couldn't find session with function id %d.\n", gfunc_id);
++ rc = -ENODEV;
++ ssam_unlock();
++ goto invalid;
++ }
++
++ rc
= rpc_ssam_session_reg_response_cb(smsession, request); ++ if (rc != 0) { ++ SPDK_ERRLOG("The controller is being operated.\n"); ++ rc = -EALREADY; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = ssam_session_unregister(smsession); ++ if (rc != 0) { ++ /* ++ * Unregitster response cb to avoid use request in the cb function, ++ * because if error happend, request will be responsed immediately ++ */ ++ ssam_session_unreg_response_cb(smsession); ++ ssam_unlock(); ++ goto invalid; ++ } ++ ssam_unlock(); ++ ++ free_rpc_delete_ssam_ctrlr(&req); ++ return; ++ ++invalid: ++ free_rpc_delete_ssam_ctrlr(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("delete_controller", rpc_ssam_delete_controller, SPDK_RPC_RUNTIME) ++ ++struct rpc_delete_ssam_scsi_ctrlr { ++ char *name; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_delete_ssam_scsi_ctrlr_decoder[] = { ++ {"name", offsetof(struct rpc_delete_ssam_scsi_ctrlr, name), spdk_json_decode_string}, ++}; ++ ++static void ++free_rpc_delete_ssam_scsi_ctrlrs(struct rpc_delete_ssam_scsi_ctrlr *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static void ++rpc_ssam_delete_scsi_controller(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_delete_ssam_scsi_ctrlr req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ struct spdk_ssam_session *smsession; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_delete_controller params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_delete_ssam_scsi_ctrlr_decoder, ++ SPDK_COUNTOF(g_rpc_delete_ssam_scsi_ctrlr_decoder), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_SCSI); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_lock(); ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ SPDK_ERRLOG("Couldn't find session with function id %d.\n", gfunc_id); ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = rpc_ssam_session_reg_response_cb(smsession, request); ++ if (rc != 0) { ++ SPDK_ERRLOG("The controller is being operated.\n"); ++ rc = -EALREADY; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = ssam_session_unregister(smsession); ++ if (rc != 0) { ++ /* ++ * Unregitster response cb to avoid use request in the cb function, ++ * because if error happend, request will be responsed immediately ++ */ ++ ssam_session_unreg_response_cb(smsession); ++ ssam_unlock(); ++ goto invalid; ++ } ++ ssam_unlock(); ++ ++ free_rpc_delete_ssam_scsi_ctrlrs(&req); ++ return; ++ ++invalid: ++ free_rpc_delete_ssam_scsi_ctrlrs(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("delete_scsi_controller", rpc_ssam_delete_scsi_controller, SPDK_RPC_RUNTIME) ++ ++struct rpc_get_ssam_ctrlrs { ++ uint32_t function_id; ++ char *dbdf; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_get_ssam_ctrlrs_decoder[] = { ++ {"function_id", offsetof(struct rpc_get_ssam_ctrlrs, function_id), spdk_json_decode_uint32, true}, ++ {"dbdf", offsetof(struct rpc_get_ssam_ctrlrs, dbdf), spdk_json_decode_string, true}, ++}; ++ ++static void ++free_rpc_get_ssam_ctrlrs(struct 
rpc_get_ssam_ctrlrs *req) ++{ ++ if (req->dbdf != NULL) { ++ free(req->dbdf); ++ req->dbdf = NULL; ++ } ++} ++ ++static void ++_rpc_get_ssam_controller(struct spdk_json_write_ctx *w, ++ struct spdk_ssam_dev *smdev, uint16_t gfunc_id) ++{ ++ ssam_dump_info_json(smdev, gfunc_id, w); ++} ++ ++static int ++rpc_ssam_show_controllers(struct spdk_jsonrpc_request *request, uint16_t gfunc_id) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_json_write_ctx *w = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ ssam_lock(); ++ if (gfunc_id != SPDK_INVALID_GFUNC_ID) { ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ ssam_unlock(); ++ return -ENODEV; ++ } ++ ++ w = spdk_jsonrpc_begin_result(request); ++ spdk_json_write_array_begin(w); ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ _rpc_get_ssam_controller(w, smdev, gfunc_id); ++ smdev = ssam_dev_next(smdev); ++ } ++ ssam_unlock(); ++ spdk_json_write_array_end(w); ++ spdk_jsonrpc_end_result(request, w); ++ return 0; ++ } ++ ++ w = spdk_jsonrpc_begin_result(request); ++ spdk_json_write_array_begin(w); ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ _rpc_get_ssam_controller(w, smdev, gfunc_id); ++ smdev = ssam_dev_next(smdev); ++ } ++ ssam_unlock(); ++ spdk_json_write_array_end(w); ++ spdk_jsonrpc_end_result(request, w); ++ ++ return 0; ++} ++ ++static int ++rpc_ssam_show_scsi_controllers(struct spdk_jsonrpc_request *request, uint16_t gfunc_id) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_json_write_ctx *w = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ ssam_lock(); ++ if (gfunc_id != SPDK_INVALID_GFUNC_ID) { ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ ssam_unlock(); ++ return -ENODEV; ++ } else if (smsession->backend->type != VIRTIO_TYPE_SCSI) { ++ ssam_unlock(); ++ return -EINVAL; ++ } ++ ++ smdev = smsession->smdev; ++ ++ w = spdk_jsonrpc_begin_result(request); ++ spdk_json_write_array_begin(w); ++ ++ smsession = smdev->smsessions[gfunc_id]; ++ smsession->backend->dump_info_json(smsession, w); ++ ssam_unlock(); ++ ++ spdk_json_write_array_end(w); ++ spdk_jsonrpc_end_result(request, w); ++ return 0; ++ } ++ ++ w = spdk_jsonrpc_begin_result(request); ++ spdk_json_write_array_begin(w); ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ while (smsession != NULL) { ++ if (smsession->backend->type == VIRTIO_TYPE_SCSI) { ++ smsession->backend->dump_info_json(smsession, w); ++ } ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ ssam_unlock(); ++ spdk_json_write_array_end(w); ++ spdk_jsonrpc_end_result(request, w); ++ ++ return 0; ++} ++ ++static int ++rpc_ssam_show_fs_controllers(struct spdk_jsonrpc_request *request, uint16_t gfunc_id) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_json_write_ctx *w = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ ssam_lock(); ++ if (gfunc_id != SPDK_INVALID_GFUNC_ID) { ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ ssam_unlock(); ++ return -ENODEV; ++ } ++ ++ w = spdk_jsonrpc_begin_result(request); ++ spdk_json_write_array_begin(w); ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ bool is_smsession_exit = 0; ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ while (smsession != NULL) { ++ if (smsession->backend->type == VIRTIO_TYPE_FS && smsession->gfunc_id == gfunc_id) { ++ if 
(is_smsession_exit == 0) { ++ is_smsession_exit = 1; ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "ctrlr", ssam_dev_get_name(smdev)); ++ spdk_json_write_named_string_fmt(w, "cpumask", "0x%s", ++ spdk_cpuset_fmt(spdk_thread_get_cpumask(smdev->thread))); ++ spdk_json_write_named_uint32(w, "session_num", (uint32_t)smdev->active_session_num); ++ spdk_json_write_named_object_begin(w, "backend_specific"); ++ spdk_json_write_named_array_begin(w, "session"); ++ } ++ smsession->backend->dump_info_json(smsession, w); ++ } ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ if (is_smsession_exit == 0) { ++ smdev = ssam_dev_next(smdev); ++ continue; ++ } ++ if (is_smsession_exit == 1) { ++ spdk_json_write_array_end(w); ++ spdk_json_write_object_end(w); ++ spdk_json_write_object_end(w); ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ ++ ssam_unlock(); ++ spdk_json_write_array_end(w); ++ spdk_jsonrpc_end_result(request, w); ++ return 0; ++ } ++ ++ w = spdk_jsonrpc_begin_result(request); ++ spdk_json_write_array_begin(w); ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ bool is_smsession_exit = 0; ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ while (smsession != NULL) { ++ if (smsession->backend->type == VIRTIO_TYPE_FS) { ++ is_smsession_exit = 1; ++ break; ++ } ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ if (is_smsession_exit == 0) { ++ smdev = ssam_dev_next(smdev); ++ continue; ++ } ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "ctrlr", ssam_dev_get_name(smdev)); ++ spdk_json_write_named_string_fmt(w, "cpumask", "0x%s", ++ spdk_cpuset_fmt(spdk_thread_get_cpumask(smdev->thread))); ++ spdk_json_write_named_uint32(w, "session_num", (uint32_t)smdev->active_session_num); ++ spdk_json_write_named_object_begin(w, "backend_specific"); ++ spdk_json_write_named_array_begin(w, "session"); ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ while (smsession != NULL) { ++ if (smsession->backend->type == VIRTIO_TYPE_FS) { ++ smsession->backend->dump_info_json(smsession, w); ++ } ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ spdk_json_write_array_end(w); ++ spdk_json_write_object_end(w); ++ spdk_json_write_object_end(w); ++ smdev = ssam_dev_next(smdev); ++ } ++ ssam_unlock(); ++ spdk_json_write_array_end(w); ++ spdk_jsonrpc_end_result(request, w); ++ ++ return 0; ++} ++ ++static void ++rpc_ssam_get_controllers(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_get_ssam_ctrlrs req = { ++ .function_id = SPDK_INVALID_GFUNC_ID, ++ .dbdf = NULL, ++ }; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ ++ if (params != NULL) { ++ rc = spdk_json_decode_object(params, g_rpc_get_ssam_ctrlrs_decoder, ++ SPDK_COUNTOF(g_rpc_get_ssam_ctrlrs_decoder), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ } ++ ++ if (req.function_id != SPDK_INVALID_GFUNC_ID && req.dbdf != NULL) { ++ SPDK_ERRLOG("get_controllers can have at most one parameter\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ if (req.function_id != SPDK_INVALID_GFUNC_ID) { ++ gfunc_id = req.function_id; ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ } ++ ++ if (req.dbdf != NULL) { ++ rc = ssam_rpc_get_gfunc_id_by_dbdf(req.dbdf, &gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto 
invalid; ++ } ++ } ++ ++ rc = rpc_ssam_show_controllers(request, gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_get_ssam_ctrlrs(&req); ++ return; ++ ++invalid: ++ free_rpc_get_ssam_ctrlrs(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("get_controllers", rpc_ssam_get_controllers, SPDK_RPC_RUNTIME) ++ ++struct rpc_get_ssam_scsi_ctrlrs { ++ char *name; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_get_ssam_scsi_ctrlrs_decoder[] = { ++ {"name", offsetof(struct rpc_get_ssam_scsi_ctrlrs, name), spdk_json_decode_string, true}, ++}; ++ ++static void ++free_rpc_ssam_ctrlrs(struct rpc_get_ssam_scsi_ctrlrs *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static void ++rpc_ssam_get_scsi_controllers(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_get_ssam_scsi_ctrlrs req = { ++ .name = NULL, ++ }; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ ++ if (params != NULL) { ++ rc = spdk_json_decode_object(params, g_rpc_get_ssam_scsi_ctrlrs_decoder, ++ SPDK_COUNTOF(g_rpc_get_ssam_scsi_ctrlrs_decoder), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ } ++ ++ if (req.name != NULL) { ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ } ++ ++ rc = rpc_ssam_show_scsi_controllers(request, gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_ctrlrs(&req); ++ return; ++ ++invalid: ++ free_rpc_ssam_ctrlrs(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("get_scsi_controllers", rpc_ssam_get_scsi_controllers, SPDK_RPC_RUNTIME) ++ ++struct rpc_ssam_controller_get_iostat { ++ uint32_t function_id; ++ char *dbdf; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_ssam_controller_get_iostat_decoder[] = { ++ {"function_id", offsetof(struct rpc_ssam_controller_get_iostat, function_id), spdk_json_decode_uint32, true}, ++ {"dbdf", offsetof(struct rpc_ssam_controller_get_iostat, dbdf), spdk_json_decode_string, true}, ++}; ++ ++static void ++free_rpc_ssam_controller_get_iostat(struct rpc_ssam_controller_get_iostat *req) ++{ ++ if (req->dbdf != NULL) { ++ free(req->dbdf); ++ req->dbdf = NULL; ++ } ++} ++ ++static int ++rpc_ssam_show_iostat(struct spdk_jsonrpc_request *request, uint16_t gfunc_id) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_json_write_ctx *w = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ ssam_lock(); ++ if (gfunc_id != SPDK_INVALID_GFUNC_ID) { ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ ssam_unlock(); ++ return -ENODEV; ++ } ++ ++ w = spdk_jsonrpc_begin_result(request); ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_uint64(w, "tick_rate", spdk_get_ticks_hz()); ++ spdk_json_write_named_array_begin(w, "dbdfs"); ++ ++ if (smsession->backend->show_iostat_json != NULL) { ++ smsession->backend->show_iostat_json(smsession, SPDK_SSAM_SCSI_CTRLR_MAX_DEVS, w); ++ } ++ ++ ssam_unlock(); ++ ++ spdk_json_write_array_end(w); ++ spdk_json_write_object_end(w); ++ spdk_jsonrpc_end_result(request, w); ++ return 0; ++ } ++ ++ w = spdk_jsonrpc_begin_result(request); ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_uint64(w, "tick_rate", 
spdk_get_ticks_hz()); ++ spdk_json_write_named_array_begin(w, "dbdfs"); ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "name", smdev->name); ++ spdk_json_write_named_uint64(w, "flight_io", smdev->io_num); ++ spdk_json_write_named_uint64(w, "discard_io_num", smdev->discard_io_num); ++ spdk_json_write_named_uint64(w, "wait_io", smdev->io_wait_cnt); ++ spdk_json_write_named_uint64(w, "wait_io_r", smdev->io_wait_r_cnt); ++ spdk_json_write_object_end(w); ++ while (smsession != NULL) { ++ if (smsession->backend->show_iostat_json != NULL) { ++ smsession->backend->show_iostat_json(smsession, SPDK_SSAM_SCSI_CTRLR_MAX_DEVS, w); ++ } ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ ++ ssam_unlock(); ++ spdk_json_write_array_end(w); ++ spdk_json_write_object_end(w); ++ spdk_jsonrpc_end_result(request, w); ++ return 0; ++} ++ ++static void ++rpc_ssam_controller_get_iostat(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_ssam_controller_get_iostat req = { ++ .function_id = SPDK_INVALID_GFUNC_ID, ++ .dbdf = NULL, ++ }; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ ++ if (params != NULL) { ++ rc = spdk_json_decode_object(params, g_rpc_ssam_controller_get_iostat_decoder, ++ SPDK_COUNTOF(g_rpc_ssam_controller_get_iostat_decoder), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ } ++ ++ if (req.function_id != SPDK_INVALID_GFUNC_ID && req.dbdf != NULL) { ++ SPDK_ERRLOG("controller_get_iostat can have at most one parameter\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ if (req.function_id != SPDK_INVALID_GFUNC_ID) { ++ gfunc_id = req.function_id; ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ } ++ ++ if (req.dbdf != NULL) { ++ rc = ssam_rpc_get_gfunc_id_by_dbdf(req.dbdf, &gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ } ++ ++ rc = rpc_ssam_show_iostat(request, gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_controller_get_iostat(&req); ++ return; ++ ++invalid: ++ free_rpc_ssam_controller_get_iostat(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("controller_get_iostat", rpc_ssam_controller_get_iostat, SPDK_RPC_RUNTIME) ++ ++struct rpc_ssam_clear_iostat { ++ char *type; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_ssam_clear_iostat_decoder[] = { ++ {"type", offsetof(struct rpc_ssam_clear_iostat, type), spdk_json_decode_string, true}, ++}; ++ ++static void ++free_rpc_ssam_clear_iostat(struct rpc_ssam_clear_iostat *req) ++{ ++ if (req->type != NULL) { ++ free(req->type); ++ req->type = NULL; ++ } ++} ++ ++static void ++rpc_ssam_clear_iostat(int typenum) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ ssam_lock(); ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ while (smsession != NULL) { ++ if (typenum == VIRTIO_TYPE_UNKNOWN) { ++ if (smsession->backend->clear_iostat_json != NULL) { ++ smsession->backend->clear_iostat_json(smsession); ++ } ++ } else { ++ if (smsession->backend->clear_iostat_json != NULL && 
(int)smsession->backend->type == typenum) { ++ smsession->backend->clear_iostat_json(smsession); ++ } ++ } ++ ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ ssam_unlock(); ++} ++ ++static void ++rpc_ssam_controller_clear_iostat(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_ssam_clear_iostat req = { ++ .type = NULL, ++ }; ++ int rc; ++ int typenum = VIRTIO_TYPE_UNKNOWN; ++ ++ if (params != NULL) { ++ rc = spdk_json_decode_object(params, g_rpc_ssam_clear_iostat_decoder, ++ SPDK_COUNTOF(g_rpc_ssam_clear_iostat_decoder), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ } ++ ++ if (req.type != NULL) { ++ if (strcmp(req.type, SPDK_SESSION_TYPE_FS) == 0) { ++ typenum = VIRTIO_TYPE_FS; ++ } else if (strcmp(req.type, SPDK_SESSION_TYPE_SCSI) == 0) { ++ typenum = VIRTIO_TYPE_SCSI; ++ } else if (strcmp(req.type, SPDK_SESSION_TYPE_BLK) == 0) { ++ typenum = VIRTIO_TYPE_BLK; ++ } else { ++ rc = -EINVAL; ++ goto invalid; ++ } ++ } ++ ++ rpc_ssam_clear_iostat(typenum); ++ free_rpc_ssam_clear_iostat(&req); ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++ ++invalid: ++ free_rpc_ssam_clear_iostat(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} ++SPDK_RPC_REGISTER("controller_clear_iostat", rpc_ssam_controller_clear_iostat, SPDK_RPC_RUNTIME) ++ ++struct rpc_bdev_resize { ++ uint32_t function_id; ++ uint64_t new_size_in_mb; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_bdev_resize[] = { ++ {"function_id", offsetof(struct rpc_bdev_resize, function_id), spdk_json_decode_uint32}, ++ {"new_size_in_mb", offsetof(struct rpc_bdev_resize, new_size_in_mb), spdk_json_decode_uint64}, ++}; ++ ++static int ++ssam_bdev_resize(struct spdk_bdev *bdev, uint64_t new_size_in_mb) ++{ ++ char *bdev_name = bdev->name; ++ int rc; ++ uint64_t current_size_in_mb; ++ uint64_t new_size_in_byte; ++ ++ if (bdev->blocklen == 0) { ++ SPDK_ERRLOG("The blocklen of bdev %s is zero\n", bdev_name); ++ return -EINVAL; ++ } ++ ++ if (UINT64_MAX / bdev->blockcnt < bdev->blocklen) { ++ SPDK_ERRLOG("The old size of bdev is too large, blockcnt: %lu, blocklen: %u\n", ++ bdev->blockcnt, bdev->blocklen); ++ return -EINVAL; ++ } ++ ++ if (new_size_in_mb == 0) { ++ goto end; ++ } ++ ++ current_size_in_mb = bdev->blocklen * bdev->blockcnt / SSAM_MB; ++ if (new_size_in_mb < current_size_in_mb) { ++ SPDK_ERRLOG("The new bdev size must not be smaller than current bdev size\n"); ++ return -EINVAL; ++ } ++ ++ if (UINT64_MAX / new_size_in_mb < SSAM_MB) { ++ SPDK_ERRLOG("The new bdev size is too large\n"); ++ return -EINVAL; ++ } ++ ++end: ++ new_size_in_byte = new_size_in_mb * SSAM_MB; ++ ++ rc = spdk_bdev_notify_blockcnt_change(bdev, new_size_in_byte / bdev->blocklen); ++ if (rc != 0) { ++ SPDK_ERRLOG("failed to notify block cnt change\n"); ++ return -EINVAL; ++ } ++ SPDK_NOTICELOG("bdev %s resize %lu(mb) done.\n", bdev->name, new_size_in_mb); ++ ++ return 0; ++} ++ ++static void ++rpc_ssam_bdev_resize(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_bdev_resize req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ struct spdk_ssam_session *smsession = NULL; ++ struct spdk_bdev *bdev = NULL; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_bdev_resize params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = 
spdk_json_decode_object(params, g_rpc_bdev_resize, ++ SPDK_COUNTOF(g_rpc_bdev_resize), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = req.function_id; ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_BLK); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_lock(); ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ SPDK_ERRLOG("Before resize target, there need to create controller.\n"); ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ if (smsession->backend->get_bdev != NULL) { ++ bdev = smsession->backend->get_bdev(smsession, 0); ++ } ++ if (bdev == NULL) { ++ SPDK_ERRLOG("The controller hasn't correlated to a bdev.\n"); ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ ssam_unlock(); ++ ++ rc = ssam_bdev_resize(bdev, req.new_size_in_mb); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++ ++invalid: ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("bdev_resize", rpc_ssam_bdev_resize, SPDK_RPC_RUNTIME) ++ ++struct rpc_scsi_bdev_resize { ++ char *name; ++ uint32_t tgt_id; ++ uint64_t new_size_in_mb; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_scsi_bdev_resize[] = { ++ {"name", offsetof(struct rpc_scsi_bdev_resize, name), spdk_json_decode_string}, ++ {"tgt_id", offsetof(struct rpc_scsi_bdev_resize, tgt_id), spdk_json_decode_uint32}, ++ {"new_size_in_mb", offsetof(struct rpc_scsi_bdev_resize, new_size_in_mb), spdk_json_decode_uint64}, ++}; ++ ++static void ++free_rpc_scsi_bdev_resize(struct rpc_scsi_bdev_resize *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static void ++rpc_ssam_scsi_bdev_resize(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_scsi_bdev_resize req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ struct spdk_ssam_session *smsession = NULL; ++ struct spdk_bdev *bdev = NULL; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_bdev_resize params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_scsi_bdev_resize, ++ SPDK_COUNTOF(g_rpc_scsi_bdev_resize), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_SCSI); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_lock(); ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ SPDK_ERRLOG("Before resize target, there need to create controller.\n"); ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ if (smsession->backend->get_bdev != NULL) { ++ bdev = smsession->backend->get_bdev(smsession, req.tgt_id); ++ } ++ if (bdev == NULL) { ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ ssam_unlock(); ++ ++ rc = ssam_bdev_resize(bdev, req.new_size_in_mb); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_scsi_bdev_resize(&req); ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++ ++invalid: ++ free_rpc_scsi_bdev_resize(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("scsi_bdev_resize", rpc_ssam_scsi_bdev_resize, SPDK_RPC_RUNTIME) ++ ++struct 
rpc_bdev_aio_resize { ++ char *name; ++ uint64_t new_size_in_mb; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_bdev_aio_resize[] = { ++ {"name", offsetof(struct rpc_bdev_aio_resize, name), spdk_json_decode_string}, ++ {"new_size_in_mb", offsetof(struct rpc_bdev_aio_resize, new_size_in_mb), spdk_json_decode_uint64}, ++}; ++ ++static void ++free_rpc_ssam_bdev_aio_resize(struct rpc_bdev_aio_resize *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static void ++rpc_ssam_bdev_aio_resize(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_bdev_aio_resize req = {0}; ++ struct spdk_bdev *bdev = NULL; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_bdev_resize params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_bdev_aio_resize, ++ SPDK_COUNTOF(g_rpc_bdev_aio_resize), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ if (req.name) { ++ bdev = spdk_bdev_get_by_name(req.name); ++ if (bdev == NULL) { ++ SPDK_ERRLOG("bdev '%s' does not exist\n", req.name); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ } ++ ++ rc = ssam_bdev_resize(bdev, req.new_size_in_mb); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_bdev_aio_resize(&req); ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++ ++invalid: ++ free_rpc_ssam_bdev_aio_resize(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("bdev_aio_resize", rpc_ssam_bdev_aio_resize, SPDK_RPC_RUNTIME) ++ ++static void ++rpc_os_ready(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params) ++{ ++ int rc = 0; ++ int fd; ++ char *enable = "1"; ++ ++ fd = open(SSAM_STORAGE_READY_FILE, O_RDWR); ++ if (fd < 0) { ++ SPDK_ERRLOG("Open storage ready file failed.\n"); ++ rc = EPERM; ++ goto invalid; ++ } ++ ++ rc = write(fd, enable, strlen(enable)); ++ if (rc < 0) { ++ SPDK_ERRLOG("Write storage ready file failed.\n"); ++ close(fd); ++ goto invalid; ++ } ++ ++ close(fd); ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++ ++invalid: ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("os_ready", rpc_os_ready, SPDK_RPC_RUNTIME) ++ ++struct rpc_create_scsi_controller { ++ char *dbdf; ++ char *name; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_create_scsi_controller[] = { ++ {"dbdf", offsetof(struct rpc_create_scsi_controller, dbdf), spdk_json_decode_string}, ++ {"name", offsetof(struct rpc_create_scsi_controller, name), spdk_json_decode_string}, ++}; ++ ++static void ++free_rpc_ssam_create_scsi_controller(struct rpc_create_scsi_controller *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++ if (req->dbdf != NULL) { ++ free(req->dbdf); ++ req->dbdf = NULL; ++ } ++} ++ ++static int ++ssam_rpc_get_gfunc_id_by_dbdf(char *dbdf, uint16_t *gfunc_id) ++{ ++ int rc; ++ uint32_t dbdf_num; ++ ++ rc = ssam_dbdf_str2num(dbdf, &dbdf_num); ++ if (rc != 0) { ++ SPDK_ERRLOG("convert dbdf(%s) to num failed, rc: %d.\n", dbdf, rc); ++ return -EINVAL; ++ } ++ ++ rc = ssam_get_funcid_by_dbdf(dbdf_num, gfunc_id); ++ if (rc != 0) { ++ SPDK_ERRLOG("find gfuncid by dbdf(%u) failed, rc: %d.\n", dbdf_num, rc); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_rpc_para_check_name(char *name) ++{ 
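++ /* A controller name is available only if it does not resolve to an existing session. */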
++ uint16_t gfunc_id = ssam_get_gfunc_id_by_name(name); ++ if (gfunc_id == SPDK_INVALID_GFUNC_ID) { ++ return 0; ++ } ++ ++ return -EEXIST; ++} ++ ++static void ++rpc_ssam_create_scsi_controller(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct spdk_ssam_session_reg_info info = {0}; ++ struct rpc_create_scsi_controller req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ uint16_t queues; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_create_scsi_controller params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_create_scsi_controller, ++ SPDK_COUNTOF(g_rpc_create_scsi_controller), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = ssam_rpc_para_check_name(req.name); ++ if (rc != 0) { ++ SPDK_ERRLOG("controller name(%s) is existed\n", req.name); ++ goto invalid; ++ } ++ ++ rc = ssam_rpc_get_gfunc_id_by_dbdf(req.dbdf, &gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_SCSI); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ queues = ssam_get_queues(); ++ if (queues > SPDK_SSAM_MAX_VQUEUES) { ++ SPDK_ERRLOG("Queue number out of range, need less or equal than %u, actually %u.\n", ++ SPDK_SSAM_MAX_VQUEUES, queues); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rpc_init_session_reg_info(&info, queues, gfunc_id, request); ++ ++ info.name = strdup(req.name); ++ if (info.name == NULL) { ++ SPDK_ERRLOG("Failed to create name(%s) for ssam session reg info.\n", req.name); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ info.dbdf = strdup(req.dbdf); ++ if (info.dbdf == NULL) { ++ SPDK_ERRLOG("Failed to create dbdf(%s) for ssam session reg info.\n", req.dbdf); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = ssam_scsi_construct(&info); ++ if (rc < 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_create_scsi_controller(&req); ++ free_rpc_ssam_session_reg_info(&info); ++ return; ++ ++invalid: ++ free_rpc_ssam_create_scsi_controller(&req); ++ free_rpc_ssam_session_reg_info(&info); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++ return; ++} ++ ++SPDK_RPC_REGISTER("create_scsi_controller", rpc_ssam_create_scsi_controller, SPDK_RPC_RUNTIME) ++ ++struct rpc_scsi_controller_add_target { ++ char *name; ++ int32_t scsi_tgt_num; ++ char *bdev_name; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_scsi_controller_add_target[] = { ++ {"name", offsetof(struct rpc_scsi_controller_add_target, name), spdk_json_decode_string}, ++ {"scsi_tgt_num", offsetof(struct rpc_scsi_controller_add_target, scsi_tgt_num), spdk_json_decode_uint32}, ++ {"bdev_name", offsetof(struct rpc_scsi_controller_add_target, bdev_name), spdk_json_decode_string}, ++}; ++ ++static void ++free_rpc_ssam_scsi_ctrlr_add_target(struct rpc_scsi_controller_add_target *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++ if (req->bdev_name != NULL) { ++ free(req->bdev_name); ++ req->bdev_name = NULL; ++ } ++} ++ ++static void ++rpc_ssam_scsi_controller_add_target(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_scsi_controller_add_target req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ struct spdk_ssam_session *smsession; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_scsi_controller_add_target params null\n"); ++ rc = -EINVAL; ++ goto 
invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_scsi_controller_add_target, ++ SPDK_COUNTOF(g_rpc_scsi_controller_add_target), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_SCSI); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_lock(); ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ SPDK_ERRLOG("Before adding a SCSI target, there should be a SCSI controller.\n"); ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = rpc_ssam_session_reg_response_cb(smsession, request); ++ if (rc != 0) { ++ SPDK_ERRLOG("The controller is being operated.\n"); ++ rc = -EALREADY; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = ssam_scsi_dev_add_tgt(smsession, req.scsi_tgt_num, req.bdev_name); ++ if (rc != 0) { ++ /* ++ * Unregitster response cb to avoid use request in the cb function, ++ * because if error happend, request will be responsed immediately ++ */ ++ ssam_session_unreg_response_cb(smsession); ++ ssam_unlock(); ++ goto invalid; ++ } ++ ssam_unlock(); ++ ++ free_rpc_ssam_scsi_ctrlr_add_target(&req); ++ return; ++ ++invalid: ++ free_rpc_ssam_scsi_ctrlr_add_target(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("scsi_controller_add_target", rpc_ssam_scsi_controller_add_target, ++ SPDK_RPC_RUNTIME) ++ ++struct rpc_scsi_controller_remove_target { ++ char *name; ++ int32_t scsi_tgt_num; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_scsi_controller_remove_target[] = { ++ {"name", offsetof(struct rpc_scsi_controller_remove_target, name), spdk_json_decode_string}, ++ {"scsi_tgt_num", offsetof(struct rpc_scsi_controller_remove_target, scsi_tgt_num), spdk_json_decode_int32}, ++}; ++ ++static void ++free_rpc_scsi_controller_remove_target(struct rpc_scsi_controller_remove_target *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static void ++rpc_ssam_scsi_controller_remove_target(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_scsi_controller_remove_target req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_scsi_controller_remove_target params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_scsi_controller_remove_target, ++ SPDK_COUNTOF(g_rpc_scsi_controller_remove_target), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_SCSI); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_lock(); ++ ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = rpc_ssam_session_reg_response_cb(smsession, request); ++ if (rc != 0) { ++ SPDK_ERRLOG("The controller is being operated.\n"); ++ rc = -EALREADY; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = ssam_scsi_dev_remove_tgt(smsession, req.scsi_tgt_num, ++ rpc_ssam_send_response_cb, request); ++ if (rc != 0) { ++ /* ++ * Unregitster response cb to avoid use request in the cb function, ++ * because if error happend, 
request will be responsed immediately ++ */ ++ ssam_session_unreg_response_cb(smsession); ++ ssam_unlock(); ++ goto invalid; ++ } ++ ssam_unlock(); ++ free_rpc_scsi_controller_remove_target(&req); ++ return; ++ ++invalid: ++ free_rpc_scsi_controller_remove_target(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("scsi_controller_remove_target", rpc_ssam_scsi_controller_remove_target, ++ SPDK_RPC_RUNTIME) ++ ++struct rpc_ssam_scsi_device_iostat { ++ char *name; ++ int32_t scsi_tgt_num; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_ssam_scsi_device_iostat[] = { ++ {"name", offsetof(struct rpc_ssam_scsi_device_iostat, name), spdk_json_decode_string}, ++ {"scsi_tgt_num", offsetof(struct rpc_ssam_scsi_device_iostat, scsi_tgt_num), spdk_json_decode_int32}, ++}; ++ ++static void ++free_rpc_ssam_scsi_device_iostat(struct rpc_ssam_scsi_device_iostat *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static int ++rpc_ssam_show_scsi_iostat(struct spdk_jsonrpc_request *request, uint16_t gfunc_id, ++ uint16_t scsi_tgt_num) ++{ ++ struct spdk_json_write_ctx *w = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ ssam_lock(); ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ ssam_unlock(); ++ return -ENODEV; ++ } else if (smsession->backend->type != VIRTIO_TYPE_SCSI) { ++ ssam_unlock(); ++ return -EINVAL; ++ } ++ ++ w = spdk_jsonrpc_begin_result(request); ++ ++ if (smsession->backend->show_iostat_json != NULL) { ++ smsession->backend->show_iostat_json(smsession, scsi_tgt_num, w); ++ } ++ ++ ssam_unlock(); ++ ++ spdk_jsonrpc_end_result(request, w); ++ return 0; ++} ++ ++static void ++rpc_ssam_scsi_device_iostat(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_ssam_scsi_device_iostat req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_scsi_device_iostat params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_ssam_scsi_device_iostat, ++ SPDK_COUNTOF(g_rpc_ssam_scsi_device_iostat), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ if (req.scsi_tgt_num < 0 || req.scsi_tgt_num > SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("scsi_tgt_num is out of range\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ rc = rpc_ssam_show_scsi_iostat(request, gfunc_id, req.scsi_tgt_num); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_scsi_device_iostat(&req); ++ return; ++ ++invalid: ++ free_rpc_ssam_scsi_device_iostat(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("scsi_device_iostat", rpc_ssam_scsi_device_iostat, SPDK_RPC_RUNTIME) ++ ++struct rpc_limit_log_interval { ++ int interval; ++}; ++ ++static void ++rpc_ssam_device_pcie_list(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct spdk_json_write_ctx *w = NULL; ++ int rc; ++ uint32_t size = ssam_get_device_pcie_list_size(); ++ if (size == 0) { ++ rc = ssam_init_device_pcie_list(); ++ if (rc != 0) { ++ SPDK_ERRLOG("init device_pcie_list failed\n"); ++ 
spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
++ spdk_strerror(-rc));
++ return;
++ }
++ }
++
++ w = spdk_jsonrpc_begin_result(request);
++ spdk_json_write_object_begin(w);
++
++ ssam_dump_device_pcie_list(w);
++
++ spdk_json_write_object_end(w);
++ spdk_jsonrpc_end_result(request, w);
++ return;
++}
++
++SPDK_RPC_REGISTER("device_pcie_list", rpc_ssam_device_pcie_list, SPDK_RPC_RUNTIME)
++
++struct rpc_fs_controller_create {
++ char *dbdf;
++ char *fs_name;
++ char *name;
++ uint16_t max_threads;
++};
++
++static const struct spdk_json_object_decoder g_rpc_fs_controller_create[] = {
++ {"dbdf", offsetof(struct rpc_fs_controller_create, dbdf), spdk_json_decode_string},
++ {"fs_name", offsetof(struct rpc_fs_controller_create, fs_name), spdk_json_decode_string},
++ {"name", offsetof(struct rpc_fs_controller_create, name), spdk_json_decode_string},
++ {"max_threads", offsetof(struct rpc_fs_controller_create, max_threads), spdk_json_decode_uint16, true},
++};
++
++static void
++free_rpc_ssam_fs_controller_create(struct rpc_fs_controller_create *req)
++{
++ if (req->dbdf != NULL) {
++ free(req->dbdf);
++ req->dbdf = NULL;
++ }
++ if (req->fs_name != NULL) {
++ free(req->fs_name);
++ req->fs_name = NULL;
++ }
++ if (req->name != NULL) {
++ free(req->name);
++ req->name = NULL;
++ }
++}
++
++static void
++rpc_ssam_fs_controller_create(struct spdk_jsonrpc_request *request,
++ const struct spdk_json_val *params)
++{
++ struct ssam_fs_construct_info info = {
++ .gfunc_id = SPDK_INVALID_GFUNC_ID,
++ .fs_name = NULL,
++ .name = NULL,
++ .max_threads = SSAM_FS_DEFAULT_THREADS,
++ };
++ struct rpc_fs_controller_create req = {
++ .max_threads = SPDK_INVALID_MAX_THREADS,
++ };
++ int rc;
++
++ if (params == NULL) {
++ SPDK_ERRLOG("rpc_ssam_fs_controller_create params null\n");
++ rc = -EINVAL;
++ goto invalid;
++ }
++
++ rc = spdk_json_decode_object(params, g_rpc_fs_controller_create,
++ SPDK_COUNTOF(g_rpc_fs_controller_create), &req);
++ if (rc != 0) {
++ SPDK_ERRLOG("spdk_json_decode_object failed\n");
++ rc = -EINVAL;
++ goto invalid;
++ }
++
++ rc = ssam_rpc_para_check_name(req.name);
++ if (rc != 0) {
++ SPDK_ERRLOG("controller name(%s) already exists\n", req.name);
++ goto invalid;
++ }
++
++ rc = ssam_rpc_get_gfunc_id_by_dbdf(req.dbdf, &info.gfunc_id);
++ if (rc != 0) {
++ goto invalid;
++ }
++
++ rc = ssam_rpc_para_check_type(info.gfunc_id, SSAM_DEVICE_VIRTIO_FS);
++ if (rc != 0) {
++ goto invalid;
++ }
++
++ info.fs_name = req.fs_name;
++ info.dbdf = req.dbdf;
++ info.name = req.name;
++ if (req.max_threads != SPDK_INVALID_MAX_THREADS) {
++ if (req.max_threads == 0 || req.max_threads > ssam_get_core_num()) {
++ SPDK_ERRLOG("max_threads out of range, should be between 1 and %u\n", ssam_get_core_num());
++ rc = -ERANGE;
++ goto invalid;
++ }
++ info.max_threads = req.max_threads;
++ }
++
++ rc = ssam_fs_construct(&info);
++ if (rc != 0) {
++ SPDK_ERRLOG("construct fs controller failed\n");
++ goto invalid;
++ }
++ free_rpc_ssam_fs_controller_create(&req);
++ spdk_jsonrpc_send_bool_response(request, true);
++ return;
++
++invalid:
++ free_rpc_ssam_fs_controller_create(&req);
++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
++ spdk_strerror(-rc));
++ return;
++}
++
++SPDK_RPC_REGISTER("fs_controller_create", rpc_ssam_fs_controller_create, SPDK_RPC_RUNTIME)
++
++struct rpc_fs_controller_delete {
++ char *name;
++ bool force;
++};
++
++static const struct spdk_json_object_decoder g_rpc_fs_controller_delete[] = {
++ {"name", offsetof(struct
rpc_fs_controller_delete, name), spdk_json_decode_string}, ++ {"force", offsetof(struct rpc_fs_controller_delete, force), spdk_json_decode_bool}, ++}; ++ ++static void ++free_rpc_ssam_fs_controller_delete(struct rpc_fs_controller_delete *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static void ++rpc_ssam_fs_controller_delete(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_fs_controller_delete req = {0}; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_fs_controller_delete params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_fs_controller_delete, ++ SPDK_COUNTOF(g_rpc_fs_controller_delete), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = ssam_fs_destory(req.name, req.force, request, rpc_ssam_send_response_cb); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_fs_controller_delete(&req); ++ return; ++ ++invalid: ++ free_rpc_ssam_fs_controller_delete(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("fs_controller_delete", rpc_ssam_fs_controller_delete, SPDK_RPC_RUNTIME) ++ ++struct rpc_fs_controller_list { ++ char *name; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_fs_controller_list[] = { ++ {"name", offsetof(struct rpc_fs_controller_list, name), spdk_json_decode_string, true}, ++}; ++ ++static void ++free_rpc_ssam_fs_controller_list(struct rpc_fs_controller_list *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static void ++rpc_ssam_fs_controller_list(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_fs_controller_list req = { ++ .name = NULL, ++ }; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ ++ if (params != NULL) { ++ rc = spdk_json_decode_object(params, g_rpc_fs_controller_list, ++ SPDK_COUNTOF(g_rpc_fs_controller_list), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ } ++ ++ if (req.name != NULL) { ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ } ++ ++ rc = rpc_ssam_show_fs_controllers(request, gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_fs_controller_list(&req); ++ return; ++ ++invalid: ++ free_rpc_ssam_fs_controller_list(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, ++ spdk_strerror(-rc)); ++ return; ++} ++ ++SPDK_RPC_REGISTER("fs_controller_list", rpc_ssam_fs_controller_list, SPDK_RPC_RUNTIME) ++ ++struct rpc_controller_get_fs_iostat { ++ char *name; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_controller_get_fs_iostat_decoder[] = { ++ {"name", offsetof(struct rpc_controller_get_fs_iostat, name), spdk_json_decode_string, true}, ++}; ++ ++static void ++free_rpc_ssam_controller_get_fs_iostat(struct rpc_controller_get_fs_iostat *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static int ++rpc_ssam_show_fs_iostat(struct spdk_jsonrpc_request *request, uint16_t gfunc_id) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_json_write_ctx *w = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ w = spdk_jsonrpc_begin_result(request); ++ 
spdk_json_write_object_begin(w); ++ spdk_json_write_named_array_begin(w, "fs_controllers"); ++ ssam_lock(); ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ while (smsession != NULL) { ++ if (smsession->backend->show_iostat_json != NULL && smsession->backend->type == VIRTIO_TYPE_FS && ++ (gfunc_id == SPDK_INVALID_GFUNC_ID || smsession->gfunc_id == gfunc_id)) { ++ smsession->backend->show_iostat_json(smsession, SPDK_SSAM_SCSI_CTRLR_MAX_DEVS, w); ++ } ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ ++ ssam_unlock(); ++ spdk_json_write_array_end(w); ++ spdk_json_write_object_end(w); ++ spdk_jsonrpc_end_result(request, w); ++ return 0; ++} ++ ++static void ++rpc_ssam_controller_get_fs_iostat(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_controller_get_fs_iostat req = { ++ .name = NULL, ++ }; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ ++ if (params != NULL) { ++ rc = spdk_json_decode_object(params, g_rpc_controller_get_fs_iostat_decoder, ++ SPDK_COUNTOF(g_rpc_controller_get_fs_iostat_decoder), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ } ++ ++ if (req.name != NULL) { ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ } ++ ++ rc = rpc_ssam_show_fs_iostat(request, gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_controller_get_fs_iostat(&req); ++ return; ++ ++invalid: ++ free_rpc_ssam_controller_get_fs_iostat(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("fs_device_iostat", rpc_ssam_controller_get_fs_iostat, SPDK_RPC_RUNTIME) ++ ++SPDK_LOG_REGISTER_COMPONENT(ssam_rpc) +diff --git a/lib/ssam/ssam_scsi.c b/lib/ssam/ssam_scsi.c +new file mode 100644 +index 0000000..4cfd453 +--- /dev/null ++++ b/lib/ssam/ssam_scsi.c +@@ -0,0 +1,2444 @@ ++/*- ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2021-2022. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "linux/virtio_scsi.h" ++ ++#include "spdk/stdinc.h" ++ ++ ++#include "spdk/likely.h" ++#include "spdk/scsi_spec.h" ++#include "spdk/env.h" ++#include "spdk/scsi.h" ++#include "spdk/ssam.h" ++#include "spdk/string.h" ++#include "spdk/bdev_module.h" ++ ++#include "ssam_internal.h" ++ ++#define SESSION_STOP_POLLER_PERIOD 1000 ++#define IOV_HEADER_TAIL_NUM 2 ++#define PAYLOAD_SIZE_MAX (2048U * 2048) ++#define VMIO_TYPE_VIRTIO_SCSI_CTRL 4 ++#define SSAM_SPDK_SCSI_DEV_MAX_LUN 1 ++#define SSAM_SENSE_DATE_LEN 32 ++ ++/* Features supported by virtio-scsi lib. */ ++#define SPDK_SSAM_SCSI_FEATURES (SPDK_SSAM_FEATURES | \ ++ (1ULL << VIRTIO_SCSI_F_INOUT) | \ ++ (1ULL << VIRTIO_SCSI_F_HOTPLUG) | \ ++ (1ULL << VIRTIO_SCSI_F_CHANGE) | \ ++ (1ULL << VIRTIO_SCSI_F_T10_PI)) ++ ++/* Features that are specified in VIRTIO SCSI but currently not supported: ++ * - Live migration not supported yet ++ * - T10 PI ++ */ ++#define SPDK_SSAM_SCSI_DISABLED_FEATURES (SPDK_SSAM_DISABLED_FEATURES | \ ++ (1ULL << VIRTIO_SCSI_F_T10_PI)) ++ ++/* ssam-user-scsi support protocol features */ ++#define SPDK_SSAM_SCSI_PROTOCOL_FEATURES (1ULL << SSAM_USER_PROTOCOL_F_INFLIGHT_SHMFD) ++ ++enum spdk_scsi_dev_ssam_status { ++ /* Target ID is empty. */ ++ SSAM_SCSI_DEV_EMPTY, ++ ++ /* Target is still being added. */ ++ SSAM_SCSI_DEV_ADDING, ++ ++ /* Target ID occupied. */ ++ SSAM_SCSI_DEV_PRESENT, ++ ++ /* Target ID is occupied but removal is in progress. */ ++ SSAM_SCSI_DEV_REMOVING, ++ ++ /* In session - device (SCSI target) seen but removed. 
*/ ++ SSAM_SCSI_DEV_REMOVED, ++}; ++ ++struct ssam_scsi_stat { ++ uint64_t count; ++ uint64_t total_tsc; /* pre_dma <- -> post_return */ ++ uint64_t dma_tsc; /* pre_dma <- -> post_dma */ ++ uint64_t bdev_tsc; /* pre_bdev <- -> post_bdev */ ++ uint64_t bdev_submit_tsc; /* <- spdk_bdev_xxx -> */ ++ uint64_t complete_tsc; /* pre_return <- -> post_return */ ++ uint64_t internel_tsc; /* total_tsc - dma_tsc - bdev_tsc - complete_tsc */ ++ ++ uint64_t complete_read_ios; /* Number of successfully completed read requests */ ++ uint64_t err_read_ios; /* Number of failed completed read requests */ ++ uint64_t complete_write_ios; /* Number of successfully completed write requests */ ++ uint64_t err_write_ios; /* Number of failed completed write requests */ ++ uint64_t flush_ios; /* Total number of flush requests */ ++ uint64_t complete_flush_ios; /* Number of successfully completed flush requests */ ++ uint64_t err_flush_ios; /* Number of failed completed flush requests */ ++ uint64_t fatal_ios; ++ uint64_t io_retry; ++ ++ uint64_t start_count; ++ uint64_t dma_count; ++ uint64_t dma_complete_count; ++ uint64_t bdev_count; ++ uint64_t bdev_complete_count; ++}; ++ ++struct spdk_scsi_dev_io_state { ++ struct spdk_bdev_io_stat stat; ++ uint64_t submit_tsc; ++ struct ssam_scsi_stat scsi_stat; ++}; ++ ++/* Context for a SCSI target in a ssam device */ ++struct spdk_scsi_dev_ssam_state { ++ struct spdk_scsi_dev_io_state *io_stat[SSAM_SPDK_SCSI_DEV_MAX_LUN]; ++ struct spdk_scsi_dev *dev; ++ ++ enum spdk_scsi_dev_ssam_status status; ++ ++ uint64_t flight_io; ++}; ++ ++struct ssam_scsi_tgt_hotplug_ctx { ++ unsigned scsi_tgt_num; ++}; ++ ++struct spdk_ssam_scsi_session { ++ struct spdk_ssam_session smsession; ++ int ref; ++ bool registered; ++ struct spdk_poller *stop_poller; ++ struct spdk_scsi_dev_ssam_state scsi_dev_state[SPDK_SSAM_SCSI_CTRLR_MAX_DEVS]; ++ char *dbdf; ++}; ++ ++struct ssam_scsi_session_ctx { ++ struct spdk_ssam_scsi_session *ssmsession; ++ void **user_ctx; ++}; ++ ++struct ssam_scsi_task_stat { ++ uint64_t start_tsc; ++ uint64_t dma_start_tsc; ++ uint64_t dma_end_tsc; ++ uint64_t bdev_start_tsc; ++ uint64_t bdev_func_tsc; ++ uint64_t bdev_end_tsc; ++ uint64_t complete_start_tsc; ++ uint64_t complete_end_tsc; ++}; ++ ++struct spdk_ssam_scsi_task { ++ struct spdk_scsi_task scsi_task; ++ /* Returned status of I/O processing, it can be VIRTIO_BLK_S_OK, ++ * VIRTIO_BLK_S_IOERR or VIRTIO_BLK_S_UNSUPP ++ */ ++ union { ++ struct virtio_scsi_cmd_resp resp; ++ struct virtio_scsi_ctrl_tmf_resp tmf_resp; ++ }; ++ ++ /* Number of bytes processed successfully */ ++ uint32_t used_len; ++ ++ /* Records the amount of valid data in the struct iovec iovs array. */ ++ uint32_t iovcnt; ++ struct ssam_iovec iovs; ++ ++ /* If set, the task is currently used for I/O processing. 
*/ ++ bool used; ++ ++ /* For bdev io wait */ ++ struct spdk_ssam_scsi_session *ssmsession; ++ struct spdk_ssam_session_io_wait session_io_wait; ++ ++ /* ssam request data */ ++ struct ssam_request *io_req; ++ ++ uint16_t vq_idx; ++ uint16_t task_idx; ++ int32_t tgt_id; ++ struct spdk_ssam_session *smsession; ++ struct spdk_scsi_dev *scsi_dev; ++ struct ssam_scsi_task_stat task_stat; ++}; ++ ++struct ssam_add_tgt_ev_ctx { ++ char *bdev_name; ++ int tgt_num; ++}; ++ ++static void ssam_scsi_request_worker(struct spdk_ssam_session *smsession, void *arg); ++static void ssam_scsi_destroy_bdev_device(struct spdk_ssam_session *smsession, void *args); ++static void ssam_scsi_response_worker(struct spdk_ssam_session *smsession, void *arg); ++static int ssam_scsi_remove_session(struct spdk_ssam_session *smsession); ++static void ssam_scsi_remove_self(struct spdk_ssam_session *smsession); ++static void ssam_scsi_dump_info_json(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w); ++static void ssam_scsi_write_config_json(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w); ++static int ssam_scsi_get_config(struct spdk_ssam_session *smsession, uint8_t *config, ++ uint32_t len, uint16_t queues); ++static void ssam_scsi_show_iostat_json(struct spdk_ssam_session *smsession, uint32_t id, ++ struct spdk_json_write_ctx *w); ++static void ssam_scsi_clear_iostat_json(struct spdk_ssam_session *smsession); ++static void ssam_scsi_print_stuck_io_info(struct spdk_ssam_session *smsession); ++static void ssam_scsi_req_complete(struct spdk_ssam_dev *smdev, struct ssam_request *io_req, ++ uint8_t status); ++static struct spdk_bdev *ssam_scsi_get_bdev(struct spdk_ssam_session *smsession, uint32_t id); ++ ++static void ssam_free_scsi_task_pool(struct spdk_ssam_scsi_session *ssmsession); ++static int ssam_scsi_dev_hot_remove_tgt(struct spdk_ssam_session *smsession, void **_ctx); ++static void ssam_scsi_process_io_task(struct spdk_ssam_session *smsession, ++ struct spdk_ssam_scsi_task *task); ++static int ssam_scsi_task_iovs_memory_get(struct spdk_ssam_scsi_task *task, uint32_t payload_size); ++static void ssam_scsi_submit_io_task(struct spdk_ssam_scsi_task *task); ++static void ssam_scsi_destruct_tgt(struct spdk_ssam_scsi_session *ssmsession, int scsi_tgt_num); ++ ++static const struct spdk_ssam_session_backend g_ssam_scsi_session_backend = { ++ .type = VIRTIO_TYPE_SCSI, ++ .request_worker = ssam_scsi_request_worker, ++ .destroy_bdev_device = ssam_scsi_destroy_bdev_device, ++ .response_worker = ssam_scsi_response_worker, ++ .remove_session = ssam_scsi_remove_session, ++ .remove_self = ssam_scsi_remove_self, ++ .print_stuck_io_info = ssam_scsi_print_stuck_io_info, ++ .dump_info_json = ssam_scsi_dump_info_json, ++ .write_config_json = ssam_scsi_write_config_json, ++ .ssam_get_config = ssam_scsi_get_config, ++ .show_iostat_json = ssam_scsi_show_iostat_json, ++ .clear_iostat_json = ssam_scsi_clear_iostat_json, ++ .get_bdev = ssam_scsi_get_bdev, ++}; ++ ++static void ++ssam_scsi_task_stat_tick(uint64_t *tsc) ++{ ++#ifdef PERF_STAT ++ *tsc = spdk_get_ticks(); ++#endif ++ return; ++} ++ ++static void ++ssam_scsi_stat_statistics(struct spdk_ssam_scsi_task *task) ++{ ++#ifdef PERF_STAT ++ if (task->scsi_task.lun == NULL || task->io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL || ++ task->task_stat.bdev_func_tsc == 0 || task->task_stat.bdev_end_tsc == 0) { ++ return; ++ } ++ ++ int32_t lun_id = spdk_scsi_lun_get_id(task->scsi_task.lun); ++ struct ssam_scsi_stat *scsi_stat = ++ 
&task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[lun_id]->scsi_stat; ++ ++ uint64_t dma_tsc = task->task_stat.dma_end_tsc - task->task_stat.dma_start_tsc; ++ uint64_t bdev_tsc = task->task_stat.bdev_end_tsc - task->task_stat.bdev_start_tsc; ++ uint64_t bdev_submit_tsc = task->task_stat.bdev_func_tsc - task->task_stat.bdev_start_tsc; ++ uint64_t complete_tsc = task->task_stat.complete_end_tsc - task->task_stat.complete_start_tsc; ++ uint64_t total_tsc = task->task_stat.complete_end_tsc - task->task_stat.start_tsc; ++ ++ struct ssam_io_message *io_cmd = &task->io_req->req.cmd; ++ if (io_cmd->writable) { /* read io */ ++ if (task->scsi_task.status == SPDK_SCSI_STATUS_GOOD) { ++ scsi_stat->complete_read_ios++; ++ } else { ++ scsi_stat->err_read_ios++; ++ } ++ } else { ++ if (task->scsi_task.status == SPDK_SCSI_STATUS_GOOD) { ++ scsi_stat->complete_write_ios++; ++ } else { ++ scsi_stat->err_write_ios++; ++ } ++ } ++ ++ scsi_stat->dma_tsc += dma_tsc; ++ scsi_stat->bdev_tsc += bdev_tsc; ++ scsi_stat->bdev_submit_tsc += bdev_submit_tsc; ++ scsi_stat->complete_tsc += complete_tsc; ++ scsi_stat->total_tsc += total_tsc; ++ scsi_stat->internel_tsc += total_tsc - complete_tsc - bdev_tsc - dma_tsc; ++ scsi_stat->count += 1; ++#endif ++} ++ ++static uint32_t ++ssam_scsi_tgtid_to_lunid(uint32_t tgt_id) ++{ ++ return (((tgt_id) << 0x8) | SSAM_VIRTIO_SCSI_LUN_ID); ++} ++ ++static int ++ssam_scsi_get_config(struct spdk_ssam_session *smsession, uint8_t *config, ++ uint32_t len, uint16_t queues) ++{ ++ struct virtio_scsi_config scsi_cfg; ++ scsi_cfg.num_queues = 0x80; ++ scsi_cfg.seg_max = 0x6f; ++ scsi_cfg.max_sectors = 0x1ff; ++ scsi_cfg.cmd_per_lun = 0x80; ++ scsi_cfg.event_info_size = 0; ++ scsi_cfg.sense_size = 0x60; ++ scsi_cfg.cdb_size = 0x20; ++ scsi_cfg.max_channel = 0; ++ scsi_cfg.max_target = SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; ++ scsi_cfg.max_lun = 0xff; ++ ++ memcpy(config, (void *)&scsi_cfg, sizeof(struct virtio_scsi_config)); ++ return 0; ++} ++ ++static int ++ssam_scsi_send_event(struct spdk_ssam_session *smsession, unsigned scsi_dev_num, ++ uint32_t event, uint32_t reason) ++{ ++ struct virtio_scsi_event vscsi_event = {0}; ++ int ret; ++ ++ vscsi_event.event = event; ++ vscsi_event.reason = reason; ++ ++ vscsi_event.lun[0] = 1; ++ vscsi_event.lun[0x1] = (uint8_t)scsi_dev_num; ++ vscsi_event.lun[0x2] = 0; ++ vscsi_event.lun[0x3] = 0; ++ memset(&vscsi_event.lun[0x4], 0, 0x4); ++ ++ ret = ssam_send_action(smsession->gfunc_id, SSAM_FUNCTION_ACTION_SCSI_EVENT, ++ (const void *)&vscsi_event, sizeof(struct virtio_scsi_event)); ++ if (ret < 0) { ++ SPDK_ERRLOG("%s: SCSI target %d send event %u(reason %u) failed: %s.\n", ++ smsession->name, scsi_dev_num, event, reason, strerror(-ret)); ++ } ++ return ret; ++} ++ ++static void ++ssam_scsi_stop_cpl_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ spdk_ssam_session_rsp_fn rsp_fn = smsession->rsp_fn; ++ void *rsp_ctx = smsession->rsp_ctx; ++ ++ SPDK_NOTICELOG("SCSI controller %s deleted\n", smsession->name); ++ ++ if (smsession->name != NULL) { ++ free(smsession->name); ++ smsession->name = NULL; ++ } ++ ++ if (ssmsession->dbdf != NULL) { ++ free(ssmsession->dbdf); ++ ssmsession->dbdf = NULL; ++ } ++ ++ ssam_set_session_be_freed(ctx); ++ memset(ssmsession, 0, sizeof(*ssmsession)); ++ free(ssmsession); ++ ++ if (rsp_fn != NULL) { ++ rsp_fn(rsp_ctx, 0); ++ rsp_fn = NULL; ++ } ++} ++ ++static void ++ssam_scsi_destroy_session(struct ssam_scsi_session_ctx 
*ctx) ++{ ++ struct spdk_ssam_session *smsession = &ctx->ssmsession->smsession; ++ struct spdk_ssam_scsi_session *ssmsession = ctx->ssmsession; ++ ++ if (smsession->task_cnt > 0) { ++ return; ++ } ++ ++ if (ssmsession->ref > 0) { ++ return; ++ } ++ ++ ssam_session_destroy(smsession); ++ ++ ssmsession->registered = false; ++ spdk_poller_unregister(&ssmsession->stop_poller); ++ ssam_free_scsi_task_pool(ssmsession); ++ ssam_session_stop_done(&ssmsession->smsession, 0, ctx->user_ctx); ++ free(ctx); ++ ++ return; ++} ++ ++static int ++ssam_scsi_destroy_session_poller_cb(void *arg) ++{ ++ struct ssam_scsi_session_ctx *ctx = arg; ++ ++ if (ssam_trylock() != 0) { ++ return SPDK_POLLER_BUSY; ++ } ++ ++ ssam_scsi_destroy_session(ctx); ++ ++ ssam_unlock(); ++ ++ return SPDK_POLLER_BUSY; ++} ++ ++static int ++ssam_scsi_stop_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct ssam_scsi_session_ctx *_ctx = ++ (struct ssam_scsi_session_ctx *)calloc(1, sizeof(struct ssam_scsi_session_ctx)); ++ ++ if (_ctx == NULL) { ++ SPDK_ERRLOG("%s: calloc scsi session ctx error.\n", smsession->name); ++ return -ENOMEM; ++ } ++ ++ _ctx->ssmsession = ssmsession; ++ _ctx->user_ctx = ctx; ++ ++ ssmsession->stop_poller = SPDK_POLLER_REGISTER(ssam_scsi_destroy_session_poller_cb, ++ _ctx, SESSION_STOP_POLLER_PERIOD); ++ if (ssmsession->stop_poller == NULL) { ++ SPDK_ERRLOG("%s: ssam_destroy_session_poller_cb start failed.\n", smsession->name); ++ ssam_session_stop_done(smsession, -EBUSY, ctx); ++ free(_ctx); ++ return -EBUSY; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_scsi_stop(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = true, ++ .need_rsp = true, ++ }; ++ return ssam_send_event_to_session(smsession, ssam_scsi_stop_cb, ssam_scsi_stop_cpl_cb, ++ send_event_flag, NULL); ++} ++ ++/* sync interface for hot-remove */ ++static void ++ssam_scsi_remove_self(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ /* no need error */ ++ if (ssmsession->ref > 0) { ++ return; /* still have targets */ ++ } ++ ++ SPDK_NOTICELOG("%s: is being freed\n", smsession->name); ++ ++ ssmsession->registered = false; ++ ssam_free_scsi_task_pool(ssmsession); ++ ++ ssam_sessions_remove(smsession->smdev->smsessions, smsession); ++ ++ if (smsession->smdev->active_session_num > 0) { ++ smsession->smdev->active_session_num--; ++ } ++ smsession->smdev = NULL; ++ /* free smsession */ ++ free(smsession->name); ++ free(ssmsession->dbdf); ++ free(ssmsession); ++} ++ ++/* async interface */ ++static int ++ssam_scsi_remove_session(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ int ret; ++ ++ if (smsession->registered && ssmsession->ref != 0) { ++ SPDK_ERRLOG("%s: SCSI target %d is still present.\n", smsession->name, ssmsession->ref); ++ return -EBUSY; ++ } ++ ++ ret = ssam_scsi_stop(smsession); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static struct spdk_scsi_dev * ++ssam_scsi_dev_get_tgt(struct spdk_ssam_scsi_session *ssmsession, uint8_t num) ++{ ++ if (ssmsession == NULL) { ++ SPDK_ERRLOG("ssmsession is null.\n"); ++ return NULL; ++ } ++ if (num >= SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("%s: tgt num %u over %u.\n", ssmsession->smsession.name, num, ++ SPDK_SSAM_SCSI_CTRLR_MAX_DEVS); ++ 
return NULL; ++ } ++ if (ssmsession->scsi_dev_state[num].status != SSAM_SCSI_DEV_PRESENT) { ++ return NULL; ++ } ++ ++ if (ssmsession->scsi_dev_state[num].dev == NULL) { ++ SPDK_ERRLOG("%s: no tgt num %u device.\n", ssmsession->smsession.name, num); ++ return NULL; ++ } ++ return ssmsession->scsi_dev_state[num].dev; ++} ++ ++static void ++ssam_scsi_dump_device_info(struct spdk_ssam_session *smsession, struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev *sdev; ++ struct spdk_scsi_lun *lun; ++ int32_t tgt_id; ++ ++ spdk_json_write_named_array_begin(w, "scsi_targets"); ++ for (tgt_id = 0; tgt_id < SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; tgt_id++) { ++ sdev = ssam_scsi_dev_get_tgt(ssmsession, tgt_id); ++ if (!sdev) { ++ continue; ++ } ++ ++ spdk_json_write_object_begin(w); ++ ++ spdk_json_write_named_uint32(w, "scsi_target_num", tgt_id); ++ spdk_json_write_named_uint32(w, "id", spdk_scsi_dev_get_id(sdev)); ++ spdk_json_write_named_string(w, "target_name", spdk_scsi_dev_get_name(sdev)); ++ lun = spdk_scsi_dev_get_lun(sdev, 0); ++ if (!lun) { ++ continue; ++ } ++ spdk_json_write_named_string(w, "bdev_name", spdk_scsi_lun_get_bdev_name(lun)); ++ ++ spdk_json_write_object_end(w); ++ } ++ ++ spdk_json_write_array_end(w); ++} ++ ++static void ++ssam_scsi_dump_info_json(struct spdk_ssam_session *smsession, struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ spdk_json_write_object_begin(w); ++ ++ spdk_json_write_named_string(w, "dbdf", ssmsession->dbdf); ++ spdk_json_write_named_string(w, "name", ssam_session_get_name(smsession)); ++ spdk_json_write_named_uint32(w, "function_id", (uint32_t)smsession->gfunc_id); ++ spdk_json_write_named_uint32(w, "queues", (uint32_t)smsession->max_queues); ++ spdk_json_write_named_string(w, "ctrlr", ssam_dev_get_name(smsession->smdev)); ++ spdk_json_write_named_string_fmt(w, "cpumask", "0x%s", ++ spdk_cpuset_fmt(spdk_thread_get_cpumask(smsession->smdev->thread))); ++ ++ ssam_scsi_dump_device_info(smsession, w); ++ ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_scsi_write_config_json(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev *sdev; ++ struct spdk_scsi_lun *lun; ++ int32_t tgt_id; ++ ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "method", "create_scsi_controller"); ++ ++ spdk_json_write_named_object_begin(w, "params"); ++ spdk_json_write_named_string(w, "dbdf", ssmsession->dbdf); ++ spdk_json_write_named_string(w, "name", smsession->name); ++ spdk_json_write_object_end(w); ++ ++ spdk_json_write_object_end(w); ++ ++ for (tgt_id = 0; tgt_id < SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; tgt_id++) { ++ sdev = ssam_scsi_dev_get_tgt(ssmsession, tgt_id); ++ if (!sdev) { ++ continue; ++ } ++ ++ lun = spdk_scsi_dev_get_lun(sdev, 0); ++ if (!lun) { ++ SPDK_ERRLOG("%s: no lun, continue.\n", smsession->name); ++ continue; ++ } ++ ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "method", "scsi_controller_add_target"); ++ ++ spdk_json_write_named_object_begin(w, "params"); ++ spdk_json_write_named_string(w, "name", smsession->name); ++ spdk_json_write_named_uint32(w, "scsi_tgt_num", tgt_id); ++ ++ spdk_json_write_named_string(w, "bdev_name", spdk_scsi_lun_get_bdev_name(lun)); ++ spdk_json_write_object_end(w); ++ ++ 
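++ /*
++ * Sketch of the per-target entry emitted by this loop, inferred from the
++ * calls above (the method and key names come straight from this function):
++ *   { "method": "scsi_controller_add_target",
++ *     "params": { "name": "<controller>", "scsi_tgt_num": <tgt_id>,
++ *                 "bdev_name": "<bdev>" } }
++ * Replaying the saved config thus re-creates the controller first and then
++ * re-adds each target.
++ */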
spdk_json_write_object_end(w); ++ } ++} ++ ++static void ++ssam_scsi_show_tgt_iostat_json(struct spdk_ssam_scsi_session *ssmsession, ++ struct spdk_json_write_ctx *w, int32_t tgt_id, struct spdk_scsi_dev *sdev) ++{ ++ struct spdk_scsi_dev_io_state *io_stat; ++ struct spdk_scsi_lun *lun; ++ struct ssam_scsi_stat scsi_stat; ++ uint64_t ticks_hz = spdk_get_ticks_hz(); ++ uint64_t count; ++ uint64_t poll_count; ++ ++ lun = spdk_scsi_dev_get_lun(sdev, 0); ++ if (lun == NULL) { ++ return; ++ } ++ ++ io_stat = ssmsession->scsi_dev_state[tgt_id].io_stat[0]; ++ if (io_stat == NULL) { ++ SPDK_ERRLOG("No scsi iostat, tgt_id %d\n", tgt_id); ++ return; ++ } ++ ++ spdk_json_write_object_begin(w); ++ ++ spdk_json_write_named_uint32(w, "scsi_dev_num", tgt_id); ++ spdk_json_write_named_uint32(w, "id", spdk_scsi_dev_get_id(sdev)); ++ spdk_json_write_named_string(w, "target_name", spdk_scsi_dev_get_name(sdev)); ++ ++ memcpy(&scsi_stat, &io_stat->scsi_stat, sizeof(struct ssam_scsi_stat)); ++ ++ spdk_json_write_named_int32(w, "id", spdk_scsi_lun_get_id(lun)); ++ spdk_json_write_named_string(w, "bdev_name", spdk_scsi_lun_get_bdev_name(lun)); ++ spdk_json_write_named_uint64(w, "bytes_read", io_stat->stat.bytes_read); ++ spdk_json_write_named_uint64(w, "num_read_ops", io_stat->stat.num_read_ops); ++ spdk_json_write_named_uint64(w, "bytes_written", io_stat->stat.bytes_written); ++ spdk_json_write_named_uint64(w, "num_write_ops", io_stat->stat.num_write_ops); ++ spdk_json_write_named_uint64(w, "read_latency_ticks", io_stat->stat.read_latency_ticks); ++ spdk_json_write_named_uint64(w, "write_latency_ticks", io_stat->stat.write_latency_ticks); ++ ++ spdk_json_write_named_uint64(w, "complete_read_ios", scsi_stat.complete_read_ios); ++ spdk_json_write_named_uint64(w, "err_read_ios", scsi_stat.err_read_ios); ++ spdk_json_write_named_uint64(w, "complete_write_ios", scsi_stat.complete_write_ios); ++ spdk_json_write_named_uint64(w, "err_write_ios", scsi_stat.err_write_ios); ++ spdk_json_write_named_uint64(w, "flush_ios", scsi_stat.flush_ios); ++ spdk_json_write_named_uint64(w, "complete_flush_ios", scsi_stat.complete_flush_ios); ++ spdk_json_write_named_uint64(w, "err_flush_ios", scsi_stat.err_flush_ios); ++ spdk_json_write_named_uint64(w, "fatal_ios", scsi_stat.fatal_ios); ++ spdk_json_write_named_uint64(w, "io_retry", scsi_stat.io_retry); ++ ++ spdk_json_write_named_uint64(w, "start_count", scsi_stat.start_count); ++ spdk_json_write_named_uint64(w, "dma_count", scsi_stat.dma_count); ++ spdk_json_write_named_uint64(w, "dma_complete_count", scsi_stat.dma_complete_count); ++ spdk_json_write_named_uint64(w, "bdev_count", scsi_stat.bdev_count); ++ spdk_json_write_named_uint64(w, "bdev_complete_count", scsi_stat.bdev_complete_count); ++ spdk_json_write_named_uint64(w, "flight_io", ssmsession->scsi_dev_state[tgt_id].flight_io); ++ ++ if (scsi_stat.count == 0) { ++ count = 1; ++ } else { ++ count = scsi_stat.count; ++ } ++ ++ if (ssmsession->smsession.smdev->stat.poll_count == 0) { ++ poll_count = 1; ++ } else { ++ poll_count = ssmsession->smsession.smdev->stat.poll_count; ++ } ++ ++ spdk_json_write_named_string_fmt(w, "poll_lat", "%.9f", ++ (float)ssmsession->smsession.smdev->stat.poll_tsc / poll_count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "total_lat", "%.9f", ++ (float)scsi_stat.total_tsc / count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "dma_lat", "%.9f", (float)scsi_stat.dma_tsc / count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "bdev_lat", "%.9f", ++ (float)scsi_stat.bdev_tsc / count / 
ticks_hz); ++ spdk_json_write_named_string_fmt(w, "bdev_submit_lat", "%.9f", ++ (float)scsi_stat.bdev_submit_tsc / count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "complete_lat", "%.9f", ++ (float)scsi_stat.complete_tsc / count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "internal_lat", "%.9f", ++ (float)scsi_stat.internel_tsc / count / ticks_hz); ++ ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_scsi_show_iostat_json(struct spdk_ssam_session *smsession, uint32_t id, ++ struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev *sdev; ++ int32_t tgt_id; ++ ++ if (id != SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ sdev = ssam_scsi_dev_get_tgt(ssmsession, id); ++ if (sdev != NULL) { ++ ssam_scsi_show_tgt_iostat_json(ssmsession, w, id, sdev); ++ } else { ++ spdk_json_write_object_begin(w); ++ spdk_json_write_object_end(w); ++ } ++ return; ++ } ++ ++ spdk_json_write_object_begin(w); ++ ++ spdk_json_write_named_uint32(w, "function_id", smsession->gfunc_id); ++ ++ spdk_json_write_named_array_begin(w, "scsi_target"); ++ ++ for (tgt_id = 0; tgt_id < SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; tgt_id++) { ++ sdev = ssam_scsi_dev_get_tgt(ssmsession, tgt_id); ++ if (!sdev) { ++ continue; ++ } ++ ssam_scsi_show_tgt_iostat_json(ssmsession, w, tgt_id, sdev); ++ } ++ ++ spdk_json_write_array_end(w); ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_scsi_clear_iostat_json(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev_io_state *io_stat; ++ int32_t tgt_id; ++ int32_t lun_id; ++ for (tgt_id = 0; tgt_id < SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; tgt_id++) { ++ for (lun_id = 0; lun_id < SSAM_SPDK_SCSI_DEV_MAX_LUN; lun_id++) { ++ io_stat = ssmsession->scsi_dev_state[tgt_id].io_stat[lun_id]; ++ if (io_stat == NULL) { ++ continue; ++ } ++ memset(io_stat, 0, sizeof(struct spdk_scsi_dev_io_state)); ++ } ++ } ++ return; ++} ++ ++static struct spdk_bdev * ++ssam_scsi_get_bdev(struct spdk_ssam_session *smsession, uint32_t tgt_id) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev *scsi_dev; ++ struct spdk_scsi_lun *scsi_lun = NULL; ++ const char *bdev_name = NULL; ++ if (tgt_id >= SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("%s: tgt %d invalid\n", smsession->name, tgt_id); ++ return NULL; ++ } ++ if (ssmsession->scsi_dev_state[tgt_id].dev == NULL) { ++ SPDK_ERRLOG("%s: tgt %d not be created\n", smsession->name, tgt_id); ++ return NULL; ++ } ++ ++ scsi_dev = ssmsession->scsi_dev_state[tgt_id].dev; ++ /* lun id use 0 */ ++ scsi_lun = spdk_scsi_dev_get_lun(scsi_dev, 0); ++ if (scsi_lun == NULL) { ++ return NULL; ++ } ++ bdev_name = spdk_scsi_lun_get_bdev_name(scsi_lun); ++ if (bdev_name == NULL) { ++ return NULL; ++ } ++ return spdk_bdev_get_by_name(bdev_name); ++} ++ ++static int ++ssam_scsi_iostat_construct(struct spdk_ssam_scsi_session *ssmsession, int32_t tgt_id, ++ int *lun_id_list, int num_luns) ++{ ++ struct spdk_scsi_dev_io_state *io_stat; ++ int32_t lun_id; ++ int i; ++ ++ for (i = 0; i < num_luns; i++) { ++ lun_id = lun_id_list[i]; ++ io_stat = ssmsession->scsi_dev_state[tgt_id].io_stat[lun_id]; ++ if (io_stat != NULL) { ++ SPDK_ERRLOG("io_stat with tgt %d lun %d already exist\n", tgt_id, lun_id); ++ return -EEXIST; ++ } ++ ++ io_stat = calloc(1, sizeof(*io_stat)); ++ if (io_stat == NULL) { ++ SPDK_ERRLOG("Could not allocate 
io_stat for tgt %d lun %d\n", tgt_id, lun_id); ++ return -ENOMEM; ++ } ++ ssmsession->scsi_dev_state[tgt_id].io_stat[lun_id] = io_stat; ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_scsi_iostat_destruct(struct spdk_scsi_dev_ssam_state *state) ++{ ++ int32_t lun_id; ++ ++ for (lun_id = 0; lun_id < SSAM_SPDK_SCSI_DEV_MAX_LUN; lun_id++) { ++ if (state->io_stat[lun_id] != NULL) { ++ free(state->io_stat[lun_id]); ++ state->io_stat[lun_id] = NULL; ++ } ++ } ++ ++ return; ++} ++ ++static void ++ssam_remove_scsi_tgt(struct spdk_ssam_scsi_session *ssmsession, unsigned scsi_tgt_num) ++{ ++ struct spdk_scsi_dev_ssam_state *state = &ssmsession->scsi_dev_state[scsi_tgt_num]; ++ struct spdk_ssam_session *smsession = &ssmsession->smsession; ++ spdk_ssam_session_rsp_fn rsp_fn = smsession->rsp_fn; ++ void *rsp_ctx = smsession->rsp_ctx; ++ ++ smsession->rsp_fn = NULL; ++ smsession->rsp_ctx = NULL; ++ ++ /* delete scsi port */ ++ spdk_scsi_dev_delete_port(state->dev, 0); ++ ++ /* destruct scsi dev */ ++ spdk_scsi_dev_destruct(state->dev, NULL, NULL); ++ state->dev = NULL; ++ ++ /* free iostat */ ++ ssam_scsi_iostat_destruct(state); ++ state->status = SSAM_SCSI_DEV_EMPTY; ++ ++ /* ref-- */ ++ if (ssmsession->ref > 0) { ++ ssmsession->ref--; ++ } else { ++ SPDK_ERRLOG("%s: ref internel error\n", smsession->name); ++ } ++ if (rsp_fn != NULL) { ++ rsp_fn(rsp_ctx, 0); ++ rsp_fn = NULL; ++ } ++ SPDK_NOTICELOG("%s: target %u is removed\n", smsession->name, scsi_tgt_num); ++} ++ ++static int ++ssam_scsi_get_payload_size(struct ssam_request *io_req, uint32_t *payload_size) ++{ ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ uint32_t payload = 0; ++ uint32_t first_vec; ++ uint32_t end_vec; ++ uint32_t loop; ++ ++ if (io_cmd->writable) { /* read io */ ++ /* FROM_DEV: [req][resp][write_buf]...[write_buf ]*, write_buf start at index 2 */ ++ first_vec = 2; ++ end_vec = io_cmd->iovcnt - 1; ++ } else { /* write io */ ++ first_vec = 1; ++ /* TO_DEV: [req][read_buf]...[read_buf][resp], read_buf last index is iovnt-2 */ ++ end_vec = io_cmd->iovcnt - 2; ++ } ++ ++ for (loop = first_vec; loop <= end_vec; loop++) { ++ if (spdk_unlikely((UINT32_MAX - io_cmd->iovs[loop].iov_len) < payload)) { ++ SPDK_ERRLOG("payload size overflow\n"); ++ return -1; ++ } ++ payload += io_cmd->iovs[loop].iov_len; ++ } ++ ++ if (spdk_unlikely(payload > PAYLOAD_SIZE_MAX)) { ++ SPDK_ERRLOG("payload size larger than %u, payload_size = %u\n", ++ PAYLOAD_SIZE_MAX, payload); ++ return -1; ++ } ++ ++ *payload_size = payload; ++ ++ return 0; ++} ++ ++static void ++ssam_session_io_resubmit(void *arg) ++{ ++ struct spdk_ssam_scsi_task *task = (struct spdk_ssam_scsi_task *)arg; ++ struct spdk_ssam_session *smsession = &task->ssmsession->smsession; ++ uint32_t payload_size = task->scsi_task.transfer_len; ++ int rc; ++ ++ rc = ssam_scsi_task_iovs_memory_get(task, payload_size); ++ if (rc != 0) { ++ ssam_session_insert_io_wait(smsession, &task->session_io_wait); ++ return; ++ } ++ ssam_scsi_process_io_task(smsession, task); ++} ++ ++static void ++ssam_scsi_task_init(struct spdk_ssam_scsi_task *task) ++{ ++ memset(&task->scsi_task, 0, sizeof(struct spdk_scsi_task)); ++ ++ task->used = true; ++ task->iovcnt = 0; ++ task->io_req = NULL; ++ task->session_io_wait.cb_fn = ssam_session_io_resubmit; ++ task->session_io_wait.cb_arg = task; ++} ++ ++static void ++ssam_scsi_task_dma_request_para(struct ssam_dma_request *data_request, ++ struct spdk_ssam_scsi_task *task, ++ uint32_t type, uint8_t status) ++{ ++ struct spdk_scsi_task *scsi_task = &task->scsi_task; 
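++ /*
++ * Layout sketch assumed by the DMA setup below, taken from the iov
++ * comments in this file rather than from a separate spec:
++ *   FROM_DEV (read):  [req][resp][write_buf]...[write_buf]  data iovs start at index 2
++ *   TO_DEV  (write):  [req][read_buf]...[read_buf][resp]    data iovs start at index 1
++ * In both cases IOV_HEADER_TAIL_NUM (2) non-data descriptors are excluded.
++ * The spdk_ssam_dma_cb context is copied bit-for-bit into the 64-bit 'cb'
++ * cookie and unpacked again in ssam_scsi_response_worker(); this scheme
++ * assumes the packed struct fits in a uint64_t.
++ */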
++ struct ssam_io_message *io_cmd = NULL; ++ struct spdk_ssam_dma_cb dma_cb = { ++ .status = status, ++ .req_dir = type, ++ .gfunc_id = task->io_req->gfunc_id, ++ .vq_idx = task->vq_idx, ++ .task_idx = task->task_idx ++ }; ++ ++ io_cmd = &task->io_req->req.cmd; ++ data_request->cb = (void *) * (uint64_t *)&dma_cb; ++ data_request->gfunc_id = task->io_req->gfunc_id; ++ data_request->flr_seq = task->io_req->flr_seq; ++ data_request->direction = type; ++ data_request->data_len = scsi_task->transfer_len; ++ if (type == SSAM_REQUEST_DATA_STORE) { ++ data_request->src = task->iovs.phys.sges; ++ data_request->src_num = task->iovcnt; ++ /* FROM_DEV: [req][resp][write_buf]...[write_buf ]*, write_buf start at index 2 */ ++ data_request->dst = &io_cmd->iovs[2]; ++ /* dma data iovs does not contain header and tail */ ++ data_request->dst_num = io_cmd->iovcnt - IOV_HEADER_TAIL_NUM; ++ } else if (type == SSAM_REQUEST_DATA_LOAD) { ++ data_request->src = &io_cmd->iovs[1]; ++ /* dma data iovs does not contain header and tail */ ++ data_request->src_num = io_cmd->iovcnt - IOV_HEADER_TAIL_NUM; ++ data_request->dst = task->iovs.phys.sges; ++ data_request->dst_num = task->iovcnt; ++ } ++} ++ ++static void ++ssam_scsi_task_finish(struct spdk_ssam_scsi_task *task) ++{ ++ struct spdk_ssam_session *smsession = task->smsession; ++ struct spdk_ssam_virtqueue *vq = &smsession->virtqueue[task->vq_idx]; ++ ++ if (smsession->task_cnt == 0) { ++ SPDK_ERRLOG("%s: task count internel error\n", smsession->name); ++ return; ++ } ++ ++ task->io_req = NULL; ++ ++ if (task->iovs.virt.sges[0].iov_base != NULL) { ++ ssam_mempool_free(smsession->mp, task->iovs.virt.sges[0].iov_base); ++ task->iovs.virt.sges[0].iov_base = NULL; ++ } ++ ++ memset(&task->iovs, 0, sizeof(task->iovs)); ++ ++ task->iovcnt = 0; ++ smsession->task_cnt--; ++ task->used = false; ++ vq->index[vq->index_l] = task->task_idx; ++ vq->index_l = (vq->index_l + 1) & 0xFF; ++ vq->use_num--; ++} ++ ++static int ++ssam_scsi_io_complete(struct spdk_ssam_dev *smdev, struct ssam_request *io_req, void *rsp_buf, ++ uint32_t rsp_len) ++{ ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ struct ssam_virtio_res *virtio_res = NULL; ++ struct ssam_io_response io_resp; ++ struct iovec io_vec; ++ int rc; ++ ++ memset(&io_resp, 0, sizeof(io_resp)); ++ io_resp.gfunc_id = io_req->gfunc_id; ++ io_resp.iocb_id = io_req->iocb_id; ++ io_resp.status = io_req->status; ++ io_resp.req = io_req; ++ io_resp.flr_seq = io_req->flr_seq; ++ ++ virtio_res = (struct ssam_virtio_res *)&io_resp.data; ++ virtio_res->iovs = &io_vec; ++ if (io_cmd->writable) { /* FROM_DEV: [req][resp][write_buf]...[write_buf ] */ ++ virtio_res->iovs->iov_base = io_cmd->iovs[1].iov_base; ++ virtio_res->iovs->iov_len = io_cmd->iovs[1].iov_len; ++ } else { /* TO_DEV: [req][read_buf]...[read_buf][resp] */ ++ virtio_res->iovs->iov_base = io_cmd->iovs[io_cmd->iovcnt - 1].iov_base; ++ virtio_res->iovs->iov_len = io_cmd->iovs[io_cmd->iovcnt - 1].iov_len; ++ } ++ virtio_res->iovcnt = 1; ++ virtio_res->rsp = rsp_buf; ++ virtio_res->rsp_len = rsp_len; ++ ++ rc = ssam_io_complete(smdev->tid, &io_resp); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ ssam_dev_io_dec(smdev); ++ return 0; ++} ++ ++struct ssam_scsi_req_complete_arg { ++ struct spdk_ssam_dev *smdev; ++ struct ssam_request *io_req; ++ uint8_t status; ++}; ++ ++static void ++ssam_scsi_req_complete_cb(void *arg) ++{ ++ struct ssam_scsi_req_complete_arg *cb_arg = (struct ssam_scsi_req_complete_arg *)arg; ++ struct virtio_scsi_cmd_resp resp = {0}; ++ struct 
virtio_scsi_ctrl_tmf_resp tmf_resp = {0}; ++ int rc; ++ ++ if (spdk_unlikely(cb_arg->io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL)) { ++ tmf_resp.response = cb_arg->status; ++ rc = ssam_scsi_io_complete(cb_arg->smdev, cb_arg->io_req, &tmf_resp, ++ sizeof(struct virtio_scsi_ctrl_tmf_resp)); ++ } else { ++ resp.response = cb_arg->status; ++ rc = ssam_scsi_io_complete(cb_arg->smdev, cb_arg->io_req, &resp, ++ sizeof(struct virtio_scsi_cmd_resp)); ++ } ++ ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_scsi_req_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(cb_arg->smdev, io_wait_r); ++ return; ++ } ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ++ssam_scsi_req_complete(struct spdk_ssam_dev *smdev, struct ssam_request *io_req, uint8_t status) ++{ ++ struct virtio_scsi_cmd_resp resp = {0}; ++ struct virtio_scsi_ctrl_tmf_resp tmf_resp = {0}; ++ int rc; ++ ++ if (spdk_unlikely(io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL)) { ++ tmf_resp.response = status; ++ rc = ssam_scsi_io_complete(smdev, io_req, &tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp)); ++ } else { ++ resp.response = status; ++ rc = ssam_scsi_io_complete(smdev, io_req, &resp, sizeof(struct virtio_scsi_cmd_resp)); ++ } ++ ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_scsi_req_complete_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_scsi_req_complete_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smdev; ++ cb_arg->io_req = io_req; ++ cb_arg->status = status; ++ io_wait_r->cb_fn = ssam_scsi_req_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smdev, io_wait_r); ++ } ++} ++ ++static void ++ssam_scsi_task_put(struct spdk_ssam_scsi_task *task) ++{ ++ memset(&task->resp, 0, sizeof(task->resp)); ++ if (task->io_req->type != VMIO_TYPE_VIRTIO_SCSI_CTRL) { ++ task->ssmsession->scsi_dev_state[task->tgt_id].flight_io--; ++ } ++ spdk_scsi_task_put(&task->scsi_task); ++} ++ ++static void ++ssam_scsi_submit_completion_cb(void *arg) ++{ ++ struct spdk_ssam_scsi_task *task = (struct spdk_ssam_scsi_task *)arg; ++ struct spdk_ssam_session *smsession = task->smsession; ++ int rc; ++ ++ if (spdk_unlikely(task->io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL)) { ++ rc = ssam_scsi_io_complete(smsession->smdev, task->io_req, &task->tmf_resp, ++ sizeof(struct virtio_scsi_ctrl_tmf_resp)); ++ } else { ++ rc = ssam_scsi_io_complete(smsession->smdev, task->io_req, &task->resp, ++ sizeof(struct virtio_scsi_cmd_resp)); ++ } ++ ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_scsi_submit_completion_cb; ++ io_wait_r->cb_arg = task; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ ssam_scsi_task_stat_tick(&task->task_stat.complete_end_tsc); ++ ssam_scsi_stat_statistics(task); ++ ++ /* after spdk_task_construct called, put task */ ++ ssam_scsi_task_put(task); ++} ++ ++static void ++ssam_scsi_submit_completion(struct spdk_ssam_scsi_task *task) ++{ 
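++ /*
++ * Completion path, roughly: the response prepared by the caller (see
++ * ssam_scsi_task_copy_resp()) is pushed to the device with
++ * ssam_scsi_io_complete(); if that call fails, the task is parked on the
++ * device's io-wait list and retried later from
++ * ssam_scsi_submit_completion_cb(); otherwise the statistics are updated
++ * (when PERF_STAT is enabled) and the task is returned to its virtqueue
++ * pool through ssam_scsi_task_put().
++ */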
++ struct spdk_ssam_session *smsession = task->smsession; ++ struct ssam_request *io_req = task->io_req; ++ int rc; ++ ++ ssam_scsi_task_stat_tick(&task->task_stat.complete_start_tsc); ++ if (spdk_unlikely(io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL)) { ++ rc = ssam_scsi_io_complete(smsession->smdev, io_req, &task->tmf_resp, ++ sizeof(struct virtio_scsi_ctrl_tmf_resp)); ++ } else { ++ rc = ssam_scsi_io_complete(smsession->smdev, io_req, &task->resp, ++ sizeof(struct virtio_scsi_cmd_resp)); ++ } ++ ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_scsi_submit_completion_cb; ++ io_wait_r->cb_arg = task; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ ssam_scsi_task_stat_tick(&task->task_stat.complete_end_tsc); ++ ssam_scsi_stat_statistics(task); ++ ++ /* after spdk_task_construct called, put task */ ++ ssam_scsi_task_put(task); ++} ++ ++struct ssam_scsi_dma_data_request_arg { ++ struct spdk_ssam_dev *smdev; ++ struct spdk_ssam_scsi_task *task; ++ struct ssam_dma_request dma_req; ++}; ++ ++static void ++ssam_scsi_dma_data_request_cb(void *arg) ++{ ++ struct ssam_scsi_dma_data_request_arg *cb_arg = (struct ssam_scsi_dma_data_request_arg *)arg; ++ int ret = ssam_dma_data_request(cb_arg->smdev->tid, &cb_arg->dma_req); ++ if (ret == -ENOMEM || ret == -EIO) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_scsi_dma_data_request_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(cb_arg->smdev, io_wait_r); ++ return; ++ } ++ if (ret < 0) { ++ SPDK_ERRLOG("ssam dma data request failed(%d)\n", ret); ++ cb_arg->task->resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssam_scsi_submit_completion(cb_arg->task); ++ } ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ++ssam_scsi_task_dma_request(struct spdk_ssam_scsi_task *task, enum data_request_dma_type data_dir) ++{ ++ struct spdk_ssam_session *smsession = task->smsession; ++ struct ssam_dma_request data_request = {0}; ++ int ret = 0; ++ ++ ssam_scsi_task_stat_tick(&task->task_stat.dma_start_tsc); ++ task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[0]->scsi_stat.dma_count++; ++ ++ switch (data_dir) { ++ case SSAM_REQUEST_DATA_STORE: ++ ssam_scsi_task_dma_request_para(&data_request, task, SSAM_REQUEST_DATA_STORE, 0); ++ ++ /* dma request: ipu -> Host */ ++ ret = ssam_dma_data_request(smsession->smdev->tid, &data_request); ++ break; ++ ++ case SSAM_REQUEST_DATA_LOAD: ++ ssam_scsi_task_dma_request_para(&data_request, task, SSAM_REQUEST_DATA_LOAD, 0); ++ ++ /* dma request: Host -> ipu */ ++ ret = ssam_dma_data_request(smsession->smdev->tid, &data_request); ++ break; ++ ++ default: ++ SPDK_ERRLOG("Invalid data dir: %u.\n", data_dir); ++ break; ++ } ++ ++ if (ret == -ENOMEM || ret == -EIO) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_scsi_dma_data_request_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_scsi_dma_data_request_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smsession->smdev; ++ cb_arg->dma_req = 
data_request; ++ cb_arg->task = task; ++ io_wait_r->cb_fn = ssam_scsi_dma_data_request_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ ++ if (ret < 0) { ++ SPDK_ERRLOG("ssam dma data request failed(%d)\n", ret); ++ task->resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssam_scsi_submit_completion(task); ++ } ++} ++ ++static void ++ssam_scsi_task_copy_resp(struct spdk_ssam_scsi_task *task) ++{ ++ struct spdk_scsi_task *scsi_task = &task->scsi_task; ++ ++ if (spdk_unlikely(task->io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL)) { ++ task->tmf_resp.response = scsi_task->status; ++ } else { ++ task->resp.status = scsi_task->status; ++ if (spdk_unlikely(scsi_task->sense_data_len > SSAM_SENSE_DATE_LEN)) { ++ return; ++ } ++ if (scsi_task->status != SPDK_SCSI_STATUS_GOOD) { ++ memcpy(task->resp.sense, scsi_task->sense_data, scsi_task->sense_data_len); ++ task->resp.sense_len = scsi_task->sense_data_len; ++ } ++ ++ if (scsi_task->transfer_len != scsi_task->length) { ++ SPDK_ERRLOG("task transfer_len(%u) not equal length(%u), internel error.\n", ++ scsi_task->transfer_len, scsi_task->length); ++ } ++ ++ task->resp.resid = scsi_task->length - scsi_task->data_transferred; ++ } ++} ++ ++static void ++ssam_scsi_read_task_cpl_cb(struct spdk_scsi_task *scsi_task) ++{ ++ if (spdk_unlikely(spdk_get_shutdown_sig_received())) { ++ /* ++ * In the hot restart process, when this callback is triggered, ++ * the task and bdev_io memory may have been released. ++ * Therefore, task and bdev_io are not released in this scenario. ++ */ ++ return; ++ } ++ struct spdk_ssam_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_ssam_scsi_task, ++ scsi_task); ++ int32_t tgt_id = task->tgt_id; ++ int32_t lun_id = spdk_scsi_lun_get_id(scsi_task->lun); ++ struct spdk_scsi_dev_io_state *io_stat = task->ssmsession->scsi_dev_state[tgt_id].io_stat[lun_id]; ++ ++ /* Second part start of read */ ++ io_stat->submit_tsc = spdk_get_ticks(); ++ ++ ssam_scsi_task_copy_resp(task); ++ ++ ssam_scsi_task_stat_tick(&task->task_stat.bdev_end_tsc); ++ task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[0]->scsi_stat.bdev_complete_count++; ++ ++ /* 1) Read request without data is no need to dma; ++ * 2) Read request failed just complete it. ++ */ ++ if (scsi_task->length == 0 || scsi_task->status != SPDK_SCSI_STATUS_GOOD) { ++ ssam_scsi_submit_completion(task); ++ return; ++ } ++ ++ /* Dma data from IPU to HOST */ ++ ssam_scsi_task_dma_request(task, SSAM_REQUEST_DATA_STORE); ++ ++ return; ++} ++ ++static void ++ssam_scsi_write_task_cpl_cb(struct spdk_scsi_task *scsi_task) ++{ ++ if (spdk_unlikely(spdk_get_shutdown_sig_received())) { ++ /* ++ * In the hot restart process, when this callback is triggered, ++ * the task and bdev_io memory may have been released. ++ * Therefore, task and bdev_io are not released in this scenario. 
++ */ ++ return; ++ } ++ struct spdk_ssam_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_ssam_scsi_task, ++ scsi_task); ++ int32_t tgt_id = task->tgt_id; ++ int32_t lun_id = spdk_scsi_lun_get_id(scsi_task->lun); ++ struct spdk_scsi_dev_io_state *io_stat = task->ssmsession->scsi_dev_state[tgt_id].io_stat[lun_id]; ++ uint32_t payload_size = task->scsi_task.transfer_len; ++ ++ /* Second part start of write */ ++ io_stat->submit_tsc = spdk_get_ticks(); ++ ++ /* copy result from spdk_scsi_task to spdk_ssam_scsi_task->resp */ ++ ssam_scsi_task_copy_resp(task); ++ ++ ssam_scsi_task_stat_tick(&task->task_stat.bdev_end_tsc); ++ task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[0]->scsi_stat.bdev_complete_count++; ++ ++ ssam_scsi_submit_completion(task); ++ /* Second part end of write */ ++ io_stat->stat.write_latency_ticks += ssam_get_diff_tsc(io_stat->submit_tsc); ++ io_stat->stat.bytes_written += payload_size; ++ io_stat->stat.num_write_ops++; ++ ++ return; ++} ++ ++static void ++ssam_scsi_ctl_task_cpl_cb(struct spdk_scsi_task *scsi_task) ++{ ++ struct spdk_ssam_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_ssam_scsi_task, ++ scsi_task); ++ ++ ssam_scsi_task_copy_resp(task); ++ ++ ssam_scsi_submit_completion(task); ++} ++ ++static void ++ssam_scsi_task_free_cb(struct spdk_scsi_task *scsi_task) ++{ ++ struct spdk_ssam_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_ssam_scsi_task, ++ scsi_task); ++ ++ ssam_scsi_task_finish(task); ++} ++ ++static int ++ssam_scsi_task_init_target(struct spdk_ssam_scsi_task *task, const __u8 *lun) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = task->ssmsession; ++ struct spdk_scsi_dev_ssam_state *state = NULL; ++ int32_t lun_id = (((uint16_t)lun[2] << 8) | lun[3]) & 0x3FFF; ++ int32_t tgt_id = lun[1]; ++ ++ if (lun[0] != 1 || tgt_id >= SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("First byte must be 1 and second is target\n"); ++ ssmsession->smsession.smdev->discard_io_num++; ++ return -1; ++ } ++ ++ state = &ssmsession->scsi_dev_state[tgt_id]; ++ task->scsi_dev = state->dev; ++ if (state->dev == NULL || state->status != SSAM_SCSI_DEV_PRESENT) { ++ return -1; ++ } ++ ++ task->tgt_id = tgt_id; ++ task->scsi_task.target_port = spdk_scsi_dev_find_port_by_id(task->scsi_dev, 0); ++ task->scsi_task.lun = spdk_scsi_dev_get_lun(state->dev, lun_id); ++ if (task->scsi_task.lun == NULL) { ++ SPDK_ERRLOG("Failed to init scsi task lun by lun_id(%d)\n", lun_id); ++ return -1; ++ } ++ return 0; ++} ++ ++static void ++ssam_scsi_submit_io_task(struct spdk_ssam_scsi_task *task) ++{ ++ task->resp.response = VIRTIO_SCSI_S_OK; ++ ++ ssam_scsi_task_stat_tick(&task->task_stat.bdev_start_tsc); ++ spdk_scsi_dev_queue_task(task->scsi_dev, &task->scsi_task); ++ task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[0]->scsi_stat.bdev_count++; ++ ssam_scsi_task_stat_tick(&task->task_stat.bdev_func_tsc); ++ ++ SPDK_DEBUGLOG(ssam_blk_data, "====== Task: task_idx %u submitted ======\n", task->task_idx); ++} ++ ++static int ++ssam_scsi_task_iovs_memory_get(struct spdk_ssam_scsi_task *task, uint32_t payload_size) ++{ ++ struct ssam_mempool *mp = task->smsession->mp; ++ void *buffer = NULL; ++ uint64_t phys_addr = 0; ++ uint32_t alloc_size; ++ ++ if (payload_size == 0) { /* A little strange */ ++ alloc_size = 1; /* Alloc one iov at least */ ++ } else { ++ alloc_size = payload_size; ++ } ++ ++ buffer = ssam_mempool_alloc(mp, alloc_size, &phys_addr); ++ if (spdk_unlikely(buffer == NULL)) { ++ return -ENOMEM; ++ } ++ ++ /* ssam request max IO size is 
PAYLOAD_SIZE_MAX, only use one iov to save data */ ++ task->iovs.virt.sges[0].iov_base = buffer; ++ task->iovs.phys.sges[0].iov_base = (void *)phys_addr; ++ task->iovs.virt.sges[0].iov_len = payload_size; ++ task->iovs.phys.sges[0].iov_len = payload_size; ++ task->iovcnt = 1; ++ ++ return 0; ++} ++ ++static void ++scsi_mgmt_task_submit(struct spdk_ssam_scsi_task *task, enum spdk_scsi_task_func func) ++{ ++ task->tmf_resp.response = VIRTIO_SCSI_S_OK; ++ task->scsi_task.function = func; ++ spdk_scsi_dev_queue_mgmt_task(task->scsi_dev, &task->scsi_task); ++} ++ ++static void ++ssam_scsi_process_ctl_task(struct spdk_ssam_session *smsession, struct spdk_ssam_scsi_task *task) ++{ ++ struct virtio_scsi_ctrl_tmf_req *ctrl_req = (struct virtio_scsi_ctrl_tmf_req *) ++ task->io_req->req.cmd.header; ++ int32_t lun_id = spdk_scsi_lun_get_id(task->scsi_task.lun); ++ struct spdk_scsi_dev_io_state *io_stat = ++ task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[lun_id]; ++ int ret = 0; ++ ++ spdk_scsi_task_construct(&task->scsi_task, ssam_scsi_ctl_task_cpl_cb, ssam_scsi_task_free_cb); ++ ret = ssam_scsi_task_init_target(task, ctrl_req->lun); ++ if (ret < 0) { ++ task->tmf_resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssam_scsi_submit_completion(task); ++ return; ++ } ++ ++ switch (ctrl_req->type) { ++ case VIRTIO_SCSI_T_TMF: ++ /* Check if we are processing a valid request */ ++ if (task->scsi_dev == NULL) { ++ task->tmf_resp.response = VIRTIO_SCSI_S_BAD_TARGET; ++ ssam_scsi_submit_completion(task); ++ break; ++ } ++ ++ switch (ctrl_req->subtype) { ++ case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET: ++ /* Handle LUN reset */ ++ SPDK_DEBUGLOG(ssam_scsi, "%s: LUN reset\n", smsession->name); ++ ++ scsi_mgmt_task_submit(task, SPDK_SCSI_TASK_FUNC_LUN_RESET); ++ return; ++ default: ++ task->tmf_resp.response = VIRTIO_SCSI_S_ABORTED; ++ ssam_scsi_submit_completion(task); ++ /* Unsupported command */ ++ SPDK_DEBUGLOG(ssam_scsi, "%s: unsupported TMF command %x\n", ++ smsession->name, ctrl_req->subtype); ++ break; ++ } ++ break; ++ ++ case VIRTIO_SCSI_T_AN_QUERY: ++ case VIRTIO_SCSI_T_AN_SUBSCRIBE: ++ task->tmf_resp.response = VIRTIO_SCSI_S_ABORTED; ++ ssam_scsi_submit_completion(task); ++ break; ++ ++ default: ++ SPDK_DEBUGLOG(ssam_scsi, "%s: Unsupported control command %x\n", ++ smsession->name, ctrl_req->type); ++ io_stat->scsi_stat.fatal_ios++; ++ break; ++ } ++} ++ ++static void ++ssam_scsi_io_task_construct(struct spdk_ssam_scsi_task *task) ++{ ++ struct spdk_scsi_task *scsi_task = &task->scsi_task; ++ struct ssam_io_message *io_cmd = &task->io_req->req.cmd; ++ ++ if (io_cmd->writable) { /* read io */ ++ spdk_scsi_task_construct(scsi_task, ssam_scsi_read_task_cpl_cb, ssam_scsi_task_free_cb); ++ } else { /* write io */ ++ spdk_scsi_task_construct(scsi_task, ssam_scsi_write_task_cpl_cb, ssam_scsi_task_free_cb); ++ } ++} ++ ++static int32_t ++ssam_scsi_io_task_setup(struct spdk_ssam_scsi_task *task) ++{ ++ struct spdk_scsi_task *scsi_task = &task->scsi_task; ++ struct ssam_io_message *io_cmd = &task->io_req->req.cmd; ++ struct virtio_scsi_cmd_req *req = (struct virtio_scsi_cmd_req *)io_cmd->header; ++ uint32_t payload_size; ++ int ret; ++ ++ ssam_scsi_io_task_construct(task); ++ ++ ret = ssam_scsi_get_payload_size(task->io_req, &payload_size); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ ret = ssam_scsi_task_init_target(task, req->lun); ++ if (ret < 0) { ++ return ret; ++ } ++ ++ scsi_task->dxfer_dir = (io_cmd->writable ? 
SPDK_SCSI_DIR_FROM_DEV : SPDK_SCSI_DIR_TO_DEV); ++ scsi_task->iovs = task->iovs.virt.sges; ++ scsi_task->cdb = req->cdb; ++ scsi_task->transfer_len = payload_size; ++ scsi_task->length = payload_size; ++ ++ ret = ssam_scsi_task_iovs_memory_get(task, payload_size); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_scsi_process_io_task(struct spdk_ssam_session *smsession, struct spdk_ssam_scsi_task *task) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev_io_state *io_stat; ++ uint64_t cur_tsc; ++ int32_t lun_id; ++ ++ ssmsession->scsi_dev_state[task->tgt_id].flight_io++; ++ ++ if (spdk_unlikely(task->scsi_task.lun == NULL)) { ++ spdk_scsi_task_process_null_lun(&task->scsi_task); ++ task->resp.response = VIRTIO_SCSI_S_OK; ++ ssam_scsi_submit_completion(task); ++ return; ++ } ++ ++ lun_id = spdk_scsi_lun_get_id(task->scsi_task.lun); ++ io_stat = ssmsession->scsi_dev_state[task->tgt_id].io_stat[lun_id]; ++ if (io_stat == NULL) { ++ SPDK_ERRLOG("No io_stat with tgt %d lun %d\n", task->tgt_id, lun_id); ++ task->resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssam_scsi_submit_completion(task); ++ return; ++ } ++ /* First part start of read and write */ ++ cur_tsc = spdk_get_ticks(); ++ io_stat->submit_tsc = cur_tsc; ++ memset(&task->task_stat, 0, sizeof(task->task_stat)); ++ task->task_stat.start_tsc = cur_tsc; ++ io_stat->scsi_stat.start_count++; ++ ++ switch (task->scsi_task.dxfer_dir) { ++ case SPDK_SCSI_DIR_FROM_DEV: /* read: read data from backend to ipu, then dma to host */ ++ ssam_scsi_submit_io_task(task); ++ /* First part end of read */ ++ uint8_t rw_type = task->scsi_task.cdb[0]; ++ if (rw_type == SPDK_SBC_READ_6 || rw_type == SPDK_SBC_READ_10 || ++ rw_type == SPDK_SBC_READ_12 || rw_type == SPDK_SBC_READ_16) { ++ io_stat->stat.read_latency_ticks += ssam_get_diff_tsc(io_stat->submit_tsc); ++ io_stat->stat.bytes_read += task->scsi_task.transfer_len; ++ io_stat->stat.num_read_ops++; ++ } ++ break; ++ ++ case SPDK_SCSI_DIR_TO_DEV: /* write: dma data from host to ipu, then submit to backend */ ++ ssam_scsi_task_dma_request(task, SSAM_REQUEST_DATA_LOAD); ++ break; ++ ++ default: ++ SPDK_ERRLOG("scsi task dxfer dir error, dir is %u.\n", task->scsi_task.dxfer_dir); ++ io_stat->scsi_stat.fatal_ios++; ++ break; ++ } ++} ++ ++static void ++ssam_scsi_pre_process_io_task(struct spdk_ssam_session *smsession, struct spdk_ssam_scsi_task *task) ++{ ++ int ret; ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ ++ ret = ssam_scsi_io_task_setup(task); ++ if (ret != 0) { ++ if (ret == -ENOMEM) { ++ ssam_session_insert_io_wait(smsession, &task->session_io_wait); ++ return; ++ } ++ task->resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssmsession->scsi_dev_state[task->tgt_id].flight_io++; ++ ssam_scsi_submit_completion(task); ++ return; ++ } ++ ++ ssam_scsi_process_io_task(smsession, task); ++} ++ ++static void ++ssam_scsi_process_request(struct spdk_ssam_session *smsession, struct ssam_request *io_req, ++ uint16_t vq_idx) ++{ ++ struct spdk_ssam_scsi_task *task = NULL; ++ struct spdk_ssam_virtqueue *vq = &smsession->virtqueue[vq_idx]; ++ ++ if (spdk_unlikely(vq->use_num >= vq->num)) { ++ SPDK_ERRLOG("Session:%s vq(%hu) task_cnt(%u) limit(%u).\n", smsession->name, vq_idx, vq->use_num, ++ vq->num); ++ ssam_scsi_req_complete(smsession->smdev, io_req, VIRTIO_SCSI_S_FAILURE); ++ return; ++ } ++ ++ uint32_t index = vq->index[vq->index_r]; ++ task = &((struct spdk_ssam_scsi_task 
*)vq->tasks)[index]; ++ if (spdk_unlikely(task->used)) { ++ SPDK_ERRLOG("%s: vq(%hu) task_idx(%hu) is already pending.\n", smsession->name, vq_idx, ++ task->task_idx); ++ ssam_scsi_req_complete(smsession->smdev, io_req, VIRTIO_SCSI_S_FAILURE); ++ return; ++ } ++ ++ smsession->task_cnt++; ++ vq->index_r = (vq->index_r + 1) & 0xFF; ++ vq->use_num++; ++ ssam_scsi_task_init(task); ++ task->io_req = io_req; ++ ++ if (spdk_unlikely(io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL)) { ++ ssam_scsi_process_ctl_task(smsession, task); ++ } else { ++ ssam_scsi_pre_process_io_task(smsession, task); ++ } ++ ++ return; ++} ++ ++static void ++ssam_scsi_request_worker(struct spdk_ssam_session *smsession, void *arg) ++{ ++ struct ssam_request *io_req = (struct ssam_request *)arg; ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ struct spdk_ssam_dev *smdev = smsession->smdev; ++ struct virtio_scsi_cmd_req *req = (struct virtio_scsi_cmd_req *)io_cmd->header; ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ uint16_t vq_idx = io_cmd->virtio.vq_idx; ++ uint32_t tgt_id = req->lun[1]; ++ ++ smdev->io_num++; ++ ++ if (vq_idx >= smsession->max_queues) { ++ SPDK_ERRLOG("vq_idx out of range, need less than %u, actually %u\n", ++ smsession->max_queues, vq_idx); ++ goto err; ++ } ++ ++ if (io_req->status != SSAM_IO_STATUS_OK) { ++ SPDK_WARNLOG("%s: ssam request status invalid, but still process, status=%d\n", ++ smsession->name, io_req->status); ++ goto err; ++ } ++ ++ if (ssmsession->scsi_dev_state[tgt_id].status != SSAM_SCSI_DEV_PRESENT) { ++ /* If dev has been deleted, return io err */ ++ goto err; ++ } ++ ++ ssam_scsi_process_request(smsession, io_req, vq_idx); ++ ++ return; ++ ++err: ++ ssam_scsi_req_complete(smsession->smdev, io_req, VIRTIO_SCSI_S_FAILURE); ++ return; ++} ++ ++static void ++ssam_scsi_response_worker(struct spdk_ssam_session *smsession, void *arg) ++{ ++ struct ssam_dma_rsp *dma_rsp = (struct ssam_dma_rsp *)arg; ++ const struct spdk_ssam_dma_cb *dma_cb = (const struct spdk_ssam_dma_cb *)&dma_rsp->cb; ++ struct spdk_ssam_scsi_task *task = NULL; ++ uint16_t vq_idx = dma_cb->vq_idx; ++ uint16_t task_idx = dma_cb->task_idx; ++ uint8_t req_dir = dma_cb->req_dir; ++ ++ if (spdk_unlikely(vq_idx >= smsession->max_queues)) { ++ smsession->smdev->discard_io_num++; ++ SPDK_ERRLOG("vq_idx out of range, need less than %u, actually %u\n", ++ smsession->max_queues, vq_idx); ++ return; ++ } ++ ++ task = &((struct spdk_ssam_scsi_task *)smsession->virtqueue[vq_idx].tasks)[task_idx]; ++ if (dma_rsp->status != 0) { ++ task->resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssam_scsi_submit_completion(task); ++ SPDK_ERRLOG("dma data process failed!\n"); ++ return; ++ } ++ if (dma_rsp->last_flag == 0) { ++ task->resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssam_scsi_submit_completion(task); ++ SPDK_ERRLOG("last_flag should not equal 0!\n"); ++ return; ++ } ++ int32_t tgt_id = task->tgt_id; ++ int32_t lun_id = spdk_scsi_lun_get_id(task->scsi_task.lun); ++ struct spdk_scsi_dev_io_state *io_stat = task->ssmsession->scsi_dev_state[tgt_id].io_stat[lun_id]; ++ ++ ssam_scsi_task_stat_tick(&task->task_stat.dma_end_tsc); ++ task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[0]->scsi_stat.dma_complete_count++; ++ ++ if (req_dir == SSAM_REQUEST_DATA_LOAD) { ++ /* Write: write data ready, submit task to backend */ ++ ssam_scsi_submit_io_task(task); ++ /* First part end of write */ ++ io_stat->stat.write_latency_ticks += ssam_get_diff_tsc(io_stat->submit_tsc); ++ } else if (req_dir == 
SSAM_REQUEST_DATA_STORE) { ++ /* Read: data have been read by user, complete the task */ ++ task->resp.response = VIRTIO_SCSI_S_OK; ++ ssam_scsi_submit_completion(task); ++ /* Second part end of read */ ++ io_stat->stat.read_latency_ticks += ssam_get_diff_tsc(io_stat->submit_tsc); ++ } else { ++ io_stat->scsi_stat.fatal_ios++; ++ } ++} ++ ++static void ++ssam_scsi_destroy_bdev_device(struct spdk_ssam_session *smsession, void *args) ++{ ++ unsigned scsi_tgt_num = (unsigned)(uintptr_t)(args); ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ ++ ssam_remove_scsi_tgt(ssmsession, scsi_tgt_num); ++} ++ ++static void ++ssam_free_scsi_task_pool(struct spdk_ssam_scsi_session *ssmsession) ++{ ++ struct spdk_ssam_session *smsession = &ssmsession->smsession; ++ struct spdk_ssam_virtqueue *vq = NULL; ++ uint16_t max_queues = smsession->max_queues; ++ uint16_t i; ++ ++ if (max_queues > SPDK_SSAM_MAX_VQUEUES) { ++ return; ++ } ++ ++ for (i = 0; i < max_queues; i++) { ++ vq = &smsession->virtqueue[i]; ++ if (vq->tasks != NULL) { ++ spdk_free(vq->tasks); ++ vq->tasks = NULL; ++ } ++ ++ if (vq->index != NULL) { ++ spdk_free(vq->index); ++ vq->index = NULL; ++ } ++ } ++} ++ ++static int ++ssam_alloc_scsi_task_pool(struct spdk_ssam_scsi_session *ssmsession) ++{ ++ struct spdk_ssam_session *smsession = &ssmsession->smsession; ++ struct spdk_ssam_virtqueue *vq = NULL; ++ struct spdk_ssam_scsi_task *task = NULL; ++ uint16_t max_queues = smsession->max_queues; ++ uint32_t task_cnt = smsession->queue_size; ++ uint16_t i; ++ uint32_t j; ++ ++ if ((max_queues > SPDK_SSAM_MAX_VQUEUES) || (max_queues == 0)) { ++ SPDK_ERRLOG("%s: max_queues %u invalid\n", smsession->name, max_queues); ++ return -EINVAL; ++ } ++ ++ if ((task_cnt == 0) || (task_cnt > SPDK_SSAM_MAX_VQ_SIZE)) { ++ SPDK_ERRLOG("%s: virtuque size %u invalid\n", smsession->name, task_cnt); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < max_queues; i++) { ++ vq = &smsession->virtqueue[i]; ++ vq->smsession = smsession; ++ vq->num = task_cnt; ++ vq->use_num = 0; ++ vq->index_l = 0; ++ vq->index_r = 0; ++ vq->tasks = spdk_zmalloc(sizeof(struct spdk_ssam_scsi_task) * task_cnt, ++ SPDK_CACHE_LINE_SIZE, NULL, ++ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); ++ vq->index = spdk_zmalloc(sizeof(uint32_t) * task_cnt, ++ SPDK_CACHE_LINE_SIZE, NULL, ++ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); ++ if (vq->tasks == NULL || vq->index == NULL) { ++ SPDK_ERRLOG("%s: failed to allocate %"PRIu32" tasks for virtqueue %"PRIu16"\n", ++ smsession->name, task_cnt, i); ++ ssam_free_scsi_task_pool(ssmsession); ++ return -ENOMEM; ++ } ++ ++ for (j = 0; j < task_cnt; j++) { ++ task = &((struct spdk_ssam_scsi_task *)vq->tasks)[j]; ++ task->ssmsession = ssmsession; ++ task->smsession = &ssmsession->smsession; ++ task->vq_idx = i; ++ task->task_idx = j; ++ vq->index[j] = j; ++ } ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_scsi_print_stuck_io_info(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_scsi_task *tasks; ++ struct spdk_ssam_scsi_task *task; ++ int i, j; ++ ++ for (i = 0; i < smsession->max_queues; i++) { ++ for (j = 0; j < smsession->queue_size; j++) { ++ tasks = (struct spdk_ssam_scsi_task *)smsession->virtqueue[i].tasks; ++ task = &tasks[j]; ++ if (task == NULL) { ++ continue; ++ } ++ if (task->used) { ++ SPDK_INFOLOG(ssam_scsi, "%s: stuck io payload_size %u, vq_idx %u, task_idx %u\n", ++ smsession->name, task->scsi_task.length, task->vq_idx, task->task_idx); ++ } ++ } ++ } ++} ++ ++static int ++ssam_scsi_start_cb(struct 
spdk_ssam_session *smsession, void **unused) ++{ ++ SPDK_NOTICELOG("SCSI controller %s created with queues %u\n", ++ smsession->name, smsession->max_queues); ++ ++ ssam_session_start_done(smsession, 0); ++ ++ return 0; ++} ++ ++static int ++ssam_scsi_start(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = false, ++ .need_rsp = true, ++ }; ++ int rc = ssam_alloc_scsi_task_pool(ssmsession); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: failed to alloc task pool.\n", smsession->name); ++ return rc; ++ } ++ return ssam_send_event_to_session(smsession, ssam_scsi_start_cb, NULL, send_event_flag, NULL); ++} ++ ++static int ++ssam_scsi_session_connect(struct spdk_ssam_session *smsession, uint16_t queues) ++{ ++ uint16_t queue_cnt = queues; ++ ++ if (queue_cnt == 0) { ++ queue_cnt = SPDK_SSAM_SCSI_DEFAULT_VQUEUES; ++ } ++ ++ smsession->max_queues = queue_cnt; ++ smsession->queue_size = SPDK_SSAM_DEFAULT_VQ_SIZE; ++ ++ return ssam_scsi_start(smsession); ++} ++ ++int ++ssam_scsi_construct(struct spdk_ssam_session_reg_info *info) ++{ ++ struct spdk_ssam_session *smsession = NULL; ++ struct spdk_ssam_scsi_session *ssmsession = NULL; ++ uint32_t session_ctx_size = sizeof(struct spdk_ssam_scsi_session) - sizeof( ++ struct spdk_ssam_session); ++ uint16_t tid; ++ int rc = 0; ++ ++ ssam_lock(); ++ ++ tid = ssam_get_tid(); ++ if (tid == SPDK_INVALID_TID) { ++ ssam_unlock(); ++ return -EINVAL; ++ } ++ ++ info->tid = tid; ++ info->backend = &g_ssam_scsi_session_backend; ++ info->session_ctx_size = session_ctx_size; ++ snprintf(info->type_name, SPDK_SESSION_TYPE_MAX_LEN, "%s", SPDK_SESSION_TYPE_SCSI); ++ rc = ssam_session_register(info, &smsession); ++ if (rc != 0) { ++ ssam_unlock(); ++ return rc; ++ } ++ smsession->started = true; ++ ++ ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ ssmsession->registered = true; ++ ssmsession->dbdf = strdup(info->dbdf); ++ if (ssmsession->dbdf == NULL) { ++ ssam_session_unregister(smsession); ++ ssam_unlock(); ++ return -EINVAL; ++ } ++ ++ rc = ssam_scsi_session_connect(smsession, info->queues); ++ if (rc != 0) { ++ ssam_session_unreg_response_cb(smsession); ++ ssam_session_unregister(smsession); ++ ssam_unlock(); ++ return -EINVAL; ++ } ++ ++ ssam_unlock(); ++ ++ return 0; ++} ++ ++static int ++ssam_get_scsi_tgt_num(struct spdk_ssam_scsi_session *ssmsession, int *scsi_tgt_num_out) ++{ ++ int scsi_tgt_num = *scsi_tgt_num_out; ++ if (scsi_tgt_num < 0) { ++ for (scsi_tgt_num = 0; scsi_tgt_num < SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; scsi_tgt_num++) { ++ if (ssmsession->scsi_dev_state[scsi_tgt_num].dev == NULL) { ++ break; ++ } ++ } ++ ++ if (scsi_tgt_num == SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("%s: all SCSI target slots are already in use.\n", ssmsession->smsession.name); ++ return -ENOSPC; ++ } ++ } else { ++ if (scsi_tgt_num >= SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("%s: SCSI target number is too big (got %d, max %d)\n", ++ ssmsession->smsession.name, scsi_tgt_num, SPDK_SSAM_SCSI_CTRLR_MAX_DEVS - 1); ++ return -EINVAL; ++ } ++ } ++ *scsi_tgt_num_out = scsi_tgt_num; ++ return 0; ++} ++ ++static int ssam_scsi_dev_param_changed(struct spdk_ssam_scsi_session *ssmsession, ++ unsigned scsi_tgt_num) ++{ ++ struct spdk_scsi_dev_ssam_state *state = &ssmsession->scsi_dev_state[scsi_tgt_num]; ++ ++ if (state->dev == NULL) { ++ return 0; ++ } ++ int rc = ssam_scsi_send_event(&ssmsession->smsession, scsi_tgt_num, 
VIRTIO_SCSI_T_PARAM_CHANGE, ++ 0x2a | (0x09 << 0x8)); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: tgt %d resize send action failed\n", ssmsession->smsession.name, scsi_tgt_num); ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static unsigned ++ssam_get_scsi_dev_num(const struct spdk_ssam_scsi_session *ssmsession, ++ const struct spdk_scsi_lun *lun) ++{ ++ const struct spdk_scsi_dev *scsi_dev; ++ unsigned scsi_dev_num; ++ ++ scsi_dev = spdk_scsi_lun_get_dev(lun); ++ for (scsi_dev_num = 0; scsi_dev_num < SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; scsi_dev_num++) { ++ if (ssmsession->scsi_dev_state[scsi_dev_num].dev == scsi_dev) { ++ break; ++ } ++ } ++ return scsi_dev_num; ++} ++ ++static void ++ssam_scsi_lun_resize(const struct spdk_scsi_lun *lun, void *arg) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = arg; ++ unsigned scsi_dev_num; ++ ++ scsi_dev_num = ssam_get_scsi_dev_num(ssmsession, lun); ++ if (scsi_dev_num == SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ /* The entire device has been already removed. */ ++ return; ++ } ++ ++ (void)ssam_scsi_dev_param_changed(ssmsession, scsi_dev_num); ++} ++ ++static void ++ssam_scsi_lun_hotremove(const struct spdk_scsi_lun *lun, void *arg) ++{ ++ struct ssam_scsi_tgt_hotplug_ctx *ctx; ++ struct spdk_ssam_scsi_session *ssmsession = arg; ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = false, ++ .need_rsp = false, ++ }; ++ unsigned scsi_dev_num; ++ ++ scsi_dev_num = ssam_get_scsi_dev_num(ssmsession, lun); ++ if (scsi_dev_num == SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ /* The entire device has been already removed. */ ++ return; ++ } ++ ++ ctx = calloc(1, sizeof(*ctx)); ++ if (ctx == NULL) { ++ SPDK_ERRLOG("calloc failed\n"); ++ return; ++ } ++ ++ ctx->scsi_tgt_num = scsi_dev_num; ++ ssam_send_event_to_session(&ssmsession->smsession, ssam_scsi_dev_hot_remove_tgt, ++ NULL, send_event_flag, ctx); ++} ++ ++static int ++ssam_scsi_session_add_tgt(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct ssam_add_tgt_ev_ctx *args = (struct ssam_add_tgt_ev_ctx *)(*ctx); ++ unsigned scsi_tgt_num = args->tgt_num; ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ int rc; ++ ++ rc = spdk_scsi_dev_allocate_io_channels(ssmsession->scsi_dev_state[scsi_tgt_num].dev); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: Couldn't allocate io channel for SCSI target %u.\n", ++ smsession->name, scsi_tgt_num); ++ } ++ ++ rc = ssam_scsi_send_event(smsession, scsi_tgt_num, VIRTIO_SCSI_T_TRANSPORT_RESET, ++ VIRTIO_SCSI_EVT_RESET_RESCAN); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: send event %d(reason %d) to target %hu failed, ret: %d, host may not boot.\n", ++ smsession->name, VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_EVT_RESET_RESCAN, scsi_tgt_num, rc); ++ if (rc == -ENOSPC) { ++ spdk_scsi_dev_free_io_channels(ssmsession->scsi_dev_state[scsi_tgt_num].dev); ++ ssam_scsi_destruct_tgt(ssmsession, scsi_tgt_num); ++ return rc; ++ } ++ } ++ ++ ssmsession->scsi_dev_state[scsi_tgt_num].status = SSAM_SCSI_DEV_PRESENT; ++ ssmsession->scsi_dev_state[scsi_tgt_num].flight_io = 0; ++ ++ return 0; ++} ++ ++static void ++ssam_scsi_dev_add_tgt_cpl_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct ssam_add_tgt_ev_ctx *args = (struct ssam_add_tgt_ev_ctx *)(*ctx); ++ unsigned scsi_tgt_num = args->tgt_num; ++ ssmsession->ref++; ++ ++ SPDK_NOTICELOG("SCSI controller %s target %u added with bdev %s\n", ++ smsession->name, scsi_tgt_num, args->bdev_name); ++ ++ 
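/* ctx and the duplicated bdev_name were allocated in ssam_scsi_dev_add_tgt(); ++ * release them here now that the add has completed. ++ */ ++ 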
free(args->bdev_name); ++ args->bdev_name = NULL; ++ free(args); ++} ++ ++struct ssam_scsi_session_remove_tgt_arg { ++ struct spdk_ssam_session *smsession; ++ unsigned scsi_tgt_num; ++}; ++ ++static void ++ssam_scsi_session_remove_tgt_cpl(struct spdk_ssam_session *smsession, void **_ctx) ++{ ++ struct ssam_scsi_tgt_hotplug_ctx *ctx = *_ctx; ++ unsigned scsi_tgt_num = ctx->scsi_tgt_num; ++ int rc; ++ rc = ssam_umount_normal(smsession, ssam_scsi_tgtid_to_lunid(scsi_tgt_num)); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: function umount failed when remove scsi tgt:%s.\n", ++ smsession->name, strerror(-rc)); ++ } ++ free(ctx); ++} ++ ++static int ++ssam_scsi_session_remove_tgt(struct spdk_ssam_session *smsession, void **_ctx) ++{ ++ struct ssam_scsi_tgt_hotplug_ctx *ctx = *_ctx; ++ unsigned scsi_tgt_num = ctx->scsi_tgt_num; ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev_ssam_state *state = &ssmsession->scsi_dev_state[scsi_tgt_num]; ++ int rc = 0; ++ ++ if (state->status != SSAM_SCSI_DEV_PRESENT) { ++ SPDK_WARNLOG("%s: SCSI target %u is not present, skip.\n", smsession->name, scsi_tgt_num); ++ rc = -ENODEV; ++ goto out; ++ } ++ ++ if (ssmsession->scsi_dev_state[scsi_tgt_num].flight_io != 0) { ++ SPDK_ERRLOG("%s: SCSI target %u is busy.\n", smsession->name, scsi_tgt_num); ++ rc = -EBUSY; ++ goto out; ++ } ++ ++ state->status = SSAM_SCSI_DEV_REMOVING; ++ ++ SPDK_NOTICELOG("%s: target %d is removing\n", smsession->name, scsi_tgt_num); ++ ++ rc = ssam_scsi_send_event(smsession, scsi_tgt_num, VIRTIO_SCSI_T_TRANSPORT_RESET, ++ VIRTIO_SCSI_EVT_RESET_REMOVED); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: scsi send remove event failed\n", smsession->name); ++ if (rc == -ENOSPC) { ++ state->status = SSAM_SCSI_DEV_PRESENT; ++ goto out; ++ } ++ } ++ ++ spdk_scsi_dev_free_io_channels(state->dev); ++ ++ ssam_send_dev_destroy_msg(smsession, (void *)(uintptr_t)scsi_tgt_num); ++ ++ /* free ctx see ssam_scsi_session_remove_tgt_cpl() */ ++ return rc; ++ ++out: ++ free(ctx); ++ ++ return rc; ++} ++ ++static int ++ssam_scsi_construct_tgt(struct spdk_ssam_scsi_session *ssmsession, int scsi_tgt_num, ++ const char *bdev_name) ++{ ++ struct spdk_scsi_dev_ssam_state *state = NULL; ++ char target_name[SPDK_SCSI_DEV_MAX_NAME] = {0}; ++ int lun_id_list[SSAM_SPDK_SCSI_DEV_MAX_LUN]; ++ const char *bdev_names_list[SSAM_SPDK_SCSI_DEV_MAX_LUN]; ++ int rc; ++ ++ state = &ssmsession->scsi_dev_state[scsi_tgt_num]; ++ if (state->dev != NULL) { ++ SPDK_ERRLOG("%s: SCSI target %u already occupied\n", ssmsession->smsession.name, scsi_tgt_num); ++ return -EEXIST; ++ } ++ ++ (void)snprintf(target_name, sizeof(target_name), "Target %u", scsi_tgt_num); ++ lun_id_list[0] = 0; ++ bdev_names_list[0] = (char *)bdev_name; ++ ++ state->status = SSAM_SCSI_DEV_ADDING; ++ rc = ssam_scsi_iostat_construct(ssmsession, scsi_tgt_num, lun_id_list, SSAM_SPDK_SCSI_DEV_MAX_LUN); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ state->dev = spdk_scsi_dev_construct_ext(target_name, bdev_names_list, lun_id_list, ++ SSAM_SPDK_SCSI_DEV_MAX_LUN, ++ SPDK_SPC_PROTOCOL_IDENTIFIER_SAS, ++ ssam_scsi_lun_resize, ssmsession, ++ ssam_scsi_lun_hotremove, ssmsession); ++ if (state->dev == NULL) { ++ SPDK_ERRLOG("%s: couldn't create SCSI target %u using bdev '%s'\n", ++ ssmsession->smsession.name, scsi_tgt_num, bdev_name); ++ rc = -EINVAL; ++ goto dev_fail; ++ } ++ ++ rc = spdk_scsi_dev_add_port(state->dev, 0, "ssam"); ++ if (rc != 0) { ++ goto port_fail; ++ } ++ ++ return rc; ++ ++port_fail: ++ 
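/* Roll back in reverse order: drop the partially constructed SCSI device first, then the per-target iostat allocated above. */ ++ 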
spdk_scsi_dev_destruct(state->dev, NULL, NULL); ++ ++dev_fail: ++ ssam_scsi_iostat_destruct(state); ++ ++ return rc; ++} ++ ++static void ++ssam_scsi_destruct_tgt(struct spdk_ssam_scsi_session *ssmsession, int scsi_tgt_num) ++{ ++ struct spdk_scsi_dev_ssam_state *state = NULL; ++ state = &ssmsession->scsi_dev_state[scsi_tgt_num]; ++ ++ if (state->dev) { ++ spdk_scsi_dev_delete_port(state->dev, 0); ++ spdk_scsi_dev_destruct(state->dev, NULL, NULL); ++ state->dev = NULL; ++ } ++ ssam_scsi_iostat_destruct(state); ++ ++ state->status = SSAM_SCSI_DEV_EMPTY; ++} ++ ++int ++ssam_scsi_dev_add_tgt(struct spdk_ssam_session *smsession, int scsi_tgt_num, ++ const char *bdev_name) ++{ ++ int rc; ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct ssam_add_tgt_ev_ctx *ctx = calloc(1, sizeof(struct ssam_add_tgt_ev_ctx)); ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = false, ++ .need_rsp = true, ++ }; ++ ++ if (ctx == NULL) { ++ SPDK_ERRLOG("calloc ssam_add_tgt_ev_ctx failed\n"); ++ return -ENOMEM; ++ } ++ ++ if (bdev_name == NULL) { ++ SPDK_ERRLOG("No lun name specified\n"); ++ free(ctx); ++ return -EINVAL; ++ } ++ ++ ctx->bdev_name = spdk_sprintf_alloc("%s", bdev_name); ++ if (ctx->bdev_name == NULL) { ++ SPDK_ERRLOG("calloc ssam_add_tgt_ev_ctx bdev_name failed\n"); ++ free(ctx); ++ return -ENOMEM; ++ } ++ ++ rc = ssam_get_scsi_tgt_num(ssmsession, &scsi_tgt_num); ++ if (rc < 0) { ++ free(ctx->bdev_name); ++ free(ctx); ++ return rc; ++ } ++ ++ rc = ssam_mount_normal(smsession, ssam_scsi_tgtid_to_lunid(scsi_tgt_num)); ++ if (rc != SSAM_MOUNT_OK) { ++ SPDK_ERRLOG("%s: mount ssam volume failed, tgt id %d\n", smsession->name, scsi_tgt_num); ++ free(ctx->bdev_name); ++ free(ctx); ++ return rc; ++ } ++ ++ rc = ssam_scsi_construct_tgt(ssmsession, scsi_tgt_num, bdev_name); ++ if (rc != 0) { ++ free(ctx->bdev_name); ++ free(ctx); ++ return rc; ++ } ++ ++ ctx->tgt_num = scsi_tgt_num; ++ rc = ssam_send_event_to_session(&ssmsession->smsession, ssam_scsi_session_add_tgt, ++ ssam_scsi_dev_add_tgt_cpl_cb, send_event_flag, (void *)ctx); ++ if (rc != 0) { ++ ssam_scsi_destruct_tgt(ssmsession, scsi_tgt_num); ++ free(ctx->bdev_name); ++ free(ctx); ++ return rc; ++ } ++ ++ SPDK_INFOLOG(ssam_scsi, "%s: added SCSI target %u using bdev '%s'\n", ++ ssmsession->smsession.name, scsi_tgt_num, bdev_name); ++ ++ return 0; ++} ++ ++static int ++ssam_scsi_dev_hot_remove_tgt(struct spdk_ssam_session *smsession, void **_ctx) ++{ ++ int rc = 0; ++ struct ssam_scsi_tgt_hotplug_ctx *ctx = *_ctx; ++ struct spdk_ssam_scsi_session *ssmsession; ++ ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ unsigned scsi_tgt_num = ctx->scsi_tgt_num; ++ if (!ssmsession) { ++ SPDK_ERRLOG("invalid SCSI device"); ++ rc = -EINVAL; ++ goto out; ++ } ++ ++ struct spdk_scsi_dev_ssam_state *scsi_dev_state = &ssmsession->scsi_dev_state[scsi_tgt_num]; ++ if (scsi_dev_state->dev == NULL) { ++ /* Nothing to do */ ++ SPDK_WARNLOG("%s: There is no need to remove scsi target\n", smsession->name); ++ rc = -ENODEV; ++ goto out; ++ } ++ ++ if (scsi_dev_state->status != SSAM_SCSI_DEV_PRESENT) { ++ SPDK_INFOLOG(ssam_scsi, "%s: SCSI target %u is being removed\n", smsession->name, scsi_tgt_num); ++ rc = 0; ++ goto out; ++ } ++ ++ scsi_dev_state->status = SSAM_SCSI_DEV_REMOVING; ++ ++ SPDK_NOTICELOG("%s: target %d is hot removing\n", smsession->name, scsi_tgt_num); ++ ++ rc = ssam_scsi_send_event(smsession, scsi_tgt_num, VIRTIO_SCSI_T_TRANSPORT_RESET, ++ VIRTIO_SCSI_EVT_RESET_REMOVED); ++ if 
(rc != 0) { ++ SPDK_ERRLOG("%s: scsi send remove event failed\n", smsession->name); ++ if (rc == -ENOSPC) { ++ scsi_dev_state->status = SSAM_SCSI_DEV_PRESENT; ++ goto out; ++ } ++ } ++ ++ spdk_scsi_dev_free_io_channels(scsi_dev_state->dev); ++ ++ ssam_send_dev_destroy_msg(smsession, (void *)(uintptr_t)scsi_tgt_num); ++ ++out: ++ free(ctx); ++ return rc; ++} ++ ++int ++ssam_scsi_dev_remove_tgt(struct spdk_ssam_session *smsession, unsigned scsi_tgt_num, ++ spdk_ssam_session_rsp_fn cb_fn, void *cb_arg) ++{ ++ struct spdk_ssam_scsi_session *ssmsession; ++ struct ssam_scsi_tgt_hotplug_ctx *ctx; ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = false, ++ .need_rsp = true, ++ }; ++ ++ if (scsi_tgt_num >= SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("%s: invalid SCSI target number %d\n", smsession->name, scsi_tgt_num); ++ return -EINVAL; ++ } ++ ++ ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ if (!ssmsession) { ++ SPDK_ERRLOG("An invalid SCSI device that removing from a SCSI target."); ++ return -EINVAL; ++ } ++ ++ ctx = calloc(1, sizeof(*ctx)); ++ if (ctx == NULL) { ++ SPDK_ERRLOG("calloc failed\n"); ++ return -ENOMEM; ++ } ++ ++ ctx->scsi_tgt_num = scsi_tgt_num; ++ ++ ssam_send_event_to_session(smsession, ssam_scsi_session_remove_tgt, ++ ssam_scsi_session_remove_tgt_cpl, send_event_flag, ctx); ++ ++ return 0; ++} ++ ++SPDK_LOG_REGISTER_COMPONENT(ssam_scsi) +diff --git a/mk/spdk.lib_deps.mk b/mk/spdk.lib_deps.mk +index a6c7825..8328eec 100644 +--- a/mk/spdk.lib_deps.mk ++++ b/mk/spdk.lib_deps.mk +@@ -169,4 +169,5 @@ DEPDIRS-event_scsi := event scsi event_bdev + + DEPDIRS-event_iscsi := event iscsi event_scsi event_sock + DEPDIRS-event_vhost := event vhost event_scsi ++DEPDIRS-event_ssam := event ssam event_scsi + DEPDIRS-event_sock := event sock +diff --git a/module/bdev/iscsi/bdev_iscsi.c b/module/bdev/iscsi/bdev_iscsi.c +index 7516ea9..579fb9a 100644 +--- a/module/bdev/iscsi/bdev_iscsi.c ++++ b/module/bdev/iscsi/bdev_iscsi.c +@@ -42,9 +42,11 @@ + #include "spdk/rpc.h" + #include "spdk/string.h" + #include "spdk/iscsi_spec.h" ++#include "spdk/likely.h" + + #include "spdk/log.h" + #include "spdk/bdev_module.h" ++#include "spdk/event.h" + + #include "iscsi/iscsi.h" + #include "iscsi/scsi-lowlevel.h" +@@ -55,6 +57,7 @@ struct bdev_iscsi_lun; + + #define BDEV_ISCSI_CONNECTION_POLL_US 500 /* 0.5 ms */ + #define BDEV_ISCSI_NO_MAIN_CH_POLL_US 10000 /* 10ms */ ++#define BDEV_ISCSI_TIMEOUT 10 /* 10s */ + + #define DEFAULT_INITIATOR_NAME "iqn.2016-06.io.spdk:init" + +@@ -85,6 +88,8 @@ struct bdev_iscsi_lun { + struct spdk_thread *no_main_ch_poller_td; + bool unmap_supported; + struct spdk_poller *poller; ++ uint32_t unfinished_io_num; ++ uint64_t event_tsc; + }; + + struct bdev_iscsi_io_channel { +@@ -101,6 +106,7 @@ struct bdev_iscsi_conn_req { + bool unmap_supported; + int lun; + int status; ++ int attempts; + TAILQ_ENTRY(bdev_iscsi_conn_req) link; + }; + +@@ -130,6 +136,7 @@ _iscsi_free_lun(void *arg) + struct bdev_iscsi_lun *lun = arg; + + assert(lun != NULL); ++ SPDK_NOTICELOG("iscsi bdev %s removed.\n", lun->bdev.name); + iscsi_destroy_context(lun->context); + pthread_mutex_destroy(&lun->mutex); + free(lun->bdev.name); +@@ -195,6 +202,9 @@ static void + bdev_iscsi_io_complete(struct bdev_iscsi_io *iscsi_io, enum spdk_bdev_io_status status) + { + iscsi_io->status = status; ++ struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(iscsi_io); ++ struct bdev_iscsi_lun *lun = (struct bdev_iscsi_lun *)bdev_io->bdev->ctxt; ++ lun->unfinished_io_num--; + if 
(iscsi_io->submit_td != NULL) { + spdk_thread_send_msg(iscsi_io->submit_td, _bdev_iscsi_io_complete, iscsi_io); + } else { +@@ -206,6 +216,16 @@ bdev_iscsi_io_complete(struct bdev_iscsi_io *iscsi_io, enum spdk_bdev_io_status + static void + bdev_iscsi_command_cb(struct iscsi_context *context, int status, void *_task, void *_iscsi_io) + { ++ ++ if (spdk_unlikely(spdk_get_shutdown_sig_received())) { ++ /* ++ * In the hot restart process, when this callback is triggered, ++ * the _task memory may have been released. ++ * Therefore, _task are not released in this scenario. ++ */ ++ return; ++ } ++ + struct scsi_task *task = _task; + struct bdev_iscsi_io *iscsi_io = _iscsi_io; + +@@ -278,6 +298,17 @@ bdev_iscsi_destruct_cb(void *ctx) + { + struct bdev_iscsi_lun *lun = ctx; + ++ /* when main_td and no_main_ch_poller_td have different cores, ++ * ensure that the poller is deregistered before free lun. ++ */ ++ pthread_mutex_lock(&lun->mutex); ++ if (lun->ch_count > 0) { ++ pthread_mutex_unlock(&lun->mutex); ++ spdk_thread_send_msg(lun->no_main_ch_poller_td, bdev_iscsi_destruct_cb, lun); ++ return; ++ } ++ pthread_mutex_unlock(&lun->mutex); ++ + spdk_poller_unregister(&lun->no_main_ch_poller); + spdk_io_device_unregister(lun, _iscsi_free_lun); + } +@@ -379,7 +410,17 @@ bdev_iscsi_poll_lun(void *_lun) + return SPDK_POLLER_IDLE; + } + ++ /* When the default route is deleted, the TCP connection cannot be reconnected. ++ * Therefore, if fd not open, hot restart SSAM. ++ */ ++ if (pfd.revents == POLLNVAL) { ++ SPDK_WARNLOG("fd not open, hot restart SSAM.\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ + if (pfd.revents != 0) { ++ lun->event_tsc = spdk_get_ticks(); + if (iscsi_service(lun->context, pfd.revents) < 0) { + SPDK_ERRLOG("iscsi_service failed: %s\n", iscsi_get_error(lun->context)); + } +@@ -387,6 +428,22 @@ bdev_iscsi_poll_lun(void *_lun) + return SPDK_POLLER_BUSY; + } + ++ /* When the network is disconnected, the revent obtained by the poll() is always 0. ++ * As a result, the I/O cannot be responded, causing IO hang. ++ * Therefore, if no event is obtained within a period of time and there are unfinished I/Os, ++ * iscsi_service() will be called to obtain the disconnection event. 
++ */ ++ uint64_t diff_tsc = spdk_get_ticks() - lun->event_tsc; ++ if (spdk_unlikely((diff_tsc / BDEV_ISCSI_TIMEOUT) >= spdk_get_ticks_hz() && ++ lun->unfinished_io_num != 0)) { ++ SPDK_ERRLOG("There is no event while the unfinished io num is not zero(%d)\n", ++ lun->unfinished_io_num); ++ if (iscsi_service(lun->context, pfd.revents) < 0) { ++ SPDK_ERRLOG("iscsi_service failed: %s\n", iscsi_get_error(lun->context)); ++ } ++ lun->event_tsc = spdk_get_ticks(); ++ } ++ + return SPDK_POLLER_IDLE; + } + +@@ -431,7 +488,12 @@ static void _bdev_iscsi_submit_request(void *_bdev_io) + struct spdk_bdev_io *bdev_io = _bdev_io; + struct bdev_iscsi_io *iscsi_io = (struct bdev_iscsi_io *)bdev_io->driver_ctx; + struct bdev_iscsi_lun *lun = (struct bdev_iscsi_lun *)bdev_io->bdev->ctxt; +- ++ if (lun->unfinished_io_num == UINT32_MAX) { ++ bdev_iscsi_io_complete(iscsi_io, SPDK_BDEV_IO_STATUS_FAILED); ++ SPDK_ERRLOG("Too many unfinished io jobs\n"); ++ return; ++ } ++ lun->unfinished_io_num++; + switch (bdev_io->type) { + case SPDK_BDEV_IO_TYPE_READ: + spdk_bdev_io_get_buf(bdev_io, bdev_iscsi_get_buf_cb, +@@ -538,6 +600,7 @@ _iscsi_destroy_cb(void *ctx) + + lun->main_td = NULL; + spdk_poller_unregister(&lun->poller); ++ SPDK_NOTICELOG("iscsi bdev %s unregister main poller\n", lun->bdev.name); + + pthread_mutex_unlock(&lun->mutex); + } +@@ -638,6 +701,7 @@ create_iscsi_lun(struct iscsi_context *context, int lun_id, char *url, char *ini + lun->lun_id = lun_id; + lun->url = url; + lun->initiator_iqn = initiator_iqn; ++ lun->event_tsc = spdk_get_ticks(); + + pthread_mutex_init(&lun->mutex, NULL); + +@@ -770,10 +834,34 @@ iscsi_bdev_conn_poll(void *arg) + + if (pfd.revents != 0) { + if (iscsi_service(context, pfd.revents) < 0) { ++ req->attempts += 1; ++ if (req->attempts >= 10) { ++ SPDK_ERRLOG("iscsi_service failed times: %d\n", req->attempts); ++ iscsi_connect_cb(context, -1, NULL, req); ++ _bdev_iscsi_conn_req_free(req); ++ continue; ++ } + SPDK_ERRLOG("iscsi_service failed: %s\n", iscsi_get_error(context)); + } + } + ++ if ((pfd.revents & POLLHUP) && (req->status == -1)) { ++ SPDK_NOTICELOG("req->bdev_name(%s) reconnect\n", req->bdev_name); ++ iscsi_destroy_context(req->context); ++ req->context = iscsi_create_context(req->initiator_iqn); ++ struct iscsi_url *iscsi_url = iscsi_parse_full_url(req->context, req->url); ++ int rc = iscsi_set_session_type(req->context, ISCSI_SESSION_NORMAL); ++ rc = rc ? rc : iscsi_set_header_digest(req->context, ISCSI_HEADER_DIGEST_NONE); ++ rc = rc ? rc : iscsi_set_targetname(req->context, iscsi_url->target); ++ rc = iscsi_full_connect_async(req->context, iscsi_url->portal, iscsi_url->lun, iscsi_connect_cb, ++ req); ++ if (rc < 0) { ++ SPDK_ERRLOG("Failed to connect provided URL=%s: %s\n", req->url, iscsi_get_error(req->context)); ++ } ++ iscsi_destroy_url(iscsi_url); ++ sleep(3); ++ } ++ + if (req->status == 0) { + /* + * The request completed successfully. 
+@@ -847,10 +935,12 @@ create_iscsi_disk(const char *bdev_name, const char *url, const char *initiator_ + + iscsi_destroy_url(iscsi_url); + req->status = -1; ++ req->attempts = 0; + TAILQ_INSERT_TAIL(&g_iscsi_conn_req, req, link); + if (!g_conn_poller) { + g_conn_poller = SPDK_POLLER_REGISTER(iscsi_bdev_conn_poll, NULL, BDEV_ISCSI_CONNECTION_POLL_US); + } ++ SPDK_NOTICELOG("iscsi bdev %s created by %s.\n", req->bdev_name, req->initiator_iqn); + + return 0; + +diff --git a/module/event/subsystems/Makefile b/module/event/subsystems/Makefile +index a78985e..0c1822d 100644 +--- a/module/event/subsystems/Makefile ++++ b/module/event/subsystems/Makefile +@@ -41,6 +41,7 @@ DIRS-y += nbd + endif + + DIRS-$(CONFIG_VHOST) += vhost ++DIRS-$(CONFIG_SSAM) += ssam + + # These dependencies are not based specifically on symbols, but rather + # the subsystem dependency tree defined within the event subsystem C files +@@ -52,6 +53,7 @@ DEPDIRS-nbd := bdev + DEPDIRS-nvmf := bdev + DEPDIRS-scsi := bdev + DEPDIRS-vhost := scsi ++DEPDIRS-ssam := scsi + + .PHONY: all clean $(DIRS-y) + +diff --git a/module/event/subsystems/ssam/Makefile b/module/event/subsystems/ssam/Makefile +new file mode 100644 +index 0000000..c71f568 +--- /dev/null ++++ b/module/event/subsystems/ssam/Makefile +@@ -0,0 +1,44 @@ ++# ++# BSD LICENSE ++# ++# Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. ++# ++# Redistribution and use in source and binary forms, with or without ++# modification, are permitted provided that the following conditions ++# are met: ++# ++# * Redistributions of source code must retain the above copyright ++# notice, this list of conditions and the following disclaimer. ++# * Redistributions in binary form must reproduce the above copyright ++# notice, this list of conditions and the following disclaimer in ++# the documentation and/or other materials provided with the ++# distribution. ++# * Neither the name of Intel Corporation nor the names of its ++# contributors may be used to endorse or promote products derived ++# from this software without specific prior written permission. ++# ++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++# ++ ++SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..) ++include $(SPDK_ROOT_DIR)/mk/spdk.common.mk ++ ++SO_VER := 3 ++SO_MINOR := 0 ++ ++C_SRCS = ssam.c ++LIBNAME = event_ssam ++ ++SPDK_MAP_FILE = $(SPDK_ROOT_DIR)/mk/spdk_blank.map ++ ++include $(SPDK_ROOT_DIR)/mk/spdk.lib.mk +diff --git a/module/event/subsystems/ssam/ssam.c b/module/event/subsystems/ssam/ssam.c +new file mode 100644 +index 0000000..0b70fc3 +--- /dev/null ++++ b/module/event/subsystems/ssam/ssam.c +@@ -0,0 +1,71 @@ ++/*- ++ * BSD LICENSE ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Intel Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "spdk/stdinc.h" ++ ++#include "spdk/ssam.h" ++ ++#include "spdk_internal/event.h" ++ ++static void ++ssam_subsystem_init_done(int rc) ++{ ++ spdk_subsystem_init_next(rc); ++} ++ ++static void ++ssam_subsystem_init(void) ++{ ++ spdk_ssam_subsystem_init(ssam_subsystem_init_done); ++} ++ ++static void ++ssam_subsystem_fini_done(void) ++{ ++ spdk_subsystem_fini_next(); ++} ++ ++static void ++ssam_subsystem_fini(void) ++{ ++ spdk_ssam_subsystem_fini(ssam_subsystem_fini_done); ++} ++ ++static struct spdk_subsystem g_spdk_subsystem_ssam = { ++ .name = SSAM_SERVER_NAME, ++ .init = ssam_subsystem_init, ++ .fini = ssam_subsystem_fini, ++ .write_config_json = spdk_ssam_config_json, ++}; ++ ++SPDK_SUBSYSTEM_REGISTER(g_spdk_subsystem_ssam); ++SPDK_SUBSYSTEM_DEPEND(ssam, scsi) +diff --git a/scripts/hw_dpu_rpc.py b/scripts/hw_dpu_rpc.py +new file mode 100644 +index 0000000..b629a5c +--- /dev/null ++++ b/scripts/hw_dpu_rpc.py +@@ -0,0 +1,312 @@ ++#!/usr/bin/env python3 ++ ++import argparse ++import logging ++import rpc ++import sys ++from rpc.client import print_dict, JSONRPCException ++from rpc.helpers import deprecated_aliases ++import os ++import stat ++import pwd ++import grp ++ ++ ++def get_parser(): ++ parser = argparse.ArgumentParser( ++ description='SPDK RPC command line interface', usage='%(prog)s [options]', add_help=False) ++ ++ parser.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ parser.add_argument('-r', dest='conn_retries', ++ help='Retry connecting to the RPC server N times with 0.2s interval. Default: 0', ++ default=0, type=int) ++ parser.add_argument('-t', dest='timeout', ++ help='Timeout as a floating point number expressed in seconds, waiting for response. 
Default: 60.0', ++ default=60.0, type=float) ++ ++ parser.set_defaults(is_server=False) ++ parser.set_defaults(dry_run=False) ++ parser.set_defaults(port=5260) ++ parser.set_defaults(verbose="ERROR") ++ parser.set_defaults(server_addr='/var/tmp/spdk.sock') ++ return parser ++ ++ ++def init_rpc_func(): ++ parser = get_parser() ++ subparsers = parser.add_subparsers(help='RPC methods', dest='called_rpc_name', metavar='') ++ ++ @rpc.ssam.log_info ++ def create_blk_controller(args): ++ rpc.ssam.create_blk_controller(args.client, ++ dev_name=args.dev_name, ++ index=args.index, ++ readonly=args.readonly, ++ serial=args.serial) ++ ++ p = subparsers.add_parser('create_blk_controller', ++ help='Add a new block controller', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('dev_name', help='Name of block device') ++ p.add_argument('index', help='Function ID or dbdf') ++ p.add_argument("-r", "--readonly", action='store_true', help='Set controller as read-only') ++ p.add_argument("-s", "--serial", help='Set volume ID') ++ p.set_defaults(func=create_blk_controller) ++ ++ @rpc.ssam.log_info ++ def get_controllers(args): ++ print_dict(rpc.ssam.get_controllers(args.client, args.function_id, args.dbdf)) ++ ++ p = subparsers.add_parser('get_controllers', ++ help='List all or specific controller(s)', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('-f', '--function_id', help="Function ID of PCI device", type=int, required=False) ++ p.add_argument('-d', '--dbdf', help="Dbdf of PCI device", required=False) ++ p.set_defaults(func=get_controllers) ++ ++ @rpc.ssam.log_info ++ def get_scsi_controllers(args): ++ print_dict(rpc.ssam.get_scsi_controllers(args.client, args.name)) ++ ++ p = subparsers.add_parser('get_scsi_controllers', aliases=['scsi_controller_list'], ++ help='List all or specific scsi controller(s)', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('-n', '--name', help="Name of controller", required=False) ++ p.set_defaults(func=get_scsi_controllers) ++ ++ @rpc.ssam.log_info ++ def delete_controller(args): ++ rpc.ssam.delete_controller(args.client, index=args.index) ++ ++ p = subparsers.add_parser('delete_controller', ++ help='Delete a controller', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('index', help='Function ID or dbdf of PCI device') ++ p.set_defaults(func=delete_controller) ++ ++ @rpc.ssam.log_info ++ def delete_scsi_controller(args): ++ rpc.ssam.delete_scsi_controller(args.client, name=args.name) ++ ++ p = subparsers.add_parser('delete_scsi_controller', aliases=['scsi_controller_delete'], ++ help='Delete a scsi controller', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('name', help='Name of controller to be deleted', type=str) ++ p.set_defaults(func=delete_scsi_controller) ++ ++ @rpc.ssam.log_info ++ def bdev_resize(args): ++ rpc.ssam.bdev_resize(args.client, ++ function_id=args.function_id, ++ new_size_in_mb=args.new_size_in_mb) ++ ++ p = subparsers.add_parser('bdev_resize', ++ help='Resize a blk bdev by blk controller', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('function_id', help='Function ID of PCI device', type=int) ++ 
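# Example (illustrative): ./hw_dpu_rpc.py bdev_resize 16 1024 resizes the bdev behind function 16 to 1024 MiB ++ 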
p.add_argument('new_size_in_mb', help='New size of bdev for resize operation. The unit is MiB', type=int) ++ p.set_defaults(func=bdev_resize) ++ ++ @rpc.ssam.log_info ++ def scsi_bdev_resize(args): ++ rpc.ssam.scsi_bdev_resize(args.client, ++ name=args.name, ++ tgt_id=args.tgt_id, ++ new_size_in_mb=args.new_size_in_mb) ++ ++ p = subparsers.add_parser('scsi_bdev_resize', ++ help='Resize a scsi bdev by scsi controller', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('name', help='Name of Controller for the PCI device', type=str) ++ p.add_argument('tgt_id', help='Tgt ID of bdev', type=int) ++ p.add_argument('new_size_in_mb', help='New size of bdev for resize operation. The unit is MiB', type=int) ++ p.set_defaults(func=scsi_bdev_resize) ++ ++ @rpc.ssam.log_info ++ def bdev_aio_resize(args): ++ rpc.ssam.bdev_aio_resize(args.client, ++ name=args.name, ++ new_size_in_mb=args.new_size_in_mb) ++ ++ p = subparsers.add_parser('bdev_aio_resize', ++ help='Resize a bdev by bdev name', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('name', help='Name of aio bdev', type=str) ++ p.add_argument('new_size_in_mb', help='New size of bdev for resize operation. The unit is MiB', type=int) ++ p.set_defaults(func=bdev_aio_resize) ++ ++ @rpc.ssam.log_info ++ def os_ready(args): ++ rpc.ssam.os_ready(args.client) ++ ++ p = subparsers.add_parser('os_ready', ++ help='Write ready flag for booting OS', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.set_defaults(func=os_ready) ++ ++ @rpc.ssam.log_info ++ def controller_get_iostat(args): ++ print_dict(rpc.ssam.controller_get_iostat(args.client, args.function_id, args.dbdf)) ++ ++ p = subparsers.add_parser('controller_get_iostat', ++ help='Show all or specific controller(s) iostat', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('-f', '--function_id', help="Function ID of PCI device", type=int, required=False) ++ p.add_argument('-d', '--dbdf', help="Dbdf of PCI device", required=False) ++ p.set_defaults(func=controller_get_iostat) ++ ++ @rpc.ssam.log_info ++ def controller_clear_iostat(args): ++ rpc.ssam.controller_clear_iostat(args.client, args.type) ++ ++ p = subparsers.add_parser('controller_clear_iostat', ++ help='Clear all controllers iostat', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('-t', '--type', help="Type of the controllers: blk, scsi, fs", type=str, required=False) ++ p.set_defaults(func=controller_clear_iostat) ++ ++ @rpc.ssam.log_info ++ def create_scsi_controller(args): ++ rpc.ssam.create_scsi_controller(args.client, ++ dbdf=args.dbdf, ++ name=args.name) ++ ++ p = subparsers.add_parser('create_scsi_controller', aliases=['scsi_controller_create'], ++ help='Add a new scsi controller', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('dbdf', help='The pci dbdf of virtio scsi controller, which is obtained by \'device_pcie_list\'', type=str) ++ p.add_argument('name', help='Name of controller to be created', type=str) ++ p.set_defaults(func=create_scsi_controller) ++ ++ @rpc.ssam.log_info ++ def scsi_controller_add_target(args): ++ rpc.ssam.scsi_controller_add_target(args.client, ++ name=args.name, ++ 
scsi_tgt_num=int(args.scsi_tgt_num), ++ bdev_name=args.bdev_name) ++ ++ p = subparsers.add_parser('scsi_controller_add_target', ++ help='Add LUN to ssam scsi controller target', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('name', help='Name of controller where lun is added', type=str) ++ p.add_argument('scsi_tgt_num', help='ID of target to use') ++ p.add_argument('bdev_name', help='Name of bdev to be added to target') ++ p.set_defaults(func=scsi_controller_add_target) ++ ++ @rpc.ssam.log_info ++ def scsi_controller_remove_target(args): ++ rpc.ssam.scsi_controller_remove_target(args.client, ++ name=args.name, ++ scsi_tgt_num=int(args.scsi_tgt_num)) ++ ++ p = subparsers.add_parser('scsi_controller_remove_target', ++ help='Remove LUN from ssam scsi controller target', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('name', help='Name of controller to remove lun', type=str) ++ p.add_argument('scsi_tgt_num', help='ID of target to use') ++ p.set_defaults(func=scsi_controller_remove_target) ++ ++ @rpc.ssam.log_info ++ def scsi_device_iostat(args): ++ print_dict(rpc.ssam.scsi_device_iostat(args.client, ++ name=args.name, ++ scsi_tgt_num=int(args.scsi_tgt_num))) ++ ++ p = subparsers.add_parser('scsi_device_iostat', ++ help='Show iostat of scsi device', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('name', help='Name of controller', type=str) ++ p.add_argument('scsi_tgt_num', help='Target number', type=int) ++ p.set_defaults(func=scsi_device_iostat) ++ ++ @rpc.ssam.log_info ++ def device_pcie_list(args): ++ print_dict(rpc.ssam.device_pcie_list(args.client)) ++ ++ p = subparsers.add_parser('device_pcie_list', ++ help='Show storage device pcie list', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.set_defaults(func=device_pcie_list) ++ ++ @rpc.ssam.log_info ++ def fs_controller_create(args): ++ rpc.ssam.fs_controller_create(args.client, ++ dbdf=args.dbdf, ++ fs_name=args.fs_name, ++ name=args.name, ++ max_threads=args.max_threads) ++ ++ p = subparsers.add_parser('fs_controller_create', help='Create a new fs controller', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('dbdf', help='The pci dbdf of virtio fs controller', type=str) ++ p.add_argument('fs_name', help='Path to file system', type=str) ++ p.add_argument('name', help='Name of fs controller', type=str) ++ p.add_argument('-t', '--max_threads', help="Max threads of fs controller", type=int, required=False) ++ p.set_defaults(func=fs_controller_create) ++ ++ @rpc.ssam.log_info ++ def fs_controller_delete(args): ++ rpc.ssam.fs_controller_delete(args.client, ++ name=args.name, ++ force=args.force) ++ ++ p = subparsers.add_parser('fs_controller_delete', ++ help='Delete a fs controller', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('name', help='Name of fs controller', type=str) ++ p.add_argument('-f', '--force', dest='force', action='store_true', help="Force to delete when io exists") ++ p.set_defaults(force=False) ++ p.set_defaults(func=fs_controller_delete) ++ ++ @rpc.ssam.log_info ++ def fs_controller_list(args): ++ print_dict(rpc.ssam.fs_controller_list(args.client, ++ name=args.name)) ++ ++ p = 
subparsers.add_parser('fs_controller_list', ++ help='Get fs_controller info', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('-n', '--name', help='Name of controller', required=False) ++ p.set_defaults(func=fs_controller_list) ++ ++ @rpc.ssam.log_info ++ def fs_device_iostat(args): ++ print_dict(rpc.ssam.fs_device_iostat(args.client, ++ name=args.name)) ++ ++ p = subparsers.add_parser('fs_device_iostat', ++ help='Show iostat of fs device', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('-n', '--name', help='Name of controller', type=str, required=False) ++ p.set_defaults(func=fs_device_iostat) ++ ++ return parser ++ ++ ++if __name__ == "__main__": ++ def call_rpc_func(args): ++ args.func(args) ++ check_called_name(args.called_rpc_name) ++ ++ def check_called_name(name): ++ if name in deprecated_aliases: ++ print("{} is deprecated, use {} instead.".format(name, deprecated_aliases[name]), file=sys.stderr) ++ ++ parser = init_rpc_func() ++ args = parser.parse_args() ++ ++ if sys.stdin.isatty() and not hasattr(args, 'func'): ++ # No arguments and no data piped through stdin ++ parser.print_help() ++ exit(1) ++ ++ if args.called_rpc_name != "get_version": ++ args.client = rpc.client.JSONRPCClient(args.server_addr, args.port, args.timeout, ++ log_level=getattr(logging, args.verbose.upper()), ++ conn_retries=args.conn_retries) ++ ++ try: ++ call_rpc_func(args) ++ except JSONRPCException as ex: ++ print(ex.message) ++ exit(1) +diff --git a/scripts/rpc/__init__.py b/scripts/rpc/__init__.py +index e8fa41e..1c037d9 100644 +--- a/scripts/rpc/__init__.py ++++ b/scripts/rpc/__init__.py +@@ -22,6 +22,7 @@ from . import pmem + from . import subsystem + from . import trace + from . import vhost ++from . import ssam + from . import vmd + from . import sock + from . import client as rpc_client +diff --git a/scripts/rpc/ssam.py b/scripts/rpc/ssam.py +new file mode 100644 +index 0000000..7b5ab6a +--- /dev/null ++++ b/scripts/rpc/ssam.py +@@ -0,0 +1,305 @@ ++from .helpers import deprecated_alias ++from getpass import getuser ++ ++ ++def log_command_info(client, event): ++ """Log event info. ++ Args: ++ user_name: name of the local user issuing the command ++ event: name of the invoked RPC method ++ src_addr: source address of the command ++ """ ++ params = { ++ 'user_name': getuser(), ++ 'event': event, ++ 'src_addr': "localhost", ++ } ++ return client.call('log_command_info', params) ++ ++ ++def log_info(func): ++ def wrapper_log_info(arg, *args, **kw): ++ log_command_info(arg.client, func.__name__) ++ return func(arg, *args, **kw) ++ return wrapper_log_info ++ ++ ++def create_blk_controller(client, dev_name, index, readonly=None, serial=None): ++ """Create ssam BLK controller. ++ Args: ++ dev_name: device name to add to controller ++ index: function id or dbdf of PCI device ++ readonly: set controller as read-only ++ serial: set volume id ++ """ ++ params = { ++ 'dev_name': dev_name, ++ 'index': index, ++ } ++ if readonly: ++ params['readonly'] = readonly ++ if serial: ++ params['serial'] = serial ++ return client.call('create_blk_controller', params) ++ ++ ++def get_controllers(client, function_id=None, dbdf=None): ++ """Get information about configured ssam controllers. ++ ++ Args: ++ function_id: function id of PCI device ++ dbdf: dbdf of PCI device ++ ++ Returns: ++ List of ssam controllers. 
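++ ++ Example (illustrative values): ++ get_controllers(client, function_id=16) ++ get_controllers(client, dbdf="0000:01:02.0") 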
++ """ ++ params = {} ++ if function_id is not None: ++ params['function_id'] = function_id ++ if dbdf is not None: ++ params['dbdf'] = dbdf ++ return client.call('get_controllers', params) ++ ++ ++def get_scsi_controllers(client, name=None): ++ """Get information about configured ssam controllers. ++ ++ Args: ++ name: name of scsi controller ++ ++ Returns: ++ List of ssam scsi controllers. ++ """ ++ params = {} ++ if name is not None: ++ params['name'] = name ++ return client.call('get_scsi_controllers', params) ++ ++ ++def delete_controller(client, index): ++ """Delete ssam controller from configuration. ++ Args: ++ index: function id or dbdf of PCI device ++ """ ++ params = {'index': index} ++ return client.call('delete_controller', params) ++ ++ ++def delete_scsi_controller(client, name): ++ """Delete ssam controller from configuration. ++ Args: ++ name: scsi controller name to be delete ++ """ ++ params = {'name': name} ++ return client.call('delete_scsi_controller', params) ++ ++ ++def controller_get_iostat(client, function_id=None, dbdf=None): ++ """Get iostat about configured ssam controllers. ++ ++ Args: ++ function_id: function id of PCI device ++ dbdf: dbdf of PCI device ++ ++ Returns: ++ List of iostat of ssam controllers. ++ """ ++ params = {} ++ if function_id is not None: ++ params['function_id'] = function_id ++ if dbdf is not None: ++ params['dbdf'] = dbdf ++ return client.call('controller_get_iostat', params) ++ ++ ++def controller_clear_iostat(client, type=None): ++ """Clear iostat about configured ssam controllers. ++ ++ Args: ++ type: blk,scsi,fs ++ ++ """ ++ params = {} ++ if type is not None: ++ params['type'] = type ++ return client.call('controller_clear_iostat', params) ++ ++ ++def bdev_resize(client, function_id, new_size_in_mb): ++ """Resize bdev in the system. ++ Args: ++ function_id: function id of PCI device ++ new_size_in_mb: new bdev size for resize operation. The unit is MiB ++ """ ++ params = { ++ 'function_id': function_id, ++ 'new_size_in_mb': new_size_in_mb, ++ } ++ return client.call('bdev_resize', params) ++ ++ ++def scsi_bdev_resize(client, name, tgt_id, new_size_in_mb): ++ """Resize scsi bdev in the system. ++ Args: ++ name: controller name of PCI device ++ tgt_id: tgt id of bdev ++ new_size_in_mb: new bdev size for resize operation. The unit is MiB ++ """ ++ params = { ++ 'name': name, ++ 'tgt_id': tgt_id, ++ 'new_size_in_mb': new_size_in_mb, ++ } ++ return client.call('scsi_bdev_resize', params) ++ ++ ++def bdev_aio_resize(client, name, new_size_in_mb): ++ """Resize aio bdev in the system. ++ Args: ++ name: aio bdev name ++ new_size_in_mb: new bdev size for resize operation. The unit is MiB ++ """ ++ params = { ++ 'name': name, ++ 'new_size_in_mb': new_size_in_mb, ++ } ++ return client.call('bdev_aio_resize', params) ++ ++ ++def os_ready(client): ++ """Write ready flag for booting OS. ++ ++ """ ++ return client.call('os_ready') ++ ++ ++def create_scsi_controller(client, dbdf, name): ++ """Create ssam scsi controller. ++ Args: ++ dbdf: the pci dbdf of virtio scsi controller ++ name: controller name to be create ++ """ ++ params = { ++ 'dbdf': dbdf, ++ 'name': name, ++ } ++ ++ return client.call('create_scsi_controller', params) ++ ++ ++def scsi_controller_add_target(client, name, scsi_tgt_num, bdev_name): ++ """Add LUN to ssam scsi controller target. 
++ Args: ++ name: controller name where add lun ++ scsi_tgt_num: target number to use ++ bdev_name: name of bdev to add to target ++ """ ++ params = { ++ 'name': name, ++ 'scsi_tgt_num': scsi_tgt_num, ++ 'bdev_name': bdev_name, ++ } ++ return client.call('scsi_controller_add_target', params) ++ ++ ++def scsi_controller_remove_target(client, name, scsi_tgt_num): ++ """Remove LUN from ssam scsi controller target. ++ Args: ++ name: controller name to remove lun ++ scsi_tgt_num: target number to use ++ """ ++ params = { ++ 'name': name, ++ 'scsi_tgt_num': scsi_tgt_num, ++ } ++ return client.call('scsi_controller_remove_target', params) ++ ++ ++def scsi_device_iostat(client, name, scsi_tgt_num): ++ """Get iostat about scsi device. ++ ++ Args: ++ name: controller name ++ scsi_tgt_num: target number ++ ++ Returns: ++ List of iostat of ssam controllers. ++ """ ++ params = { ++ 'name': name, ++ 'scsi_tgt_num': scsi_tgt_num, ++ } ++ return client.call('scsi_device_iostat', params) ++ ++ ++def device_pcie_list(client): ++ """Show storage device pcie list. ++ ++ Returns: ++ List of storage device pcie. ++ """ ++ ++ return client.call('device_pcie_list') ++ ++ ++def fs_controller_create(client, dbdf, fs_name, name, max_threads=None): ++ """Create ssam fs controller. ++ Args: ++ dbdf: the pci dbdf of virtio fs controller ++ fs_name: path to file system ++ name: fs controller name ++ max_threads: max threads of fs controller ++ """ ++ params = { ++ 'dbdf': dbdf, ++ 'fs_name': fs_name, ++ 'name': name, ++ } ++ if max_threads is not None: ++ params['max_threads'] = max_threads ++ ++ return client.call('fs_controller_create', params) ++ ++ ++def fs_controller_delete(client, name, force): ++ """Delete ssam fs controller. ++ Args: ++ name: fs controller name ++ force: if force delete controller ++ """ ++ params = { ++ 'name': name, ++ 'force': force, ++ } ++ return client.call('fs_controller_delete', params) ++ ++ ++def fs_controller_list(client, name=None): ++ """Get information about configured fs controllers. ++ ++ Args: ++ name: name of fs controller ++ ++ Returns: ++ List of ssam fs controllers. ++ """ ++ params = {} ++ if name is not None: ++ params['name'] = name ++ return client.call('fs_controller_list', params) ++ ++ ++def fs_device_iostat(client, name=None): ++ """Get iostat about fs device. ++ ++ Args: ++ name: controller name ++ ++ Returns: ++ List of iostat of ssam fs controllers. ++ """ ++ params = {} ++ if name is not None: ++ params['name'] = name ++ return client.call('fs_device_iostat', params) diff --git a/spdk-23.01.patch b/spdk-23.01.patch new file mode 100644 index 0000000000000000000000000000000000000000..a016b0ee75b80855800556f1d5f1705abc21afde --- /dev/null +++ b/spdk-23.01.patch @@ -0,0 +1,16412 @@ +diff --git a/CONFIG b/CONFIG +index 0f23513..f49af84 100644 +--- a/CONFIG ++++ b/CONFIG +@@ -57,7 +57,7 @@ CONFIG_TESTS=y + CONFIG_UNIT_TESTS=y + + # Build examples +-CONFIG_EXAMPLES=y ++CONFIG_EXAMPLES=n + + # Build apps + CONFIG_APPS=y +@@ -119,6 +119,9 @@ CONFIG_UBLK=n + # Build vhost library. + CONFIG_VHOST=y + ++# Build ssam library. ++CONFIG_SSAM=y ++ + # Build vhost initiator (Virtio) driver. 
+ CONFIG_VIRTIO=y + +diff --git a/app/Makefile b/app/Makefile +index 4d02c60..3c4ca1c 100644 +--- a/app/Makefile ++++ b/app/Makefile +@@ -12,6 +12,7 @@ DIRS-y += nvmf_tgt + DIRS-y += iscsi_tgt + DIRS-y += spdk_tgt + DIRS-y += spdk_lspci ++DIRS-y += ssam + ifneq ($(OS),Windows) + # TODO - currently disabled on Windows due to lack of support for curses + DIRS-y += spdk_top +diff --git a/app/ssam/Makefile b/app/ssam/Makefile +new file mode 100644 +index 0000000..639c62b +--- /dev/null ++++ b/app/ssam/Makefile +@@ -0,0 +1,31 @@ ++# SPDX-License-Identifier: BSD-3-Clause ++# Copyright (C) 2021-2025 Huawei Technologies Co. ++# All rights reserved. ++# ++ ++SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..) ++include $(SPDK_ROOT_DIR)/mk/spdk.common.mk ++include $(SPDK_ROOT_DIR)/mk/spdk.modules.mk ++ ++APP = ssam ++ ++C_SRCS := ssam.c ++ ++SYS_LIBS += -lcap ++SPDK_LIB_LIST = $(ALL_MODULES_LIST) event_ssam event ssam ++ ++ifeq ($(OS),Linux) ++SPDK_LIB_LIST += event_nbd ++endif ++ ++ifeq ($(SPDK_ROOT_DIR)/lib/env_dpdk,$(CONFIG_ENV)) ++SPDK_LIB_LIST += env_dpdk_rpc ++endif ++ ++include $(SPDK_ROOT_DIR)/mk/spdk.app.mk ++ ++install: $(APP) ++ $(INSTALL_APP) ++ ++uninstall: ++ $(UNINSTALL_APP) +diff --git a/app/ssam/ssam.c b/app/ssam/ssam.c +new file mode 100644 +index 0000000..7ad7cab +--- /dev/null ++++ b/app/ssam/ssam.c +@@ -0,0 +1,77 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. ++ */ ++ ++#include "spdk/ssam.h" ++#include "spdk/string.h" ++ ++#define IOVA_MODE_PA "pa" ++ ++static bool g_start_flag = false; ++ ++bool ++spdk_ssam_is_starting(void) ++{ ++ return g_start_flag; ++} ++ ++static void ++ssam_started(void *ctx) ++{ ++ spdk_ssam_poller_start(); ++ SPDK_NOTICELOG("hot restart %d\n", spdk_ssam_get_hot_restart()); ++ spdk_ssam_set_hot_restart(false); ++ g_start_flag = false; ++ SPDK_NOTICELOG("%s server started.\n", SSAM_SERVER_NAME); ++} ++ ++int ++main(int argc, char *argv[]) ++{ ++ struct spdk_app_opts opts = {}; ++ int rc; ++ int shm_id; ++ ++ spdk_app_opts_init(&opts, sizeof(opts)); ++ opts.name = SSAM_SERVER_NAME; ++ opts.iova_mode = IOVA_MODE_PA; ++ opts.num_entries = 0; ++ g_start_flag = true; ++ ++ rc = spdk_ssam_user_config_init(); ++ if (rc != 0) { ++ SPDK_ERRLOG("ssam user config init failed: %s\n", spdk_strerror(-rc)); ++ exit(rc); ++ } ++ ++ shm_id = shm_open(SSAM_SHM, O_RDWR, SSAM_SHM_PERMIT); ++ if (shm_id < 0) { ++ SPDK_NOTICELOG("ssam share memory hasn't been created.\n"); ++ g_start_flag = false; ++ } else { ++ spdk_ssam_set_shm_created(true); ++ SPDK_NOTICELOG("ssam share memory has been created.\n"); ++ } ++ ++ rc = spdk_ssam_rc_preinit(); ++ if (rc < 0) { ++ exit(rc); ++ } ++ ++ rc = spdk_app_parse_args(argc, argv, &opts, NULL, NULL, NULL, NULL); ++ if (rc != SPDK_APP_PARSE_ARGS_SUCCESS) { ++ SPDK_ERRLOG("spdk app parse args fail: %d\n", rc); ++ exit(rc); ++ } ++ spdk_ssam_set_hot_restart(opts.hot_restart); ++ ++ /* Blocks until the application is exiting */ ++ rc = spdk_app_start(&opts, ssam_started, NULL); ++ spdk_ssam_exit(); ++ ++ spdk_app_fini(); ++ SPDK_NOTICELOG("%s server exited.\n", SSAM_SERVER_NAME); ++ ++ return rc; ++} +diff --git a/configure b/configure +index 5935ff5..428dd66 100644 +--- a/configure ++++ b/configure +@@ -127,6 +127,8 @@ function usage() { + echo " --without-sma No path required." + echo " --with-avahi Build with Avahi mDNS discovery client service enabled in bdev-nvme module." + echo " --without-avahi No path required." 
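++ echo " --with-ssam-only Build ssam components only (requires --with-ssam)."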
++ echo " --with-ssam Support to build ssam for DPU storage accel." ++ echo " --without-ssam No path required." + echo "" + echo "Environment variables:" + echo "" +@@ -616,6 +618,15 @@ for i in "$@"; do + --without-fuse) + CONFIG[FUSE]=n + ;; ++ --with-ssam) ++ CONFIG[SSAM]=y ++ ;; ++ --without-ssam) ++ CONFIG[SSAM]=n ++ ;; ++ --with-ssam-only) ++ CONFIG[SSAM_ONLY]=y ++ ;; + --with-nvme-cuse) + CONFIG[NVME_CUSE]=y + ;; +@@ -1157,6 +1168,13 @@ if [[ "${CONFIG[FUSE]}" = "y" ]]; then + fi + fi + ++if [[ "${CONFIG[SSAM_ONLY]}" = "y" ]]; then ++ if [[ "${CONFIG[SSAM]}" = "n" ]]; then ++ echo "--with-ssam-only requires --with-ssam." ++ exit 1 ++ fi ++fi ++ + if [ "${CONFIG[CET]}" = "y" ]; then + if ! echo -e 'int main(void) { return 0; }\n' | "${BUILD_CMD[@]}" -fcf-protection - 2> /dev/null; then + echo "--enable-cet requires compiler/linker that supports CET." +diff --git a/doc/jsonrpc.md b/doc/jsonrpc.md +index a712d2a..ec12383 100644 +--- a/doc/jsonrpc.md ++++ b/doc/jsonrpc.md +@@ -493,7 +493,26 @@ Example response: + "bdev_lvol_create_lvstore", + "bdev_daos_delete", + "bdev_daos_create", +- "bdev_daos_resize" ++ "bdev_daos_resize", ++ "log_command_info", ++ "create_blk_controller", ++ "delete_controller", ++ "delete_scsi_controller", ++ "get_controllers", ++ "get_scsi_controllers", ++ "controller_get_iostat", ++ "blk_device_iostat", ++ "controller_clear_iostat", ++ "bdev_resize", ++ "scsi_bdev_resize", ++ "bdev_aio_resize", ++ "os_ready", ++ "os_not_ready", ++ "create_scsi_controller", ++ "scsi_controller_add_target", ++ "scsi_controller_remove_target", ++ "scsi_device_iostat", ++ "device_pcie_list" + ] + } + ~~~ +@@ -11937,3 +11956,845 @@ Example response: + } + ] + ~~~ ++ ++### log_command_info {#rpc_ssam_log_command_info} ++ ++Record operation logs ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++event | Required | string | Function id of PCI device ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "event": "create_blk_controller" ++ }, ++ "jsonrpc": "2.0", ++ "method": "log_command_info", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### create_blk_controller {#rpc_ssam_create_blk_controller} ++ ++Create ssam blk controller ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++dev_name | Required | string | Device name to add to controller ++index | Required | string | Function id or dbdf of PCI device ++readonly | Optional | bool | Set controller as read-only ++serial | Optional | string | Set volume id ++vqueue | Optional | number | Set virtio queue num ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "dev_name": "aio0", ++ "index": "16", ++ "readonly": true, ++ "serial": "blk_disk", ++ "vqueue": 16 ++ }, ++ "jsonrpc": "2.0", ++ "method": "create_blk_controller", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### delete_controller {#rpc_ssam_delete_controller} ++ ++Delete ssam controller from configuration ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++index | Required | string | Function id or dbdf of PCI device ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "index": "16" ++ }, ++ "jsonrpc": "2.0", ++ 
"method": "delete_controller", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### delete_scsi_controller {#rpc_ssam_delete_scsi_controller} ++ ++Delete ssam scsi controller from configuration ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++name | Required | string | Scsi controller name to be delete ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "name": "scsi0" ++ }, ++ "jsonrpc": "2.0", ++ "method": "delete_scsi_controller", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### get_controllers {#rpc_ssam_get_controllers} ++ ++Get information about configured ssam controllers ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++function_id | Optional | number | Function id of PCI device ++dbdf | Optional | string | Dbdf of PCI device ++ ++#### Result ++ ++List of ssam controllers. ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "function_id": null, ++ "dbdf": null ++ }, ++ "jsonrpc": "2.0", ++ "method": "get_controllers", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": [ ++ { ++ "ctrlr": "ssam.0", ++ "cpumask": "0x1", ++ "session_num": 1, ++ "backend_specific": { ++ "session": [ ++ { ++ "name": "ssam.0_blk_16", ++ "function_id": 16, ++ "queues": 8, ++ "block": { ++ "readonly": false, ++ "bdev": "Malloc0" ++ } ++ } ++ ] ++ } ++ } ++ ] ++} ++~~~ ++ ++### get_scsi_controllers {#rpc_ssam_get_scsi_controllers} ++ ++Get information about configured ssam scsi controllers ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++name | Optional | string | Name of scsi controller ++ ++#### Result ++ ++List of ssam scsi controllers. ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "name": "scsi0" ++ }, ++ "jsonrpc": "2.0", ++ "method": "get_scsi_controllers", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": [] ++} ++~~~ ++ ++### controller_get_iostat {#rpc_ssam_controller_get_iostat} ++ ++Get iostat about configured ssam controllers ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++function_id | Optional | number | Function id of PCI device ++dbdf | Optional | string | Dbdf of PCI device ++ ++#### Result ++ ++List of iostat of ssam controllers. 
++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "function_id": null, ++ "dbdf": null ++ }, ++ "jsonrpc": "2.0", ++ "method": "controller_get_iostat", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": { ++ "tick_rate": 100000000, ++ "dbdfs": [ ++ { ++ "name": "ssam.0", ++ "flight_io": 0, ++ "discard_io_num": 0, ++ "wait_io": 0, ++ "wait_io_r": 0 ++ }, ++ { ++ "function_id": 16, ++ "poll_lat": "0.000000268", ++ "bdev_name": "Malloc0", ++ "bytes_read": 0, ++ "num_read_ops": 0, ++ "bytes_written": 0, ++ "num_write_ops": 0, ++ "read_latency_ticks": 0, ++ "write_latency_ticks": 0, ++ "complete_read_ios": 0, ++ "err_read_ios": 0, ++ "complete_write_ios": 0, ++ "err_write_ios": 0, ++ "flush_ios": 0, ++ "complete_flush_ios": 0, ++ "err_flush_ios": 0, ++ "other_ios": 0, ++ "complete_other_ios": 0, ++ "err_other_ios": 0, ++ "fatal_ios": 0, ++ "io_retry": 0, ++ "counters": { ++ "start_count": 0, ++ "dma_count": 0, ++ "dma_complete_count": 0, ++ "bdev_count": 0, ++ "bdev_complete_count": 0 ++ }, ++ "details": { ++ "count": 0, ++ "total_lat": "0.000000000", ++ "dma_lat": "0.000000000", ++ "bdev_lat": "0.000000000", ++ "bdev_submit_lat": "0.000000000", ++ "complete_lat": "0.000000000", ++ "internal_lat": "0.000000000" ++ } ++ } ++ ] ++ } ++} ++~~~ ++ ++### blk_device_iostat {#rpc_ssam_blk_device_iostat} ++ ++Get iostat about blk device ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++index | Required | number | Function id or dbdf of PCI device ++tid | Optional | number | Tid ++vq_idx | Optional | number | Index of vqueue ++ ++#### Result ++ ++List of iostat of ssam blk controllers. ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "index": "16", ++ "tid": null, ++ "vq_idx": null ++ }, ++ "jsonrpc": "2.0", ++ "method": "blk_device_iostat", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": { ++ "tick_rate": 100000000, ++ "dbdfs": [ ++ { ++ "function_id": 16, ++ "poll_lat": "0.000000267", ++ "bdev_name": "Malloc0", ++ "bytes_read": 0, ++ "num_read_ops": 0, ++ "bytes_written": 0, ++ "num_write_ops": 0, ++ "read_latency_ticks": 0, ++ "write_latency_ticks": 0, ++ "complete_read_ios": 0, ++ "err_read_ios": 0, ++ "complete_write_ios": 0, ++ "err_write_ios": 0, ++ "flush_ios": 0, ++ "complete_flush_ios": 0, ++ "err_flush_ios": 0, ++ "other_ios": 0, ++ "complete_other_ios": 0, ++ "err_other_ios": 0, ++ "fatal_ios": 0, ++ "io_retry": 0, ++ "counters": { ++ "start_count": 0, ++ "dma_count": 0, ++ "dma_complete_count": 0, ++ "bdev_count": 0, ++ "bdev_complete_count": 0 ++ }, ++ "details": { ++ "count": 0, ++ "total_lat": "0.000000000", ++ "dma_lat": "0.000000000", ++ "bdev_lat": "0.000000000", ++ "bdev_submit_lat": "0.000000000", ++ "complete_lat": "0.000000000", ++ "internal_lat": "0.000000000" ++ } ++ } ++ ] ++ } ++} ++~~~ ++ ++### controller_clear_iostat {#rpc_ssam_controller_clear_iostat} ++ ++Clear iostat about configured ssam controllers ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "method": "controller_clear_iostat", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### bdev_resize {#rpc_ssam_bdev_resize} ++ ++Resize bdev in the system ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | 
----------- ++function_id | Required | number | Function id of PCI device ++new_size_in_mb | Required | number | New bdev size for resize operation. The unit is MiB ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "function_id": "16", ++ "new_size_in_mb": 1024 ++ }, ++ "jsonrpc": "2.0", ++ "method": "bdev_resize", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### scsi_bdev_resize {#rpc_ssam_scsi_bdev_resize} ++ ++Resize scsi bdev in the system ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++name | Required | string | Controller name of PCI device ++tgt_id | Required | number | Tgt id of bdev ++new_size_in_mb | Required | number | New bdev size for resize operation. The unit is MiB ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "name": "scsi0", ++ "function_id": "0", ++ "new_size_in_mb": 1024 ++ }, ++ "jsonrpc": "2.0", ++ "method": "scsi_bdev_resize", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### bdev_aio_resize {#rpc_ssam_bdev_aio_resize} ++ ++Resize aio bdev in the system ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++name | Required | string | Aio bdev name ++new_size_in_mb | Required | number | New bdev size for resize operation. The unit is MiB ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "name": "aio0", ++ "new_size_in_mb": 1024 ++ }, ++ "jsonrpc": "2.0", ++ "method": "bdev_aio_resize", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### os_ready {#rpc_os_ready} ++ ++Write ready flag for booting OS ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "method": "os_ready", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### os_not_ready {#rpc_set_os_status} ++ ++Write not ready flag for booting OS ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "method": "os_not_ready", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### create_scsi_controller {#rpc_ssam_create_scsi_controller} ++ ++Create ssam scsi controller ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++dbdf | Required | string | The pci dbdf of virtio scsi controller ++name | Required | string | Controller name to be create ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "dbdf": "0000:01:02.0", ++ "name": "scsi0" ++ }, ++ "jsonrpc": "2.0", ++ "method": "create_scsi_controller", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### scsi_controller_add_target {#rpc_ssam_scsi_controller_add_target} ++ ++Add LUN to ssam scsi controller target ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++name | Required | string | Controller name where add lun ++scsi_tgt_num | Required | number | Target number to use ++bdev_name | Required | string | Name of bdev to add to 
target ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "name": "scsi0", ++ "scsi_tgt_num": 0, ++ "bdev_name": "aio0" ++ }, ++ "jsonrpc": "2.0", ++ "method": "scsi_controller_add_target", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### scsi_controller_remove_target {#rpc_ssam_scsi_controller_remove_target} ++ ++Remove LUN from ssam scsi controller target ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++name | Required | string | Controller name where remove lun ++scsi_tgt_num | Required | number | Target number to use ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "name": "scsi0", ++ "scsi_tgt_num": 0 ++ }, ++ "jsonrpc": "2.0", ++ "method": "scsi_controller_remove_target", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### scsi_device_iostat {#rpc_ssam_scsi_device_iostat} ++ ++Get iostat about scsi device ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++name | Required | string | Controller name ++scsi_tgt_num | Required | number | Target number ++ ++#### Result ++ ++List of iostat of ssam scsi controllers. ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "name": "scsi0", ++ "scsi_tgt_num": 0 ++ }, ++ "jsonrpc": "2.0", ++ "method": "scsi_device_iostat", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": [] ++} ++~~~ ++ ++### device_pcie_list {#rpc_ssam_device_pcie_list} ++ ++Show storage device pcie list ++ ++#### Result ++ ++List of storage device pcie. 
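++
++These methods are served over SPDK's JSON-RPC Unix-domain socket, so any
++client able to write JSON-RPC requests to that socket can issue them. The
++sketch below is illustrative only and not part of this patch: it assumes the
++target is listening on the default socket path /var/tmp/spdk.sock (see
++SPDK_DEFAULT_RPC_ADDR in include/spdk/init.h) and simply writes the
++device_pcie_list request shown in the example that follows.
++
++~~~c
++#include <stdio.h>
++#include <string.h>
++#include <unistd.h>
++#include <sys/socket.h>
++#include <sys/un.h>
++
++int
++main(void)
++{
++	const char *req =
++		"{\"jsonrpc\": \"2.0\", \"method\": \"device_pcie_list\", \"id\": 1}";
++	struct sockaddr_un addr = { .sun_family = AF_UNIX };
++	char rsp[4096] = {0};
++	ssize_t n;
++	int fd;
++
++	/* Default RPC socket; adjust if the target was started with another address. */
++	strncpy(addr.sun_path, "/var/tmp/spdk.sock", sizeof(addr.sun_path) - 1);
++
++	fd = socket(AF_UNIX, SOCK_STREAM, 0);
++	if (fd < 0) {
++		perror("socket");
++		return 1;
++	}
++	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) != 0) {
++		perror("connect");
++		close(fd);
++		return 1;
++	}
++
++	if (write(fd, req, strlen(req)) < 0) {
++		perror("write");
++		close(fd);
++		return 1;
++	}
++
++	/* A real client would keep reading until the JSON response is complete. */
++	n = read(fd, rsp, sizeof(rsp) - 1);
++	if (n > 0) {
++		printf("%.*s\n", (int)n, rsp);
++	}
++
++	close(fd);
++	return 0;
++}
++~~~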
++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "method": "device_pcie_list", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": { ++ "device_pcie_list": [ ++ { ++ "index": 16, ++ "dbdf": "0001:75:02.0", ++ "type": "virtio-blk" ++ }, ++ { ++ "index": 17, ++ "dbdf": "0001:75:02.1", ++ "type": "virtio-blk" ++ }, ++ { ++ "index": 18, ++ "dbdf": "0001:75:02.2", ++ "type": "virtio-blk" ++ }, ++ { ++ "index": 19, ++ "dbdf": "0001:75:02.3", ++ "type": "virtio-blk" ++ }, ++ { ++ "index": 20, ++ "dbdf": "0001:75:02.4", ++ "type": "virtio-blk" ++ }, ++ { ++ "index": 21, ++ "dbdf": "0001:75:02.5", ++ "type": "virtio-blk" ++ }, ++ { ++ "index": 22, ++ "dbdf": "0001:75:02.6", ++ "type": "virtio-blk" ++ }, ++ { ++ "index": 23, ++ "dbdf": "0001:75:02.7", ++ "type": "virtio-blk" ++ } ++ ] ++ } ++} ++~~~ +diff --git a/examples/bdev/fio_plugin/fio_plugin.c b/examples/bdev/fio_plugin/fio_plugin.c +index 92690be..402206d 100644 +--- a/examples/bdev/fio_plugin/fio_plugin.c ++++ b/examples/bdev/fio_plugin/fio_plugin.c +@@ -265,7 +265,7 @@ spdk_fio_bdev_init_done(int rc, void *cb_arg) + { + *(bool *)cb_arg = true; + +- if (spdk_rpc_initialize(g_rpc_listen_addr) == 0) { ++ if (spdk_rpc_initialize(g_rpc_listen_addr, RPC_SELECT_INTERVAL) == 0) { + spdk_rpc_set_state(SPDK_RPC_RUNTIME); + } + } +diff --git a/examples/nvmf/nvmf/nvmf.c b/examples/nvmf/nvmf/nvmf.c +index 035170a..d684806 100644 +--- a/examples/nvmf/nvmf/nvmf.c ++++ b/examples/nvmf/nvmf/nvmf.c +@@ -675,7 +675,7 @@ nvmf_subsystem_init_done(int rc, void *cb_arg) + { + fprintf(stdout, "bdev subsystem init successfully\n"); + +- rc = spdk_rpc_initialize(g_rpc_addr); ++ rc = spdk_rpc_initialize(g_rpc_addr, RPC_SELECT_INTERVAL); + if (rc) { + spdk_app_stop(rc); + return; +diff --git a/include/spdk/env.h b/include/spdk/env.h +index bac976c..6844f13 100644 +--- a/include/spdk/env.h ++++ b/include/spdk/env.h +@@ -64,6 +64,7 @@ struct spdk_env_opts { + /** Opaque context for use of the env implementation. */ + void *env_context; + const char *vf_token; ++ bool hot_restart; + }; + + /** +diff --git a/include/spdk/event.h b/include/spdk/event.h +index be8c3ee..2143c30 100644 +--- a/include/spdk/event.h ++++ b/include/spdk/event.h +@@ -143,9 +143,10 @@ struct spdk_app_opts { + */ + bool disable_signal_handlers; + +- /* Hole at bytes 185-191. */ +- uint8_t reserved185[7]; ++ /* Hole at bytes 185-190. */ ++ uint8_t reserved185[6]; + ++ bool hot_restart; + /** + * The allocated size for the message pool used by the threading library. + * +@@ -239,6 +240,8 @@ void spdk_app_stop(int rc); + */ + int spdk_app_get_shm_id(void); + ++bool spdk_get_shutdown_sig_received(void); ++ + /** + * Convert a string containing a CPU core mask into a bitmask + * +diff --git a/include/spdk/init.h b/include/spdk/init.h +index 3bba865..a9db632 100644 +--- a/include/spdk/init.h ++++ b/include/spdk/init.h +@@ -18,6 +18,7 @@ extern "C" { + #endif + + #define SPDK_DEFAULT_RPC_ADDR "/var/tmp/spdk.sock" ++#define RPC_SELECT_INTERVAL 4000 /* 4ms */ + + /** + * Create the SPDK JSON-RPC server and listen at the provided address. The RPC server is optional and is +@@ -27,7 +28,7 @@ extern "C" { + * + * \return Negated errno on failure. 0 on success. 
+ */ +-int spdk_rpc_initialize(const char *listen_addr); ++int spdk_rpc_initialize(const char *listen_addr, int internval); + + /** + * Shut down the SPDK JSON-RPC target +@@ -72,6 +73,10 @@ typedef void (*spdk_subsystem_fini_fn)(void *ctx); + */ + void spdk_subsystem_fini(spdk_subsystem_fini_fn cb_fn, void *cb_arg); + ++void spdk_ssam_set_hot_restart(bool value); ++ ++bool spdk_ssam_get_hot_restart(void); ++ + #ifdef __cplusplus + } + #endif +diff --git a/include/spdk/ssam.h b/include/spdk/ssam.h +new file mode 100644 +index 0000000..7225023 +--- /dev/null ++++ b/include/spdk/ssam.h +@@ -0,0 +1,212 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. ++ */ ++ ++#ifndef SSAM_H ++#define SSAM_H ++ ++#include ++ ++#include "spdk/stdinc.h" ++#include "spdk/cpuset.h" ++#include "spdk/json.h" ++#include "spdk/thread.h" ++#include "spdk/event.h" ++ ++#include "../../lib/ssam/ssam_driver/dpak_ssam.h" ++ ++#ifdef DEBUG ++#define ASSERT(f) assert(f) ++#else ++#define ASSERT(f) ((void)0) ++#endif ++ ++#define SPDK_INVALID_TID UINT16_MAX ++#define SPDK_SESSION_TYPE_MAX_LEN 64 ++ ++#define SPDK_SESSION_TYPE_BLK "blk" ++#define SPDK_SESSION_TYPE_SCSI "scsi" ++ ++#define SSAM_SHM "ssam_shm" ++#define SSAM_SHM_PERMIT 0640 ++#define SSAM_STORAGE_READY_FILE "/proc/sdi_storage/storage_ready" ++ ++enum virtio_type { ++ VIRTIO_TYPE_UNKNOWN, ++ VIRTIO_TYPE_BLK, ++ VIRTIO_TYPE_SCSI, ++}; ++ ++/** ++ * ssam subsystem init callback ++ * ++ * \param rc The preceding processing result, ++ * 0 on success, negative errno on error. ++ */ ++typedef void (*spdk_ssam_init_cb)(int rc); ++ ++/** ++ * ssam subsystem fini callback ++ */ ++typedef void (*spdk_ssam_fini_cb)(void); ++ ++/** ++ * ssam dump config json ++ */ ++void spdk_ssam_config_json(struct spdk_json_write_ctx *w); ++ ++/** ++ * Check if ssam support the global vf id. ++ * ++ * \param gfunc_id ssam global vf id. ++ * ++ * \return -EINVAL indicate gfunc_id invalid, -ENODEV indicate no such vf or ++ * 0 indicate gfunc_id valid. ++ */ ++int ssam_check_gfunc_id(uint16_t gfunc_id); ++ ++/** ++ * Find a ssam session by global vf id. ++ * ++ * \param gfunc_id ssam global vf id. ++ * ++ * \return ssam session or NULL indicate not find. ++ */ ++struct spdk_ssam_session *ssam_session_find(uint16_t gfunc_id); ++ ++/** ++ * Get gfunc id by controller name. ++ * ++ * \param name controller name. ++ * ++ * \return gfunc id or SPDK_INVALID_GFUNC_ID gfunc id not find. ++ */ ++uint16_t ssam_get_gfunc_id_by_name(char *name); ++ ++/** ++ * Get the next ssam device. If there's no more devices to iterate ++ * through, NULL will be returned. ++ * ++ * \param smdev ssam device. If NULL, this function will return the ++ * very first device. ++ * ++ * \return smdev ssam device or NULL indicate no more devices ++ */ ++struct spdk_ssam_dev *ssam_dev_next(const struct spdk_ssam_dev *smdev); ++ ++/** ++ * Lock the global ssam mutex synchronizing all the ssam device accesses. ++ */ ++void ssam_lock(void); ++ ++/** ++ * Lock the global ssam mutex synchronizing all the ssam device accesses. ++ * ++ * \return 0 if the mutex could be locked immediately, negative errno otherwise. ++ */ ++int ssam_trylock(void); ++ ++/** ++ * Unlock the global ssam mutex. ++ */ ++void ssam_unlock(void); ++ ++/** ++ * \param smsession ssam session. ++ * \param arg user-provided parameter. 
++ * ++ * \return 0 on success, negative if failed ++ */ ++typedef int (*spdk_ssam_session_fn)(struct spdk_ssam_session *smsession, void **arg); ++ ++/** ++ * \param smsession ssam session. ++ * \param arg user-provided parameter. ++ */ ++typedef void (*spdk_ssam_session_cpl_fn)(struct spdk_ssam_session *smsession, void **arg); ++ ++/** ++ * \param arg user-provided parameter. ++ * \param rsp spdk_ssam_session_fn call back response value, 0 success, negative if failed. ++ */ ++typedef void (*spdk_ssam_session_rsp_fn)(void *arg, int rsp); ++ ++struct spdk_ssam_session_reg_info { ++ char type_name[SPDK_SESSION_TYPE_MAX_LEN]; ++ spdk_ssam_session_rsp_fn rsp_fn; ++ void *rsp_ctx; ++ uint16_t gfunc_id; ++ uint16_t tid; ++ uint16_t queues; ++ const struct spdk_ssam_session_backend *backend; ++ uint32_t session_ctx_size; ++ char *name; ++ char *dbdf; ++}; ++ ++/** ++ * Construct a ssam blk device. This will create a ssam ++ * blk device and then create a session. Creating the smdev will ++ * start an I/O poller and hog a CPU. If already exist a ssam ++ * blk device, then it will only create a session to this device. ++ * All sessions in the same device share one I/O poller and one CPU. ++ * ssam blk device is tightly associated with given SPDK bdev. ++ * Given bdev can not be changed, unless it has been hotremoved. This ++ * would result in all I/O failing with virtio VIRTIO_BLK_S_IOERR ++ * error code. ++ * ++ * This function is thread-safe. ++ * ++ * \param info session register information. ++ * \param dev_name bdev name to associate with this vhost device ++ * \param readonly if set, all writes to the device will fail with ++ * VIRTIO_BLK_S_IOERR error code. ++ * \param serial means volume id. ++ * ++ * \return 0 on success, negative errno on error. ++ */ ++int ssam_blk_construct(struct spdk_ssam_session_reg_info *info, ++ const char *dev_name, bool readonly, char *serial); ++ ++/** ++ * ssam user config init. ++ */ ++int spdk_ssam_user_config_init(void); ++ ++/** ++ * ssam get tid which has minimum device. 
++ */ ++uint16_t ssam_get_tid(void); ++ ++void spdk_ssam_exit(void); ++ ++void spdk_ssam_subsystem_fini(spdk_ssam_fini_cb fini_cb); ++ ++void spdk_ssam_subsystem_init(spdk_ssam_init_cb init_cb); ++ ++int ssam_scsi_construct(struct spdk_ssam_session_reg_info *info); ++ ++int ssam_scsi_dev_add_tgt(struct spdk_ssam_session *smsession, int target_num, ++ const char *bdev_name); ++ ++int ssam_scsi_dev_remove_tgt(struct spdk_ssam_session *smsession, ++ unsigned scsi_tgt_num, spdk_ssam_session_rsp_fn cb_fn, void *cb_arg); ++ ++void spdk_ssam_set_shm_created(bool shm_created); ++ ++bool spdk_ssam_get_shm_created(void); ++ ++void spdk_ssam_poller_start(void); ++ ++void ssam_deinit_device_pcie_list(void); ++ ++int ssam_init_device_pcie_list(void); ++ ++bool spdk_ssam_is_starting(void); ++ ++void ssam_dump_device_pcie_list(struct spdk_json_write_ctx *w); ++ ++uint32_t ssam_get_device_pcie_list_size(void); ++ ++#endif /* SSAM_H */ +diff --git a/lib/Makefile b/lib/Makefile +index 5cf00b8..9aefe94 100644 +--- a/lib/Makefile ++++ b/lib/Makefile +@@ -20,6 +20,7 @@ endif + DIRS-$(CONFIG_OCF) += env_ocf + DIRS-$(CONFIG_IDXD) += idxd + DIRS-$(CONFIG_VHOST) += vhost ++DIRS-$(CONFIG_SSAM) += ssam + DIRS-$(CONFIG_VIRTIO) += virtio + DIRS-$(CONFIG_VBDEV_COMPRESS) += reduce + DIRS-$(CONFIG_RDMA) += rdma +diff --git a/lib/bdev/bdev.c b/lib/bdev/bdev.c +index c7bd1a3..0e34001 100644 +--- a/lib/bdev/bdev.c ++++ b/lib/bdev/bdev.c +@@ -23,6 +23,7 @@ + #include "spdk/bdev_module.h" + #include "spdk/log.h" + #include "spdk/string.h" ++#include "spdk/event.h" + + #include "bdev_internal.h" + #include "spdk_internal/trace_defs.h" +@@ -645,6 +646,9 @@ bdev_ok_to_examine(struct spdk_bdev *bdev) + static void + bdev_examine(struct spdk_bdev *bdev) + { ++ if (spdk_ssam_get_hot_restart() == true) { ++ return; ++ } + struct spdk_bdev_module *module; + uint32_t action; + +@@ -3207,6 +3211,7 @@ bdev_channel_destroy_resource(struct spdk_bdev_channel *ch) + { + struct spdk_bdev_shared_resource *shared_resource; + struct lba_range *range; ++ struct spdk_bdev_io *bdev_io, *tmp; + + bdev_free_io_stat(ch->stat); + #ifdef SPDK_CONFIG_VTUNE +@@ -3222,6 +3227,11 @@ bdev_channel_destroy_resource(struct spdk_bdev_channel *ch) + spdk_put_io_channel(ch->channel); + + shared_resource = ch->shared_resource; ++ ch->shared_resource = NULL; ++ ++ TAILQ_FOREACH_SAFE(bdev_io, &ch->io_submitted, internal.ch_link, tmp) { ++ spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_ABORTED); ++ } + + assert(TAILQ_EMPTY(&ch->io_locked)); + assert(TAILQ_EMPTY(&ch->io_submitted)); +@@ -6466,6 +6476,15 @@ spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status sta + struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch; + struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource; + ++ if (spdk_unlikely(spdk_get_shutdown_sig_received())) { ++ /* ++ * In the hot restart process, when this callback is triggered, ++ * the bdev buf memory may have been released. ++ * Therefore, do not need to continue. 
++ */ ++ return; ++ } ++ + bdev_io->internal.status = status; + + if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_RESET)) { +diff --git a/lib/event/app.c b/lib/event/app.c +index 72ff985..7623301 100644 +--- a/lib/event/app.c ++++ b/lib/event/app.c +@@ -62,6 +62,12 @@ spdk_app_get_shm_id(void) + return g_spdk_app.shm_id; + } + ++bool ++spdk_get_shutdown_sig_received(void) ++{ ++ return g_shutdown_sig_received; ++} ++ + /* append one empty option to indicate the end of the array */ + static const struct option g_cmdline_options[] = { + #define CONFIG_FILE_OPT_IDX 'c' +@@ -130,6 +136,8 @@ static const struct option g_cmdline_options[] = { + {"vfio-vf-token", required_argument, NULL, ENV_VF_TOKEN_OPT_IDX}, + #define MSG_MEMPOOL_SIZE_OPT_IDX 270 + {"msg-mempool-size", required_argument, NULL, MSG_MEMPOOL_SIZE_OPT_IDX}, ++#define HOT_RESTART_OPT_IDX 271 ++ {"hot-restart", no_argument, NULL, HOT_RESTART_OPT_IDX}, + }; + + static void +@@ -274,7 +282,7 @@ app_start_rpc(int rc, void *arg1) + + spdk_rpc_set_allowlist(g_spdk_app.rpc_allowlist); + +- rc = spdk_rpc_initialize(g_spdk_app.rpc_addr); ++ rc = spdk_rpc_initialize(g_spdk_app.rpc_addr, RPC_SELECT_INTERVAL); + if (rc) { + spdk_app_stop(rc); + return; +@@ -342,6 +350,7 @@ app_setup_env(struct spdk_app_opts *opts) + env_opts.env_context = opts->env_context; + env_opts.iova_mode = opts->iova_mode; + env_opts.vf_token = opts->vf_token; ++ env_opts.hot_restart = opts->hot_restart; + + rc = spdk_env_init(&env_opts); + free(env_opts.pci_blocked); +@@ -473,7 +482,7 @@ bootstrap_fn(void *arg1) + } else { + spdk_rpc_set_allowlist(g_spdk_app.rpc_allowlist); + +- rc = spdk_rpc_initialize(g_spdk_app.rpc_addr); ++ rc = spdk_rpc_initialize(g_spdk_app.rpc_addr, RPC_SELECT_INTERVAL); + if (rc) { + spdk_app_stop(rc); + return; +@@ -520,6 +529,7 @@ app_copy_opts(struct spdk_app_opts *opts, struct spdk_app_opts *opts_user, size_ + SET_FIELD(log); + SET_FIELD(base_virtaddr); + SET_FIELD(disable_signal_handlers); ++ SET_FIELD(hot_restart); + SET_FIELD(msg_mempool_size); + SET_FIELD(rpc_allowlist); + SET_FIELD(vf_token); +@@ -882,6 +892,7 @@ usage(void (*app_usage)(void)) + printf(" --rpcs-allowed comma-separated list of permitted RPCS\n"); + printf(" --env-context Opaque context for use of the env implementation\n"); + printf(" --vfio-vf-token VF token (UUID) shared between SR-IOV PF and VFs for vfio_pci driver\n"); ++ printf(" --hot-restart enable hot restart\n"); + spdk_log_usage(stdout, "-L"); + spdk_trace_mask_usage(stdout, "-e"); + if (app_usage) { +@@ -1156,6 +1167,9 @@ spdk_app_parse_args(int argc, char **argv, struct spdk_app_opts *opts, + printf(SPDK_VERSION_STRING"\n"); + retval = SPDK_APP_PARSE_ARGS_HELP; + goto out; ++ case HOT_RESTART_OPT_IDX: ++ opts->hot_restart = true; ++ break; + case '?': + /* + * In the event getopt() above detects an option +diff --git a/lib/event/spdk_event.map b/lib/event/spdk_event.map +index 2d6d0dd..25b3a64 100644 +--- a/lib/event/spdk_event.map ++++ b/lib/event/spdk_event.map +@@ -16,6 +16,7 @@ + spdk_event_call; + spdk_framework_enable_context_switch_monitor; + spdk_framework_context_switch_monitor_enabled; ++ spdk_get_shutdown_sig_received; + + # Public scheduler functions + spdk_scheduler_set; +diff --git a/lib/init/json_config.c b/lib/init/json_config.c +index 0d39506..ae35763 100644 +--- a/lib/init/json_config.c ++++ b/lib/init/json_config.c +@@ -19,6 +19,8 @@ + #include "spdk_internal/event.h" + + #define SPDK_DEBUG_APP_CFG(...) 
SPDK_DEBUGLOG(app_config, __VA_ARGS__) ++#define SPDK_JSON_CONFIG_HOT_RESTART_INTERVAL 4 /* 4us */ ++#define SPDK_JSON_CONFIG_SELECT_INTERNAL 4000 /* 4ms */ + + /* JSON configuration format is as follows + * +@@ -331,6 +333,15 @@ app_json_config_load_subsystem_config_entry(void *_ctx) + uint32_t state_mask = 0, cur_state_mask, startup_runtime = SPDK_RPC_STARTUP | SPDK_RPC_RUNTIME; + int rc; + ++ if (spdk_get_shutdown_sig_received()) { ++ /* ++ * In the hot restart process, when this callback is triggered, ++ * rpc and thread may have been released. ++ * Therefore, dont continue. ++ */ ++ return; ++ } ++ + if (ctx->config_it == NULL) { + SPDK_DEBUG_APP_CFG("Subsystem '%.*s': configuration done.\n", ctx->subsystem_name->len, + (char *)ctx->subsystem_name->start); +@@ -557,6 +568,19 @@ err: + return rc; + } + ++static bool g_hot_restart_flag = false; ++bool ++spdk_ssam_get_hot_restart(void) ++{ ++ return g_hot_restart_flag; ++} ++ ++void ++spdk_ssam_set_hot_restart(bool value) ++{ ++ g_hot_restart_flag = value; ++} ++ + void + spdk_subsystem_init_from_json_config(const char *json_config_file, const char *rpc_addr, + spdk_subsystem_init_fn cb_fn, void *cb_arg, +@@ -564,6 +588,7 @@ spdk_subsystem_init_from_json_config(const char *json_config_file, const char *r + { + struct load_json_config_ctx *ctx = calloc(1, sizeof(*ctx)); + int rc; ++ int internal; + + assert(cb_fn); + if (!ctx) { +@@ -618,7 +643,12 @@ spdk_subsystem_init_from_json_config(const char *json_config_file, const char *r + goto fail; + } + +- rc = spdk_rpc_initialize(ctx->rpc_socket_path_temp); ++ if (spdk_ssam_get_hot_restart() == true) { ++ internal = SPDK_JSON_CONFIG_HOT_RESTART_INTERVAL; ++ } else { ++ internal = SPDK_JSON_CONFIG_SELECT_INTERNAL; ++ } ++ rc = spdk_rpc_initialize(ctx->rpc_socket_path_temp, internal); + if (rc) { + goto fail; + } +diff --git a/lib/init/rpc.c b/lib/init/rpc.c +index 04be7c6..23d6fa4 100644 +--- a/lib/init/rpc.c ++++ b/lib/init/rpc.c +@@ -11,8 +11,6 @@ + #include "spdk/log.h" + #include "spdk/rpc.h" + +-#define RPC_SELECT_INTERVAL 4000 /* 4ms */ +- + static struct spdk_poller *g_rpc_poller = NULL; + + static int +@@ -23,7 +21,7 @@ rpc_subsystem_poll(void *arg) + } + + int +-spdk_rpc_initialize(const char *listen_addr) ++spdk_rpc_initialize(const char *listen_addr, int internval) + { + int rc; + +@@ -48,7 +46,7 @@ spdk_rpc_initialize(const char *listen_addr) + spdk_rpc_set_state(SPDK_RPC_STARTUP); + + /* Register a poller to periodically check for RPCs */ +- g_rpc_poller = SPDK_POLLER_REGISTER(rpc_subsystem_poll, NULL, RPC_SELECT_INTERVAL); ++ g_rpc_poller = SPDK_POLLER_REGISTER(rpc_subsystem_poll, NULL, internval); + + return 0; + } +diff --git a/lib/init/spdk_init.map b/lib/init/spdk_init.map +index c6061c4..dc5bb0f 100644 +--- a/lib/init/spdk_init.map ++++ b/lib/init/spdk_init.map +@@ -12,6 +12,8 @@ + + spdk_rpc_initialize; + spdk_rpc_finish; ++ spdk_ssam_get_hot_restart; ++ spdk_ssam_set_hot_restart; + + local: *; + }; +diff --git a/lib/scsi/lun.c b/lib/scsi/lun.c +index a106182..fbfd58d 100644 +--- a/lib/scsi/lun.c ++++ b/lib/scsi/lun.c +@@ -10,6 +10,8 @@ + #include "spdk/thread.h" + #include "spdk/util.h" + #include "spdk/likely.h" ++#include "spdk/event.h" ++#include "spdk/bdev_module.h" + + static void scsi_lun_execute_tasks(struct spdk_scsi_lun *lun); + static void _scsi_lun_execute_mgmt_task(struct spdk_scsi_lun *lun); +@@ -342,6 +344,16 @@ _scsi_lun_hot_remove(void *arg1) + { + struct spdk_scsi_lun *lun = arg1; + ++ if (spdk_unlikely(spdk_get_shutdown_sig_received())) { ++ /* ++ * In 
the hot restart process, when this callback is triggered, ++ * the task and bdev_io memory may have been released. ++ * Therefore, outstanding task are not executed in this scenario. ++ */ ++ scsi_lun_notify_hot_remove(lun); ++ return; ++ } ++ + /* If lun->removed is set, no new task can be submitted to the LUN. + * Execute previously queued tasks, which will be immediately aborted. + */ +diff --git a/lib/ssam/Makefile b/lib/ssam/Makefile +new file mode 100644 +index 0000000..bf76d14 +--- /dev/null ++++ b/lib/ssam/Makefile +@@ -0,0 +1,26 @@ ++# SPDX-License-Identifier: BSD-3-Clause ++# Copyright (C) 2021-2025 Huawei Technologies Co. ++# All rights reserved. ++# ++ ++SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..) ++include $(SPDK_ROOT_DIR)/mk/spdk.common.mk ++ ++SO_VER := 1 ++SO_MINOR := 0 ++ ++CFLAGS += -I. -I../../dpdk/lib/eal/common ++CFLAGS += $(ENV_CFLAGS) ++ ++C_SRCS = ssam.c ssam_blk.c ssam_rpc.c \ ++ ssam_config.c ssam_scsi.c ssam_malloc.c ssam_device_pcie.c ++C_SRCS += ssam_driver/ssam_driver.c ++C_SRCS += ssam_driver/ssam_dbdf.c ++C_SRCS += ssam_driver/ssam_mempool.c ++C_SRCS += ssam_driver/ssam_driver_adapter.c ++ ++LIBNAME = ssam ++ ++SPDK_MAP_FILE = $(abspath $(CURDIR)/spdk_ssam.map) ++ ++include $(SPDK_ROOT_DIR)/mk/spdk.lib.mk +diff --git a/lib/ssam/spdk_ssam.map b/lib/ssam/spdk_ssam.map +new file mode 100644 +index 0000000..9bef6f9 +--- /dev/null ++++ b/lib/ssam/spdk_ssam.map +@@ -0,0 +1,16 @@ ++{ ++ global: ++ ++ # public functions ++ spdk_ssam_user_config_init; ++ spdk_ssam_exit; ++ spdk_ssam_subsystem_fini; ++ spdk_ssam_subsystem_init; ++ spdk_ssam_config_json; ++ spdk_ssam_set_shm_created; ++ spdk_ssam_get_shm_created; ++ spdk_ssam_poller_start; ++ spdk_ssam_rc_preinit; ++ ++ local: *; ++}; +diff --git a/lib/ssam/ssam.c b/lib/ssam/ssam.c +new file mode 100644 +index 0000000..3709b0e +--- /dev/null ++++ b/lib/ssam/ssam.c +@@ -0,0 +1,1713 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. ++ */ ++ ++#include ++#include ++ ++#include "spdk/scsi_spec.h" ++#include "spdk/scsi.h" ++#include "spdk/stdinc.h" ++#include "spdk/env.h" ++#include "spdk/likely.h" ++#include "spdk/string.h" ++#include "spdk/util.h" ++#include "spdk/memory.h" ++#include "spdk/barrier.h" ++#include "spdk/bdev_module.h" ++#include "spdk/bdev.h" ++#include "spdk/endian.h" ++ ++#include "ssam_internal.h" ++ ++#define SSAM_PF_NUM_MAX_VAL 31 ++#define SSAM_PF_PLUS_VF_NUM_MAX_VAL 4096 ++ ++#define INQUIRY_OFFSET(field) \ ++ offsetof(struct spdk_scsi_cdb_inquiry_data, field) + \ ++ sizeof(((struct spdk_scsi_cdb_inquiry_data *)0x0)->field) ++ ++#define IO_STUCK_TIMEOUT 120 ++#define SEND_EVENT_WAIT_TIME 10 ++#define VMIO_TYPE_VIRTIO_SCSI_CTRL 4 ++#define DEVICE_READY_TIMEOUT 15 ++#define DEVICE_READY_WAIT_TIME 100000 ++ ++bool g_ssam_subsystem_exit = false; ++ ++struct ssam_event_user_ctx { ++ bool session_freed; /* true if session has been freed */ ++ bool async_done; /* true if session event done */ ++ void *ctx; /* store user context pointer */ ++}; ++ ++struct ssam_session_fn_ctx { ++ /* Device session pointer obtained before enqueuing the event */ ++ struct spdk_ssam_session *smsession; ++ ++ spdk_ssam_session_rsp_fn *rsp_fn; ++ ++ void *rsp_ctx; ++ ++ /* User provided function to be executed on session's thread. */ ++ spdk_ssam_session_fn cb_fn; ++ /** ++ * User provided function to be called on the init thread ++ * after iterating through all sessions. 
++ */ ++ spdk_ssam_session_cpl_fn cpl_fn; ++ ++ /* Custom user context */ ++ struct ssam_event_user_ctx user_ctx; ++ ++ /* Session start event time */ ++ uint64_t start_tsc; ++ ++ bool need_async; ++ ++ int rsp; ++}; ++ ++/* ssam total infomation */ ++struct spdk_ssam_info { ++ ssam_mempool_t *mp[SSAM_MAX_CORE_NUM]; ++}; ++ ++static struct spdk_ssam_info g_ssam_info; ++ ++/* Thread performing all ssam management operations */ ++static struct spdk_thread *g_ssam_init_thread; ++ ++static TAILQ_HEAD(, spdk_ssam_dev) g_ssam_devices = ++ TAILQ_HEAD_INITIALIZER(g_ssam_devices); ++ ++static pthread_mutex_t g_ssam_mutex = PTHREAD_MUTEX_INITIALIZER; ++ ++/* Save cpu mask when ssam management thread started */ ++static struct spdk_cpuset g_ssam_core_mask; ++ ++/* Call back when ssam_fini complete */ ++static spdk_ssam_fini_cb g_ssam_fini_cpl_cb; ++ ++static int ssam_init(void); ++ ++static int ++ssam_sessions_init(struct spdk_ssam_session ***smsession) ++{ ++ *smsession = (struct spdk_ssam_session **)calloc( ++ SSAM_MAX_SESSION_PER_DEV, sizeof(struct spdk_ssam_session *)); ++ if (*smsession == NULL) { ++ SPDK_ERRLOG("calloc sessions failed\n"); ++ return -ENOMEM; ++ } ++ return 0; ++} ++ ++static int ++ssam_sessions_insert(struct spdk_ssam_session **smsessions, struct spdk_ssam_session *smsession) ++{ ++ uint16_t i = smsession->gfunc_id; ++ ++ if (smsessions[i] != NULL) { ++ SPDK_ERRLOG("smsessions already have such sesseion\n"); ++ return -ENOSPC; ++ } ++ ++ smsessions[i] = smsession; ++ ++ return 0; ++} ++ ++void ++ssam_sessions_remove(struct spdk_ssam_session **smsessions, struct spdk_ssam_session *smsession) ++{ ++ uint16_t i = smsession->gfunc_id; ++ ++ if (smsessions[i] == NULL) { ++ SPDK_WARNLOG("smsessions no such sesseion\n"); ++ return; ++ } ++ ++ smsessions[i] = NULL; ++ return; ++} ++ ++static struct spdk_ssam_session * ++ssam_sessions_first(int begin, struct spdk_ssam_session **smsessions) ++{ ++ int i; ++ ++ for (i = begin; i < SSAM_MAX_SESSION_PER_DEV; i++) { ++ if (smsessions[i] != NULL) { ++ return smsessions[i]; ++ } ++ } ++ return NULL; ++} ++ ++bool ++ssam_sessions_empty(struct spdk_ssam_session **smsessions) ++{ ++ struct spdk_ssam_session *session; ++ ++ session = ssam_sessions_first(0, smsessions); ++ if (session == NULL) { ++ return true; ++ } ++ ++ return false; ++} ++ ++struct spdk_ssam_session * ++ssam_sessions_next(struct spdk_ssam_session **smsessions, struct spdk_ssam_session *smsession) ++{ ++ if (smsession == NULL) { ++ return ssam_sessions_first(0, smsessions); ++ } ++ if (smsession->gfunc_id == SSAM_MAX_SESSION_PER_DEV) { ++ return NULL; ++ } ++ return ssam_sessions_first(smsession->gfunc_id + 1, smsessions); ++} ++ ++void ++ssam_session_insert_io_wait(struct spdk_ssam_session *smsession, ++ struct spdk_ssam_session_io_wait *io_wait) ++{ ++ TAILQ_INSERT_TAIL(&smsession->smdev->io_wait_queue, io_wait, link); ++ smsession->smdev->io_wait_cnt++; ++} ++ ++static void ++ssam_session_remove_io_wait(struct spdk_ssam_dev *smdev, ++ struct spdk_ssam_session_io_wait *session_io_wait) ++{ ++ TAILQ_REMOVE(&smdev->io_wait_queue, session_io_wait, link); ++ smdev->io_wait_cnt--; ++} ++ ++void ++ssam_session_insert_io_wait_r(struct spdk_ssam_dev *smdev, ++ struct spdk_ssam_session_io_wait_r *io_wait_r) ++{ ++ TAILQ_INSERT_TAIL(&smdev->io_wait_queue_r, io_wait_r, link); ++ smdev->io_wait_r_cnt++; ++} ++ ++static void ++ssam_session_remove_io_wait_r(struct spdk_ssam_dev *smdev, ++ struct spdk_ssam_session_io_wait_r *session_io_wait_r) ++{ ++ TAILQ_REMOVE(&smdev->io_wait_queue_r, 
session_io_wait_r, link); ++ smdev->io_wait_r_cnt--; ++} ++ ++void ++ssam_session_destroy(struct spdk_ssam_session *smsession) ++{ ++ if (smsession == NULL || smsession->smdev == NULL) { ++ return; ++ } ++ /* Remove smsession from the queue in advance to prevent access by the poller thread. */ ++ if (!ssam_sessions_empty(smsession->smdev->smsessions)) { ++ ssam_sessions_remove(smsession->smdev->smsessions, smsession); ++ } ++ /* The smdev poller is not deleted here, but at the end of the app. */ ++} ++ ++uint64_t ++ssam_get_diff_tsc(uint64_t tsc) ++{ ++ return spdk_get_ticks() - tsc; ++} ++ ++int ++ssam_check_gfunc_id(uint16_t gfunc_id) ++{ ++ enum ssam_device_type type; ++ ++ if (gfunc_id == SPDK_INVALID_GFUNC_ID) { ++ SPDK_ERRLOG("Check gfunc_id(%u) error\n", gfunc_id); ++ return -EINVAL; ++ } ++ ++ type = ssam_get_virtio_type(gfunc_id); ++ if (type >= SSAM_DEVICE_VIRTIO_MAX) { ++ SPDK_ERRLOG("Check gfunc_id(%u) virtio type(%d) error\n", gfunc_id, type); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ ++/* Find a tid which has minimum device */ ++static uint16_t ++ssam_get_min_payload_tid(uint16_t cpu_num) ++{ ++ if (cpu_num == 0) { ++ return SPDK_INVALID_TID; ++ } ++ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_dev *tmp = NULL; ++ /* All tid have been used, find a tid which has minimum device */ ++ uint32_t min = UINT32_MAX; ++ uint16_t tid = 0; ++ ++ TAILQ_FOREACH_SAFE(smdev, &g_ssam_devices, tailq, tmp) { ++ if (smdev->active_session_num < min) { ++ min = smdev->active_session_num; ++ tid = smdev->tid; ++ } ++ } ++ ++ return tid; ++} ++ ++/* Get a tid number */ ++uint16_t ++ssam_get_tid(void) ++{ ++ uint32_t cpu_num; ++ ++ cpu_num = spdk_cpuset_count(&g_ssam_core_mask); ++ if ((cpu_num == 0) || (cpu_num > UINT16_MAX)) { ++ /* If cpu_num > UINT16_MAX, the result of tid will overflow */ ++ SPDK_ERRLOG("CPU num %u not valid.\n", cpu_num); ++ return SPDK_INVALID_TID; ++ } ++ ++ return ssam_get_min_payload_tid((uint16_t)cpu_num); ++} ++ ++void ++ssam_lock(void) ++{ ++ pthread_mutex_lock(&g_ssam_mutex); ++} ++ ++int ++ssam_trylock(void) ++{ ++ return pthread_mutex_trylock(&g_ssam_mutex); ++} ++ ++void ++ssam_unlock(void) ++{ ++ pthread_mutex_unlock(&g_ssam_mutex); ++} ++ ++static struct spdk_ssam_session * ++ssam_session_find_in_dev(const struct spdk_ssam_dev *smdev, ++ uint16_t gfunc_id) ++{ ++ return smdev->smsessions[gfunc_id]; ++} ++ ++void ++ssam_dump_info_json(struct spdk_ssam_dev *smdev, uint16_t gfunc_id, ++ struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_session *smsession = NULL; ++ ++ spdk_json_write_named_array_begin(w, "session"); ++ if (gfunc_id == UINT16_MAX) { ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ while (smsession != NULL) { ++ smsession->backend->dump_info_json(smsession, w); ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ } else { ++ smsession = ssam_session_find_in_dev(smdev, gfunc_id); ++ smsession->backend->dump_info_json(smsession, w); ++ } ++ ++ spdk_json_write_array_end(w); ++} ++ ++const char * ++ssam_dev_get_name(const struct spdk_ssam_dev *smdev) ++{ ++ if (!smdev) { ++ return ""; ++ } ++ return smdev->name; ++} ++ ++const char * ++ssam_session_get_name(const struct spdk_ssam_session *smsession) ++{ ++ if (!smsession) { ++ return ""; ++ } ++ return smsession->name; ++} ++ ++struct spdk_ssam_dev * ++ssam_dev_next(const struct spdk_ssam_dev *smdev) ++{ ++ if (smdev == NULL) { ++ return TAILQ_FIRST(&g_ssam_devices); ++ } ++ ++ return TAILQ_NEXT(smdev, tailq); ++} ++ ++struct spdk_ssam_session * 
++ssam_session_find(uint16_t gfunc_id) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_dev *tmp = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ TAILQ_FOREACH_SAFE(smdev, &g_ssam_devices, tailq, tmp) { ++ smsession = ssam_session_find_in_dev(smdev, gfunc_id); ++ if (smsession != NULL) { ++ return smsession; ++ } ++ } ++ ++ return NULL; ++} ++ ++uint16_t ++ssam_get_gfunc_id_by_name(char *name) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_dev *tmp = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ uint16_t gfunc_id; ++ TAILQ_FOREACH_SAFE(smdev, &g_ssam_devices, tailq, tmp) { ++ if (smdev != NULL && smdev->active_session_num > 0) { ++ for (gfunc_id = 0; gfunc_id <= SSAM_PF_NUM_MAX_VAL; gfunc_id++) { ++ smsession = ssam_session_find_in_dev(smdev, gfunc_id); ++ if (smsession != NULL && strcmp(name, smsession->name) == 0) { ++ return gfunc_id; ++ } ++ } ++ } ++ } ++ ++ SPDK_WARNLOG("controller(%s) is not existed\n", name); ++ return SPDK_INVALID_GFUNC_ID; ++} ++ ++static struct spdk_ssam_dev * ++ssam_dev_find(uint16_t tid) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_dev *tmp = NULL; ++ ++ TAILQ_FOREACH_SAFE(smdev, &g_ssam_devices, tailq, tmp) { ++ if (smdev->tid == tid) { ++ return smdev; ++ } ++ } ++ ++ return NULL; ++} ++ ++int ++ssam_mount_normal(struct spdk_ssam_session *smsession, uint32_t lun_id) ++{ ++ uint16_t gfunc_id = smsession->gfunc_id; ++ uint16_t tid = gfunc_id % ssam_get_core_num(); ++ ++ return ssam_function_mount(gfunc_id, lun_id, SSAM_MOUNT_NORMAL, tid); ++} ++ ++int ++ssam_umount_normal(struct spdk_ssam_session *smsession, uint32_t lun_id) ++{ ++ int rc; ++ ++ rc = ssam_function_umount(smsession->gfunc_id, lun_id); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: function umount failed when add scsi tgt, %d.\n", smsession->name, rc); ++ return rc; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_remount_normal(struct spdk_ssam_session *smsession, uint32_t lun_id) ++{ ++ return ssam_function_mount(smsession->gfunc_id, lun_id, SSAM_MOUNT_NORMAL, smsession->smdev->tid); ++} ++ ++static int ++ssam_remove_session(struct spdk_ssam_session *smsession) ++{ ++ int rc; ++ ++ if (smsession->backend->remove_session != NULL) { ++ rc = smsession->backend->remove_session(smsession); ++ if (rc != 0) { ++ SPDK_ERRLOG("session: %s can not be removed, task cnt %d.\n", ++ smsession->name, smsession->task_cnt); ++ return rc; ++ } ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_dev_thread_exit(void *unused) ++{ ++ (void)unused; ++ spdk_thread_exit(spdk_get_thread()); ++} ++ ++static int ++ssam_tid_to_cpumask(uint16_t tid, struct spdk_cpuset *cpumask) ++{ ++ uint32_t core; ++ uint32_t lcore; ++ uint32_t cnt; ++ ++ for (lcore = 0, cnt = 0; lcore < SPDK_CPUSET_SIZE - 1; lcore++) { ++ if (spdk_cpuset_get_cpu(&g_ssam_core_mask, lcore)) { ++ if (cnt == tid) { ++ core = lcore; ++ spdk_cpuset_set_cpu(cpumask, core, true); ++ return 0; ++ } ++ cnt++; ++ } ++ } ++ ++ return -1; ++} ++ ++void ++ssam_session_start_done(struct spdk_ssam_session *smsession, int response) ++{ ++ if (response == 0) { ++ if (smsession->smdev->active_session_num == UINT32_MAX) { ++ SPDK_ERRLOG("smsession %s: active session num reached upper limit %u\n", ++ smsession->name, smsession->smdev->active_session_num); ++ return; ++ } ++ smsession->smdev->active_session_num++; ++ } ++} ++ ++void ++ssam_set_session_be_freed(void **ctx) ++{ ++ struct ssam_event_user_ctx *_ctx; ++ ++ if (ctx == NULL) { ++ return; ++ } ++ ++ _ctx = SPDK_CONTAINEROF(ctx, struct ssam_event_user_ctx, ctx); ++ 
_ctx->session_freed = true; ++} ++ ++void ++ssam_send_event_async_done(void **ctx) ++{ ++ struct ssam_event_user_ctx *_ctx; ++ ++ if (ctx == NULL) { ++ return; ++ } ++ ++ _ctx = SPDK_CONTAINEROF(ctx, struct ssam_event_user_ctx, ctx); ++ _ctx->async_done = true; ++} ++ ++void ++ssam_session_stop_done(struct spdk_ssam_session *smsession, int rsp, void **ctx) ++{ ++ if (rsp == 0) { ++ if (smsession->smdev->active_session_num > 0) { ++ smsession->smdev->active_session_num--; ++ } else { ++ SPDK_ERRLOG("smsession %s: active session num reached lower limit %u\n", ++ smsession->name, smsession->smdev->active_session_num); ++ } ++ } ++ /* Smdev cannot be free here */ ++ ++ /* Stop process need async */ ++ ssam_send_event_async_done(ctx); ++} ++ ++void ++ssam_session_unreg_response_cb(struct spdk_ssam_session *smsession) ++{ ++ smsession->rsp_fn = NULL; ++ smsession->rsp_ctx = NULL; ++} ++ ++static int ++ssam_dev_create_register(struct spdk_ssam_dev *smdev, uint16_t tid) ++{ ++ char name[NAME_MAX]; ++ struct spdk_cpuset cpumask; ++ int rc; ++ ++ smdev->tid = tid; ++ ++ rc = snprintf(name, NAME_MAX, "%s%u", "ssam.", smdev->tid); ++ if (rc < 0 || rc >= NAME_MAX) { ++ SPDK_ERRLOG("ssam dev name is too long, tid %u\n", tid); ++ return -EINVAL; ++ } ++ ++ spdk_cpuset_zero(&cpumask); ++ if (ssam_tid_to_cpumask(tid, &cpumask)) { ++ SPDK_ERRLOG("Can not find cpu for tid %u\n", tid); ++ return -EINVAL; ++ } ++ ++ smdev->name = strdup(name); ++ if (smdev->name == NULL) { ++ SPDK_ERRLOG("Failed to create name for ssam controller %s.\n", name); ++ return -EIO; ++ } ++ ++ smdev->thread = spdk_thread_create(smdev->name, &cpumask); ++ if (smdev->thread == NULL) { ++ SPDK_ERRLOG("Failed to create thread for ssam controller %s.\n", name); ++ free(smdev->name); ++ smdev->name = NULL; ++ return -EIO; ++ } ++ ++ rc = ssam_sessions_init(&smdev->smsessions); ++ if (rc != 0) { ++ return rc; ++ } ++ TAILQ_INSERT_TAIL(&g_ssam_devices, smdev, tailq); ++ TAILQ_INIT(&smdev->io_wait_queue); ++ TAILQ_INIT(&smdev->io_wait_queue_r); ++ ++ SPDK_NOTICELOG("Controller %s: new controller added, tid %u\n", smdev->name, tid); ++ ++ return 0; ++} ++ ++void ++ssam_dev_unregister(struct spdk_ssam_dev **dev) ++{ ++ struct spdk_ssam_dev *smdev = *dev; ++ struct spdk_thread *thread = smdev->thread; ++ ++ if (!ssam_sessions_empty(smdev->smsessions)) { ++ SPDK_NOTICELOG("Controller %s still has valid session.\n", ++ smdev->name); ++ return; ++ } ++ memset(smdev->smsessions, 0, SSAM_MAX_SESSION_PER_DEV * sizeof(struct spdk_ssam_session *)); ++ free(smdev->smsessions); ++ smdev->smsessions = NULL; ++ ++ /* Used for hot restart. 
*/ ++ if (smdev->stop_poller != NULL) { ++ spdk_poller_unregister(&smdev->stop_poller); ++ smdev->stop_poller = NULL; ++ } ++ ++ SPDK_NOTICELOG("Controller %s: removed\n", smdev->name); ++ ++ free(smdev->name); ++ smdev->name = NULL; ++ ssam_lock(); ++ TAILQ_REMOVE(&g_ssam_devices, smdev, tailq); ++ ssam_unlock(); ++ ++ free(smdev); ++ smdev = NULL; ++ *dev = NULL; ++ ++ spdk_thread_send_msg(thread, ssam_dev_thread_exit, NULL); ++ ++ return; ++} ++ ++static int ++ssam_init_session_fields(struct spdk_ssam_session_reg_info *info, ++ struct spdk_ssam_dev *smdev, struct spdk_ssam_session *smsession) ++{ ++ smsession->mp = g_ssam_info.mp[smdev->tid % ssam_get_core_num()]; ++ smsession->initialized = true; ++ smsession->registered = true; ++ smsession->thread = smdev->thread; ++ smsession->backend = info->backend; ++ smsession->smdev = smdev; ++ smsession->gfunc_id = info->gfunc_id; ++ smsession->started = false; ++ smsession->rsp_fn = info->rsp_fn; ++ smsession->rsp_ctx = info->rsp_ctx; ++ smsession->max_queues = info->queues; ++ smsession->queue_size = SPDK_SSAM_DEFAULT_VQ_SIZE; ++ if (info->name == NULL) { ++ smsession->name = spdk_sprintf_alloc("%s_%s_%d", smdev->name, info->type_name, info->gfunc_id); ++ } else { ++ smsession->name = strdup(info->name); ++ } ++ if (smsession->name == NULL) { ++ SPDK_ERRLOG("smsession name alloc failed\n"); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_add_session(struct spdk_ssam_session_reg_info *info, ++ struct spdk_ssam_dev *smdev, struct spdk_ssam_session **smsession) ++{ ++ struct spdk_ssam_session *l_stsession = NULL; ++ size_t with_ctx_len = sizeof(*l_stsession) + info->session_ctx_size; ++ int rc; ++ ++ if (smdev->active_session_num == SSAM_MAX_SESSION_PER_DEV) { ++ SPDK_ERRLOG("%s reached upper limit %u\n", smdev->name, SSAM_MAX_SESSION_PER_DEV); ++ return -EAGAIN; ++ } ++ ++ if (g_ssam_info.mp == NULL) { ++ SPDK_ERRLOG("No memory pool\n"); ++ return -ENOMEM; ++ } ++ ++ rc = posix_memalign((void **)&l_stsession, SPDK_CACHE_LINE_SIZE, with_ctx_len); ++ if (rc != 0) { ++ SPDK_ERRLOG("smsession alloc failed\n"); ++ return -ENOMEM; ++ } ++ memset(l_stsession, 0, with_ctx_len); ++ ++ rc = ssam_init_session_fields(info, smdev, l_stsession); ++ if (rc != 0) { ++ free(l_stsession); ++ l_stsession = NULL; ++ return rc; ++ } ++ ++ rc = ssam_sessions_insert(smdev->smsessions, l_stsession); ++ if (rc != 0) { ++ return rc; ++ } ++ *smsession = l_stsession; ++ if (smdev->type == VIRTIO_TYPE_UNKNOWN) { ++ smdev->type = info->backend->type; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_dev_register(struct spdk_ssam_dev **dev, uint16_t tid) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ int rc; ++ ++ smdev = calloc(1, sizeof(*smdev)); ++ if (smdev == NULL) { ++ SPDK_ERRLOG("Couldn't alloc device for tid %u.\n", tid); ++ return -1; ++ } ++ ++ rc = ssam_dev_create_register(smdev, tid); ++ if (rc != 0) { ++ free(smdev); ++ smdev = NULL; ++ return -1; ++ } ++ ++ *dev = smdev; ++ ++ return 0; ++} ++ ++int ++ssam_session_register(struct spdk_ssam_session_reg_info *info, ++ struct spdk_ssam_session **smsession) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ int rc; ++ ++ if (ssam_session_find(info->gfunc_id) && (strcmp(info->type_name, SPDK_SESSION_TYPE_BLK) != 0)) { ++ SPDK_ERRLOG("Session with function id %d already exists.\n", info->gfunc_id); ++ return -EEXIST; ++ } ++ ++ smdev = ssam_dev_find(info->tid); ++ if (smdev == NULL) { ++ /* The smdev has been started during process initialization. Do not need to start the poller here. 
*/ ++ SPDK_ERRLOG("No device with function id %d tid %u.\n", info->gfunc_id, info->tid); ++ return -ENODEV; ++ } ++ ++ rc = ssam_add_session(info, smdev, smsession); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_session_unregister(struct spdk_ssam_session *smsession, bool blk_force_delete) ++{ ++ int rc; ++ ++ if (smsession == NULL) { ++ SPDK_ERRLOG("smsession null.\n"); ++ return -EINVAL; ++ } ++ ++ if (smsession->pending_async_op_num != 0) { ++ SPDK_ERRLOG("[OFFLOAD_SNIC] %s has internal events(%d) and cannot be deleted.\n", ++ smsession->name, smsession->pending_async_op_num); ++ return -EBUSY; ++ } ++ ++ rc = ssam_remove_session(smsession); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_io_queue_handle(struct spdk_ssam_dev *smdev) ++{ ++ uint64_t count = 0; ++ uint64_t io_wait_cnt = smdev->io_wait_cnt; ++ while (count < io_wait_cnt) { ++ struct spdk_ssam_session_io_wait *io_wait = TAILQ_FIRST(&smdev->io_wait_queue); ++ ssam_session_remove_io_wait(smdev, io_wait); ++ if (io_wait->cb_fn != NULL) { ++ io_wait->cb_fn(io_wait->cb_arg); ++ } ++ count++; ++ } ++} ++ ++struct forward_ctx { ++ struct spdk_ssam_session *smsession; ++ struct ssam_request *io_req; ++}; ++ ++static void ++ssam_handle_forward_req(void *_ctx) ++{ ++ struct forward_ctx *ctx = (struct forward_ctx *)_ctx; ++ ctx->smsession->backend->request_worker(ctx->smsession, ctx->io_req); ++ free(ctx); ++} ++/* The resent request that is polled at the beginning of the hot restart is not the smsession of this smdev ++ * and needs to be forwarded to the corresponding smdev. ++ * If the forwarding is successful, true is returned. Otherwise, false is returned. ++ */ ++static bool ++ssam_dev_forward_req(struct ssam_request *io_req) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct forward_ctx *ctx = NULL; ++ int rc; ++ ssam_lock(); ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ if (smdev->smsessions[io_req->gfunc_id] != NULL && ++ smdev->smsessions[io_req->gfunc_id]->started == true) { ++ ctx = calloc(1, sizeof(struct forward_ctx)); ++ if (!ctx) { ++ SPDK_ERRLOG("%s: calloc failed.\n", smdev->name); ++ goto out; ++ } ++ ctx->smsession = smdev->smsessions[io_req->gfunc_id]; ++ ctx->io_req = io_req; ++ rc = spdk_thread_send_msg(smdev->smsessions[io_req->gfunc_id]->thread, ssam_handle_forward_req, ++ ctx); ++ if (rc) { ++ SPDK_ERRLOG("%s: send msg error %d.\n", smdev->name, rc); ++ free(ctx); ++ goto out; ++ } ++ ssam_unlock(); ++ return true; ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++out: ++ ssam_unlock(); ++ return false; ++} ++ ++struct ssam_dev_io_complete_arg { ++ struct spdk_ssam_dev *smdev; ++ struct ssam_io_response io_resp; ++}; ++ ++static void ++ssam_dev_io_complete_cb(void *arg) ++{ ++ struct ssam_dev_io_complete_arg *cb_arg = (struct ssam_dev_io_complete_arg *)arg; ++ int rc = ssam_io_complete(cb_arg->smdev->tid, &cb_arg->io_resp); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_dev_io_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(cb_arg->smdev, io_wait_r); ++ return; ++ } ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ++ssam_dev_io_complete(struct spdk_ssam_dev *smdev, struct ssam_request *io_req, bool success) ++{ ++ struct ssam_io_response io_resp; ++ struct ssam_virtio_res *virtio_res = 
(struct ssam_virtio_res *)&io_resp.data; ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ struct iovec io_vec; ++ struct virtio_scsi_cmd_resp resp = {0}; ++ enum ssam_device_type type; ++ uint8_t res_status; ++ int rc; ++ type = ssam_get_virtio_type(io_req->gfunc_id); ++ ++ if (success) { ++ switch (type) { ++ case SSAM_DEVICE_VIRTIO_BLK: ++ res_status = VIRTIO_BLK_S_OK; ++ break; ++ case SSAM_DEVICE_VIRTIO_SCSI: ++ res_status = VIRTIO_SCSI_S_OK; ++ break; ++ default: ++ res_status = 0; /* unknown type, maybe 0 means ok */ ++ } ++ } else { ++ SPDK_INFOLOG(ssam, "%s: io complete return error gfunc_id %u type %d.\n", ++ smdev->name, io_req->gfunc_id, type); ++ switch (type) { ++ case SSAM_DEVICE_VIRTIO_BLK: ++ res_status = VIRTIO_BLK_S_IOERR; ++ break; ++ case SSAM_DEVICE_VIRTIO_SCSI: ++ res_status = VIRTIO_SCSI_S_FAILURE; ++ break; ++ default: ++ res_status = 1; /* unknown type, maybe 1 means error */ ++ } ++ } ++ ++ memset(&io_resp, 0, sizeof(io_resp)); ++ io_resp.gfunc_id = io_req->gfunc_id; ++ io_resp.iocb_id = io_req->iocb_id; ++ io_resp.status = io_req->status; ++ io_resp.flr_seq = io_req->flr_seq; ++ io_resp.req = io_req; ++ ++ virtio_res->iovs = &io_vec; ++ if (type == SSAM_DEVICE_VIRTIO_SCSI && io_cmd->writable) { ++ virtio_res->iovs->iov_base = io_cmd->iovs[1].iov_base; ++ virtio_res->iovs->iov_len = io_cmd->iovs[1].iov_len; ++ } else { ++ virtio_res->iovs->iov_base = io_cmd->iovs[io_cmd->iovcnt - 1].iov_base; ++ virtio_res->iovs->iov_len = io_cmd->iovs[io_cmd->iovcnt - 1].iov_len; ++ } ++ virtio_res->iovcnt = 1; ++ if (type == SSAM_DEVICE_VIRTIO_SCSI && io_req->type != VMIO_TYPE_VIRTIO_SCSI_CTRL) { ++ resp.response = res_status; ++ virtio_res->rsp = &resp; ++ virtio_res->rsp_len = sizeof(struct virtio_scsi_cmd_resp); ++ } else { ++ virtio_res->rsp = &res_status; ++ virtio_res->rsp_len = sizeof(res_status); ++ } ++ ++ rc = ssam_io_complete(smdev->tid, &io_resp); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_dev_io_complete_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_dev_io_complete_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smdev; ++ cb_arg->io_resp = io_resp; ++ io_wait_r->cb_fn = ssam_dev_io_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smdev, io_wait_r); ++ } ++} ++ ++static void ++ssam_dev_io_request(struct spdk_ssam_dev *smdev, struct ssam_request *io_req) ++{ ++ struct spdk_ssam_session *smsession = NULL; ++ ++ SPDK_INFOLOG(ssam_blk_data, "handling io tid=%u gfunc_id=%u type=%d rw=%u vqid=%u reqid=%u.\n", ++ smdev->tid, io_req->gfunc_id, io_req->type, io_req->req.cmd.writable, ++ io_req->req.cmd.virtio.vq_idx, io_req->req.cmd.virtio.req_idx); ++ ++ smsession = smdev->smsessions[io_req->gfunc_id]; ++ if (smsession == NULL || smsession->started == false) { ++ if (!ssam_dev_forward_req(io_req)) { ++ SPDK_INFOLOG(ssam, "%s: not have gfunc_id %u yet in io request.\n", ++ smdev->name, io_req->gfunc_id); ++ ssam_dev_io_complete(smdev, io_req, false); ++ } ++ return; ++ } ++ ++ smsession->backend->request_worker(smsession, io_req); ++ return; ++} ++ ++static void ++ssam_io_wait_r_queue_handle(struct spdk_ssam_dev *smdev) ++{ ++ uint64_t count = 0; ++ uint64_t io_wait_r_cnt = smdev->io_wait_r_cnt > SSAM_MAX_REQ_POLL_SIZE ? 
SSAM_MAX_REQ_POLL_SIZE : ++ smdev->io_wait_r_cnt; ++ while (count < io_wait_r_cnt) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = TAILQ_FIRST(&smdev->io_wait_queue_r); ++ ssam_session_remove_io_wait_r(smdev, io_wait_r); ++ if (io_wait_r->cb_fn != NULL) { ++ io_wait_r->cb_fn(io_wait_r->cb_arg); ++ } ++ count++; ++ free(io_wait_r); ++ io_wait_r = NULL; ++ } ++} ++ ++static int ++ssam_dev_request_worker(void *arg) ++{ ++ int io_num; ++ struct ssam_request *io_req[SSAM_MAX_REQ_POLL_SIZE] = {0}; ++ struct spdk_ssam_dev *smdev = arg; ++ bool poll_busy_flag = false; ++ ++ if (spdk_unlikely(smdev->io_wait_r_cnt > 0)) { ++ ssam_io_wait_r_queue_handle(smdev); ++ poll_busy_flag = true; ++ } ++ ++ /* The I/O waiting due to insufficient memory needs to be processed first. */ ++ if (spdk_unlikely(smdev->io_wait_cnt > 0)) { ++ ssam_io_queue_handle(smdev); ++ return SPDK_POLLER_BUSY; ++ } ++ ++ io_num = ssam_request_poll(smdev->tid, SSAM_MAX_REQ_POLL_SIZE, io_req); ++ if ((io_num <= 0) || (io_num > SSAM_MAX_REQ_POLL_SIZE)) { ++ /* ++ * The rpc delete callback is registered when the bdev deleting. spdk_put_io_channel ++ * executed the RPC delete callback.The stdev_io_no_data_request function continuously ++ * determines whether to perform the spdk_put_io_channel operation to ensure that the ++ * deletion of the bdev does not time out. ++ */ ++ if (spdk_unlikely(smdev->io_wait_r_cnt > 0)) { ++ ssam_io_wait_r_queue_handle(smdev); ++ poll_busy_flag = true; ++ } ++ return poll_busy_flag == true ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE; ++ } ++ ++ if (spdk_unlikely(smdev->io_wait_r_cnt > 0)) { ++ ssam_io_wait_r_queue_handle(smdev); ++ } ++ ++ for (int i = 0; i < io_num; i++) { ++ ssam_dev_io_request(smdev, io_req[i]); ++ } ++ ++ return SPDK_POLLER_BUSY; ++} ++ ++static void ++ssam_dev_io_response(struct spdk_ssam_dev *smdev, const struct ssam_dma_rsp *dma_rsp) ++{ ++ struct spdk_ssam_session *smsession = NULL; ++ const struct spdk_ssam_dma_cb *dma_cb = (const struct spdk_ssam_dma_cb *)&dma_rsp->cb; ++ ++ SPDK_INFOLOG(ssam_blk_data, ++ "handle dma resp tid=%u gfunc_id=%u rw=%u vqid=%u task_idx=%u statuc=%u.\n", ++ smdev->tid, dma_cb->gfunc_id, dma_cb->req_dir, ++ dma_cb->vq_idx, dma_cb->task_idx, dma_cb->status); ++ ++ smsession = smdev->smsessions[dma_cb->gfunc_id]; ++ if (smsession == NULL) { ++ smdev->discard_io_num++; ++ SPDK_ERRLOG("smsessions not have gfunc_id %u yet in io response.\n", dma_cb->gfunc_id); ++ return; ++ } ++ ++ smsession->backend->response_worker(smsession, (void *)dma_rsp); ++ ++ return; ++} ++ ++static void ++ssam_dev_print_stuck_io(struct spdk_ssam_dev *smdev) ++{ ++ struct spdk_ssam_session *smsession = NULL; ++ int i; ++ ++ for (i = 0; i < SSAM_MAX_SESSION_PER_DEV; i++) { ++ smsession = smdev->smsessions[i]; ++ if (smsession == NULL) { ++ continue; ++ } ++ if (smsession->task_cnt > 0) { ++ SPDK_ERRLOG("%s: %d IO stuck for %ds\n", smsession->name, ++ smsession->task_cnt, IO_STUCK_TIMEOUT); ++ if (smsession->backend->print_stuck_io_info != NULL) { ++ smsession->backend->print_stuck_io_info(smsession); ++ } ++ } ++ } ++} ++ ++static void ++ssam_dev_io_stuck_check(struct spdk_ssam_dev *smdev) ++{ ++ uint64_t diff_tsc = spdk_get_ticks() - smdev->io_stuck_tsc; ++ ++ if (smdev->io_num == 0) { ++ smdev->io_stuck_tsc = spdk_get_ticks(); ++ return; ++ } ++ ++ if ((diff_tsc / IO_STUCK_TIMEOUT) > spdk_get_ticks_hz()) { ++ ssam_dev_print_stuck_io(smdev); ++ smdev->io_stuck_tsc = spdk_get_ticks(); ++ } ++} ++ ++void ++ssam_dev_io_dec(struct spdk_ssam_dev *smdev) ++{ ++ smdev->io_num--; ++} ++ 
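++/*
++ * ssam_dev_response_worker() below is the second of the two per-device
++ * pollers registered by ssam_dev_register_worker_poller(); its counterpart,
++ * ssam_dev_request_worker(), pulls new requests via ssam_request_poll().
++ * On each invocation the response worker:
++ *  - samples the time since its previous run into smdev->stat for the
++ *    device's poll statistics;
++ *  - drains DMA completions in batches of up to SSAM_MAX_RESP_POLL_SIZE
++ *    via ssam_dma_rsp_poll(), looping while full batches keep arriving;
++ *  - runs the stuck-I/O check when no completion is returned, and discards
++ *    batches larger than the number of I/Os known to be in flight (io_num);
++ *  - hands each completion to the owning session's backend through
++ *    ssam_dev_io_response().
++ */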
++static int ++ssam_dev_response_worker(void *arg) ++{ ++ int io_num; ++ struct spdk_ssam_dev *smdev = arg; ++ struct ssam_dma_rsp dma_rsp[SSAM_MAX_RESP_POLL_SIZE] = {0}; ++ bool poll_busy_flag = false; ++ ++ uint64_t ticks = spdk_get_ticks(); ++ if (smdev->stat.poll_cur_tsc == 0) { ++ smdev->stat.poll_cur_tsc = ticks; ++ } else { ++ smdev->stat.poll_tsc += ticks - smdev->stat.poll_cur_tsc; ++ smdev->stat.poll_count++; ++ smdev->stat.poll_cur_tsc = ticks; ++ } ++ ++ do { ++ io_num = ssam_dma_rsp_poll(smdev->tid, SSAM_MAX_RESP_POLL_SIZE, dma_rsp); ++ if (io_num <= 0 || io_num > SSAM_MAX_RESP_POLL_SIZE) { ++ ssam_dev_io_stuck_check(smdev); ++ return poll_busy_flag == true ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE; ++ } ++ ++ if (smdev->io_num < ((uint64_t)(uint32_t)io_num)) { ++ SPDK_ERRLOG("%s: DMA response IO num too much, should be %lu but %d\n", ++ smdev->name, smdev->io_num, io_num); ++ smdev->discard_io_num += io_num; ++ return poll_busy_flag == true ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE; ++ } ++ smdev->io_stuck_tsc = spdk_get_ticks(); ++ ++ for (int i = 0; i < io_num; i++) { ++ ssam_dev_io_response(smdev, dma_rsp + i); ++ } ++ poll_busy_flag = true; ++ } while (io_num == SSAM_MAX_RESP_POLL_SIZE); ++ ++ return SPDK_POLLER_BUSY; ++} ++ ++int ++ssam_dev_register_worker_poller(struct spdk_ssam_dev *smdev) ++{ ++ SPDK_NOTICELOG("%s: worker starting.\n", smdev->name); ++ if (smdev->requestq_poller == NULL) { ++ smdev->requestq_poller = SPDK_POLLER_REGISTER(ssam_dev_request_worker, smdev, 0); ++ if (smdev->requestq_poller == NULL) { ++ SPDK_WARNLOG("%s: stdev_request_worker start failed.\n", smdev->name); ++ return -1; ++ } ++ ++ SPDK_INFOLOG(ssam, "%s: started stdev_request_worker poller on lcore %d\n", ++ smdev->name, spdk_env_get_current_core()); ++ } ++ ++ if (smdev->responseq_poller == NULL) { ++ smdev->responseq_poller = SPDK_POLLER_REGISTER(ssam_dev_response_worker, smdev, 0); ++ if (smdev->responseq_poller == NULL) { ++ SPDK_WARNLOG("%s: stdev_response_worker start failed.\n", smdev->name); ++ return -1; ++ } ++ ++ SPDK_INFOLOG(ssam, "%s: started stdev_response_worker poller on lcore %d\n", ++ smdev->name, spdk_env_get_current_core()); ++ } ++ return 0; ++} ++ ++void ++ssam_dev_unregister_worker_poller(struct spdk_ssam_dev *smdev) ++{ ++ if (!ssam_sessions_empty(smdev->smsessions)) { ++ return; ++ } ++ ++ if (smdev->requestq_poller != NULL) { ++ spdk_poller_unregister(&smdev->requestq_poller); ++ smdev->requestq_poller = NULL; ++ } ++ ++ if (smdev->responseq_poller != NULL) { ++ spdk_poller_unregister(&smdev->responseq_poller); ++ smdev->responseq_poller = NULL; ++ } ++} ++/* When stopping the worker, need to stop the two pollers first */ ++/* and wait until all sessions are deleted, and then free smdev. */ ++static int ++ssam_dev_stop_poller(void *arg) ++{ ++ struct spdk_ssam_dev *smdev = arg; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ /* special processing is required for virtio-scsi, ++ * because In scsi scenarios, smsessions are not actively or passively removed. 
++ */
++ if (smdev->type == VIRTIO_TYPE_SCSI && smdev->active_session_num > 0) {
++ for (int i = 0; i < SSAM_MAX_SESSION_PER_DEV; i++) {
++ if (smdev->smsessions[i] != NULL) {
++ smsession = smdev->smsessions[i];
++ smsession->backend->remove_self(smsession); /* remove session */
++ }
++ }
++ }
++
++ /* Wait until all sessions have been removed */
++ if (smdev->active_session_num != 0) {
++ return SPDK_POLLER_BUSY;
++ }
++
++ /* Free the smdev resources */
++ ssam_dev_unregister(&smdev);
++
++ return SPDK_POLLER_BUSY;
++}
++
++static void
++ssam_dev_stop_worker_poller(void *args)
++{
++ struct spdk_ssam_dev *smdev = (struct spdk_ssam_dev *)args;
++
++ if (smdev->requestq_poller != NULL) {
++ spdk_poller_unregister(&smdev->requestq_poller);
++ smdev->requestq_poller = NULL;
++ }
++
++ if (smdev->responseq_poller != NULL) {
++ spdk_poller_unregister(&smdev->responseq_poller);
++ smdev->responseq_poller = NULL;
++ }
++
++ SPDK_NOTICELOG("%s: poller stopped.\n", smdev->name);
++ smdev->stop_poller = SPDK_POLLER_REGISTER(ssam_dev_stop_poller, smdev, 0);
++ if (smdev->stop_poller == NULL) {
++ SPDK_WARNLOG("%s: ssam_dev stop failed.\n", smdev->name);
++ }
++}
++/* When starting the worker, the two pollers need to be started first */
++static void
++ssam_dev_start_worker_poller(void *args)
++{
++ struct spdk_ssam_dev *smdev = (struct spdk_ssam_dev *)args;
++ ssam_dev_register_worker_poller(smdev);
++}
++
++static void
++ssam_send_event_response(struct ssam_session_fn_ctx *ev_ctx)
++{
++ if (ev_ctx->user_ctx.session_freed == true) {
++ goto out;
++ }
++
++ if (*ev_ctx->rsp_fn != NULL) {
++ (*ev_ctx->rsp_fn)(ev_ctx->rsp_ctx, ev_ctx->rsp);
++ *ev_ctx->rsp_fn = NULL;
++ }
++
++out:
++ /* ev_ctx was allocated by another thread */
++ free(ev_ctx);
++ ev_ctx = NULL;
++}
++
++static void
++ssam_check_send_event_timeout(struct ssam_session_fn_ctx *ev_ctx, spdk_msg_fn fn)
++{
++ uint64_t diff_tsc = spdk_get_ticks() - ev_ctx->start_tsc;
++ struct spdk_ssam_session *smsession = ev_ctx->smsession;
++
++ if ((diff_tsc / SEND_EVENT_WAIT_TIME) > spdk_get_ticks_hz()) {
++ /* On timeout, finish sending the message and end the process */
++ SPDK_ERRLOG("Send event to session %s time out.\n", smsession->name);
++ ev_ctx->rsp = -ETIMEDOUT;
++ ssam_send_event_response(ev_ctx);
++ return;
++ }
++
++ spdk_thread_send_msg(spdk_get_thread(), fn, (void *)ev_ctx);
++
++ return;
++}
++
++static void
++ssam_send_event_finish(void *ctx)
++{
++ struct ssam_session_fn_ctx *ev_ctx = ctx;
++ struct spdk_ssam_session *smsession = ev_ctx->smsession;
++
++ if ((ev_ctx->rsp == 0) && (ev_ctx->need_async) && (ev_ctx->user_ctx.async_done == false)) {
++ ssam_check_send_event_timeout(ev_ctx, ssam_send_event_finish);
++ return;
++ }
++
++ if (ssam_trylock() != 0) {
++ ssam_check_send_event_timeout(ev_ctx, ssam_send_event_finish);
++ return;
++ }
++
++ if (smsession->pending_async_op_num > 0) {
++ smsession->pending_async_op_num--;
++ } else {
++ SPDK_ERRLOG("[OFFLOAD_SNIC] smsession %s: internal error.\n", smsession->name);
++ }
++
++ /* If ev_ctx->cb_fn failed, ev_ctx->cpl_fn will not be executed */
++ if ((ev_ctx->rsp == 0) && (ev_ctx->cpl_fn != NULL)) {
++ ev_ctx->cpl_fn(smsession, &ev_ctx->user_ctx.ctx);
++ }
++
++ ssam_unlock();
++
++ ssam_send_event_response(ev_ctx);
++}
++
++static void
++ssam_send_event(void *ctx)
++{
++ struct ssam_session_fn_ctx *ev_ctx = ctx;
++ struct spdk_ssam_session *smsession = ev_ctx->smsession;
++
++ if (ssam_trylock() != 0) {
++ ssam_check_send_event_timeout(ev_ctx, ssam_send_event);
++ return;
++ }
++
++ if (smsession->initialized && (ev_ctx->cb_fn != NULL))
{ ++ ev_ctx->user_ctx.async_done = false; ++ ev_ctx->rsp = ev_ctx->cb_fn(smsession, &ev_ctx->user_ctx.ctx); ++ } else { ++ ev_ctx->rsp = 0; ++ ev_ctx->user_ctx.async_done = true; ++ } ++ ++ ssam_unlock(); ++ /* The judgment logic is used to adapt to the hot-restart. ++ * Because the session has been released during the hot restart, ++ * the following ssam_send_event_finish is not required. ++ */ ++ if (ev_ctx->user_ctx.session_freed) { ++ free(ev_ctx); ++ return; ++ } else { ++ ev_ctx->start_tsc = spdk_get_ticks(); ++ spdk_thread_send_msg(g_ssam_init_thread, ssam_send_event_finish, ctx); ++ } ++} ++ ++static spdk_ssam_session_rsp_fn g_rsp_fn = NULL; ++ ++int ++ssam_send_event_to_session(struct spdk_ssam_session *smsession, spdk_ssam_session_fn fn, ++ spdk_ssam_session_cpl_fn cpl_fn, struct spdk_ssam_send_event_flag send_event_flag, void *ctx) ++{ ++ struct ssam_session_fn_ctx *ev_ctx; ++ int rc; ++ ++ ev_ctx = calloc(1, sizeof(*ev_ctx)); ++ if (ev_ctx == NULL) { ++ SPDK_ERRLOG("Failed to alloc ssam event.\n"); ++ return -ENOMEM; ++ } ++ ++ ev_ctx->smsession = smsession; ++ ev_ctx->cb_fn = fn; ++ ev_ctx->cpl_fn = cpl_fn; ++ ev_ctx->need_async = send_event_flag.need_async; ++ if (send_event_flag.need_rsp == true) { ++ ev_ctx->rsp_fn = &smsession->rsp_fn; ++ ev_ctx->rsp_ctx = smsession->rsp_ctx; ++ } else { ++ ev_ctx->rsp_fn = &g_rsp_fn; ++ ev_ctx->rsp_ctx = NULL; ++ } ++ ++ ev_ctx->user_ctx.ctx = ctx; ++ ev_ctx->user_ctx.session_freed = false; ++ ++ if (smsession->pending_async_op_num < UINT32_MAX) { ++ smsession->pending_async_op_num++; ++ } else { ++ SPDK_ERRLOG("[OFFLOAD_SNIC] smsession %s: internel error, events stuck too much\n", ++ smsession->name); ++ } ++ ++ ev_ctx->start_tsc = spdk_get_ticks(); ++ rc = spdk_thread_send_msg(smsession->thread, ssam_send_event, ev_ctx); ++ if (rc != 0) { ++ SPDK_ERRLOG("send thread msg failed\n"); ++ free(ev_ctx); ++ return rc; ++ } ++ return 0; ++} ++ ++void ++spdk_ssam_config_json(struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ spdk_json_write_array_begin(w); ++ ++ ssam_lock(); ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ while (smsession != NULL) { ++ smsession->backend->write_config_json(smsession, w); ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ ++ smdev = ssam_dev_next(smdev); ++ } ++ ++ ssam_unlock(); ++ ++ spdk_json_write_array_end(w); ++} ++ ++int ++ssam_get_config(struct spdk_ssam_session *smsession, uint8_t *config, ++ uint32_t len, uint16_t queues) ++{ ++ const struct spdk_ssam_session_backend *backend = smsession->backend; ++ ++ if (backend->ssam_get_config == NULL) { ++ return -1; ++ } ++ ++ return backend->ssam_get_config(smsession, config, len, queues); ++} ++ ++struct dev_destroy_ctx { ++ struct spdk_ssam_session *smsession; ++ void *args; ++}; ++ ++static void ++ssam_dev_destroy(void *arg) ++{ ++ struct dev_destroy_ctx *ctx = (struct dev_destroy_ctx *)arg; ++ ctx->smsession->backend->destroy_bdev_device(ctx->smsession, ctx->args); ++ free(ctx); ++} ++ ++void ++ssam_send_dev_destroy_msg(struct spdk_ssam_session *smsession, void *args) ++{ ++ struct dev_destroy_ctx *ctx = calloc(1, sizeof(struct dev_destroy_ctx)); ++ if (ctx == NULL) { ++ SPDK_ERRLOG("%s: out of memory, destroy dev failed\n", smsession->name); ++ return; ++ } ++ ctx->smsession = smsession; ++ ctx->args = args; ++ spdk_thread_send_msg(g_ssam_init_thread, ssam_dev_destroy, ctx); ++} ++ 
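ssam_send_event() and ssam_send_event_finish() above never block on the global lock: when ssam_trylock() fails they re-post themselves to the current thread through ssam_check_send_event_timeout(), and give up with -ETIMEDOUT once roughly SEND_EVENT_WAIT_TIME seconds' worth of ticks have elapsed. A condensed standalone model of that retry-or-timeout pattern follows (pthreads and time() stand in for the SPDK lock and tick helpers; this is an illustration, not part of the patch):

/* Illustrative model only -- not patch code. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct ev_ctx {
	time_t start;		/* when the event was first posted */
	int timeout_s;		/* plays the role of SEND_EVENT_WAIT_TIME */
	int rsp;		/* result handed back to the caller */
};

static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

static void
send_event(struct ev_ctx *ev)
{
	if (pthread_mutex_trylock(&g_lock) != 0) {
		if (time(NULL) - ev->start > ev->timeout_s) {
			ev->rsp = -ETIMEDOUT;	/* as in ssam_check_send_event_timeout() */
			return;
		}
		/* In the patch this re-post is spdk_thread_send_msg(spdk_get_thread(), fn, ev),
		 * i.e. the same function is queued to run again on the same thread.
		 */
		printf("lock busy, event re-posted\n");
		return;
	}

	ev->rsp = 0;		/* the real callback (ev_ctx->cb_fn) would run here */
	pthread_mutex_unlock(&g_lock);
}

int
main(void)
{
	struct ev_ctx ev = { .start = time(NULL), .timeout_s = 5, .rsp = 1 };

	send_event(&ev);
	printf("rsp = %d\n", ev.rsp);
	return 0;
}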
++void ++spdk_ssam_poller_start(void) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_dev *tmp = NULL; ++ ssam_lock(); ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ tmp = ssam_dev_next(smdev); ++ /* Send the message to each smdev to start the worker on the smdev. */ ++ spdk_thread_send_msg(smdev->thread, ssam_dev_start_worker_poller, smdev); ++ smdev = tmp; ++ } ++ ssam_unlock(); ++} ++ ++static void ++ssam_fini(void *arg) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_dev *tmp = NULL; ++ SPDK_WARNLOG("ssam is finishing\n"); ++ ssam_lock(); ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ tmp = ssam_dev_next(smdev); ++ /* Send the message to each smdev to stop the worker on the smdev. */ ++ spdk_thread_send_msg(smdev->thread, ssam_dev_stop_worker_poller, smdev); ++ smdev = tmp; ++ } ++ ssam_unlock(); ++ ++ spdk_cpuset_zero(&g_ssam_core_mask); ++ ++ g_ssam_fini_cpl_cb(); ++} ++ ++static void * ++ssam_session_shutdown(void *arg) ++{ ++ SPDK_INFOLOG(ssam, "ssam sesssion Exiting\n"); ++ spdk_thread_send_msg(g_ssam_init_thread, ssam_fini, NULL); ++ ++ return NULL; ++} ++ ++void ++spdk_ssam_subsystem_fini(spdk_ssam_fini_cb fini_cb) ++{ ++ if (spdk_get_thread() != g_ssam_init_thread) { ++ SPDK_ERRLOG("ssam finish thread not equal init thread, internel error\n"); ++ } ++ ++ g_ssam_fini_cpl_cb = fini_cb; ++ ++ ssam_session_shutdown(NULL); ++ ++ ssam_unregister_hpd_poller(); ++} ++ ++void ++spdk_ssam_subsystem_init(spdk_ssam_init_cb init_cb) ++{ ++ uint32_t i; ++ int ret; ++ int shm_id; ++ ++ g_ssam_init_thread = spdk_get_thread(); ++ if (g_ssam_init_thread == NULL) { ++ ret = -EBUSY; ++ SPDK_ERRLOG("get thread error\n"); ++ goto exit; ++ } ++ ++ /* init ssam core mask */ ++ spdk_cpuset_zero(&g_ssam_core_mask); ++ SPDK_ENV_FOREACH_CORE(i) { ++ spdk_cpuset_set_cpu(&g_ssam_core_mask, i, true); ++ } ++ ++ ret = ssam_set_core_num(spdk_cpuset_count(&g_ssam_core_mask)); ++ if (ret != 0) { ++ goto exit; ++ } ++ ++ ret = ssam_init(); ++ if (ret != 0) { ++ goto exit; ++ } ++ ++ if (!spdk_ssam_get_shm_created()) { ++ shm_id = shm_open(SSAM_SHM, O_CREAT | O_EXCL | O_RDWR, SSAM_SHM_PERMIT); ++ if (shm_id < 0) { ++ SPDK_ERRLOG("failed to create shared memory %s\n", SSAM_SHM); ++ ret = -1; ++ goto exit; ++ } ++ spdk_ssam_set_shm_created(true); ++ } ++ ++exit: ++ init_cb(ret); ++ return; ++} ++ ++/* Initialize all smdev modules during submodule initialization. 
*/ ++static int ++ssam_smdev_init(void) ++{ ++ int rc = 0; ++ struct spdk_ssam_dev *smdev; ++ struct spdk_ssam_dev *tmp = NULL; ++ uint16_t core_num = ssam_get_core_num(); ++ for (uint16_t i = 0; i < core_num; ++i) { ++ rc = ssam_dev_register(&smdev, i); ++ if (rc != 0) { ++ goto out; ++ } ++ } ++ ++ rc = ssam_get_hot_upgrade_state(); ++ if (rc != 0) { ++ SPDK_ERRLOG(": virtio upgrade state failed.\n"); ++ return rc; ++ } ++ ++ return 0; ++out: ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ tmp = ssam_dev_next(smdev); ++ ssam_dev_unregister(&smdev); ++ smdev = tmp; ++ } ++ return rc; ++} ++ ++static int ++ssam_server_init(void) ++{ ++ uint32_t core_num = ssam_get_core_num(); ++ uint32_t mempool_size = (ssam_get_mempool_size() / core_num) & (~0U - 1); ++ uint32_t i; ++ ++ /* Disable dummy I/O for hot restart */ ++ ++ for (i = 0; i < core_num; i++) { ++ g_ssam_info.mp[i] = ssam_mempool_create(mempool_size * SSAM_MB, SSAM_DEFAULT_MEMPOOL_EXTRA_SIZE); ++ if (g_ssam_info.mp[i] == NULL) { ++ SPDK_ERRLOG("ssam create mempool[%d] failed, mempool_size = %uMB.\n", i, mempool_size); ++ return -ENOMEM; ++ } ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_server_exit(void) ++{ ++ uint32_t core_num = ssam_get_core_num(); ++ uint32_t i; ++ ++ for (i = 0; i < core_num; i++) { ++ if (g_ssam_info.mp[i] != NULL) { ++ ssam_mempool_destroy(g_ssam_info.mp[i]); ++ g_ssam_info.mp[i] = NULL; ++ } ++ } ++ ++ memset(&g_ssam_info, 0x0, sizeof(struct spdk_ssam_info)); ++} ++ ++ ++static int ++ssam_check_device_status(void) ++{ ++ uint8_t ready = 0; ++ int times = 0; ++ int rc; ++ ++ do { ++ rc = ssam_check_device_ready(0, 0, &ready); ++ if (rc != 0) { ++ SPDK_ERRLOG("device check failed.\n"); ++ return rc; ++ } ++ ++ if (ready != 0) { ++ break; ++ } ++ ++ usleep(DEVICE_READY_WAIT_TIME); ++ times++; ++ } while (times < DEVICE_READY_TIMEOUT); ++ ++ if (ready == 0) { ++ SPDK_ERRLOG("device has not been ready after 1.5s.\n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++ ++static int ++ssam_init(void) ++{ ++ int rc; ++ ++ rc = ssam_check_device_status(); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ rc = ssam_config_init(); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ rc = ssam_server_init(); ++ if (rc != 0) { ++ ssam_config_exit(); ++ return rc; ++ } ++ ++ rc = ssam_smdev_init(); ++ if (rc != 0) { ++ ssam_server_exit(); ++ ssam_config_exit(); ++ } ++ ++ return rc; ++} ++ ++void ++spdk_ssam_exit(void) ++{ ++ ssam_deinit_device_pcie_list(); ++ ssam_config_exit(); ++ ssam_server_exit(); ++} ++ ++SPDK_LOG_REGISTER_COMPONENT(ssam) +diff --git a/lib/ssam/ssam_blk.c b/lib/ssam/ssam_blk.c +new file mode 100644 +index 0000000..17761ea +--- /dev/null ++++ b/lib/ssam/ssam_blk.c +@@ -0,0 +1,2689 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. 
++ */ ++ ++#include ++#include ++ ++#include "spdk/env.h" ++#include "spdk/bdev.h" ++#include "spdk/bdev_module.h" ++#include "spdk/thread.h" ++#include "spdk/likely.h" ++#include "spdk/string.h" ++#include "spdk/util.h" ++ ++#include "ssam_internal.h" ++ ++#define SESSION_STOP_POLLER_PERIOD 1000 ++#define HPD_DEL_POLLER_PERIOD (1000 * 1000) ++#define ENQUEUE_TIMES_PER_IO 1000 ++ ++#define IOV_HEADER_TAIL_NUM 2 ++ ++#define SECTOR_SIZE 512 ++#define ALIGNMENT_2M (2048 * 1024) ++#define SERIAL_STRING_LEN 128 ++#define SMSESSION_STOP_TIMEOUT 2 /* s */ ++#define PERF_STAT ++ ++#define HPD_DEL_POLLER_NUM 5 ++ ++/* Related to (SPDK_SSAM_IOVS_MAX * SPDK_SSAM_MAX_SEG_SIZE) */ ++#define PAYLOAD_SIZE_MAX (2048U * 2048) ++ ++#define RETRY_TIMEOUT 120 ++ ++/* Minimal set of features supported by every virtio-blk device */ ++#define SPDK_SSAM_BLK_FEATURES_BASE (SPDK_SSAM_FEATURES | \ ++ (1ULL << VIRTIO_BLK_F_SIZE_MAX) | (1ULL << VIRTIO_BLK_F_SEG_MAX) | \ ++ (1ULL << VIRTIO_BLK_F_GEOMETRY) | (1ULL << VIRTIO_BLK_F_BLK_SIZE) | \ ++ (1ULL << VIRTIO_BLK_F_TOPOLOGY) | (1ULL << VIRTIO_BLK_F_BARRIER) | \ ++ (1ULL << VIRTIO_BLK_F_SCSI) | (1ULL << VIRTIO_BLK_F_CONFIG_WCE) | \ ++ (1ULL << VIRTIO_BLK_F_MQ)) ++ ++extern bool g_ssam_subsystem_exit; ++ ++static int g_gfunc_session_number = 0; ++extern int g_delete_flag; ++ ++extern uint8_t g_hpd_delete_session_times[2000]; ++bool g_hpd_to_async[2000] = {0}; ++static uint8_t g_blk_set_times[2000] = {0}; ++static bool g_hpd_del_flag[2000] = {0}; ++ ++enum ssam_hpd_del { ++ SSAM_HPD_RM_SUC, ++ SSAM_HPD_DEL_REM_FAIL, ++ SSAM_HPD_INVAL_ID, ++ SSAM_HPD_RM_REPEAT, ++}; ++ ++enum ssam_hpd_del_check { ++ SSAM_HPD_CHECK_SUC, ++ SSAM_HPD_RM_FAIL, ++ SSAM_HPD_RETRY, ++}; ++ ++struct ssam_task_stat { ++ uint64_t start_tsc; ++ uint64_t dma_start_tsc; ++ uint64_t dma_end_tsc; ++ uint64_t bdev_start_tsc; ++ uint64_t bdev_func_tsc; ++ uint64_t bdev_end_tsc; ++ uint64_t complete_start_tsc; ++ uint64_t complete_end_tsc; ++}; ++ ++struct spdk_ssam_blk_task { ++ /* Returned status of I/O processing, it can be VIRTIO_BLK_S_OK, ++ * VIRTIO_BLK_S_IOERR or VIRTIO_BLK_S_UNSUPP ++ */ ++ volatile uint8_t *status; ++ ++ /* Number of bytes processed successfully */ ++ uint32_t used_len; ++ ++ /* Records the amount of valid data in the struct iovec iovs array. */ ++ uint32_t iovcnt; ++ struct ssam_iovec iovs; ++ ++ /* If set, the task is currently used for I/O processing. 
*/ ++ bool used; ++ ++ /* For bdev io wait */ ++ struct spdk_bdev_io_wait_entry bdev_io_wait; ++ struct spdk_ssam_session_io_wait session_io_wait; ++ struct spdk_ssam_blk_session *bsmsession; ++ ++ /* Size of whole payload in bytes */ ++ uint32_t payload_size; ++ ++ /* ssam request data */ ++ struct ssam_request *io_req; ++ ++ uint16_t vq_idx; ++ uint16_t req_idx; ++ uint16_t task_idx; ++ struct ssam_task_stat task_stat; ++}; ++ ++struct ssam_blk_stat { ++ uint64_t count; ++ uint64_t start_count; ++ uint64_t total_tsc; /* pre_dma <- -> post_return */ ++ uint64_t dma_tsc; /* pre_dma <- -> post_dma */ ++ uint64_t dma_count; ++ uint64_t dma_complete_count; ++ uint64_t bdev_tsc; /* pre_bdev <- -> post_bdev */ ++ uint64_t bdev_submit_tsc; /* <- spdk_bdev_xxx -> */ ++ uint64_t bdev_count; ++ uint64_t bdev_complete_count; ++ uint64_t complete_tsc; /* pre_return <- -> post_return */ ++ uint64_t internel_tsc; /* total_tsc - dma_tsc - bdev_tsc - complete_tsc */ ++ ++ uint64_t complete_read_ios; /* Number of successfully completed read requests */ ++ uint64_t err_read_ios; /* Number of failed completed read requests */ ++ uint64_t complete_write_ios; /* Number of successfully completed write requests */ ++ uint64_t err_write_ios; /* Number of failed completed write requests */ ++ uint64_t flush_ios; /* Total number of flush requests */ ++ uint64_t complete_flush_ios; /* Number of successfully completed flush requests */ ++ uint64_t err_flush_ios; /* Number of failed completed flush requests */ ++ uint64_t other_ios; ++ uint64_t complete_other_ios; ++ uint64_t err_other_ios; ++ uint64_t fatal_ios; /* Number of discarded requests */ ++ uint64_t io_retry; ++}; ++ ++struct spdk_ssam_blk_session { ++ /* The parent session must be the very first field in this struct */ ++ struct spdk_ssam_session smsession; ++ struct spdk_poller *stop_poller; ++ struct spdk_bdev *bdev; ++ struct spdk_bdev_desc *bdev_desc; ++ struct spdk_io_channel *io_channel; ++ ++ /* volume id */ ++ char *serial; ++ ++ /* accumulated I/O statistics */ ++ struct spdk_bdev_io_stat stat; ++ ++ /* Current count of bdev operations for hot-restart. */ ++ int32_t bdev_count; ++ ++ /* poller for waiting bdev finish when hot-restart */ ++ struct spdk_poller *stop_bdev_poller; ++ struct spdk_poller *stop_session_poller; ++ struct spdk_poller *hpd_stop_poller; ++ ++ /* controller statistics. */ ++ struct ssam_blk_stat blk_stat; ++ ++ /* accumulated I/O statistics */ ++ struct spdk_bdev_io_stat vq_stat[SPDK_SSAM_MAX_VQUEUES]; ++ ++ /* controller vq statistics. 
*/ ++ struct ssam_blk_stat vq_blk_stat[SPDK_SSAM_MAX_VQUEUES]; ++ ++ /* if set, all writes to the device will fail with ++ * VIRTIO_BLK_S_IOERR error code ++ */ ++ bool readonly; ++ ++ /* if set, indicate the session not have a bdev, all writes to the device ++ * will fail with VIRTIO_BLK_S_IOERR error code ++ */ ++ bool no_bdev; ++ ++ bool need_write_config; ++ ++ int hot_plug_poller_number; ++}; ++ ++struct ssam_blk_session_ctx { ++ struct spdk_ssam_blk_session *bsmsession; ++ void **user_ctx; ++}; ++ ++static const struct spdk_ssam_session_backend g_ssam_blk_session_backend; ++static int ssam_blk_remove_session(struct spdk_ssam_session *smsession); ++static void ssam_blk_request_worker(struct spdk_ssam_session *smsession, void *arg); ++static void ssam_blk_destroy_bdev_device(struct spdk_ssam_session *smsession, void *args); ++static void ssam_blk_response_worker(struct spdk_ssam_session *smsession, void *arg); ++static void ssam_blk_no_data_request_worker(struct spdk_ssam_session *smsession); ++static inline void ssam_request_queue_io(struct spdk_ssam_blk_task *task); ++static void ssam_task_complete(struct spdk_ssam_blk_task *task, uint8_t status); ++static void ssam_data_request_para(struct ssam_dma_request *dma_req, ++ struct spdk_ssam_blk_task *task, uint32_t type, uint8_t status); ++static void ssam_blk_print_stuck_io_info(struct spdk_ssam_session *smsession); ++static int ssam_process_blk_request(struct spdk_ssam_blk_task *task); ++static void ssam_free_task_pool(struct spdk_ssam_blk_session *bsmsession); ++static int ssam_blk_io_complete(struct spdk_ssam_dev *smdev, struct ssam_request *io_req, ++ uint8_t status); ++static void ssam_session_io_resubmit(void *arg); ++static void ssam_blk_roback(struct spdk_ssam_session_reg_info *info, const char *dev_name, ++ bool readonly, char *serial); ++static void ssam_blk_del_recover(struct spdk_ssam_session *smsession, ++ struct spdk_bdev *bdev_roback); ++static int ssam_destroy_session_cb(void *arg); ++ ++static inline struct spdk_ssam_blk_session * ++ssam_to_blk_session(struct spdk_ssam_session *smsession) ++{ ++ return (struct spdk_ssam_blk_session *)smsession; ++} ++ ++static void ++ssam_blk_dump_info_json(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "name", ssam_session_get_name(smsession)); ++ spdk_json_write_named_uint32(w, "function_id", (uint32_t)smsession->gfunc_id); ++ spdk_json_write_named_uint32(w, "queues", (uint32_t)smsession->max_queues); ++ ++ spdk_json_write_named_object_begin(w, "block"); ++ spdk_json_write_named_bool(w, "readonly", bsmsession->readonly); ++ spdk_json_write_name(w, "bdev"); ++ if (bsmsession->bdev != NULL) { ++ spdk_json_write_string(w, spdk_bdev_get_name(bsmsession->bdev)); ++ } else { ++ spdk_json_write_null(w); ++ } ++ spdk_json_write_object_end(w); ++ ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_dev_bdev_remove_cpl_cb(struct spdk_ssam_session *smsession, void **unnused) ++{ ++ /* All sessions have been notified, time to close the bdev */ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ ++ if (bsmsession == NULL) { ++ return; ++ } ++ ++ if (bsmsession->bdev_desc != NULL) { ++ spdk_bdev_close(bsmsession->bdev_desc); ++ bsmsession->bdev_desc = NULL; ++ } ++ ++ /* bdev not create by ssam blk, no need be freed here */ ++ bsmsession->bdev = NULL; ++} ++ ++static void 
++ssam_hpd_del_release_resource(void *arg) ++{ ++ struct spdk_ssam_blk_session *bsmsession = (struct spdk_ssam_blk_session *)arg; ++ struct spdk_ssam_session *smsession = &bsmsession->smsession; ++ spdk_ssam_session_rsp_fn rsp_fn = smsession->rsp_fn; ++ struct spdk_bdev *bdev_roback = bsmsession->bdev; ++ void *rsp_ctx = smsession->rsp_ctx; ++ int rc; ++ ++ rc = ssam_virtio_blk_resize(smsession->gfunc_id, 0); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: virtio blk resize failed when remove session.\n", smsession->name); ++ } ++ ++ if (ssam_get_hash_mode() == SSAM_VQ_HASH_MODE) { ++ rc = ssam_virtio_vq_unbind_core(smsession->gfunc_id); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: virtio blk vq unbind core failed.\n", smsession->name); ++ } ++ } ++ ++ if ((smsession->gfunc_id > SSAM_PF_MAX_NUM) || (ssam_get_en_hpd() == true)) { ++ rc = ssam_virtio_blk_release_resource(smsession->gfunc_id); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: virtio blk release vq failed.\n", smsession->name); ++ } ++ } ++ ++ SPDK_NOTICELOG("BLK controller %s deleted\n", smsession->name); ++ spdk_poller_unregister(&bsmsession->hpd_stop_poller); ++ ++ if (smsession->name != NULL) { ++ free(smsession->name); ++ smsession->name = NULL; ++ } ++ ++ memset(bsmsession, 0, sizeof(*bsmsession)); ++ free(bsmsession); ++ rc = 0; ++ ++ if (rsp_fn != NULL) { ++ rsp_fn(rsp_ctx, rc); ++ rsp_fn = NULL; ++ } ++} ++ ++static int ++ssam_hpd_del_poller_cb(void *arg) ++{ ++ struct spdk_ssam_blk_session *bsmsession = (struct spdk_ssam_blk_session *)arg; ++ struct spdk_ssam_session *smsession = &bsmsession->smsession; ++ spdk_ssam_session_rsp_fn rsp_fn = smsession->rsp_fn; ++ struct spdk_bdev *bdev_roback = bsmsession->bdev; ++ void *rsp_ctx = smsession->rsp_ctx; ++ int rc; ++ ++ if (ssam_trylock() != 0) { ++ return SPDK_POLLER_BUSY; ++ } ++ ++ rc = ssam_hotplug_del_async_check(smsession->gfunc_id); ++ if (rc == SSAM_HPD_CHECK_SUC) { ++ ssam_hpd_del_release_resource(smsession); ++ ssam_unlock(); ++ return SPDK_POLLER_BUSY; ++ } else if (rc == SSAM_HPD_RETRY) { ++ bsmsession->hot_plug_poller_number++; ++ if (bsmsession->hot_plug_poller_number >= HPD_DEL_POLLER_NUM) { ++ SPDK_ERRLOG("BLK controller %s hotplug deleted failed\n", smsession->name); ++ spdk_poller_unregister(&bsmsession->hpd_stop_poller); ++ ssam_blk_del_recover(smsession, bdev_roback); ++ rc = -ETIMEDOUT; ++ } else { ++ ssam_unlock(); ++ return SPDK_POLLER_BUSY; ++ } ++ } else { ++ SPDK_ERRLOG("BLK controller %s hotplug deleted failed\n", smsession->name); ++ spdk_poller_unregister(&bsmsession->hpd_stop_poller); ++ ssam_blk_del_recover(smsession, bdev_roback); ++ } ++ ++ if (rsp_fn != NULL) { ++ rsp_fn(rsp_ctx, rc); ++ rsp_fn = NULL; ++ } ++ ssam_unlock(); ++ return SPDK_POLLER_BUSY; ++} ++ ++static void ++ssam_blk_stop_cpl_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ spdk_ssam_session_rsp_fn rsp_fn = smsession->rsp_fn; ++ void *rsp_ctx = smsession->rsp_ctx; ++ int rc = -1; ++ struct spdk_bdev *bdev_roback = bsmsession->bdev; ++ bsmsession->hpd_stop_poller = NULL; ++ ++ g_hpd_delete_session_times[smsession->gfunc_id]++; ++ ssam_dev_bdev_remove_cpl_cb(smsession, NULL); ++ ++ if (g_hpd_delete_session_times[smsession->gfunc_id] == ssam_get_core_num()) { ++ ++ if (ssam_get_en_hpd() == true) { ++ rc = ssam_hotplug_del_async(smsession->gfunc_id); ++ if (rc != SSAM_HPD_RM_SUC) { ++ if (rc == SSAM_HPD_DEL_REM_FAIL) { ++ SPDK_ERRLOG("BLK controller %s hotplug deleted failed\n", smsession->name); ++ } ++ if (rc == 
SSAM_HPD_INVAL_ID) { ++ SPDK_ERRLOG("the func_id %d is invalid\n", smsession->gfunc_id); ++ } ++ if (rc == SSAM_HPD_RM_REPEAT) { ++ SPDK_ERRLOG("the func_id %d is repeated\n", smsession->gfunc_id); ++ } ++ ssam_blk_del_recover(smsession, bdev_roback); ++ goto out; ++ } ++ ++ bsmsession->bdev = bdev_roback; ++ bsmsession->hot_plug_poller_number = 0; ++ bsmsession->hpd_stop_poller = SPDK_POLLER_REGISTER(ssam_hpd_del_poller_cb, ++ smsession, HPD_DEL_POLLER_PERIOD); ++ return; ++ } ++ ++ rc = ssam_virtio_blk_resize(smsession->gfunc_id, 0); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: virtio blk resize failed when remove session.\n", smsession->name); ++ } ++ if (ssam_get_hash_mode() == SSAM_VQ_HASH_MODE) { ++ rc = ssam_virtio_vq_unbind_core(smsession->gfunc_id); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: virtio blk vq unbind core failed.\n", smsession->name); ++ } ++ } ++ if ((smsession->gfunc_id > SSAM_PF_MAX_NUM) || (ssam_get_en_hpd() == true)) { ++ rc = ssam_virtio_blk_release_resource(smsession->gfunc_id); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: virtio blk release vq failed.\n", smsession->name); ++ } ++ } ++ ++ SPDK_NOTICELOG("BLK controller %s deleted\n", smsession->name); ++ } else { ++ rsp_fn = NULL; ++ } ++ ++ if (smsession->name != NULL) { ++ free(smsession->name); ++ smsession->name = NULL; ++ } ++ ++ ssam_set_session_be_freed(ctx); ++ memset(bsmsession, 0, sizeof(*bsmsession)); ++ free(bsmsession); ++ rc = 0; ++out: ++ if (rsp_fn != NULL) { ++ rsp_fn(rsp_ctx, rc); ++ rsp_fn = NULL; ++ } ++} ++ ++static void ++ssam_task_stat_tick(uint64_t *tsc) ++{ ++#ifdef PERF_STAT ++ *tsc = spdk_get_ticks(); ++#endif ++ return; ++} ++ ++static void ++ssam_blk_stat_statistics(struct spdk_ssam_blk_task *task, struct spdk_bdev_io_stat *stat, ++ struct ssam_blk_stat *blk_stat, uint8_t status) ++{ ++#ifdef PERF_STAT ++ uint64_t dma_tsc = task->task_stat.dma_end_tsc - task->task_stat.dma_start_tsc; ++ uint64_t bdev_tsc = task->task_stat.bdev_end_tsc - task->task_stat.bdev_start_tsc; ++ uint64_t bdev_submit_tsc = task->task_stat.bdev_func_tsc - task->task_stat.bdev_start_tsc; ++ uint64_t complete_tsc = task->task_stat.complete_end_tsc - task->task_stat.complete_start_tsc; ++ uint64_t total_tsc = task->task_stat.complete_end_tsc - task->task_stat.start_tsc; ++ struct virtio_blk_outhdr *req = (struct virtio_blk_outhdr *)task->io_req->req.cmd.header; ++ ++ if (req->type == VIRTIO_BLK_T_IN) { /* read */ ++ stat->read_latency_ticks += total_tsc; ++ stat->bytes_read += task->payload_size; ++ stat->num_read_ops++; ++ if (status == VIRTIO_BLK_S_OK) { ++ blk_stat->complete_read_ios++; ++ } else { ++ blk_stat->err_read_ios++; ++ } ++ } else if (req->type == VIRTIO_BLK_T_OUT) { /* write */ ++ stat->write_latency_ticks += total_tsc; ++ stat->bytes_written += task->payload_size; ++ stat->num_write_ops++; ++ if (status == VIRTIO_BLK_S_OK) { ++ blk_stat->complete_write_ios++; ++ } else { ++ blk_stat->err_write_ios++; ++ } ++ } else if (req->type == VIRTIO_BLK_T_FLUSH) { /* flush */ ++ blk_stat->flush_ios++; ++ if (status == VIRTIO_BLK_S_OK) { ++ blk_stat->complete_flush_ios++; ++ } else { ++ blk_stat->err_flush_ios++; ++ } ++ } else { ++ blk_stat->other_ios++; ++ if (status == VIRTIO_BLK_S_OK) { ++ blk_stat->complete_other_ios++; ++ } else { ++ blk_stat->err_other_ios++; ++ } ++ } ++ ++ blk_stat->dma_tsc += dma_tsc; ++ blk_stat->bdev_tsc += bdev_tsc; ++ blk_stat->bdev_submit_tsc += bdev_submit_tsc; ++ blk_stat->complete_tsc += complete_tsc; ++ blk_stat->total_tsc += total_tsc; ++ blk_stat->internel_tsc += total_tsc - 
complete_tsc - bdev_tsc - dma_tsc; ++ blk_stat->count += 1; ++#endif ++} ++ ++static void ++ssam_blk_configs(uint8_t *config, struct virtio_blk_config *blkcfg, ++ uint32_t len, struct spdk_bdev *bdev) ++{ ++ uint32_t cfg_len; ++ ++ /* minimum I/O size in blocks */ ++ blkcfg->min_io_size = 1; ++ ++ if (bdev && spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_UNMAP)) { ++ /* 32768 sectors is 16MiB, expressed in 512 Bytes */ ++ blkcfg->max_discard_sectors = 32768; ++ blkcfg->max_discard_seg = 1; ++ /* expressed in 512 Bytes sectors */ ++ blkcfg->discard_sector_alignment = blkcfg->blk_size / SECTOR_SIZE; ++ } ++ if (bdev && spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES)) { ++ /* 32768 sectors is 16MiB, expressed in 512 Bytes */ ++ blkcfg->max_write_zeroes_sectors = 32768; ++ blkcfg->max_write_zeroes_seg = 1; ++ } ++ ++ cfg_len = sizeof(struct virtio_blk_config); ++ memcpy(config, blkcfg, (unsigned long)spdk_min(len, cfg_len)); ++ if (len < cfg_len) { ++ SPDK_NOTICELOG("Out config len %u < total config len %u\n", len, cfg_len); ++ } ++ ++ return; ++} ++ ++static int ++ssam_blk_get_config(struct spdk_ssam_session *smsession, uint8_t *config, ++ uint32_t len, uint16_t queues) ++{ ++ struct virtio_blk_config blkcfg; ++ struct spdk_ssam_blk_session *bsmsession = NULL; ++ struct spdk_bdev *bdev = NULL; ++ uint32_t blk_size; ++ uint64_t blkcnt; ++ ++ memset(&blkcfg, 0, sizeof(blkcfg)); ++ bsmsession = ssam_to_blk_session(smsession); ++ if (bsmsession == NULL) { ++ SPDK_ERRLOG("session is null.\n"); ++ return -1; ++ } ++ bdev = bsmsession->bdev; ++ if (bdev == NULL) { ++ return -1; ++ } ++ blk_size = spdk_bdev_get_block_size(bdev); ++ blkcnt = spdk_bdev_get_num_blocks(bdev); ++ /* ssam will use this configuration, this is the max capability of ++ * the ssam, configurations will be obtained through negotiation ++ * in the future. 
++ */ ++ blkcfg.size_max = SPDK_SSAM_MAX_SEG_SIZE; ++ blkcfg.seg_max = SPDK_SSAM_IOVS_MAX; ++ ++ if (blk_size == 0) { ++ SPDK_ERRLOG("bdev's blk_size %u error.\n", blk_size); ++ return -1; ++ } ++ if (blkcnt > (UINT64_MAX / blk_size)) { ++ SPDK_ERRLOG("bdev's blkcnt %lu or blk_size %u out of range.\n", ++ blkcnt, blk_size); ++ return -1; ++ } ++ blkcfg.blk_size = blk_size; ++ /* expressed in 512 Bytes sectors */ ++ blkcfg.capacity = (blkcnt * blk_size) / 512; ++ blkcfg.num_queues = 1; ++ ssam_blk_configs(config, &blkcfg, len, bdev); ++ ++ return 0; ++} ++ ++static void ++ssam_blk_write_config_json(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ ++ if (bsmsession == NULL || bsmsession->bdev == NULL || bsmsession->need_write_config != true) { ++ return; ++ } ++ ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "method", "create_blk_controller"); ++ ++ spdk_json_write_named_object_begin(w, "params"); ++ spdk_json_write_named_string(w, "dev_name", spdk_bdev_get_name(bsmsession->bdev)); ++ char *gfunc_id = spdk_sprintf_alloc("%u", bsmsession->smsession.gfunc_id); ++ if (gfunc_id == NULL) { ++ SPDK_ERRLOG("alloc for gfunc_id failed\n"); ++ } else { ++ spdk_json_write_named_string(w, "index", gfunc_id); ++ free(gfunc_id); ++ } ++ spdk_json_write_named_bool(w, "readonly", bsmsession->readonly); ++ if (bsmsession->serial != NULL) { ++ spdk_json_write_named_string(w, "serial", bsmsession->serial); ++ } ++ if (bsmsession->smsession.gfunc_id > SSAM_PF_MAX_NUM || (ssam_get_en_hpd() == true)) { ++ spdk_json_write_named_int32(w, "vqueue", (int32_t)bsmsession->smsession.max_queues); ++ } ++ ++ spdk_json_write_object_end(w); ++ ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_blk_print_iostat_json(struct spdk_ssam_session *smsession, uint16_t vq_idx, ++ struct spdk_bdev_io_stat *stat, ++ struct ssam_blk_stat *blk_stat, struct spdk_json_write_ctx *w) ++{ ++ struct spdk_bdev *bdev = ssam_get_session_bdev(smsession); ++ uint64_t ticks_hz = spdk_get_ticks_hz(); ++ uint64_t poll_count = smsession->smdev->stat.poll_count; ++ ++ spdk_json_write_object_begin(w); ++ ++ spdk_json_write_named_uint32(w, "function_id", smsession->gfunc_id); ++ if (vq_idx != SPDK_INVALID_ID) { ++ spdk_json_write_named_uint32(w, "vq_idx", vq_idx); ++ spdk_json_write_named_uint32(w, "tid", smsession->smdev->tid); ++ } ++ if (smsession->smdev->stat.poll_count == 0) { ++ poll_count = 1; ++ } ++ spdk_json_write_named_string_fmt(w, "poll_lat", "%.9f", ++ (float)smsession->smdev->stat.poll_tsc / poll_count / ticks_hz); ++ spdk_json_write_named_string(w, "bdev_name", (bdev == NULL) ? 
"" : spdk_bdev_get_name(bdev)); ++ spdk_json_write_named_uint64(w, "bytes_read", stat->bytes_read); ++ spdk_json_write_named_uint64(w, "num_read_ops", stat->num_read_ops); ++ spdk_json_write_named_uint64(w, "bytes_written", stat->bytes_written); ++ spdk_json_write_named_uint64(w, "num_write_ops", stat->num_write_ops); ++ spdk_json_write_named_uint64(w, "read_latency_ticks", stat->read_latency_ticks); ++ spdk_json_write_named_uint64(w, "write_latency_ticks", stat->write_latency_ticks); ++ spdk_json_write_named_uint64(w, "complete_read_ios", blk_stat->complete_read_ios); ++ spdk_json_write_named_uint64(w, "err_read_ios", blk_stat->err_read_ios); ++ spdk_json_write_named_uint64(w, "complete_write_ios", blk_stat->complete_write_ios); ++ spdk_json_write_named_uint64(w, "err_write_ios", blk_stat->err_write_ios); ++ spdk_json_write_named_uint64(w, "flush_ios", blk_stat->flush_ios); ++ spdk_json_write_named_uint64(w, "complete_flush_ios", blk_stat->complete_flush_ios); ++ spdk_json_write_named_uint64(w, "err_flush_ios", blk_stat->err_flush_ios); ++ spdk_json_write_named_uint64(w, "other_ios", blk_stat->other_ios); ++ spdk_json_write_named_uint64(w, "complete_other_ios", blk_stat->complete_other_ios); ++ spdk_json_write_named_uint64(w, "err_other_ios", blk_stat->err_other_ios); ++ ++ spdk_json_write_named_uint64(w, "fatal_ios", blk_stat->fatal_ios); ++ spdk_json_write_named_uint64(w, "io_retry", blk_stat->io_retry); ++ spdk_json_write_named_object_begin(w, "counters"); ++ spdk_json_write_named_uint64(w, "start_count", blk_stat->start_count); ++ spdk_json_write_named_uint64(w, "dma_count", blk_stat->dma_count); ++ spdk_json_write_named_uint64(w, "dma_complete_count", blk_stat->dma_complete_count); ++ spdk_json_write_named_uint64(w, "bdev_count", blk_stat->bdev_count); ++ spdk_json_write_named_uint64(w, "bdev_complete_count", blk_stat->bdev_complete_count); ++ spdk_json_write_object_end(w); ++ spdk_json_write_named_object_begin(w, "details"); ++ spdk_json_write_named_uint64(w, "count", blk_stat->count); ++ if (blk_stat->count == 0) { ++ blk_stat->count = 1; ++ } ++ spdk_json_write_named_string_fmt(w, "total_lat", "%.9f", ++ (float)blk_stat->total_tsc / blk_stat->count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "dma_lat", "%.9f", ++ (float)blk_stat->dma_tsc / blk_stat->count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "bdev_lat", "%.9f", ++ (float)blk_stat->bdev_tsc / blk_stat->count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "bdev_submit_lat", "%.9f", ++ (float)blk_stat->bdev_submit_tsc / blk_stat->count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "complete_lat", "%.9f", ++ (float)blk_stat->complete_tsc / blk_stat->count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "internal_lat", "%.9f", ++ (float)blk_stat->internel_tsc / blk_stat->count / ticks_hz); ++ spdk_json_write_object_end(w); ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_blk_function_iostat_sum(uint16_t gfunc_id, struct spdk_bdev_io_stat *stat, ++ struct ssam_blk_stat *blk_stat) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_blk_session *bsmsession = NULL; ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ if (smdev->smsessions[gfunc_id] != NULL) { ++ bsmsession = ssam_to_blk_session(smdev->smsessions[gfunc_id]); ++ stat->bytes_read += bsmsession->stat.bytes_read; ++ stat->num_read_ops += bsmsession->stat.num_read_ops; ++ stat->bytes_written += bsmsession->stat.bytes_written; ++ stat->num_write_ops += bsmsession->stat.num_write_ops; ++ stat->bytes_unmapped 
+= bsmsession->stat.bytes_unmapped; ++ stat->num_unmap_ops += bsmsession->stat.num_unmap_ops; ++ stat->bytes_copied += bsmsession->stat.bytes_copied; ++ stat->num_copy_ops += bsmsession->stat.num_copy_ops; ++ stat->read_latency_ticks += bsmsession->stat.read_latency_ticks; ++ stat->max_read_latency_ticks += bsmsession->stat.max_read_latency_ticks; ++ stat->min_read_latency_ticks += bsmsession->stat.min_read_latency_ticks; ++ stat->write_latency_ticks += bsmsession->stat.write_latency_ticks; ++ stat->max_write_latency_ticks += bsmsession->stat.max_write_latency_ticks; ++ stat->min_write_latency_ticks += bsmsession->stat.min_write_latency_ticks; ++ stat->unmap_latency_ticks += bsmsession->stat.unmap_latency_ticks; ++ stat->max_unmap_latency_ticks += bsmsession->stat.max_unmap_latency_ticks; ++ stat->min_unmap_latency_ticks += bsmsession->stat.min_unmap_latency_ticks; ++ stat->copy_latency_ticks += bsmsession->stat.copy_latency_ticks; ++ stat->max_copy_latency_ticks += bsmsession->stat.max_copy_latency_ticks; ++ stat->min_copy_latency_ticks += bsmsession->stat.min_copy_latency_ticks; ++ stat->ticks_rate += bsmsession->stat.ticks_rate; ++ ++ blk_stat->count += bsmsession->blk_stat.count; ++ blk_stat->start_count += bsmsession->blk_stat.start_count; ++ blk_stat->total_tsc += bsmsession->blk_stat.total_tsc; ++ blk_stat->dma_tsc += bsmsession->blk_stat.dma_tsc; ++ blk_stat->dma_count += bsmsession->blk_stat.dma_count; ++ blk_stat->dma_complete_count += bsmsession->blk_stat.dma_complete_count; ++ blk_stat->bdev_tsc += bsmsession->blk_stat.bdev_tsc; ++ blk_stat->bdev_submit_tsc += bsmsession->blk_stat.bdev_submit_tsc; ++ blk_stat->bdev_count += bsmsession->blk_stat.bdev_count; ++ blk_stat->bdev_complete_count += bsmsession->blk_stat.bdev_complete_count; ++ blk_stat->complete_tsc += bsmsession->blk_stat.complete_tsc; ++ blk_stat->internel_tsc += bsmsession->blk_stat.internel_tsc; ++ blk_stat->complete_read_ios += bsmsession->blk_stat.complete_read_ios; ++ blk_stat->err_read_ios += bsmsession->blk_stat.err_read_ios; ++ blk_stat->complete_write_ios += bsmsession->blk_stat.complete_write_ios; ++ blk_stat->err_write_ios += bsmsession->blk_stat.err_write_ios; ++ blk_stat->flush_ios += bsmsession->blk_stat.flush_ios; ++ blk_stat->complete_flush_ios += bsmsession->blk_stat.complete_flush_ios; ++ blk_stat->err_flush_ios += bsmsession->blk_stat.err_flush_ios; ++ blk_stat->other_ios += bsmsession->blk_stat.other_ios; ++ blk_stat->complete_other_ios += bsmsession->blk_stat.complete_other_ios; ++ blk_stat->err_other_ios += bsmsession->blk_stat.err_other_ios; ++ blk_stat->fatal_ios += bsmsession->blk_stat.fatal_ios; ++ blk_stat->io_retry += bsmsession->blk_stat.io_retry; ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ return; ++} ++ ++static void ++ssam_blk_show_iostat_json(struct spdk_ssam_session *smsession, ++ struct spdk_ssam_show_iostat_args *args, ++ struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ struct spdk_bdev_io_stat stat = {0}; ++ struct ssam_blk_stat blk_stat = {0}; ++ ++ switch (args->mode) { ++ case SSAM_IOSTAT_NORMAL: ++ if (args->id == SPDK_INVALID_ID) { ++ memcpy(&stat, &bsmsession->stat, sizeof(struct spdk_bdev_io_stat)); ++ memcpy(&blk_stat, &bsmsession->blk_stat, sizeof(struct ssam_blk_stat)); ++ } else { ++ memcpy(&stat, &bsmsession->vq_stat[args->id], sizeof(struct spdk_bdev_io_stat)); ++ memcpy(&blk_stat, &bsmsession->vq_blk_stat[args->id], sizeof(struct ssam_blk_stat)); ++ } ++ ssam_blk_print_iostat_json(smsession, 
args->id, &stat, &blk_stat, w); ++ break; ++ case SSAM_IOSTAT_SUM: ++ if (bsmsession->need_write_config == false) { ++ return; ++ } ++ ssam_blk_function_iostat_sum(smsession->gfunc_id, &stat, &blk_stat); ++ ssam_blk_print_iostat_json(smsession, args->id, &stat, &blk_stat, w); ++ break; ++ case SSAM_IOSTAT_DUMP_VQ: ++ for (int i = 0; i < smsession->max_queues; i++) { ++ if (bsmsession->vq_blk_stat[i].start_count == 0) { ++ continue; ++ } ++ ssam_blk_print_iostat_json(smsession, i, &bsmsession->vq_stat[i], &bsmsession->vq_blk_stat[i], w); ++ } ++ break; ++ case SSAM_IOSTAT_SPARSE: ++ if (args->id == SPDK_INVALID_ID) { ++ memcpy(&stat, &bsmsession->stat, sizeof(struct spdk_bdev_io_stat)); ++ memcpy(&blk_stat, &bsmsession->blk_stat, sizeof(struct ssam_blk_stat)); ++ } else { ++ memcpy(&stat, &bsmsession->vq_stat[args->id], sizeof(struct spdk_bdev_io_stat)); ++ memcpy(&blk_stat, &bsmsession->vq_blk_stat[args->id], sizeof(struct ssam_blk_stat)); ++ } ++ if (blk_stat.start_count == 0) { ++ return; ++ } ++ ssam_blk_print_iostat_json(smsession, args->id, &stat, &blk_stat, w); ++ break; ++ default: ++ break; ++ } ++ return; ++} ++ ++static void ++ssam_blk_clear_iostat_json(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ memset(&bsmsession->stat, 0, sizeof(struct spdk_bdev_io_stat) - sizeof( ++ uint64_t)); /* exclude ticks_rate */ ++ memset(&bsmsession->blk_stat, 0, sizeof(struct ssam_blk_stat)); ++ for (int i = 0; i < smsession->max_queues; i++) { ++ memset(&bsmsession->vq_stat[i], 0, ++ sizeof(struct spdk_bdev_io_stat) - sizeof(uint64_t)); /* exclude ticks_rate */ ++ memset(&bsmsession->vq_blk_stat[i], 0, sizeof(struct ssam_blk_stat)); ++ } ++} ++ ++static struct spdk_bdev * ++ssam_blk_get_bdev(struct spdk_ssam_session *smsession, uint32_t id) ++{ ++ struct spdk_bdev *bdev = ssam_get_session_bdev(smsession); ++ ++ return bdev; ++} ++ ++static const struct spdk_ssam_session_backend g_ssam_blk_session_backend = { ++ .type = VIRTIO_TYPE_BLK, ++ .remove_session = ssam_blk_remove_session, ++ .request_worker = ssam_blk_request_worker, ++ .destroy_bdev_device = ssam_blk_destroy_bdev_device, ++ .response_worker = ssam_blk_response_worker, ++ .no_data_req_worker = ssam_blk_no_data_request_worker, ++ .ssam_get_config = ssam_blk_get_config, ++ .print_stuck_io_info = ssam_blk_print_stuck_io_info, ++ .dump_info_json = ssam_blk_dump_info_json, ++ .write_config_json = ssam_blk_write_config_json, ++ .show_iostat_json = ssam_blk_show_iostat_json, ++ .clear_iostat_json = ssam_blk_clear_iostat_json, ++ .get_bdev = ssam_blk_get_bdev, ++ .remove_self = NULL, ++}; ++ ++/* Clean Smsession */ ++static int ++ssam_destroy_poller_cb(void *arg) ++{ ++ struct spdk_ssam_blk_session *bsmsession = (struct spdk_ssam_blk_session *)arg; ++ struct spdk_ssam_session *smsession = &bsmsession->smsession; ++ struct spdk_ssam_dev *smdev = smsession->smdev; ++ ++ SPDK_NOTICELOG("%s: remaining %u tasks\n", smsession->name, smsession->task_cnt); ++ ++ /* stop poller */ ++ spdk_poller_unregister(&bsmsession->stop_bdev_poller); ++ ++ /* remove session */ ++ ssam_sessions_remove(smdev->smsessions, smsession); ++ if (smdev->active_session_num > 0) { ++ smdev->active_session_num--; ++ } ++ smsession->smdev = NULL; ++ ++ /* put ioChannle */ ++ if (bsmsession->io_channel != NULL) { ++ spdk_put_io_channel(bsmsession->io_channel); ++ bsmsession->io_channel = NULL; ++ } ++ ++ /* close bdev device, last step, async */ ++ ssam_send_dev_destroy_msg(smsession, NULL); ++ ++ /* free 
smsession not here, but after close bdev device; ++ * see ssam_blk_destroy_bdev_device() ++ */ ++ ++ return SPDK_POLLER_BUSY; ++} ++ ++static int ++ssam_session_bdev_remove_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ int rc = 0; ++ ++ /* smsession already removed */ ++ if (!smsession->started) { ++ return 0; ++ } else { ++ smsession->started = false; ++ } ++ ++ if (smsession->task_cnt == 0) { ++ g_hpd_delete_session_times[smsession->gfunc_id]++; ++ } ++ ++ if ((ssam_get_en_hpd() == true) && ++ (g_hpd_delete_session_times[smsession->gfunc_id] == ssam_get_core_num())) { ++ rc = ssam_hotplug_del(smsession->gfunc_id); ++ if (rc != 0) { ++ SPDK_ERRLOG("BLK controller %s hotplug deleted failed\n", smsession->name); ++ } ++ } ++ bsmsession->stop_bdev_poller = SPDK_POLLER_REGISTER(ssam_destroy_poller_cb, ++ bsmsession, 0); ++ if (g_hpd_delete_session_times[smsession->gfunc_id] == ssam_get_core_num()) { ++ rc = ssam_virtio_blk_resize(smsession->gfunc_id, 0); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: virtio blk resize failed when remove session.\n", smsession->name); ++ } ++ ++ if (ssam_get_hash_mode() == SSAM_VQ_HASH_MODE) { ++ rc = ssam_virtio_vq_unbind_core(smsession->gfunc_id); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: virtio blk vq unbind core failed.\n", smsession->name); ++ } ++ } ++ ++ if ((smsession->gfunc_id > SSAM_PF_MAX_NUM) || (ssam_get_en_hpd() == true)) { ++ rc = ssam_virtio_blk_release_resource(smsession->gfunc_id); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: virtio blk release vq failed.\n", smsession->name); ++ } ++ } ++ } ++ ++ ssam_set_session_be_freed(ctx); ++ ssam_send_event_async_done(ctx); ++ ++ return 0; ++} ++ ++static void ++ssam_bdev_remove_cb(void *remove_ctx) ++{ ++ struct spdk_ssam_session *smsession = remove_ctx; ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = false, ++ .need_rsp = true, ++ }; ++ ++ SPDK_WARNLOG("%s: hot-removing bdev - all further requests will be stucked.\n", ++ smsession->name); ++ ++ ssam_send_event_to_session(smsession, ssam_session_bdev_remove_cb, ++ NULL, send_event_flag, NULL); ++} ++ ++static void ++ssam_session_bdev_resize_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ int rc; ++ ++ rc = ssam_virtio_blk_resize(smsession->gfunc_id, bsmsession->bdev->blockcnt); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: virtio blk resize failed.\n", smsession->name); ++ } ++} ++ ++static void ++ssam_blk_resize_cb(void *resize_ctx) ++{ ++ struct spdk_ssam_session *smsession = resize_ctx; ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = false, ++ .need_rsp = true, ++ }; ++ ++ ssam_send_event_to_session(smsession, NULL, ssam_session_bdev_resize_cb, send_event_flag, NULL); ++} ++ ++static void ++ssam_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, ++ void *event_ctx) ++{ ++ SPDK_DEBUGLOG(ssam_blk, "Bdev event: type %d, name %s\n", ++ type, bdev->name); ++ ++ switch (type) { ++ case SPDK_BDEV_EVENT_REMOVE: ++ SPDK_NOTICELOG("bdev name (%s) received event(SPDK_BDEV_EVENT_REMOVE)\n", ++ bdev->name); ++ ssam_bdev_remove_cb(event_ctx); ++ break; ++ case SPDK_BDEV_EVENT_RESIZE: ++ SPDK_NOTICELOG("bdev name (%s) received event(SPDK_BDEV_EVENT_RESIZE)\n", ++ bdev->name); ++ ssam_blk_resize_cb(event_ctx); ++ break; ++ default: ++ SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type); ++ break; ++ } ++} ++ ++static void 
++ssam_free_task_pool(struct spdk_ssam_blk_session *bsmsession) ++{ ++ struct spdk_ssam_session *smsession = &bsmsession->smsession; ++ struct spdk_ssam_virtqueue *vq = NULL; ++ uint16_t max_queues = smsession->max_queues; ++ uint16_t i; ++ ++ if (max_queues > SPDK_SSAM_MAX_VQUEUES) { ++ return; ++ } ++ ++ for (i = 0; i < max_queues; i++) { ++ vq = &smsession->virtqueue[i]; ++ if (vq->tasks != NULL) { ++ spdk_free(vq->tasks); ++ vq->tasks = NULL; ++ } ++ ++ if (vq->index != NULL) { ++ spdk_free(vq->index); ++ vq->index = NULL; ++ } ++ } ++} ++ ++static int ++ssam_alloc_task_pool(struct spdk_ssam_session *smsession, uint16_t vq_idx) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ struct spdk_ssam_virtqueue *vq = &smsession->virtqueue[vq_idx]; ++ struct spdk_ssam_blk_task *task = NULL; ++ uint32_t task_cnt = smsession->queue_size; ++ uint16_t i; ++ ++ if (vq->tasks != NULL && vq->index != NULL) { ++ return 0; ++ } ++ ++ vq->smsession = smsession; ++ vq->num = task_cnt; ++ vq->use_num = 0; ++ vq->index_l = 0; ++ vq->index_r = 0; ++ if (vq->tasks == NULL) { ++ vq->tasks = spdk_zmalloc(sizeof(struct spdk_ssam_blk_task) * task_cnt, ++ SPDK_CACHE_LINE_SIZE, NULL, ++ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); ++ } ++ if (vq->index == NULL) { ++ vq->index = spdk_zmalloc(sizeof(uint32_t) * task_cnt, ++ SPDK_CACHE_LINE_SIZE, NULL, ++ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); ++ } ++ if (vq->tasks == NULL || vq->index == NULL) { ++ SPDK_ERRLOG("%s: failed to allocate %"PRIu32" tasks for virtqueue %"PRIu16"\n", ++ smsession->name, task_cnt, vq_idx); ++ if (vq->tasks != NULL) { ++ spdk_free(vq->tasks); ++ vq->tasks = NULL; ++ } ++ ++ if (vq->index != NULL) { ++ spdk_free(vq->index); ++ vq->index = NULL; ++ } ++ return -ENOMEM; ++ } ++ for (i = 0; i < task_cnt; i++) { ++ task = &((struct spdk_ssam_blk_task *)vq->tasks)[i]; ++ task->bsmsession = bsmsession; ++ task->task_idx = i; ++ vq->index[i] = i; ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_blk_print_stuck_io_info(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_blk_task *tasks; ++ struct spdk_ssam_blk_task *task; ++ int i, j; ++ ++ for (i = 0; i < smsession->max_queues; i++) { ++ for (j = 0; j < smsession->queue_size; j++) { ++ tasks = (struct spdk_ssam_blk_task *)smsession->virtqueue[i].tasks; ++ if (tasks == NULL) { ++ continue; ++ } ++ task = &tasks[j]; ++ if (task == NULL) { ++ continue; ++ } ++ if (task->used) { ++ SPDK_INFOLOG(ssam_blk, "%s: stuck io payload_size %u, vq_idx %u, req_idx %u\n", ++ smsession->name, task->payload_size, task->vq_idx, task->req_idx); ++ } ++ } ++ } ++} ++ ++static uint16_t ++get_req_idx(struct spdk_ssam_blk_task *task) ++{ ++ return task->io_req->req.cmd.virtio.req_idx; ++} ++ ++static void ++ssam_blk_task_init(struct spdk_ssam_blk_task *task) ++{ ++ task->used = true; ++ task->iovcnt = 0; ++ task->io_req = NULL; ++ task->payload_size = 0; ++ memset(&task->task_stat, 0, sizeof(task->task_stat)); ++ ssam_task_stat_tick(&task->task_stat.start_tsc); ++} ++ ++static void ++ssam_blk_task_finish(struct spdk_ssam_blk_task *task) ++{ ++ struct spdk_ssam_session *smsession = &task->bsmsession->smsession; ++ struct spdk_ssam_virtqueue *vq = &smsession->virtqueue[task->vq_idx]; ++ ++ if (smsession->task_cnt == 0) { ++ SPDK_ERRLOG("smsession %s: task internel error\n", smsession->name); ++ return; ++ } ++ ++ task->io_req = NULL; ++ task->payload_size = 0; ++ ++ if (task->iovs.virt.sges[0].iov_base != NULL) { ++ ssam_mempool_free(smsession->mp, task->iovs.virt.sges[0].iov_base); 
++ task->iovs.virt.sges[0].iov_base = NULL; ++ } ++ ++ memset(&task->iovs, 0, sizeof(task->iovs)); ++ ++ task->iovcnt = 0; ++ smsession->task_cnt--; ++ task->used = false; ++ vq->index[vq->index_l] = task->task_idx; ++ vq->index_l = (vq->index_l + 1) & 0xFF; ++ vq->use_num--; ++} ++ ++static int ++ssam_blk_io_complete(struct spdk_ssam_dev *smdev, struct ssam_request *io_req, uint8_t status) ++{ ++ struct ssam_io_response io_resp; ++ struct ssam_virtio_res *virtio_res = (struct ssam_virtio_res *)&io_resp.data; ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ struct iovec io_vec; ++ uint8_t res_status = status; ++ int rc; ++ ++ if (status != VIRTIO_BLK_S_OK) { ++ SPDK_ERRLOG("ssam io complete return error tid=%u gfunc_id:%u.\n", smdev->tid, io_req->gfunc_id); ++ } ++ ++ memset(&io_resp, 0, sizeof(io_resp)); ++ io_resp.gfunc_id = io_req->gfunc_id; ++ io_resp.iocb_id = io_req->iocb_id; ++ io_resp.status = io_req->status; ++ io_resp.req = io_req; ++ io_resp.flr_seq = io_req->flr_seq; ++ ++ virtio_res->iovs = &io_vec; ++ virtio_res->iovs->iov_base = io_cmd->iovs[io_cmd->iovcnt - 1].iov_base; ++ virtio_res->iovs->iov_len = io_cmd->iovs[io_cmd->iovcnt - 1].iov_len; ++ virtio_res->iovcnt = 1; ++ virtio_res->rsp = &res_status; ++ virtio_res->rsp_len = sizeof(res_status); ++ ++ rc = ssam_io_complete(smdev->tid, &io_resp); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ ssam_dev_io_dec(smdev); ++ return 0; ++} ++ ++struct ssam_task_complete_arg { ++ struct spdk_ssam_blk_task *task; ++ uint8_t status; ++}; ++ ++static void ++ssam_task_complete_cb(void *arg) ++{ ++ struct ssam_task_complete_arg *cb_arg = (struct ssam_task_complete_arg *)arg; ++ struct spdk_ssam_session *smsession = &cb_arg->task->bsmsession->smsession; ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ struct spdk_ssam_blk_task *task = cb_arg->task; ++ int rc = ssam_blk_io_complete(smsession->smdev, task->io_req, cb_arg->status); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_task_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ ssam_task_stat_tick(&task->task_stat.complete_end_tsc); ++ ssam_blk_stat_statistics(task, &bsmsession->stat, &bsmsession->blk_stat, cb_arg->status); ++ ssam_blk_stat_statistics(task, &bsmsession->vq_stat[task->vq_idx], ++ &bsmsession->vq_blk_stat[task->vq_idx], ++ cb_arg->status); ++ ssam_blk_task_finish(task); ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ++ssam_task_complete(struct spdk_ssam_blk_task *task, uint8_t status) ++{ ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ struct spdk_ssam_session *smsession = &task->bsmsession->smsession; ++ if (status != VIRTIO_BLK_S_OK) { ++ SPDK_ERRLOG("ssam task return error tid=%u gfunc_id:%u.\n", ++ smsession->smdev->tid, task->io_req->gfunc_id); ++ } ++ SPDK_INFOLOG(ssam_blk_data, "handled io tid=%u gfunc_id=%u rw=%u vqid=%u reqid=%u status=%u.\n", ++ smsession->smdev->tid, smsession->gfunc_id, task->io_req->req.cmd.writable, ++ task->io_req->req.cmd.virtio.vq_idx, task->io_req->req.cmd.virtio.req_idx, status); ++ ssam_task_stat_tick(&task->task_stat.complete_start_tsc); ++ int rc = ssam_blk_io_complete(smsession->smdev, task->io_req, status); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ 
calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_task_complete_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_task_complete_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->status = status; ++ cb_arg->task = task; ++ io_wait_r->cb_fn = ssam_task_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ ssam_task_stat_tick(&task->task_stat.complete_end_tsc); ++ ssam_blk_stat_statistics(task, &bsmsession->stat, &bsmsession->blk_stat, status); ++ ssam_blk_stat_statistics(task, &bsmsession->vq_stat[task->vq_idx], ++ &bsmsession->vq_blk_stat[task->vq_idx], ++ status); ++ ssam_blk_task_finish(task); ++} ++ ++struct ssam_blk_dma_data_request_arg { ++ struct spdk_ssam_dev *smdev; ++ struct spdk_ssam_blk_task *task; ++ struct ssam_dma_request dma_req; ++}; ++ ++static void ++ssam_blk_dma_data_request_cb(void *arg) ++{ ++ struct ssam_blk_dma_data_request_arg *cb_arg = (struct ssam_blk_dma_data_request_arg *)arg; ++ int ret = ssam_dma_data_request(cb_arg->smdev->tid, &cb_arg->dma_req); ++ if (ret == -ENOMEM || ret == -EIO) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_blk_dma_data_request_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(cb_arg->smdev, io_wait_r); ++ return; ++ } ++ if (ret < 0) { ++ SPDK_ERRLOG("%s: ssam dma data request failed:%s\n", ++ cb_arg->task->bsmsession->smsession.name, spdk_strerror(-ret)); ++ ssam_task_complete(cb_arg->task, VIRTIO_BLK_S_IOERR); ++ } ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ++ssam_res_dma_process(struct spdk_ssam_session *smsession, ++ struct spdk_ssam_blk_task *task, uint32_t type, uint8_t status) ++{ ++ struct ssam_dma_request dma_req = {0}; ++ uint16_t tid = smsession->smdev->tid; ++ int ret; ++ ++ ssam_data_request_para(&dma_req, task, type, status); ++ ssam_task_stat_tick(&task->task_stat.dma_start_tsc); ++ task->bsmsession->blk_stat.dma_count++; ++ task->bsmsession->vq_blk_stat[task->vq_idx].dma_count++; ++ ret = ssam_dma_data_request(tid, &dma_req); ++ if (ret == -ENOMEM || ret == -EIO) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_blk_dma_data_request_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_blk_dma_data_request_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smsession->smdev; ++ cb_arg->dma_req = dma_req; ++ cb_arg->task = task; ++ io_wait_r->cb_fn = ssam_blk_dma_data_request_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ ++ if (ret < 0) { ++ SPDK_ERRLOG("%s: ssam dma data request failed:%s\n", smsession->name, spdk_strerror(-ret)); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ } ++} ++ ++static void ++ssam_blk_request_finish(bool success, struct spdk_ssam_blk_task *task) ++{ ++ uint8_t res_status = success ? 
VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR; ++ const struct virtio_blk_outhdr *req = NULL; ++ struct spdk_ssam_session *smsession = &task->bsmsession->smsession; ++ if (res_status != VIRTIO_BLK_S_OK) { ++ SPDK_ERRLOG("request finish return error gfunc_id=%u.\n", smsession->gfunc_id); ++ } ++ ++ req = (struct virtio_blk_outhdr *)task->io_req->req.cmd.header; ++ switch (req->type) { ++ case VIRTIO_BLK_T_IN: ++ case VIRTIO_BLK_T_GET_ID: ++ ssam_res_dma_process(smsession, task, SSAM_REQUEST_DATA_STORE, res_status); ++ break; ++ ++ case VIRTIO_BLK_T_OUT: ++ case VIRTIO_BLK_T_DISCARD: ++ case VIRTIO_BLK_T_WRITE_ZEROES: ++ case VIRTIO_BLK_T_FLUSH: ++ ssam_task_complete(task, res_status); ++ break; ++ ++ default: ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ SPDK_ERRLOG("Not supported request type '%"PRIu32"'.\n", req->type); ++ break; ++ } ++} ++ ++static void ++ssam_blk_req_complete_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) ++{ ++ struct spdk_ssam_blk_task *task = cb_arg; ++ ++ if (spdk_unlikely(spdk_get_shutdown_sig_received())) { ++ /* ++ * In the hot restart process, when this callback is triggered, ++ * the task and bdev_io memory may have been released. ++ * Therefore, task and bdev_io are not released in this scenario. ++ */ ++ return; ++ } ++ ++ /* Second part start of read and write */ ++ SPDK_INFOLOG(ssam_blk_data, ++ "backend io finish tid=%u gfunc_id=%u rw=%u vqid=%u reqid=%u success=%d.\n", ++ task->bsmsession->smsession.smdev->tid, task->bsmsession->smsession.gfunc_id, ++ task->io_req->req.cmd.writable, task->io_req->req.cmd.virtio.vq_idx, ++ task->io_req->req.cmd.virtio.req_idx, ++ success); ++ task->bsmsession->bdev_count--; ++ task->bsmsession->blk_stat.bdev_complete_count++; ++ task->bsmsession->vq_blk_stat[task->vq_idx].bdev_complete_count++; ++ ssam_task_stat_tick(&task->task_stat.bdev_end_tsc); ++ ++ spdk_bdev_free_io(bdev_io); ++ ssam_blk_request_finish(success, task); ++} ++ ++static int ++ssam_request_rc_process(int rc, struct spdk_ssam_blk_task *task) ++{ ++ if (rc == 0) { ++ return rc; ++ } ++ ++ if (rc == -ENOMEM) { ++ SPDK_WARNLOG("No memory, start to queue io.\n"); ++ ssam_request_queue_io(task); ++ } else { ++ SPDK_ERRLOG("IO error, gfunc_id=%u.\n", task->bsmsession->smsession.gfunc_id); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ return rc; ++} ++ ++static bool ++ssam_is_req_sector_err(uint64_t sector) ++{ ++ if (sector > (UINT64_MAX / SECTOR_SIZE)) { ++ SPDK_ERRLOG("req sector out of range, need less or equal than %lu, actually %lu\n", ++ (UINT64_MAX / SECTOR_SIZE), sector); ++ return true; ++ } ++ ++ return false; ++} ++ ++static int ++ssam_virtio_read_write_process(struct spdk_ssam_blk_task *task, ++ const struct virtio_blk_outhdr *req) ++{ ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ struct ssam_io_message *io_cmd = NULL; ++ uint32_t payload_size = task->payload_size; ++ int rc; ++ ++ io_cmd = &task->io_req->req.cmd; ++ ++ if (ssam_is_req_sector_err(req->sector)) { ++ SPDK_ERRLOG("rw check sector error, gfunc_id=%u.\n", bsmsession->smsession.gfunc_id); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ if (spdk_unlikely(payload_size == 0 || (payload_size & (SECTOR_SIZE - 1)) != 0)) { ++ SPDK_ERRLOG("%s - passed IO buffer is not multiple of 512 Bytes (req_idx = %"PRIu16"), " ++ "payload_size = %u, iovcnt = %u.\n", req->type ? 
"WRITE" : "READ", ++ get_req_idx(task), payload_size, io_cmd->iovcnt); ++ ssam_task_complete(task, VIRTIO_BLK_S_UNSUPP); ++ return -1; ++ } ++ ++ if (req->type == VIRTIO_BLK_T_IN) { ++ bsmsession->bdev_count++; ++ ssam_task_stat_tick(&task->task_stat.bdev_start_tsc); ++ rc = spdk_bdev_readv(bsmsession->bdev_desc, bsmsession->io_channel, ++ task->iovs.virt.sges, task->iovcnt, req->sector * SECTOR_SIZE, ++ payload_size, ssam_blk_req_complete_cb, task); ++ ssam_task_stat_tick(&task->task_stat.bdev_func_tsc); ++ } else if (!bsmsession->readonly) { ++ bsmsession->bdev_count++; ++ ssam_task_stat_tick(&task->task_stat.bdev_start_tsc); ++ rc = spdk_bdev_writev(bsmsession->bdev_desc, bsmsession->io_channel, ++ task->iovs.virt.sges, task->iovcnt, req->sector * SECTOR_SIZE, ++ payload_size, ssam_blk_req_complete_cb, task); ++ ssam_task_stat_tick(&task->task_stat.bdev_func_tsc); ++ } else { ++ SPDK_DEBUGLOG(ssam_blk, "Device is in read-only mode!\n"); ++ rc = -1; ++ } ++ ++ return ssam_request_rc_process(rc, task); ++} ++ ++static int ++ssam_virtio_discard_process(struct spdk_ssam_blk_task *task) ++{ ++ int rc; ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ struct virtio_blk_discard_write_zeroes *desc = task->iovs.virt.sges[0].iov_base; ++ ++ if (ssam_is_req_sector_err(desc->sector)) { ++ SPDK_ERRLOG("discard check sector error, gfunc_id=%u.\n", bsmsession->smsession.gfunc_id); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ if (task->payload_size != sizeof(*desc)) { ++ SPDK_ERRLOG("Invalid discard payload size: %u\n", task->payload_size); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ if (desc->flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) { ++ SPDK_ERRLOG("UNMAP flag is only used for WRITE ZEROES command\n"); ++ ssam_task_complete(task, VIRTIO_BLK_S_UNSUPP); ++ return -1; ++ } ++ bsmsession->bdev_count++; ++ rc = spdk_bdev_unmap(bsmsession->bdev_desc, bsmsession->io_channel, ++ desc->sector * SECTOR_SIZE, desc->num_sectors * SECTOR_SIZE, ++ ssam_blk_req_complete_cb, task); ++ ++ return ssam_request_rc_process(rc, task); ++} ++ ++static int ++ssam_virtio_write_zeroes_process(struct spdk_ssam_blk_task *task) ++{ ++ int rc; ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ struct virtio_blk_discard_write_zeroes *desc = task->iovs.virt.sges[0].iov_base; ++ ++ if (ssam_is_req_sector_err(desc->sector)) { ++ SPDK_ERRLOG("write zeros check sector error, gfunc_id=%u.\n", bsmsession->smsession.gfunc_id); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ if (task->payload_size != sizeof(*desc)) { ++ SPDK_NOTICELOG("Invalid write zeroes payload size: %u\n", task->payload_size); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ if (desc->flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) { ++ SPDK_WARNLOG("Ignore the unmap flag for WRITE ZEROES from %"PRIx64", len %"PRIx64"\n", ++ (uint64_t)desc->sector * SECTOR_SIZE, (uint64_t)desc->num_sectors * SECTOR_SIZE); ++ } ++ bsmsession->bdev_count++; ++ rc = spdk_bdev_write_zeroes(bsmsession->bdev_desc, bsmsession->io_channel, ++ desc->sector * SECTOR_SIZE, desc->num_sectors * SECTOR_SIZE, ssam_blk_req_complete_cb, task); ++ ++ return ssam_request_rc_process(rc, task); ++} ++ ++static int ++ssam_virtio_flush_process(struct spdk_ssam_blk_task *task, ++ const struct virtio_blk_outhdr *req) ++{ ++ int rc; ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ uint64_t blockcnt = spdk_bdev_get_num_blocks(bsmsession->bdev); ++ 
uint32_t blocklen = spdk_bdev_get_block_size(bsmsession->bdev); ++ uint64_t flush_bytes; ++ ++ if (blocklen == 0) { ++ SPDK_ERRLOG("bdev's blocklen %u error.\n", blocklen); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ if (req->sector != 0) { ++ SPDK_ERRLOG("sector must be zero for flush command\n"); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ if (blockcnt > (UINT64_MAX / blocklen)) { ++ SPDK_ERRLOG("bdev's blockcnt %lu or blocklen %u out of range.\n", ++ blockcnt, blocklen); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ flush_bytes = blockcnt * blocklen; ++ bsmsession->bdev_count++; ++ rc = spdk_bdev_flush(bsmsession->bdev_desc, bsmsession->io_channel, ++ 0, flush_bytes, ssam_blk_req_complete_cb, task); ++ ++ return ssam_request_rc_process(rc, task); ++} ++ ++static int ++ssam_virtio_get_id_process(struct spdk_ssam_blk_task *task) ++{ ++ uint32_t used_length; ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ ++ if (task->iovcnt == 0 || task->payload_size == 0) { ++ SPDK_ERRLOG("check task param error, gfunc_id=%u.\n", bsmsession->smsession.gfunc_id); ++ ssam_task_complete(task, VIRTIO_BLK_S_UNSUPP); ++ return -1; ++ } ++ ++ used_length = spdk_min((size_t)VIRTIO_BLK_ID_BYTES, task->iovs.virt.sges[0].iov_len); ++ if (bsmsession->serial == NULL) { ++ spdk_strcpy_pad(task->iovs.virt.sges[0].iov_base, spdk_bdev_get_product_name(bsmsession->bdev), ++ used_length, ' '); ++ } else { ++ spdk_strcpy_pad(task->iovs.virt.sges[0].iov_base, bsmsession->serial, ++ used_length, ' '); ++ } ++ bsmsession->blk_stat.bdev_complete_count++; ++ bsmsession->vq_blk_stat[task->vq_idx].bdev_complete_count++; ++ ssam_blk_request_finish(true, task); ++ ++ return 0; ++} ++ ++static int ++ssam_io_process(struct spdk_ssam_blk_task *task, const struct virtio_blk_outhdr *req) ++{ ++ int rc; ++ SPDK_INFOLOG(ssam_blk_data, ++ "backend io start tid=%u gfunc_id=%u reqtype=%d rw=%u vqid=%u reqid=%u offset=%llu length=%u.\n", ++ task->bsmsession->smsession.smdev->tid, task->bsmsession->smsession.gfunc_id, req->type, ++ task->io_req->req.cmd.writable, task->io_req->req.cmd.virtio.vq_idx, ++ task->io_req->req.cmd.virtio.req_idx, ++ req->sector * SECTOR_SIZE, task->payload_size); ++ task->bsmsession->blk_stat.bdev_count++; ++ task->bsmsession->vq_blk_stat[task->vq_idx].bdev_count++; ++ switch (req->type) { ++ case VIRTIO_BLK_T_IN: ++ case VIRTIO_BLK_T_OUT: ++ rc = ssam_virtio_read_write_process(task, req); ++ break; ++ case VIRTIO_BLK_T_DISCARD: ++ rc = ssam_virtio_discard_process(task); ++ break; ++ case VIRTIO_BLK_T_WRITE_ZEROES: ++ rc = ssam_virtio_write_zeroes_process(task); ++ break; ++ case VIRTIO_BLK_T_FLUSH: ++ rc = ssam_virtio_flush_process(task, req); ++ break; ++ case VIRTIO_BLK_T_GET_ID: ++ rc = ssam_virtio_get_id_process(task); ++ break; ++ default: ++ SPDK_ERRLOG("Not supported request type '%"PRIu32"'.\n", req->type); ++ ssam_task_complete(task, VIRTIO_BLK_S_UNSUPP); ++ return -1; ++ } ++ ++ return rc; ++} ++ ++static int ++ssam_process_blk_request(struct spdk_ssam_blk_task *task) ++{ ++ int ret; ++ struct iovec *iov = NULL; ++ const struct virtio_blk_outhdr *req = NULL; ++ struct ssam_io_message *io_cmd = NULL; ++ ++ io_cmd = &task->io_req->req.cmd; ++ /* get req header */ ++ if (spdk_unlikely(io_cmd->iovs[0].iov_len != sizeof(*req))) { ++ SPDK_ERRLOG("First descriptor size is %zu but expected %zu (req_idx = %"PRIu16").\n", ++ io_cmd->iovs[0].iov_len, sizeof(*req), get_req_idx(task)); ++ ssam_task_complete(task, 
VIRTIO_BLK_S_UNSUPP); ++ return -1; ++ } ++ ++ req = (struct virtio_blk_outhdr *)io_cmd->header; ++ /* get req tail */ ++ iov = &io_cmd->iovs[io_cmd->iovcnt - 1]; ++ if (spdk_unlikely(iov->iov_len != 1)) { ++ SPDK_ERRLOG("Last descriptor size is %zu but expected %d (req_idx = %"PRIu16").\n", ++ iov->iov_len, 1, get_req_idx(task)); ++ ssam_task_complete(task, VIRTIO_BLK_S_UNSUPP); ++ return -1; ++ } ++ ++ ret = ssam_io_process(task, req); ++ if (ret < 0) { ++ SPDK_ERRLOG("ssam io process failed(%d)\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_get_payload_size(struct ssam_request *io_req, uint32_t *payload_size) ++{ ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ uint32_t payload = 0; ++ uint32_t i; ++ ++ for (i = 1; i < io_cmd->iovcnt - 1; i++) { ++ if (spdk_unlikely((UINT32_MAX - io_cmd->iovs[i].iov_len) < payload)) { ++ SPDK_ERRLOG("payload size overflow\n"); ++ return -1; ++ } ++ payload += io_cmd->iovs[i].iov_len; ++ } ++ ++ if (spdk_unlikely(payload > PAYLOAD_SIZE_MAX)) { ++ SPDK_ERRLOG("payload size larger than %u, payload_size = %u\n", ++ PAYLOAD_SIZE_MAX, payload); ++ return -1; ++ } ++ ++ *payload_size = payload; ++ ++ return 0; ++} ++ ++static int ++ssam_task_iovs_memory_get(struct spdk_ssam_blk_task *task) ++{ ++ struct ssam_mempool *mp = task->bsmsession->smsession.mp; ++ void *buffer = NULL; ++ uint64_t phys_addr = 0; ++ ++ if (task->payload_size == 0) { ++ /* request type of VIRTIO_BLK_T_FLUSH does not have payload */ ++ task->iovs.virt.sges[0].iov_base = NULL; ++ return 0; ++ } ++ ++ task->iovs.virt.sges[0].iov_base = NULL; ++ task->iovs.phys.sges[0].iov_base = NULL; ++ task->iovs.virt.sges[0].iov_len = task->payload_size; ++ task->iovs.phys.sges[0].iov_len = task->payload_size; ++ task->iovcnt = 1; ++ ++ buffer = ssam_mempool_alloc(mp, task->payload_size, &phys_addr); ++ if (spdk_unlikely(buffer == NULL)) { ++ return -ENOMEM; ++ } ++ ++ /* ssam request max IO size is PAYLOAD_SIZE_MAX, only use one iov to save data */ ++ task->iovs.virt.sges[0].iov_base = buffer; ++ task->iovs.phys.sges[0].iov_base = (void *)phys_addr; ++ ++ return 0; ++} ++ ++static void ++ssam_data_request_para(struct ssam_dma_request *dma_req, struct spdk_ssam_blk_task *task, ++ uint32_t type, uint8_t status) ++{ ++ struct ssam_io_message *io_cmd = NULL; ++ struct spdk_ssam_dma_cb dma_cb = { ++ .status = status, ++ .req_dir = type, ++ .gfunc_id = task->io_req->gfunc_id, ++ .vq_idx = task->vq_idx, ++ .task_idx = task->task_idx ++ }; ++ ++ io_cmd = &task->io_req->req.cmd; ++ dma_req->cb = (void *) * (uint64_t *)&dma_cb; ++ dma_req->gfunc_id = task->io_req->gfunc_id; ++ dma_req->flr_seq = task->io_req->flr_seq; ++ dma_req->direction = type; ++ dma_req->data_len = task->payload_size; ++ if (type == SSAM_REQUEST_DATA_STORE) { ++ dma_req->src = task->iovs.phys.sges; ++ dma_req->src_num = task->iovcnt; ++ dma_req->dst = &io_cmd->iovs[1]; ++ /* dma data iovs does not contain header and tail */ ++ dma_req->dst_num = io_cmd->iovcnt - IOV_HEADER_TAIL_NUM; ++ } else if (type == SSAM_REQUEST_DATA_LOAD) { ++ dma_req->src = &io_cmd->iovs[1]; ++ /* dma data iovs does not contain header and tail */ ++ dma_req->src_num = io_cmd->iovcnt - IOV_HEADER_TAIL_NUM; ++ dma_req->dst = task->iovs.phys.sges; ++ dma_req->dst_num = task->iovcnt; ++ } ++} ++ ++static void ++ssam_request_dma_process(struct spdk_ssam_session *smsession, struct spdk_ssam_blk_task *task) ++{ ++ struct virtio_blk_outhdr *req = NULL; ++ int ret; ++ ++ req = (struct virtio_blk_outhdr *)task->io_req->req.cmd.header; ++ 
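++    /*
++     * Rough data flow implemented by the switch below: VIRTIO_BLK_T_IN,
++     * GET_ID and FLUSH are submitted to the backend bdev immediately; for
++     * IN and GET_ID the result is then DMA'd back to the host with
++     * SSAM_REQUEST_DATA_STORE in ssam_blk_request_finish(). Write-like
++     * requests (VIRTIO_BLK_T_OUT, DISCARD, WRITE_ZEROES) first fetch their
++     * payload from the host with SSAM_REQUEST_DATA_LOAD, and the bdev I/O
++     * is only started once ssam_blk_response_worker() sees that DMA complete.
++     */
++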
SPDK_INFOLOG(ssam_blk_data, ++ "request dma request io tid=%u gfunc_id=%u reqtype=%d rw=%u vqid=%u reqid=%u.\n", ++ smsession->smdev->tid, smsession->gfunc_id, req->type, task->io_req->req.cmd.writable, ++ task->io_req->req.cmd.virtio.vq_idx, task->io_req->req.cmd.virtio.req_idx); ++ ++ switch (req->type) { ++ case VIRTIO_BLK_T_IN: ++ case VIRTIO_BLK_T_GET_ID: ++ case VIRTIO_BLK_T_FLUSH: ++ ret = ssam_process_blk_request(task); ++ if (ret < 0) { ++ SPDK_ERRLOG("====== Task: req_idx %u failed ======\n", task->req_idx); ++ } ++ break; ++ ++ case VIRTIO_BLK_T_OUT: ++ case VIRTIO_BLK_T_DISCARD: ++ case VIRTIO_BLK_T_WRITE_ZEROES: ++ /* dma request: Host -> ipu */ ++ ssam_res_dma_process(smsession, task, SSAM_REQUEST_DATA_LOAD, 0); ++ break; ++ ++ default: ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ SPDK_ERRLOG("Not supported request type '%"PRIu32"'.\n", req->type); ++ } ++} ++ ++struct ssam_blk_io_complete_arg { ++ struct spdk_ssam_dev *smdev; ++ struct ssam_request *io_req; ++}; ++ ++static void ++ssam_blk_io_complete_cb(void *arg) ++{ ++ struct ssam_blk_io_complete_arg *cb_arg = (struct ssam_blk_io_complete_arg *)arg; ++ int rc = ssam_blk_io_complete(cb_arg->smdev, cb_arg->io_req, VIRTIO_BLK_S_IOERR); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_blk_io_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(cb_arg->smdev, io_wait_r); ++ return; ++ } ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ++ssam_process_blk_task(struct spdk_ssam_session *smsession, struct ssam_request *io_req, ++ uint16_t vq_idx, uint16_t req_idx, uint32_t payload_size) ++{ ++ int rc; ++ struct spdk_ssam_blk_task *task = NULL; ++ struct spdk_ssam_virtqueue *vq = &smsession->virtqueue[vq_idx]; ++ ++ rc = ssam_alloc_task_pool(smsession, vq_idx); ++ if (rc != 0) { ++ rc = ssam_blk_io_complete(smsession->smdev, io_req, VIRTIO_BLK_S_IOERR); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_blk_io_complete_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_blk_io_complete_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smsession->smdev; ++ cb_arg->io_req = io_req; ++ io_wait_r->cb_fn = ssam_blk_io_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ } ++ return; ++ } ++ ++ if (spdk_unlikely(vq->use_num >= vq->num)) { ++ SPDK_ERRLOG("Session:%s vq(%hu) task_cnt(%u) limit(%u).\n", smsession->name, vq_idx, vq->use_num, ++ vq->num); ++ rc = ssam_blk_io_complete(smsession->smdev, io_req, VIRTIO_BLK_S_IOERR); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_blk_io_complete_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_blk_io_complete_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smsession->smdev; ++ cb_arg->io_req = io_req; ++ io_wait_r->cb_fn = ssam_blk_io_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ } ++ return; ++ } ++ ++ uint32_t index = 
vq->index[vq->index_r]; ++ task = &((struct spdk_ssam_blk_task *)vq->tasks)[index]; ++ if (spdk_unlikely(task->used)) { ++ SPDK_ERRLOG("%s: vq(%u) task with idx %u is already pending.\n", smsession->name, vq_idx, index); ++ rc = ssam_blk_io_complete(smsession->smdev, io_req, VIRTIO_BLK_S_IOERR); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_blk_io_complete_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_blk_io_complete_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smsession->smdev; ++ cb_arg->io_req = io_req; ++ io_wait_r->cb_fn = ssam_blk_io_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ } ++ return; ++ } ++ ++ smsession->task_cnt++; ++ vq->index_r = (vq->index_r + 1) & 0xFF; ++ vq->use_num++; ++ ++ ssam_blk_task_init(task); ++ task->io_req = io_req; ++ task->vq_idx = vq_idx; ++ task->req_idx = req_idx; ++ task->payload_size = payload_size; ++ task->session_io_wait.cb_fn = ssam_session_io_resubmit; ++ task->session_io_wait.cb_arg = task; ++ ++ rc = ssam_task_iovs_memory_get(task); ++ if (rc != 0) { ++ ssam_session_insert_io_wait(smsession, &task->session_io_wait); ++ return; ++ } ++ ++ ssam_request_dma_process(smsession, task); ++ return; ++} ++ ++static void ++ssam_process_vq(struct spdk_ssam_session *smsession, struct ssam_request *io_req) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ uint16_t vq_idx = io_cmd->virtio.vq_idx; ++ uint16_t req_idx = io_cmd->virtio.req_idx; ++ uint32_t payload_size = 0; ++ int rc; ++ ++ if (vq_idx >= smsession->max_queues) { ++ SPDK_ERRLOG("vq_idx out of range, need less than %u, actually %u\n", ++ smsession->max_queues, vq_idx); ++ goto err; ++ } ++ ++ bsmsession->vq_blk_stat[vq_idx].start_count++; ++ ++ if (io_req->status != SSAM_IO_STATUS_OK) { ++ SPDK_WARNLOG("%s: ssam request status invalid, but still process, status=%d\n", ++ smsession->name, io_req->status); ++ goto err; ++ } ++ ++ rc = ssam_get_payload_size(io_req, &payload_size); ++ if (rc != 0) { ++ goto err; ++ } ++ ++ ssam_process_blk_task(smsession, io_req, vq_idx, req_idx, payload_size); ++ return; ++ ++err: ++ rc = ssam_blk_io_complete(smsession->smdev, io_req, VIRTIO_BLK_S_IOERR); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_blk_io_complete_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_blk_io_complete_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smsession->smdev; ++ cb_arg->io_req = io_req; ++ io_wait_r->cb_fn = ssam_blk_io_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ } ++ return; ++} ++ ++static void ++ssam_no_bdev_put_io_channel(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ ++ if (smsession->task_cnt == 0 && (bsmsession->io_channel != NULL)) { ++ spdk_put_io_channel(bsmsession->io_channel); ++ bsmsession->io_channel = NULL; ++ } ++} ++ ++struct ssam_no_bdev_process_vq_arg { ++ struct spdk_ssam_session *smsession; ++ struct ssam_request *io_req; ++}; ++ ++static void 
++ssam_no_bdev_process_vq_cb(void *arg) ++{ ++ struct ssam_no_bdev_process_vq_arg *cb_arg = (struct ssam_no_bdev_process_vq_arg *)arg; ++ int rc = ssam_blk_io_complete(cb_arg->smsession->smdev, cb_arg->io_req, VIRTIO_BLK_S_IOERR); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_no_bdev_process_vq_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(cb_arg->smsession->smdev, io_wait_r); ++ return; ++ } ++ ssam_no_bdev_put_io_channel(cb_arg->smsession); ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ++ssam_no_bdev_process_vq(struct spdk_ssam_session *smsession, struct ssam_request *io_req) ++{ ++ SPDK_ERRLOG("gfunc_id %u No bdev, aborting request, return EIO\n", io_req->gfunc_id); ++ int rc = ssam_blk_io_complete(smsession->smdev, io_req, VIRTIO_BLK_S_IOERR); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_no_bdev_process_vq_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_no_bdev_process_vq_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smsession = smsession; ++ cb_arg->io_req = io_req; ++ io_wait_r->cb_fn = ssam_no_bdev_process_vq_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ SPDK_WARNLOG("Aborting request because no this controller\n"); ++ ++ ssam_no_bdev_put_io_channel(smsession); ++} ++ ++static void ++ssam_blk_response_worker(struct spdk_ssam_session *smsession, void *arg) ++{ ++ struct ssam_dma_rsp *dma_rsp = (struct ssam_dma_rsp *)arg; ++ struct spdk_ssam_dma_cb *dma_cb = (struct spdk_ssam_dma_cb *)&dma_rsp->cb; ++ struct spdk_ssam_blk_task *task = NULL; ++ uint16_t vq_idx = dma_cb->vq_idx; ++ uint16_t task_idx = dma_cb->task_idx; ++ uint8_t req_dir = dma_cb->req_dir; ++ ++ if (vq_idx >= smsession->max_queues) { ++ smsession->smdev->discard_io_num++; ++ SPDK_ERRLOG("vq_idx out of range, need less than %u, actually %u\n", ++ smsession->max_queues, vq_idx); ++ return; ++ } ++ ++ if (smsession->virtqueue[vq_idx].tasks == NULL) { ++ smsession->smdev->discard_io_num++; ++ SPDK_ERRLOG("vq_idx invalid, vq %u has not received io\n", vq_idx); ++ return; ++ } ++ ++ task = &((struct spdk_ssam_blk_task *)smsession->virtqueue[vq_idx].tasks)[task_idx]; ++ if (dma_rsp->status != 0) { ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ SPDK_ERRLOG("dma data process failed!\n"); ++ return; ++ } ++ if (dma_rsp->last_flag == 0) { ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ SPDK_ERRLOG("last_flag should not equal 0!\n"); ++ return; ++ } ++ ssam_task_stat_tick(&task->task_stat.dma_end_tsc); ++ task->bsmsession->blk_stat.dma_complete_count++; ++ task->bsmsession->vq_blk_stat[task->vq_idx].dma_complete_count++; ++ if (req_dir == SSAM_REQUEST_DATA_LOAD) { ++ /* Write data ready, start a request to backend */ ++ ssam_process_blk_request(task); ++ } else if (req_dir == SSAM_REQUEST_DATA_STORE) { ++ /* Data have been read by user, complete the task */ ++ ssam_task_complete(task, dma_cb->status); ++ } ++} ++ ++static int ++ssam_blk_check_io_req(struct spdk_ssam_dev *smdev, struct ssam_request *io_req) ++{ ++ struct ssam_io_message *io_cmd = NULL; ++ uint16_t vq_idx; ++ uint16_t req_idx; ++ const struct 
virtio_blk_outhdr *req = NULL; ++ ++ if (io_req == NULL) { ++ SPDK_ERRLOG("%s: received a NULL IO message\n", smdev->name); ++ return -1; ++ } ++ ++ io_cmd = &io_req->req.cmd; ++ vq_idx = io_cmd->virtio.vq_idx; ++ req_idx = io_cmd->virtio.req_idx; ++ req = (struct virtio_blk_outhdr *)io_cmd->header; ++ ++ if (io_cmd->iovs == NULL) { ++ SPDK_ERRLOG("%s: received an empty IO, vq_idx:%u, req_idx:%u\n", ++ smdev->name, vq_idx, req_idx); ++ return -1; ++ } ++ ++ if (io_cmd->iovcnt < IOV_HEADER_TAIL_NUM) { ++ SPDK_ERRLOG("%s: iovcnt %u less than %d but expected not less than %d\n", ++ smdev->name, io_cmd->iovcnt, IOV_HEADER_TAIL_NUM, IOV_HEADER_TAIL_NUM); ++ return -1; ++ } ++ ++ if ((io_cmd->iovcnt == IOV_HEADER_TAIL_NUM) && (req->type != VIRTIO_BLK_T_FLUSH)) { ++ SPDK_ERRLOG("%s: received an IO not contain valid data, iovcnt:%u, vq_idx:%u, " ++ "req_idx:%u, req_type:%u, req_ioprio:%u, req_sector:%llu\n", ++ smdev->name, io_cmd->iovcnt, vq_idx, req_idx, req->type, req->ioprio, req->sector); ++ return -1; ++ } ++ ++ if (io_cmd->iovcnt > (SPDK_SSAM_IOVS_MAX + IOV_HEADER_TAIL_NUM)) { ++ SPDK_ERRLOG("%s: received too much IO, iovcnt:%u, vq_idx:%u, req_idx:%u\n", ++ smdev->name, io_cmd->iovcnt, vq_idx, req_idx); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_blk_request_worker(struct spdk_ssam_session *smsession, void *arg) ++{ ++ struct spdk_ssam_dev *smdev = smsession->smdev; ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ struct ssam_request *io_req = (struct ssam_request *)arg; ++ int ret; ++ ++ smdev->io_num++; ++ bsmsession->blk_stat.start_count++; ++ ++ ret = ssam_blk_check_io_req(smdev, io_req); ++ if (ret < 0) { ++ smdev->discard_io_num++; ++ return; ++ } ++ ++ if (bsmsession->io_channel == NULL) { ++ bsmsession->io_channel = spdk_bdev_get_io_channel(bsmsession->bdev_desc); ++ if (bsmsession->io_channel == NULL) { ++ ssam_no_bdev_process_vq(smsession, io_req); ++ SPDK_ERRLOG("%s: I/O channel allocation failed\n", smsession->name); ++ return; ++ } ++ } ++ ++ if (bsmsession->no_bdev) { ++ ssam_no_bdev_process_vq(smsession, io_req); ++ } else { ++ ssam_process_vq(smsession, io_req); ++ } ++} ++ ++static void ++ssam_blk_no_data_request_worker(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_blk_session *bsmsession = NULL; ++ ++ bsmsession = ssam_to_blk_session(smsession); ++ if (bsmsession->no_bdev) { ++ ssam_no_bdev_put_io_channel(smsession); ++ } ++} ++ ++static void ++ssam_blk_destroy_bdev_device(struct spdk_ssam_session *smsession, void *args) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ spdk_bdev_close(bsmsession->bdev_desc); ++ ++ /* free taskpool */ ++ ssam_free_task_pool(bsmsession); ++ ++ /* free */ ++ free(bsmsession); ++} ++ ++static void ++ssam_request_resubmit(void *arg) ++{ ++ struct spdk_ssam_blk_task *task = (struct spdk_ssam_blk_task *)arg; ++ int rc; ++ ++ rc = ssam_process_blk_request(task); ++ if (rc == 0) { ++ SPDK_DEBUGLOG(ssam_blk_data, "====== Task: req_idx = %"PRIu16" resubmitted ======\n", ++ get_req_idx(task)); ++ } else { ++ SPDK_WARNLOG("====== Task: req_idx = %"PRIu16" failed ======\n", get_req_idx(task)); ++ } ++} ++ ++static inline void ++ssam_request_queue_io(struct spdk_ssam_blk_task *task) ++{ ++ int rc; ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ ++ task->bdev_io_wait.bdev = bsmsession->bdev; ++ task->bdev_io_wait.cb_fn = ssam_request_resubmit; ++ task->bdev_io_wait.cb_arg = task; ++ ++ rc = spdk_bdev_queue_io_wait(bsmsession->bdev, 
bsmsession->io_channel, &task->bdev_io_wait); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: failed to queue I/O, rc=%d\n", bsmsession->smsession.name, rc); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ } ++} ++ ++static void ++ssam_session_io_resubmit(void *arg) ++{ ++ struct spdk_ssam_blk_task *task = (struct spdk_ssam_blk_task *)arg; ++ struct spdk_ssam_session *smsession = &task->bsmsession->smsession; ++ int rc; ++ ++ rc = ssam_task_iovs_memory_get(task); ++ if (rc != 0) { ++ ssam_session_insert_io_wait(smsession, &task->session_io_wait); ++ return; ++ } ++ ssam_request_dma_process(smsession, task); ++} ++ ++static void ++ssam_blk_construct_roll_back(uint16_t gfunc_id) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ int rc; ++ ++ if (ssam_get_hash_mode() == SSAM_VQ_HASH_MODE) { ++ rc = ssam_virtio_vq_unbind_core(gfunc_id); ++ if (rc != 0) { ++ SPDK_WARNLOG("virtio blk vq unbind core failed.\n"); ++ } ++ } ++ ++ if ((gfunc_id > SSAM_PF_MAX_NUM) || (ssam_get_en_hpd() == true)) { ++ rc = ssam_virtio_blk_release_resource(gfunc_id); ++ if (rc != 0) { ++ SPDK_WARNLOG("virtio blk release vq failed.\n"); ++ } ++ } ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ smsession = smdev->smsessions[gfunc_id]; ++ if ((smsession != NULL) && (smsession->smdev != NULL)) { ++ ssam_to_blk_session(smsession)->stop_session_poller = SPDK_POLLER_REGISTER(ssam_destroy_session_cb, ++ smsession, 0); ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ return; ++} ++ ++static void ++ssam_blk_start_post_cb(struct spdk_ssam_session *smsession, void **arg) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ int rc; ++ ++ smsession->started = true; ++ g_blk_set_times[smsession->gfunc_id]++; ++ if (g_blk_set_times[smsession->gfunc_id] == ssam_get_core_num()) { ++ if ((ssam_get_en_hpd() == true) && (g_hpd_to_async[smsession->gfunc_id] == false)) { ++ rc = ssam_hotplug_add(smsession->gfunc_id); ++ if (rc != 0) { ++ SPDK_ERRLOG("ssam hotplug add function(%d) config failed:%s\n", smsession->gfunc_id, spdk_strerror(-rc)); ++ ssam_blk_construct_roll_back(smsession->gfunc_id); ++ goto out; ++ } ++ } ++ ++ rc = ssam_virtio_blk_resize(smsession->gfunc_id, bsmsession->bdev->blockcnt); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: virtio blk resize failed.\n", smsession->name); ++ } ++ ++ rc = ssam_mount_normal(smsession, 0); ++ if (rc != SSAM_MOUNT_OK) { ++ SPDK_WARNLOG("%s: mount ssam volume failed\n", smsession->name); ++ } ++ ++ /* Smdev poller is not created here, but is created in the initialization process. 
*/ ++ SPDK_NOTICELOG("BLK controller %s created with bdev %s, queues %u\n", ++ smsession->name, spdk_bdev_get_name(bsmsession->bdev), smsession->max_queues); ++ } else { ++ smsession->rsp_fn = NULL; ++ smsession->rsp_ctx = NULL; ++ } ++ ++out: ++ if (smsession->rsp_fn != NULL) { ++ smsession->rsp_fn(smsession->rsp_ctx, rc); ++ smsession->rsp_fn = NULL; ++ } ++ ++ return; ++} ++ ++static int ++ssam_blk_start_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ ++ if (bsmsession->bdev == NULL) { ++ SPDK_ERRLOG("%s: session not have a bdev.\n", smsession->name); ++ return -ENODEV; ++ } ++ ++ ssam_send_event_async_done(ctx); ++ ++ return 0; ++} ++ ++static int ++ssam_blk_start(struct spdk_ssam_session *smsession) ++{ ++ g_gfunc_session_number++; ++ ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = true, ++ .need_rsp = false, ++ }; ++ ++ return ssam_send_event_to_session(smsession, ssam_blk_start_cb, ssam_blk_start_post_cb, ++ send_event_flag, NULL); ++} ++ ++static void ++ssam_blk_destroy_session(struct ssam_blk_session_ctx *ctx) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ctx->bsmsession; ++ struct spdk_ssam_session *smsession = &bsmsession->smsession; ++ ++ if (smsession->task_cnt > 0) { ++ return; ++ } ++ ++ /* If in ssam subsystem finish process, session registered flag will ++ * be set to false first, bdev will be removed in ssam_bdev_remove_cb() ++ * call back process, wating for the call back process finish first. ++ */ ++ if ((smsession->registered == false) && (bsmsession->bdev != NULL)) { ++ return; ++ } ++ ++ SPDK_NOTICELOG("%s: removing on lcore %d\n", ++ smsession->name, spdk_env_get_current_core()); ++ ++ ssam_session_destroy(smsession); ++ ++ if (bsmsession->io_channel != NULL) { ++ spdk_put_io_channel(bsmsession->io_channel); ++ bsmsession->io_channel = NULL; ++ } ++ ssam_free_task_pool(bsmsession); ++ ++ if (bsmsession->serial != NULL) { ++ free(bsmsession->serial); ++ } ++ spdk_poller_unregister(&bsmsession->stop_poller); ++ ++ ssam_session_stop_done(smsession, 0, ctx->user_ctx); ++ free(ctx); ++ ++ return; ++} ++ ++static int ++ssam_destroy_session_poller_cb(void *arg) ++{ ++ struct ssam_blk_session_ctx *ctx = arg; ++ ++ if (ssam_trylock() != 0) { ++ return SPDK_POLLER_BUSY; ++ } ++ ++ ssam_blk_destroy_session(ctx); ++ ++ ssam_unlock(); ++ ++ return SPDK_POLLER_BUSY; ++} ++ ++ ++static void ++ssam_blk_del_recover(struct spdk_ssam_session *smsession, struct spdk_bdev *bdev_roback) ++{ ++ struct spdk_ssam_session_reg_info info = {0}; ++ const char *dev_name = NULL; ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ bool readonly = bsmsession->readonly; ++ char *serial = bsmsession->serial; ++ ++ info.gfunc_id = smsession->gfunc_id; ++ dev_name = bdev_roback->name; ++ info.tid = smsession->smdev->tid; ++ info.queues = smsession->max_queues; ++ info.rsp_ctx = NULL; ++ info.rsp_fn = NULL; ++ ++ g_hpd_to_async[smsession->gfunc_id] = true; ++ ssam_blk_roback(&info, dev_name, readonly, serial); ++ return; ++} ++ ++static int ++ssam_blk_stop_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ smsession->started = false; ++ ++ struct ssam_blk_session_ctx *_ctx = ++ (struct ssam_blk_session_ctx *)calloc(1, sizeof(struct ssam_blk_session_ctx)); ++ ++ if (_ctx == NULL) { ++ SPDK_ERRLOG("%s: calloc blk session ctx error.\n", smsession->name); ++ return -ENOMEM; ++ 
} ++ ++ _ctx->bsmsession = bsmsession; ++ _ctx->user_ctx = ctx; ++ ++ bsmsession->stop_poller = SPDK_POLLER_REGISTER(ssam_destroy_session_poller_cb, ++ _ctx, SESSION_STOP_POLLER_PERIOD); ++ if (bsmsession->stop_poller == NULL) { ++ SPDK_WARNLOG("%s: ssam_destroy_session_poller_cb start failed.\n", smsession->name); ++ ssam_session_stop_done(smsession, -EBUSY, ctx); ++ free(_ctx); ++ return -EBUSY; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_blk_stop(struct spdk_ssam_session *smsession) ++{ ++ g_delete_flag++; ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = true, ++ .need_rsp = false, ++ }; ++ ++ return ssam_send_event_to_session(smsession, ssam_blk_stop_cb, ssam_blk_stop_cpl_cb, ++ send_event_flag, NULL); ++} ++ ++static int ++ssam_blk_remove_session(struct spdk_ssam_session *smsession) ++{ ++ SPDK_NOTICELOG("session gfunc_id=%u removing\n", smsession->gfunc_id); ++ int ret = ssam_blk_stop(smsession); ++ if ((ret != 0) && (smsession->registered == true)) { ++ (void)ssam_remount_normal(smsession, 0); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++const char * ++ssam_get_bdev_name_by_gfunc_id(uint16_t gfunc_id) ++{ ++ struct spdk_ssam_session *smsession; ++ struct spdk_ssam_blk_session *bsmsession = NULL; ++ ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ return NULL; ++ } ++ bsmsession = ssam_to_blk_session(smsession); ++ ++ return spdk_bdev_get_name(bsmsession->bdev); ++} ++ ++struct spdk_bdev * ++ssam_get_session_bdev(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ ++ return bsmsession->bdev; ++} ++ ++static int ++ssam_destroy_session_cb(void *arg) ++{ ++ struct spdk_ssam_blk_session *bsmsession = (struct spdk_ssam_blk_session *)arg; ++ struct spdk_ssam_session *smsession = &bsmsession->smsession; ++ int rc = 0; ++ ++ if (smsession->task_cnt > 0) { ++ return SPDK_POLLER_BUSY; ++ } ++ ++ if (smsession->pending_async_op_num != 0) { ++ return SPDK_POLLER_BUSY; ++ } ++ ++ /* stop poller */ ++ spdk_poller_unregister(&bsmsession->stop_session_poller); ++ ++ ssam_session_unreg_response_cb(smsession); ++ rc = ssam_session_unregister(smsession, false); ++ if (rc != 0) { ++ SPDK_ERRLOG("function id %d: blk construct failed and session remove failed\n", ++ smsession->gfunc_id); ++ } ++ ++ return SPDK_POLLER_BUSY; ++} ++ ++static void ++ssam_blk_roback(struct spdk_ssam_session_reg_info *info, const char *dev_name, ++ bool readonly, char *serial) ++{ ++ struct spdk_ssam_session *smsession = NULL; ++ struct spdk_ssam_session *smsession_tmp = NULL; ++ struct spdk_ssam_blk_session *bsmsession = NULL; ++ struct spdk_bdev *bdev = NULL; ++ uint32_t session_ctx_size = sizeof(struct spdk_ssam_blk_session) - ++ sizeof(struct spdk_ssam_session); ++ struct spdk_ssam_dev *smdev = NULL; ++ uint16_t tid; ++ int ret = 0; ++ ++ g_gfunc_session_number = 0; ++ g_blk_set_times[info->gfunc_id] = 0; ++ g_hpd_delete_session_times[info->gfunc_id] = 0; ++ g_hpd_del_flag[info->gfunc_id] = 0; ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ smsession_tmp = smdev->smsessions[info->gfunc_id]; ++ if (smsession_tmp != NULL) { ++ smdev = ssam_dev_next(smdev); ++ continue; ++ } ++ tid = smdev->tid; ++ if (tid == SPDK_INVALID_TID) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ info->tid = tid; ++ info->backend = &g_ssam_blk_session_backend; ++ info->session_ctx_size = session_ctx_size; ++ snprintf(info->type_name, SPDK_SESSION_TYPE_MAX_LEN, "%s", SPDK_SESSION_TYPE_BLK); ++ ret = 
ssam_session_register(info, &smsession); ++ if (ret != 0) { ++ goto out; ++ } ++ ++ ssam_session_start_done(smsession, 0); ++ ++ bsmsession = ssam_to_blk_session(smsession); ++ ++ ret = spdk_bdev_open_ext(dev_name, true, ssam_bdev_event_cb, smsession, ++ &bsmsession->bdev_desc); ++ if (ret != 0) { ++ SPDK_ERRLOG("function id %d: could not open bdev, error:%s\n", info->gfunc_id, spdk_strerror(-ret)); ++ goto out; ++ } ++ bdev = spdk_bdev_desc_get_bdev(bsmsession->bdev_desc); ++ bsmsession->bdev = bdev; ++ bsmsession->readonly = readonly; ++ bsmsession->need_write_config = ((tid == ssam_get_core_num() - 1) ? true : false); ++ ++ if (serial == NULL) { ++ SPDK_INFOLOG(ssam_blk, "function id %d: not set volume id.\n", info->gfunc_id); ++ } else { ++ bsmsession->serial = calloc(SERIAL_STRING_LEN, sizeof(char)); ++ if (!bsmsession->serial) { ++ SPDK_ERRLOG("no memory for alloc.\n"); ++ goto out; ++ } ++ (void)snprintf(bsmsession->serial, SERIAL_STRING_LEN, "%s", serial); ++ } ++ ++ ret = ssam_blk_start(smsession); ++ if (ret != 0) { ++ SPDK_ERRLOG("%s: start failed\n", smsession->name); ++ goto out; ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ ++ SPDK_INFOLOG(ssam_blk, "function id %d: using bdev '%s'\n", info->gfunc_id, dev_name); ++out: ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ smsession = smdev->smsessions[info->gfunc_id]; ++ if ((ret != 0) && (smsession != NULL) && (smsession->smdev != NULL)) { ++ ssam_to_blk_session(smsession)->stop_session_poller = SPDK_POLLER_REGISTER(ssam_destroy_session_cb, ++ smsession, 0); ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ return; ++} ++ ++ ++int ++ssam_blk_construct(struct spdk_ssam_session_reg_info *info, const char *dev_name, ++ bool readonly, char *serial) ++{ ++ struct spdk_ssam_session *smsession = NULL; ++ struct spdk_ssam_blk_session *bsmsession = NULL; ++ struct spdk_bdev *bdev = NULL; ++ uint32_t session_ctx_size = sizeof(struct spdk_ssam_blk_session) - ++ sizeof(struct spdk_ssam_session); ++ struct spdk_ssam_dev *smdev = NULL; ++ uint16_t tid; ++ int ret = 0; ++ ++ ssam_lock(); ++ g_gfunc_session_number = 0; ++ g_blk_set_times[info->gfunc_id] = 0; ++ g_hpd_delete_session_times[info->gfunc_id] = 0; ++ g_hpd_del_flag[info->gfunc_id] = 0; ++ g_hpd_to_async[info->gfunc_id] = false; ++ ++ for (int i = 0; i < ssam_get_core_num(); i++) { ++ tid = i; ++ if (tid == SPDK_INVALID_TID) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ info->tid = tid; ++ info->backend = &g_ssam_blk_session_backend; ++ info->session_ctx_size = session_ctx_size; ++ snprintf(info->type_name, SPDK_SESSION_TYPE_MAX_LEN, "%s", SPDK_SESSION_TYPE_BLK); ++ ret = ssam_session_register(info, &smsession); ++ if (ret != 0) { ++ goto out; ++ } ++ ++ ssam_session_start_done(smsession, 0); ++ ++ bsmsession = ssam_to_blk_session(smsession); ++ ++ ret = spdk_bdev_open_ext(dev_name, true, ssam_bdev_event_cb, smsession, ++ &bsmsession->bdev_desc); ++ if (ret != 0) { ++ SPDK_ERRLOG("function id %d: could not open bdev, error:%s\n", info->gfunc_id, spdk_strerror(-ret)); ++ goto out; ++ } ++ bdev = spdk_bdev_desc_get_bdev(bsmsession->bdev_desc); ++ bsmsession->bdev = bdev; ++ bsmsession->readonly = readonly; ++ bsmsession->hot_plug_poller_number = 0; ++ bsmsession->need_write_config = ((tid == ssam_get_core_num() - 1) ? 
true : false); ++ ++ if (serial == NULL) { ++ SPDK_INFOLOG(ssam_blk, "function id %d: not set volume id.\n", info->gfunc_id); ++ } else { ++ bsmsession->serial = calloc(SERIAL_STRING_LEN, sizeof(char)); ++ if (!bsmsession->serial) { ++ SPDK_ERRLOG("no memory for alloc.\n"); ++ goto out; ++ } ++ (void)snprintf(bsmsession->serial, SERIAL_STRING_LEN, "%s", serial); ++ } ++ ++ ret = ssam_blk_start(smsession); ++ if (ret != 0) { ++ SPDK_ERRLOG("%s: start failed\n", smsession->name); ++ goto out; ++ } ++ } ++ ++ SPDK_INFOLOG(ssam_blk, "function id %d: using bdev '%s'\n", info->gfunc_id, dev_name); ++out: ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ smsession = smdev->smsessions[info->gfunc_id]; ++ if ((ret != 0) && (smsession != NULL) && (smsession->smdev != NULL)) { ++ ssam_to_blk_session(smsession)->stop_session_poller = SPDK_POLLER_REGISTER(ssam_destroy_session_cb, ++ smsession, 0); ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ ssam_unlock(); ++ return ret; ++} ++ ++SPDK_LOG_REGISTER_COMPONENT(ssam_blk) ++SPDK_LOG_REGISTER_COMPONENT(ssam_blk_data) +diff --git a/lib/ssam/ssam_config.c b/lib/ssam/ssam_config.c +new file mode 100644 +index 0000000..f4044da +--- /dev/null ++++ b/lib/ssam/ssam_config.c +@@ -0,0 +1,615 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. ++ */ ++ ++#include ++#include ++#include ++ ++#include "spdk/string.h" ++#include "spdk/file.h" ++#include "ssam_internal.h" ++ ++#define SSAM_JSON_DEFAULT_MEMPOOL_SIZE 1024 ++#define SSAM_JSON_MAX_MEMPOOL_SIZE 10240 ++#define HPD_CONFIG_POLLER_PERIOD (1000 * 1000) ++ ++enum ssam_dma_queue_num { ++ SSAM_DMA_QUEUE_NUM_DISABLE = 0, ++ SSAM_DMA_QUEUE_NUM_SMALL_IO = 1, ++ SSAM_DMA_QUEUE_NUM_DEFAULT = 2, ++ SSAM_DMA_QUEUE_NUM_LARGE_IO = 4, ++}; ++ ++struct ssam_user_config { ++ char *cfg_file_name; ++ uint32_t mempool_size; ++ uint32_t queues; ++ uint32_t dma_queue_num; ++ char *mode; ++ uint8_t hash_mode; ++}; ++ ++struct ssam_config { ++ struct ssam_user_config user_config; ++ struct ssam_hostep_info ep_info; ++ uint32_t core_num; ++ bool shm_created; ++ bool en_hpd; ++ struct spdk_poller *hpd_poller; ++}; ++ ++static struct ssam_config g_ssam_config; ++ ++static const struct spdk_json_object_decoder g_ssam_user_config_decoders[] = { ++ {"mempool_size_mb", offsetof(struct ssam_user_config, mempool_size), spdk_json_decode_uint32}, ++ {"queues", offsetof(struct ssam_user_config, queues), spdk_json_decode_uint32}, ++ {"mode", offsetof(struct ssam_user_config, mode), spdk_json_decode_string}, ++}; ++ ++static int ++ssam_heap_malloc(const char *type, size_t size, int socket_arg, ++ unsigned int flags, size_t align, size_t bound, bool contig, struct ssam_melem *mem) ++{ ++ void *addr = NULL; ++ unsigned long long pg_size; ++ int socket_id; ++ int rc; ++ uint64_t iova; ++ ++ addr = rte_malloc_socket(type, size, align, socket_arg); ++ if (addr == NULL) { ++ return -ENOMEM; ++ } ++ ++ rc = ssam_malloc_elem_from_addr(addr, &pg_size, &socket_id); ++ if (rc != 0) { ++ ssam_free_ex(addr); ++ return -ENOMEM; ++ } ++ ++ iova = rte_malloc_virt2iova(addr); ++ if (iova == RTE_BAD_IOVA) { ++ ssam_free_ex(addr); ++ return -ENOMEM; ++ } ++ ++ mem->addr = addr; ++ mem->iova = iova; ++ mem->page_sz = pg_size; ++ mem->socket_id = socket_id; ++ return 0; ++} ++ ++static int ++ssam_heap_free(void *addr) ++{ ++ return ssam_free_ex(addr); ++} ++ ++static uint8_t ++ssam_get_dma_queue_num_by_mode(void) ++{ ++ if (g_ssam_config.user_config.mode == NULL) { ++ return 
SSAM_DMA_QUEUE_NUM_DISABLE; ++ } ++ ++ if (!strcasecmp(g_ssam_config.user_config.mode, "default")) { ++ return SSAM_DMA_QUEUE_NUM_DEFAULT; ++ } else if (!strcasecmp(g_ssam_config.user_config.mode, "small-IO")) { ++ return SSAM_DMA_QUEUE_NUM_SMALL_IO; ++ } else if (!strcasecmp(g_ssam_config.user_config.mode, "large-IO")) { ++ return SSAM_DMA_QUEUE_NUM_LARGE_IO; ++ } ++ return SSAM_DMA_QUEUE_NUM_DISABLE; ++} ++ ++static void ++ssam_get_ssam_lib_init_config(struct ssam_lib_args *cfg) ++{ ++ uint32_t core_num = g_ssam_config.core_num; ++ ++ cfg->role = 1; ++ cfg->dma_queue_num = g_ssam_config.user_config.dma_queue_num; ++ cfg->ssam_heap_malloc = ssam_heap_malloc; ++ cfg->ssam_heap_free = ssam_heap_free; ++ cfg->hash_mode = g_ssam_config.user_config.hash_mode; ++ ++ /* The number of tid is 1 greater than the number of cores. */ ++ cfg->core_num = core_num; ++} ++ ++void ++spdk_ssam_set_shm_created(bool shm_created) ++{ ++ g_ssam_config.shm_created = shm_created; ++} ++ ++bool ++spdk_ssam_get_shm_created(void) ++{ ++ return g_ssam_config.shm_created; ++} ++ ++bool ++ssam_get_en_hpd(void) ++{ ++ return g_ssam_config.en_hpd; ++} ++ ++int ++ssam_set_core_num(uint32_t core_num) ++{ ++ if (core_num > SSAM_MAX_CORE_NUM) { ++ SPDK_ERRLOG("Invalid coremask, total cores need less or equal than %d, " ++ "actually %u, please check startup item.\n", ++ SSAM_MAX_CORE_NUM, core_num); ++ return -EINVAL; ++ } ++ if (g_ssam_config.user_config.dma_queue_num == SSAM_DMA_QUEUE_NUM_LARGE_IO ++ && core_num > SSAM_MAX_CORE_NUM_WITH_LARGE_IO) { ++ SPDK_ERRLOG("Invalid coremask, total cores need less or equal than %d, " ++ "actually %u, please check startup item.\n", ++ SSAM_MAX_CORE_NUM_WITH_LARGE_IO, core_num); ++ return -EINVAL; ++ } ++ g_ssam_config.core_num = core_num; ++ return 0; ++} ++ ++uint16_t ++ssam_get_core_num(void) ++{ ++ return (uint16_t)g_ssam_config.core_num; ++} ++ ++uint32_t ++ssam_get_mempool_size(void) ++{ ++ return g_ssam_config.user_config.mempool_size; ++} ++ ++uint16_t ++ssam_get_queues(void) ++{ ++ uint16_t cfg_queues = (uint16_t)g_ssam_config.user_config.queues; ++ ++ if (cfg_queues == 0) { ++ SPDK_INFOLOG(ssam_config, "Use default queues number: %u.\n", SPDK_SSAM_DEFAULT_VQUEUES); ++ return SPDK_SSAM_DEFAULT_VQUEUES; ++ } ++ return cfg_queues; ++} ++ ++uint8_t ++ssam_get_hash_mode(void) ++{ ++ return g_ssam_config.user_config.hash_mode; ++} ++ ++enum ssam_device_type ++ssam_get_virtio_type(uint16_t gfunc_id) { ++ uint16_t vf_start, vf_end; ++ struct ssam_pf_list *pf = g_ssam_config.ep_info.host_pf_list; ++ ++ for (uint32_t i = 0; i < SSAM_HOSTEP_NUM_MAX; i++) ++ { ++ if (pf[i].pf_funcid == UINT16_MAX) { ++ continue; ++ } ++ if (gfunc_id == pf[i].pf_funcid) { ++ return pf[i].pf_type; ++ } ++ ++ vf_start = pf[i].vf_funcid_start; ++ if (((uint32_t)vf_start + (uint32_t)pf[i].vf_num) > UINT16_MAX) { ++ SPDK_ERRLOG("vf_start %u + vf_num %u out of range, need less or equal than %u.\n", ++ vf_start, pf[i].vf_num, UINT16_MAX); ++ continue; ++ } ++ vf_end = vf_start + pf[i].vf_num; ++ if ((gfunc_id >= vf_start) && (gfunc_id < vf_end)) { ++ return pf[i].pf_type; ++ } ++ } ++ ++ return SSAM_DEVICE_VIRTIO_MAX; ++} ++ ++static void ++ssam_get_virtio_blk_config(struct ssam_virtio_config *cfg) ++{ ++ struct virtio_blk_config *dev_cfg = (struct virtio_blk_config *)cfg->device_config; ++ ++ cfg->device_feature = SPDK_SSAM_VIRTIO_BLK_DEFAULT_FEATURE; ++ cfg->queue_num = g_ssam_config.user_config.queues; ++ cfg->config_len = sizeof(struct virtio_blk_config); ++ ++ memset(dev_cfg, 0, cfg->config_len); ++ 
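++    /*
++     * Defaults advertised to the host: 512-byte logical blocks
++     * (blk_size = 0x200), up to seg_max = 0x7d (125) segments per request,
++     * and size_max = 0x200000 (2 MiB) per segment. capacity is left at 0
++     * here and is updated later through ssam_virtio_blk_resize() when a blk
++     * controller backed by a bdev is started for the function.
++     */
++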
dev_cfg->blk_size = 0x200; ++ dev_cfg->min_io_size = 0; ++ dev_cfg->capacity = 0; ++ dev_cfg->num_queues = cfg->queue_num; ++ dev_cfg->seg_max = 0x7d; ++ dev_cfg->size_max = 0x200000; ++ cfg->queue_size = VIRITO_DEFAULT_QUEUE_SIZE; ++ ++ return; ++} ++ ++static void ++ssam_get_virtio_scsi_config(struct ssam_virtio_config *cfg) ++{ ++ struct virtio_scsi_config *dev_cfg = (struct virtio_scsi_config *)cfg->device_config; ++ ++ cfg->device_feature = SPDK_SSAM_VIRTIO_SCSI_DEFAULT_FEATURE; ++ cfg->queue_num = g_ssam_config.user_config.queues; ++ cfg->config_len = sizeof(struct virtio_scsi_config); ++ ++ memset(dev_cfg, 0, sizeof(struct virtio_scsi_config)); ++ dev_cfg->num_queues = 0x04; ++ dev_cfg->seg_max = 0x6f; ++ dev_cfg->max_sectors = 0x1ff; ++ dev_cfg->cmd_per_lun = 0x80; ++ dev_cfg->event_info_size = 0; ++ dev_cfg->sense_size = 0x60; ++ dev_cfg->cdb_size = 0x20; ++ dev_cfg->max_channel = 0; ++ dev_cfg->max_target = SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; ++ dev_cfg->max_lun = 0xff; ++ cfg->queue_size = VIRITO_DEFAULT_QUEUE_SIZE; ++ ++ return; ++} ++ ++static int ++ssam_virtio_config_get(struct ssam_pf_list *pf, struct ssam_function_config *cfg) ++{ ++ int ret = 0; ++ ++ cfg->gfunc_id = pf->pf_funcid; ++ cfg->type = pf->pf_type; ++ switch (cfg->type) { ++ case SSAM_DEVICE_VIRTIO_BLK: ++ ssam_get_virtio_blk_config(&cfg->virtio_config); ++ break; ++ case SSAM_DEVICE_VIRTIO_SCSI: ++ ssam_get_virtio_scsi_config(&cfg->virtio_config); ++ break; ++ default: { ++ SPDK_ERRLOG("function config init fail (%d|%d)\n", cfg->gfunc_id, cfg->type); ++ ret = -EINVAL; ++ break; ++ } ++ } ++ ++ return ret; ++} ++ ++static int ++ssam_setup_pf(struct ssam_pf_list *pf, struct ssam_function_config *cfg) ++{ ++ int rc; ++ ++ rc = ssam_setup_function(pf->pf_funcid, pf->vf_num, pf->pf_type); ++ if (rc != 0) { ++ SPDK_ERRLOG("ssam init function(%u) failed:%s\n", pf->pf_funcid, spdk_strerror(-rc)); ++ return rc; ++ } ++ ++ if (g_ssam_config.en_hpd == false) { ++ rc = ssam_write_function_config(cfg); ++ if (rc != 0) { ++ SPDK_ERRLOG("ssam write function(%d) config failed:%s\n", cfg->gfunc_id, spdk_strerror(-rc)); ++ return rc; ++ } ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_hotplug_cfg_poller(void *ctx) ++{ ++ ssam_hotplug_cfg(); ++ return SPDK_POLLER_BUSY; ++} ++ ++static int ++ssam_virtio_config_init(struct ssam_hostep_info *ep_info) ++{ ++ int rc = 0; ++ uint32_t i; ++ struct ssam_function_config cfg = {0}; ++ struct ssam_pf_list *pf = ep_info->host_pf_list; ++ ++ g_ssam_config.en_hpd = ssam_hotplug_enable_check(); ++ if (g_ssam_config.en_hpd) { ++ g_ssam_config.hpd_poller = SPDK_POLLER_REGISTER(ssam_hotplug_cfg_poller, NULL, HPD_CONFIG_POLLER_PERIOD); ++ } ++ ++ if (spdk_ssam_get_shm_created()) { ++ /* If server is crashed from last time, no need setup config this time */ ++ return 0; ++ } ++ ++ /** ++ * During chip initialization, the vq and msix resources are initialized. ++ * However, the ssam configuration may be different from the initialization configuration. ++ * In the scene of virtio-blk, resources will be alloced at the function `ssam_blk_controller_set_vqueue`. ++ * Therefore, the original resources need to be released before negotiation with the host end. 
++ */ ++ for (i = 0; i < SSAM_HOSTEP_NUM_MAX; i++) { ++ if (pf[i].pf_funcid == UINT16_MAX || pf[i].pf_type != SSAM_DEVICE_VIRTIO_BLK) { ++ continue; ++ } ++ rc = ssam_virtio_blk_release_resource(i); ++ if (rc != 0) { ++ SPDK_WARNLOG("virtio blk release vq failed.\n"); ++ } ++ } ++ ++ for (i = 0; i < SSAM_HOSTEP_NUM_MAX; i++) { ++ if (pf[i].pf_funcid == UINT16_MAX) { ++ continue; ++ } ++ rc = ssam_virtio_config_get(&pf[i], &cfg); ++ if (rc != 0) { ++ return rc; ++ } ++ rc = ssam_setup_pf(&pf[i], &cfg); ++ if (rc != 0) { ++ return rc; ++ } ++ } ++ ++ return rc; ++} ++ ++static int ++ssam_virtio_init(void) ++{ ++ struct ssam_lib_args ssam_args = { 0 }; ++ struct ssam_hostep_info *ep_info = &g_ssam_config.ep_info; ++ int rc; ++ ++ ssam_get_ssam_lib_init_config(&ssam_args); ++ ++ rc = ssam_lib_init(&ssam_args, ep_info); ++ if (rc != 0) { ++ SPDK_ERRLOG("Failed to init ssam:%s\n", spdk_strerror(-rc)); ++ return rc; ++ } ++ ++ rc = ssam_virtio_config_init(ep_info); ++ if (rc != 0) { ++ SPDK_ERRLOG("ssam virtio device init failed:%s\n", spdk_strerror(-rc)); ++ if (ssam_lib_exit() != 0) { ++ SPDK_WARNLOG("ssam lib exit failed\n"); ++ } ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_user_config_default(void) ++{ ++ struct ssam_user_config *user_config = &g_ssam_config.user_config; ++ ++ user_config->mempool_size = SSAM_JSON_DEFAULT_MEMPOOL_SIZE; ++ /** ++ * If file param json file is not exist, queue number will be ++ * set default value SPDK_SSAM_DEFAULT_VQUEUES when user create controller. ++ */ ++ user_config->queues = SPDK_SSAM_DEFAULT_VQUEUES; ++ user_config->dma_queue_num = SSAM_DMA_QUEUE_NUM_DEFAULT; ++ user_config->mode = NULL; ++ user_config->hash_mode = SSAM_VQ_HASH_MODE; ++ ++ return -ENOENT; ++} ++ ++static int ++ssam_user_config_file_read(const char *config_file, size_t *file_len, ++ void **json, ssize_t *value_size) ++{ ++ FILE *read_json = fopen(config_file, "r"); ++ ssize_t ret; ++ void *end = NULL; ++ ++ if (read_json == NULL) { ++ if (errno != ENOENT) { ++ SPDK_ERRLOG("Read JSON configuration file \"%s\" failed\n", config_file); ++ return -1; ++ } ++ SPDK_WARNLOG("JSON config file:%s does not exist! 
Use default configuration.\n", ++ config_file); ++ return ssam_user_config_default(); ++ } ++ ++ void *load = spdk_posix_file_load(read_json, file_len); ++ fclose(read_json); ++ if (load == NULL) { ++ return -1; ++ } ++ ++ ret = spdk_json_parse(load, *file_len, NULL, 0, &end, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS); ++ if (ret < 0) { ++ SPDK_ERRLOG("Parsing JSON configuration file \"%s\" failed (%zd)\n", config_file, ret); ++ free(load); ++ load = NULL; ++ if (ret == -ENOENT) { /* json file exists, but content is null */ ++ SPDK_ERRLOG("json file exists, but content is null\n"); ++ ret = -1; ++ } ++ return ret; ++ } ++ *json = load; ++ *value_size = ret; ++ ++ return 0; ++} ++ ++static void ++ssam_user_config_free(struct ssam_user_config *user_config) ++{ ++ if (user_config->mode != NULL) { ++ free(user_config->mode); ++ user_config->mode = NULL; ++ } ++} ++ ++static int ++ssam_user_config_parse(size_t file_len, void *json, ssize_t value_size) ++{ ++ struct spdk_json_val *value; ++ struct ssam_user_config *user_config = &g_ssam_config.user_config; ++ ssize_t ret; ++ void *end = NULL; ++ int rc; ++ ++ value = calloc(value_size, sizeof(struct spdk_json_val)); ++ if (value == NULL) { ++ SPDK_ERRLOG("Out of memory\n"); ++ free(json); ++ return -ENOMEM; ++ } ++ ++ ret = spdk_json_parse(json, file_len, value, value_size, &end, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS); ++ if (ret != value_size) { ++ SPDK_ERRLOG("Parsing JSON configuration file failed\n"); ++ free(json); ++ free(value); ++ return -1; ++ } ++ ++ /* resolve json values to struct spdk_ssam_json_config */ ++ ++ rc = spdk_json_decode_object(value, g_ssam_user_config_decoders, ++ SPDK_COUNTOF(g_ssam_user_config_decoders), user_config); ++ free(json); ++ free(value); ++ if (rc != 0) { ++ SPDK_ERRLOG("decode object failed:%s\n", spdk_strerror(-rc)); ++ ssam_user_config_free(user_config); ++ return -1; ++ } ++ user_config->hash_mode = SSAM_VQ_HASH_MODE; ++ ++ return 0; ++} ++ ++static int ++ssam_user_config_check(void) ++{ ++ struct ssam_user_config *user_config = &g_ssam_config.user_config; ++ ++ if (user_config->mempool_size < SSAM_JSON_DEFAULT_MEMPOOL_SIZE) { ++ SPDK_ERRLOG("mempool_size_mb value in file %s out of range, need larger or equal than %u MB, actually %u MB.\n", ++ user_config->cfg_file_name, SSAM_JSON_DEFAULT_MEMPOOL_SIZE, user_config->mempool_size); ++ return -EINVAL; ++ } ++ ++ if (user_config->mempool_size > SSAM_JSON_MAX_MEMPOOL_SIZE) { ++ SPDK_ERRLOG("mempool_size_mb value in file %s out of range, need less or equal than %u MB, actually %u MB.\n", ++ user_config->cfg_file_name, SSAM_JSON_MAX_MEMPOOL_SIZE, user_config->mempool_size); ++ return -EINVAL; ++ } ++ ++ if (user_config->queues > SPDK_SSAM_MAX_VQUEUES) { ++ SPDK_ERRLOG("queues value in file %s out of range, need less or equal than %u, actually %u\n", ++ user_config->cfg_file_name, SPDK_SSAM_MAX_VQUEUES, user_config->queues); ++ return -EINVAL; ++ } ++ ++ if (user_config->queues == 0) { ++ SPDK_ERRLOG("queues value in file %s out of range, need not equal to 0\n", ++ user_config->cfg_file_name); ++ return -EINVAL; ++ } ++ ++ user_config->dma_queue_num = ssam_get_dma_queue_num_by_mode(); ++ if (user_config->dma_queue_num == SSAM_DMA_QUEUE_NUM_DISABLE) { ++ SPDK_ERRLOG("Invalid mode in file %s, which should be chosen from default, small-IO, large-IO, " ++ "actually %s\n", ++ user_config->mode, ssam_rc_get_param_json_file_path()); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++int ++spdk_ssam_user_config_init(void) ++{ ++ size_t file_len = 0; ++ void *json = NULL; ++ ssize_t 
value_size = 0; ++ int rc; ++ struct ssam_user_config *user_config = &g_ssam_config.user_config; ++ ++ user_config->cfg_file_name = ssam_rc_get_param_json_file_path(); ++ rc = ssam_user_config_file_read(user_config->cfg_file_name, &file_len, &json, &value_size); ++ if (rc != 0) { ++ if (rc == -ENOENT) { ++ return 0; ++ } ++ return rc; ++ } ++ ++ rc = ssam_user_config_parse(file_len, json, value_size); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ rc = ssam_user_config_check(); ++ if (rc != 0) { ++ ssam_user_config_free(&g_ssam_config.user_config); ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_virtio_exit(void) ++{ ++ int rc; ++ ++ rc = ssam_lib_exit(); ++ if (rc != 0) { ++ SPDK_WARNLOG("ssam lib exit failed\n"); ++ } ++} ++ ++void ++ssam_unregister_hpd_poller(void) ++{ ++ if (g_ssam_config.hpd_poller != NULL) { ++ spdk_poller_unregister(&g_ssam_config.hpd_poller); ++ g_ssam_config.hpd_poller = NULL; ++ } ++} ++ ++int ++ssam_config_init(void) ++{ ++ int rc; ++ ++ rc = ssam_virtio_init(); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ return 0; ++} ++ ++void ++ssam_config_exit(void) ++{ ++ ssam_virtio_exit(); ++} ++ ++SPDK_LOG_REGISTER_COMPONENT(ssam_config) +diff --git a/lib/ssam/ssam_config.h b/lib/ssam/ssam_config.h +new file mode 100644 +index 0000000..02bebaa +--- /dev/null ++++ b/lib/ssam/ssam_config.h +@@ -0,0 +1,29 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. ++ */ ++ ++#ifndef SSAM_CONFIG_H ++#define SSAM_CONFIG_H ++ ++int ssam_set_core_num(uint32_t core_num); ++ ++uint16_t ssam_get_core_num(void); ++ ++uint32_t ssam_get_mempool_size(void); ++ ++uint16_t ssam_get_queues(void); ++ ++uint8_t ssam_get_hash_mode(void); ++ ++bool ssam_get_en_hpd(void); ++ ++enum ssam_device_type ssam_get_virtio_type(uint16_t gfunc_id); ++ ++void ssam_unregister_hpd_poller(void); ++ ++int ssam_config_init(void); ++ ++void ssam_config_exit(void); ++ ++#endif /* SSAM_CONFIG_H */ +diff --git a/lib/ssam/ssam_device_pcie.c b/lib/ssam/ssam_device_pcie.c +new file mode 100644 +index 0000000..3b34934 +--- /dev/null ++++ b/lib/ssam/ssam_device_pcie.c +@@ -0,0 +1,223 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. 
++ */ ++ ++#include "spdk/string.h" ++#include "spdk/file.h" ++#include "ssam_internal.h" ++ ++#define SSAM_KEY_MAX_LEN 16 ++#define SSAM_TYPE_MAX_LEN 12 ++#define SSAM_DBDF_MAX_LEN 16 ++ ++struct ssam_device_pcie_info { ++ uint32_t func_id; ++ char type[SSAM_TYPE_MAX_LEN]; ++ char dbdf[SSAM_DBDF_MAX_LEN]; ++}; ++ ++struct ssam_device_pcie_list { ++ uint32_t size; ++ struct ssam_device_pcie_info *device_pcie_list; ++}; ++ ++static struct ssam_device_pcie_list g_ssam_device_pcie_list = { ++ .size = 0, ++ .device_pcie_list = NULL, ++}; ++ ++void ++ssam_deinit_device_pcie_list(void) ++{ ++ if (g_ssam_device_pcie_list.device_pcie_list != NULL) { ++ free(g_ssam_device_pcie_list.device_pcie_list); ++ g_ssam_device_pcie_list.device_pcie_list = NULL; ++ } ++} ++ ++static int ++ssam_alloc_device_pcie_list(struct spdk_json_val *values, size_t num_values) ++{ ++ size_t i; ++ uint32_t size = 0; ++ ++ for (i = 0; i < num_values; i++) { ++ if (values[i].type == SPDK_JSON_VAL_OBJECT_END) { ++ size++; ++ } ++ } ++ ++ if (g_ssam_device_pcie_list.device_pcie_list == NULL) { ++ g_ssam_device_pcie_list.size = size; ++ g_ssam_device_pcie_list.device_pcie_list = calloc(size, sizeof(struct ssam_device_pcie_info)); ++ if (g_ssam_device_pcie_list.device_pcie_list == NULL) { ++ SPDK_ERRLOG("Unable to allocate enough memory for device_pcie_list\n"); ++ return -ENOMEM; ++ } ++ } ++ return 0; ++} ++ ++static void ++ssam_set_device_pcie_index(struct spdk_json_val *value, uint32_t cur_index) ++{ ++ char val[16]; ++ uint32_t gfunc_id; ++ if (value->type != SPDK_JSON_VAL_NUMBER || value->len > 5) { ++ SPDK_ERRLOG("device pcie gfunc id is invalid, type: %u, len: %u\n", value->type, value->len); ++ return; ++ } ++ ++ memset(val, 0, 16); ++ memcpy(val, value->start, value->len); ++ gfunc_id = spdk_strtol(val, 10); ++ if (gfunc_id >= SPDK_INVALID_GFUNC_ID) { ++ SPDK_ERRLOG("device pcie gfunc id(%u) is more than %u\n", gfunc_id, SPDK_INVALID_GFUNC_ID); ++ return; ++ } ++ g_ssam_device_pcie_list.device_pcie_list[cur_index].func_id = gfunc_id; ++} ++ ++static void ++ssam_set_device_pcie_dbdf(struct spdk_json_val *value, uint32_t cur_index) ++{ ++ if (value->type != SPDK_JSON_VAL_STRING || value->len >= SSAM_DBDF_MAX_LEN) { ++ SPDK_ERRLOG("device pcie dbdf is invalid, type: %u, len: %u\n", value->type, value->len); ++ return; ++ } ++ ++ memset(g_ssam_device_pcie_list.device_pcie_list[cur_index].dbdf, 0, SSAM_DBDF_MAX_LEN); ++ memcpy(g_ssam_device_pcie_list.device_pcie_list[cur_index].dbdf, value->start, value->len); ++} ++ ++static void ++ssam_set_device_pcie_type(struct spdk_json_val *value, uint32_t cur_index) ++{ ++ if (value->type != SPDK_JSON_VAL_STRING || value->len >= SSAM_TYPE_MAX_LEN) { ++ SPDK_ERRLOG("device pcie type is invalid, type: %u, len: %u\n", value->type, value->len); ++ return; ++ } ++ ++ memset(g_ssam_device_pcie_list.device_pcie_list[cur_index].type, 0, SSAM_TYPE_MAX_LEN); ++ memcpy(g_ssam_device_pcie_list.device_pcie_list[cur_index].type, value->start, value->len); ++} ++ ++static void ++ssam_init_device_pcie_list_by_values(struct spdk_json_val *values, size_t num_values) ++{ ++ char key[SSAM_KEY_MAX_LEN]; ++ uint32_t cur_index = 0; ++ size_t i; ++ ++ for (i = 0; i < num_values; i++) { ++ if (values[i].type == SPDK_JSON_VAL_OBJECT_END) { ++ cur_index++; ++ } ++ if (values[i].type != SPDK_JSON_VAL_NAME || values[i].len >= SSAM_KEY_MAX_LEN) { ++ continue; ++ } ++ ++ memset(key, 0, SSAM_KEY_MAX_LEN); ++ memcpy(key, values[i].start, values[i].len); ++ ++ /* point to val */ ++ i++; ++ ++ if (strcmp(key, "index") 
== 0) { ++ ssam_set_device_pcie_index(&values[i], cur_index); ++ } else if (strcmp(key, "dbdf") == 0) { ++ ssam_set_device_pcie_dbdf(&values[i], cur_index); ++ } else if (strcmp(key, "type") == 0) { ++ ssam_set_device_pcie_type(&values[i], cur_index); ++ } ++ } ++} ++ ++int ++ssam_init_device_pcie_list(void) ++{ ++ FILE *fp = NULL; ++ void *buf = NULL; ++ ssize_t rc = 0; ++ size_t size; ++ size_t num_values; ++ struct spdk_json_val *values = NULL; ++ ++ fp = popen("dpak-smi info -t device_pcie_list -f storage", "r"); ++ if (fp == NULL) { ++ SPDK_ERRLOG("execute dpak-smi failed\n"); ++ return -EINVAL; ++ } ++ ++ buf = spdk_posix_file_load(fp, &size); ++ if (buf == NULL) { ++ SPDK_ERRLOG("get size of json failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_parse(buf, size, NULL, 0, NULL, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS); ++ if (rc < 0) { ++ SPDK_ERRLOG("dpak-smi error: %s\n", (char *)buf); ++ goto invalid; ++ } ++ num_values = (size_t)rc; ++ values = calloc(num_values, sizeof(*values)); ++ if (values == NULL) { ++ SPDK_ERRLOG("Unable to allocate enough memory for values\n"); ++ rc = -ENOMEM; ++ goto invalid; ++ } ++ ++ rc = spdk_json_parse(buf, size, values, num_values, NULL, ++ SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS | SPDK_JSON_PARSE_FLAG_DECODE_IN_PLACE); ++ if (rc <= 0) { ++ SPDK_ERRLOG("parse json to values failed\n"); ++ goto invalid; ++ } ++ ++ rc = ssam_alloc_device_pcie_list(values, num_values); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_init_device_pcie_list_by_values(values, num_values); ++ rc = 0; ++ ++invalid: ++ if (values != NULL) { ++ free(values); ++ values = NULL; ++ } ++ if (buf != NULL) { ++ free(buf); ++ buf = NULL; ++ } ++ if (fp != NULL) { ++ pclose(fp); ++ fp = NULL; ++ } ++ return rc; ++} ++ ++void ++ssam_dump_device_pcie_list(struct spdk_json_write_ctx *w) ++{ ++ uint32_t i; ++ spdk_json_write_named_array_begin(w, "device_pcie_list"); ++ for (i = 0; i < g_ssam_device_pcie_list.size; i++) { ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_uint32(w, "index", g_ssam_device_pcie_list.device_pcie_list[i].func_id); ++ spdk_json_write_named_string(w, "dbdf", g_ssam_device_pcie_list.device_pcie_list[i].dbdf); ++ spdk_json_write_named_string(w, "type", g_ssam_device_pcie_list.device_pcie_list[i].type); ++ spdk_json_write_object_end(w); ++ } ++ spdk_json_write_array_end(w); ++} ++ ++uint32_t ++ssam_get_device_pcie_list_size(void) ++{ ++ return g_ssam_device_pcie_list.size; ++} +diff --git a/lib/ssam/ssam_driver/dpak_ssam.h b/lib/ssam/ssam_driver/dpak_ssam.h +new file mode 100644 +index 0000000..928f22e +--- /dev/null ++++ b/lib/ssam/ssam_driver/dpak_ssam.h +@@ -0,0 +1,614 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. 
++ */ ++ ++#ifndef DPAK_SSAM_H ++#define DPAK_SSAM_H ++ ++#include "spdk/stdinc.h" ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++#define SSAM_HOSTEP_NUM_MAX 32 ++#define SSAM_MAX_REQ_POLL_SIZE 16 ++#define SSAM_MAX_RESP_POLL_SIZE 10 ++#define SSAM_VIRTIO_HEAD_LEN 64 ++#define SSAM_DEV_CFG_MAX_LEN 60 ++#define SSAM_DBDF_STR_MAX_LEN 13 ++#define SSAM_MB (uint64_t)(1 << 20) ++#define SSAM_SERVER_NAME "ssam" ++ ++enum ssam_device_type { ++ SSAM_DEVICE_NVME = 0, /* NVMe device */ ++ SSAM_DEVICE_VIRTIO_BLK = 2, /* virtio-blk device */ ++ SSAM_DEVICE_VIRTIO_SCSI = 3, /* virtio-scsi device */ ++ SSAM_DEVICE_VIRTIO_FS = 5, /* virtio-fs device */ ++ SSAM_DEVICE_VIRTIO_MAX = 6 /* virtio device type upper boundary */ ++}; ++ ++enum ssam_mount_type { ++ SSAM_MOUNT_DUMMY = 0, /* mount virtio to dummy function */ ++ SSAM_MOUNT_NORMAL /* mount virtio to normal function */ ++}; ++ ++enum ssam_function_mount_status { ++ SSAM_MOUNT_OK, /* mount ok */ ++ SSAM_MOUNT_VOLUME_NOT_FOUND, /* mount volume not found */ ++ SSAM_MOUNT_PARAMETERS_ERROR, /* mount parameter error */ ++ SSAM_MOUNT_UNKNOWN_ERROR /* unknow error */ ++}; ++ ++enum ssam_io_type { ++ SSAM_VIRTIO_BLK_IO = 2, /* virtio-blk IO */ ++ SSAM_VIRTIO_SCSI_IO, /* virtio-scsi normal IO */ ++ SSAM_VIRTIO_SCSI_CTRL, /* virtio-scsi control IO */ ++ SSAM_VIRTIO_SCSI_EVT, /* virtio-scsi event IO */ ++ SSAM_VIRTIO_VSOCK_IO, /* virtio-vsock IO */ ++ SSAM_VIRTIO_VSOCK_EVT, /* virtio-vsock event */ ++ SSAM_VIRTIO_FUNC_STATUS, /* virtio function status change */ ++ SSAM_VIRTIO_FS_IO, /* virtio-fs normal IO */ ++ SSAM_VIRTIO_FS_HIPRI, /* virtio-fs high priority IO */ ++ SSAM_VIRTIO_TYPE_RSVD, /* virtio type rsvd */ ++}; ++ ++enum ssam_io_status { ++ SSAM_IO_STATUS_OK, /* ok */ ++ SSAM_IO_STATUS_EMPTY, /* poll return empty */ ++ SSAM_IO_STATUS_ERROR /* error */ ++}; ++ ++enum ssam_function_action { ++ SSAM_FUNCTION_ACTION_START, /* start */ ++ SSAM_FUNCTION_ACTION_STOP, /* stop */ ++ SSAM_FUNCTION_ACTION_RESET, /* reset */ ++ SSAM_FUNCTION_ACTION_CONFIG_CHANGE, /* config change report */ ++ SSAM_FUNCTION_ACTION_SCSI_EVENT, /* SCSI event report */ ++ SSAM_FUNCTION_ACTION_MAX ++}; ++ ++enum ssam_function_status { ++ SSAM_FUNCTION_STATUS_START, /* start */ ++ SSAM_FUNCTION_STATUS_STOP, /* stop */ ++ SSAM_FUNCTION_EVENT_MIGRATE /* migrate */ ++}; ++ ++enum data_request_dma_type { ++ SSAM_REQUEST_DATA_LOAD = 0, /* load data from host->CPU DDR */ ++ SSAM_REQUEST_DATA_STORE = 1, /* store data frome CPU DDR->host */ ++ SSAM_REQUEST_DATA_MAX ++}; ++ ++struct ssam_melem { ++ void *addr; /* virtual address */ ++ uint64_t iova; /* IO address */ ++ uint64_t page_sz; /* page size of underlying memory */ ++ int socket_id; /* NUMA socket ID */ ++ int rsvd; ++}; ++ ++enum ssam_blk_hash_mode { ++ SSAM_PF_HASH_MODE = 0, ++ SSAM_VQ_HASH_MODE, ++ SSAM_IO_HASH_MODE, ++}; ++ ++struct ssam_lib_args { ++ uint8_t role; /* reserved */ ++ uint8_t core_num; /* core num that polled by SPDK thread */ ++ uint8_t dma_queue_num; /* host dma queue num per channel */ ++ uint8_t hash_mode; /* hash mode: BLK:0-1bits SCSI:2-3bits FS:4-5bits NVMe:6-7bits */ ++ uint8_t rsvd[32]; /* for rsvd */ ++ /* register DPDK function rte_malloc_heap_alloc */ ++ int (*ssam_heap_malloc)(const char *type, size_t size, ++ int socket_arg, unsigned int flags, size_t align, ++ size_t bound, bool contig, struct ssam_melem *mem); ++ int (*ssam_heap_free)(void *addr); /* register DPDK function rte_malloc_heap_free */ ++}; ++ ++struct ssam_pf_list { ++ uint16_t pf_funcid; /* pf_funcid = -1 means invalid */ ++ uint16_t 
pf_type; /* refer to enum ssam_device_type */ ++ uint16_t vf_funcid_start; /* the start function id of vf */ ++ uint16_t vf_num; /* the number of vf that have been configured */ ++ uint16_t vf_max; /* the max number of vf that can be configured */ ++}; ++ ++/* the host side all pf/vf end point info */ ++struct ssam_hostep_info { ++ struct ssam_pf_list host_pf_list[SSAM_HOSTEP_NUM_MAX]; ++}; ++ ++struct ssam_virtio_config { ++ uint64_t device_feature; /* the virtio device feature */ ++ uint16_t queue_num; /* the queue number of virtio device */ ++ uint16_t config_len; /* the actual length of device_config */ ++ uint8_t device_config[SSAM_DEV_CFG_MAX_LEN]; /* the virtio device configure */ ++ uint16_t queue_size; ++ uint16_t rx_queue_id; ++}; ++ ++/* ssam function config */ ++struct ssam_function_config { ++ int gfunc_id; /* pf or vf funcion id */ ++ enum ssam_device_type type; /* pf or vf type */ ++ struct ssam_virtio_config virtio_config; /* pf or vf configure */ ++}; ++ ++struct ssam_virt_request { ++ uint16_t vq_idx; ++ uint16_t req_idx; ++}; ++ ++struct ssam_nvme_request { ++ void *data; ++}; ++ ++struct ssam_io_message { ++ uint32_t header_len; /* io header length */ ++ uint8_t header[SSAM_VIRTIO_HEAD_LEN]; /* refer to struct virtio_blk_outhdr */ ++ uint32_t iovcnt; /* io vector count */ ++ struct iovec *iovs; /* io vectors, max 1MB IO */ ++ uint8_t writable; /* 0 : write io, 1 : read io */ ++ uint8_t rsvd[3]; /* for byte alignment */ ++ union { ++ struct ssam_virt_request virtio; ++ struct ssam_nvme_request nvme; ++ }; ++}; ++ ++/** ++ * @brief function event structure ++ */ ++struct ssam_func_event { ++ enum ssam_function_status status; /* function status */ ++ uint32_t data; /* virtio version: 0--v0.95 1--v1.0 2--v1.1 */ ++}; ++ ++struct ssam_request { ++ uint16_t gfunc_id; /* function id vf id number */ ++ uint16_t rsvd; ++ uint32_t iocb_id; /* response need */ ++ enum ssam_io_type type; ++ union { ++ struct ssam_io_message cmd; /* VMIO command structure */ ++ struct ssam_func_event event; /* report function event */ ++ } req; ++ enum ssam_io_status status; /* request status */ ++ uint32_t flr_seq; /* response need */ ++}; ++ ++struct ssam_request_poll_opt { ++ struct iovec ++ *sge1_iov; /**< output for req->req.cmd.iovs[1] (per VMIO req). 
Actual data length set in iov_len */ ++ uint16_t queue_id; /**< (optional) poll a queue id instead of using 'tid' parameter to calculate the queue */ ++ uint8_t rsvd[54]; ++}; ++ ++struct ssam_virtio_res { ++ struct iovec *iovs; /* rsp io vectors */ ++ void *rsp; /* data of rsp */ ++ uint32_t rsp_len; /* length of rsp */ ++ uint32_t iovcnt; /* rsp vector count */ ++}; ++ ++struct ssam_io_response { ++ uint16_t gfunc_id; /* global function id in chip */ ++ uint16_t rsvd; ++ uint32_t iocb_id; /* copy from struct ssam_request */ ++ struct ssam_virtio_res data; ++ struct ssam_request *req; /* corresponding to struct vmio_request */ ++ enum ssam_io_status status; /* IO status, copy from struct ssam_request */ ++ uint32_t flr_seq; /* copy from struct ssam_request */ ++}; ++ ++struct ssam_dma_request { ++ uint16_t gfunc_id; ++ uint16_t direction; ++ uint32_t flr_seq; ++ uint32_t src_num; /* source sge number */ ++ uint32_t dst_num; /* dest sge number */ ++ struct iovec *src; /* source buffer address, gpa mode */ ++ struct iovec *dst; /* dest buffer address, va mode */ ++ uint32_t data_len; ++ void *cb; ++}; ++ ++struct ssam_dma_rsp { ++ void *cb; ++ uint32_t status; /* process status, 0--OK, 1--ERR */ ++ uint32_t last_flag; /* data copy finish until receive this last flag */ ++}; ++ ++struct memory_info_stats { ++ size_t total_size; /* Total bytes of mempool */ ++ size_t free_size; /* Total free bytes of mempool */ ++ size_t greatest_free_size; /* Size in bytes of largest free block */ ++ unsigned free_count; /* Number of free elements of mempool */ ++ unsigned alloc_count; /* Number of allocated elements of mempool */ ++ size_t used_size; /* Total allocated bytes of mempool */ ++}; ++ ++/** ++ * Init ssam lib, set ssam work mode, set core num, set functions, get host pf/vf endpoint info. ++ * ++ * \param args_in input work mode, core num, functions. ++ * \param eps_out output host pf/vf endpoint info. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_lib_init(struct ssam_lib_args *args_in, struct ssam_hostep_info *eps_out); ++ ++/** ++ * Exit ssam lib when not use ssam any more. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_lib_exit(void); ++ ++typedef void ssam_mempool_t; ++ ++/** ++ * Create the memory pool, the memory is allocated by spdk_dma_malloc. ++ * ++ * \param size the memory pool size. ++ * \param extra_size_limit the memory size that can alloc in addition to the memory pool ++ * ++ * \return a pointer to memory pool when succeed or null when failed ++ */ ++ssam_mempool_t *ssam_mempool_create(uint64_t size, uint64_t extra_size_limit); ++ ++/** ++ * Allocate one piece of memory from the memory pool. ++ * ++ * \param mp the memory pool. ++ * \param size the memory size that want to allocate. ++ * \param phys_addr save the physical address of the allocated memory, ++ * if allocate failed, will not change the value. ++ * ++ * \return the allocated memory's start virtual address when succeed or null when failed ++ */ ++void *ssam_mempool_alloc(ssam_mempool_t *mp, uint64_t size, uint64_t *phys_addr); ++ ++/** ++ * Free the memory back to the memory pool. ++ * ++ * \param mp the memory pool. ++ * \param ptr the memory virtual address that return by ssam_mempool_alloc. ++ */ ++void ssam_mempool_free(ssam_mempool_t *mp, void *ptr); ++ ++/** ++ * Destroy the memory pool, when this done, the memory pool cannot be used again. ++ * ++ * \param mp the memory pool. 
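++ *
++ * A minimal lifecycle sketch for the mempool API declared above (illustrative
++ * only, not part of the original patch; the 128 MB pool size, zero extra size
++ * and 4 KB allocation are arbitrary assumptions):
++ *
++ *   uint64_t phys = 0;
++ *   ssam_mempool_t *mp = ssam_mempool_create(128 * SSAM_MB, 0);
++ *   if (mp != NULL) {
++ *           void *buf = ssam_mempool_alloc(mp, 4096, &phys);
++ *           if (buf != NULL) {
++ *                   ssam_mempool_free(mp, buf);
++ *           }
++ *           ssam_mempool_destroy(mp);
++ *   }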
++ */ ++void ssam_mempool_destroy(ssam_mempool_t *mp); ++ ++/** ++ * get the memory pool info status. ++ * ++ * \param mp the memory pool. ++ * \param info the mempool info status. ++ */ ++int ssam_get_mempool_info(ssam_mempool_t *mp, struct memory_info_stats *info); ++ ++/** ++ * ssam recover module preinit. ++ * ++ * \return 0 for succeed, 1 for config file exist, and less then 0 for failed. ++ */ ++int spdk_ssam_rc_preinit(void); ++ ++/** ++ * Get recover json file path. ++ * ++ * \return a file path string ++ */ ++char *ssam_rc_get_recover_json_file_path(void); ++ ++/** ++ * Get parameter json file path. ++ * ++ * \return a file path string ++ */ ++char *ssam_rc_get_param_json_file_path(void); ++ ++/** ++ * Initialize PF (include all VFs belong to this PF) to specific device type. ++ * The interface must be called with increasing pf_id. The function is not ++ * visible to host after init. ++ * ++ * \param pf_id PF function id. ++ * \param num_vf number of VFs of the PF. ++ * \param dev_type PF/VF type. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_setup_function(uint16_t pf_id, uint16_t num_vf, enum ssam_device_type dev_type); ++ ++/** ++ * Change specific device config. ++ * ++ * \param cfg new device configuration data. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_write_function_config(struct ssam_function_config *cfg); ++ ++/** ++ * send action to function. Invoked by SPDK. ++ * ++ * \param gfunc_id the global function index of the chip ++ * \param action the action to take on the function ++ * \param data extra action data if used ++ * \param data_len extra action data len ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_send_action(uint16_t gfunc_id, enum ssam_function_action action, const void *data, ++ uint16_t data_len); ++ ++/** ++ * Mount ssam volume, synchronous interface. ++ * ++ * \param gfunc_id the global function id of chip. ++ * \param lun_id the lun id of this volume. ++ * \param type mount type, refer to enum ssam_mount_type. ++ * \param tid it's used as the request queue id per CPU core. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_function_mount(uint16_t gfunc_id, uint32_t lun_id, enum ssam_mount_type type, ++ uint16_t tid); ++ ++/** ++ * Umount ssam volume, synchronous interface. ++ * ++ * \param gfunc_id the global function id of chip. ++ * \param lun_id the lun id of this volume. ++ * ++ * \return refer to enum ssam_function_mount_status ++ */ ++int ssam_function_umount(uint16_t gfunc_id, uint32_t lun_id); ++ ++/** ++ * Poll request queue for ssam request. ++ * ++ * \param tid it's used as the request queue id per CPU core. ++ * \param poll_num the number of ssam request that want to be polled. ++ * \param io_req output for received request, the buffer is allocated by ssam, ++ * and released when IO complete. ++ * ++ * \return the number of vmio has been polled, less than 0 or bigger than poll_num for failed ++ */ ++int ssam_request_poll(uint16_t tid, uint16_t poll_num, struct ssam_request **io_req); ++ ++/** ++ * Poll request queue for ssam request. ++ * ++ * \param tid it's used as the request queue id per CPU core. ++ * \param poll_num the number of ssam request that want to be polled. ++ * \param io_req output for received request, the buffer is allocated by ssam, ++ * and released when IO complete. ++ * \param poll_opt (optional) extra poll options. 
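++ *
++ * Illustrative polling sketch (not part of the original patch; 'tid' and the
++ * use of SSAM_MAX_REQ_POLL_SIZE as the batch size are assumptions, and the
++ * poll_opt fields are shown with placeholder values):
++ *
++ *   struct iovec sge1[SSAM_MAX_REQ_POLL_SIZE];
++ *   struct ssam_request *reqs[SSAM_MAX_REQ_POLL_SIZE];
++ *   struct ssam_request_poll_opt opt = { .sge1_iov = sge1, .queue_id = 0 };
++ *   int n = ssam_request_poll_ext(tid, SSAM_MAX_REQ_POLL_SIZE, reqs, &opt);
++ *   for (int i = 0; i < n; i++) {
++ *           // dispatch reqs[i] based on reqs[i]->type, then report completion
++ *           // with ssam_io_complete() once the backend has processed it
++ *   }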
++ * ++ * \return the number of vmio has been polled, less than 0 or bigger than poll_num for failed ++ */ ++int ssam_request_poll_ext(uint16_t tid, uint16_t poll_num, struct ssam_request **io_req, ++ struct ssam_request_poll_opt *poll_opt); ++ ++/** ++ * Request ssam data. Hardware will load or store data betweent host and CPU. ++ * Asynchronous interface. ++ * ++ * \param tid it's used as the request queue id per CPU core. ++ * \param dma_req request data is here. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_dma_data_request(uint16_t tid, struct ssam_dma_request *dma_req); ++ ++/** ++ * Poll ssam request data. ++ * ++ * \param tid it's used as the request queue id per CPU core. ++ * \param poll_num the number of ssam request that want to be polled. ++ * \param dma_rsp response data is here. ++ * ++ * \return the number of msg rsp has been polled, less than 0 or bigger than poll_num for failed ++ */ ++int ssam_dma_rsp_poll(uint16_t tid, uint16_t poll_num, struct ssam_dma_rsp *dma_rsp); ++ ++/** ++ * Send IO complete info to ssam request queue. ++ * ++ * \param tid it's used as the request queue id per CPU core. ++ * \param resp response info is here. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_io_complete(uint16_t tid, struct ssam_io_response *resp); ++ ++/** ++ * Create vmio rx queue ++ * ++ * \param queue_id_out id of the queue create ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_vmio_rxq_create(uint16_t *queue_id_out); ++ ++/** ++ * Update virtio device used or not. ++ * ++ * \param glb_function_id the global function index of the chip ++ * \param device_used virtio device is used or not ++ * ++ * \return 0: success -1: fail, internal error, others: fail, refer to errno.h ++ */ ++int ssam_update_virtio_device_used(uint16_t glb_function_id, uint64_t device_used); ++ ++/** ++ * release virtio blk vq resource. ++ * ++ * \param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * ++ * \return 0: success -1: fail, internal error, others: fail, refer to errno.h ++ */ ++int ssam_virtio_blk_release_resource(uint16_t glb_function_id); ++ ++/** ++ * alloc virtio blk vq resource. ++ * ++ * \param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * \param queue_num number of vq ++ * ++ * \return 0: success -1: fail, internal error, others: fail, refer to errno.h ++ */ ++int ssam_virtio_blk_alloc_resource(uint16_t glb_function_id, uint16_t queue_num); ++ ++/** ++ * Update virtio blk capacity. ++ * ++ * \param gfunc_id the global function index of the chip. ++ * \param capacity the new capacity. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_virtio_blk_resize(uint16_t gfunc_id, uint64_t capacity); ++ ++/** ++ * Vq bind core. ++ * ++ * \param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * \param queue_num the num of vqueue ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_virtio_vq_bind_core(uint16_t glb_function_id, uint16_t queue_num); ++ ++/** ++ * Vq unbind core. ++ * ++ * \param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_virtio_vq_unbind_core(uint16_t glb_function_id); ++ ++/** ++ * Get global function id by dbdf. ++ * ++ * \param dbdf the combine of domain bus device function. ++ * \param gfunc_id the global function index of the chip. 
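++ *
++ * Illustrative sketch (not part of the original patch; the DBDF string below
++ * is an arbitrary example value, and ssam_dbdf_str2num is declared later in
++ * this header):
++ *
++ *   uint32_t dbdf = 0;
++ *   uint16_t gfunc_id = 0;
++ *   char bdf_str[] = "0000:03:00.0";
++ *   if (ssam_dbdf_str2num(bdf_str, &dbdf) == 0) {
++ *           (void)ssam_get_funcid_by_dbdf(dbdf, &gfunc_id);
++ *   }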
++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_get_funcid_by_dbdf(uint32_t dbdf, uint16_t *gfunc_id); ++ ++/** ++ * Convert dbdf from string format to number. ++ * ++ * \param str source dbdf string. ++ * \param dbdf store result. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_dbdf_str2num(char *str, uint32_t *dbdf); ++ ++/** ++ * Convert dbdf from number format to string. ++ * ++ * \param dbdf source dbdf number. ++ * \param str store result. ++ * \param len the str buffer length. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_dbdf_num2str(uint32_t dbdf, char *str, size_t len); ++ ++/** ++ * @brief check device ready ++ * @param role 0--old process; 1--new process ++ * @param proc_type enum proc_type, supoort PROC_TYPE_VBS and PROC_TYPE_BOOT ++ * @param ready output_para 0--not ready, 1--ready ++ * @return ++ * - 0: success ++ * - <0: fail, refer to errno.h ++ */ ++int ssam_check_device_ready(uint8_t role, uint32_t proc_type, uint8_t *ready); ++ ++/** ++ * @brief get hot upgrade state ++ * @param void ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int ssam_get_hot_upgrade_state(void); ++ ++/** ++ * @brief sync PF/VF config info to the hpd process in host ++ * @param void ++ * @return void ++ */ ++void ssam_hotplug_cfg(void); ++ ++/** ++ * @brief hot insert device interface ++ * @param prot_id the number of PF to add ++ * @return ++ * - 0: success ++ * - others: fail, refer to errno.h ++ */ ++int ssam_hotplug_add(uint16_t port_id); ++ ++/** ++ * @brief hot remove device interface ++ * @param prot_id the number of PF to remove ++ * @return ++ * - 0: success ++ * - others: fail, refer to errno.h ++ */ ++int ssam_hotplug_del(uint16_t port_id); ++ ++/** ++ * @brief func_id hotplug del async api ++ * @param func_id the global function index of the chip ++ * @return ++ * - 0: success ++ * - 1: remove failed ++ * - 2: invalid func_id ++ * - 3: repeat hot del ++ * - others: fail, refer to errno.h ++ */ ++int ssam_hotplug_del_async(uint16_t port_id); ++ ++/** ++ * @brief get hot upgrade state ++ * @param void ++ * @return ++ * - 0: HPD enable ++ * - 1: HPD disable ++ */ ++bool ssam_hotplug_enable_check(void); ++ ++/** ++ * @brief get hot del state ++ * @param func_id the global function index of the chip ++ * @return ++ * - 0: success ++ * - 1: remove failed ++ * - 2: try again ++ */ ++int ssam_hotplug_del_async_check(uint16_t port_id); ++ ++#ifdef __cplusplus ++} ++#endif ++ ++#endif /* DPAK_SSAM_H */ +diff --git a/lib/ssam/ssam_driver/hivio_api.h b/lib/ssam/ssam_driver/hivio_api.h +new file mode 100644 +index 0000000..aca72a7 +--- /dev/null ++++ b/lib/ssam/ssam_driver/hivio_api.h +@@ -0,0 +1,728 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. ++ */ ++ ++#ifndef HIVIO_API_H ++#define HIVIO_API_H ++ ++#include "spdk/stdinc.h" ++ ++#define MEM_ALLOC_SGE_NUM_MAX 512 ++ ++/** ++ * @brief memory descriptor for hvio_mem_alloc. ++ */ ++typedef struct mem_desc { ++ uint32_t size; /* *< mem array size */ ++ struct { ++ uint64_t virt; /* *< virtual address */ ++ uint64_t phys; /* *< physical address */ ++ uint32_t len; /* *< length */ ++ } mem[MEM_ALLOC_SGE_NUM_MAX]; ++} mem_desc_s; ++ ++/** ++ * @brief memory descriptor for hvio_heap_malloc. 
++ */ ++struct hvio_melem { ++ void *addr; /**< virtual address */ ++ uint64_t iova; /**< IO address */ ++ uint64_t page_sz; /**< page size of underlying memory */ ++ int socket_id; /**< NUMA socket ID */ ++ int rsvd; ++}; ++ ++/** ++ * @brief memory-related callbacks. ++ */ ++typedef struct hvio_callback_ops { ++ int (*hvio_heap_malloc)(const char *type, size_t size, int socket_arg, unsigned int flags, ++ size_t align, size_t bound, bool contig, ++ struct hvio_melem *mem); /* register rte_malloc_heap_alloc */ ++ int (*hvio_heap_free)(void *addr); /* register rte_malloc_heap_free */ ++ int (*hvio_mem_alloc)(uint32_t size, int phy_contig, ++ mem_desc_s *mem_desc); /* register dma_mem_alloc function */ ++ int (*hvio_mem_free)(void *virt); /* register dma_mem_free function */ ++} hvio_callback_ops_s; ++ ++/** ++ * @brief proc type definition ++ */ ++enum proc_type { ++ PROC_TYPE_VBS = 0, ++ PROC_TYPE_BOOT, ++ PROC_TYPE_MIGTORBO, ++ PROC_TYPE_MAX ++}; ++ ++enum hivio_blk_hash_mode { ++ HVIO_PF_HASH_MODE = 0, ++ HVIO_VQ_HASH_MODE, ++ HVIO_IO_HASH_MODE, ++}; ++ ++/** ++ * @brief hivio_lib initialize parameters ++ */ ++typedef struct hvio_lib_args { ++ uint8_t role; /**< 0--old process; 1--new process */ ++ uint8_t core_num; /**< core num that polled by SPDK thread */ ++ hvio_callback_ops_s cb_ops; /**< memory-related callbacks */ ++ uint32_t proc_type; /**< enum proc_type */ ++ uint8_t host_dma_chnl_num; /**< host dma channel number, used for migtorbo multi chan process */ ++ uint8_t host_dma_mp_per_chnl; /**< host dma mempool per channel, 0: disable mp per channel, 1: enable */ ++ uint8_t host_dma_queue_per_chnl; /**< host dma queue num per channel, 0: disabled-defalt 1, max: 4 */ ++ uint8_t hash_mode; /**< HASH MODE: BLK:0-1bits SCSI:2-3bits FS:4-5bits NVMe:6-7bits */ ++ uint8_t rsvd[56]; /**< for rsvd */ ++} hvio_lib_args_s; ++ ++#define HVIO_HOSTEP_NUM_MAX 32 ++ ++/** ++ * @brief host side storage pf/vf end point info ++ */ ++typedef struct hvio_hostep_info { ++ struct { ++ uint16_t pf_funcid; /* *< pf_funcid = 0xffff means invalid */ ++ uint16_t pf_type; /* *< is config or not */ ++ uint16_t vf_funcid_start; ++ uint16_t vf_num; /* *< already config vf num */ ++ uint16_t vf_max; /* *< max num can be config */ ++ } host_pf_list[HVIO_HOSTEP_NUM_MAX]; ++} hvio_hostep_info_s; ++ ++/** ++ * @brief device type definition ++ */ ++enum device_type { ++ DEVICE_NVME, /* *< NVMe device */ ++ DEVICE_VIRTIO_NET, /* *< VirtIO-net device */ ++ DEVICE_VIRTIO_BLK, /* *< VirtIO-blk device */ ++ DEVICE_VIRTIO_SCSI, /* *< VirtIO-scsi device */ ++ DEVICE_VIRTIO_VSOCK, /* *< VirtIO-vsock device */ ++ DEVICE_VIRTIO_FS, /**< VirtIO-FS device */ ++ DEVICE_VIRTIO_MAX /* *< VirtIO-max device */ ++}; ++ ++/** ++ * @brief configration type definition ++ */ ++ ++struct function_config { ++ uint32_t function_id; ++ enum device_type type; ++ union { ++ struct { ++ uint64_t device_feature; ++ uint16_t queue_num; ++ uint16_t config_len; ++ uint8_t device_config[60]; ++ uint16_t queue_size; ++ uint16_t rx_queue_id; ++ } virtio; ++ } config; ++}; ++ ++/** ++ * @brief EP operation definition. ++ */ ++enum function_action { ++ FUNCTION_ACTION_START, /* *< start */ ++ FUNCTION_ACTION_STOP, /* *< stop */ ++ FUNCTION_ACTION_RESET, /* *< reset */ ++ FUNCTION_ACTION_CONFIG_CHANGE, /* *< config change report */ ++ FUNCTION_ACTION_SCSI_EVENT, /* *< SCSI event report */ ++ FUNCTION_ACTION_MAX ++}; ++ ++/** ++ * @brief EP function status definition. 
++ */ ++enum function_status { ++ FUNCTION_STATUS_START, /* *< start */ ++ FUNCTION_STATUS_STOP, /* *< stop */ ++ FUNCTION_EVENT_MIGRATE, /* *< migrate */ ++}; ++ ++/** ++ * @brief VMIO type definition, support nvme and virtio. ++ */ ++enum vmio_type { ++ VMIO_TYPE_NVME_IO, /* *< NVMe normal IO */ ++ VMIO_TYPE_NVME_ADMIN, /* *< NVMe admin IO */ ++ VMIO_TYPE_VIRTIO_BLK_IO, /* *< VirtIO blk IO */ ++ VMIO_TYPE_VIRTIO_SCSI_IO, /* *< VirtIO scsi normal IO */ ++ VMIO_TYPE_VIRTIO_SCSI_CTRL, /* *< VirtIO scsi IO */ ++ VMIO_TYPE_VIRTIO_SCSI_EVT, /* *< VirtIO scsi event */ ++ VMIO_TYPE_VIRTIO_VSOCK_IO, /* *< VirtIO vsock IO */ ++ VMIO_TYPE_VIRTIO_VSOCK_EVT, /* *< VirtIO vsock event */ ++ VMIO_TYPE_VIRTIO_FUNC_STATUS, /* *< VirtIO function status change */ ++ VMIO_TYPE_VIRTIO_FS_IO, /* *< VirtIO fs normal IO */ ++ VMIO_TYPE_VIRTIO_FS_HIPRI, /* *< VirtIO fs high priority IO */ ++ VMIO_TYPE_RSVD, /* *< VMIO type rsvd */ ++}; ++ ++struct virtio_req { ++ uint16_t vq_idx; /* *< vq idx */ ++ uint16_t req_idx; /* *< head desc idx of io */ ++}; ++ ++struct nvme_req { ++ void *data; /* *< nvme admin input data */ ++}; ++ ++/** ++ * @brief VMIO cmd structure. ++ */ ++struct vmio_cmd { ++ uint32_t cmd_len; /* *< length of VMIO command, fixed to 64B */ ++ uint8_t cmd[64]; /* *< the specific format according to vmio_type */ ++ ++ uint32_t iovcnt; /* *< io vector count */ ++ struct iovec *iovs; /* *< io vectors, max 1MB IO */ ++ uint8_t writable; /* *< 2nd desc->write_flag */ ++ uint8_t rsvd[3]; /* *< rsvd */ ++ union { ++ struct virtio_req virtio; ++ struct nvme_req nvme; ++ }; ++}; ++ ++/** ++ * @brief function event structure. ++ */ ++struct func_event { ++ enum function_status status; /* *< function status */ ++ uint32_t data; /* *< VirtIO version: 0--v0.95; 1--v1.0; 2--v1.1 */ ++}; ++ ++/** ++ * @brief VMIO status definition. ++ */ ++enum vmio_status { ++ VMIO_STATUS_OK, /* *< ok */ ++ VMIO_STATUS_VQ_EMPTY, /* *< VQ empty */ ++ VMIO_STATUS_ERROR, /* *< error */ ++ VMIO_STATUS_DRIVER_NOT_OK, /* *< frontend driver not ready */ ++ VMIO_STATUS_VQ_ENGN_NOT_EN, /* *< backend vq not ready */ ++ VMIO_STATUS_DMA_IO_ERROR, /* *< frontend dma access error */ ++ VMIO_STATUS_VQ_SOURCE_ERROR, /* *< VQ cache source error */ ++ VMIO_STATUS_VQ_ERROR /* *< frontend vq status error */ ++}; ++ ++/** ++ * @brief VMIO request structure. 
++ */
++struct vmio_request {
++	uint16_t glb_function_id; /* *< global function id in chip */
++	uint16_t nvme_sq_id; /* *< sq_id in iocb for NVMe vmio */
++	uint32_t iocb_id; /* *< io control block id for ucode */
++	enum vmio_type type; /* *< VMIO type to parse the req format */
++	union {
++		struct vmio_cmd cmd; /* *< VMIO command structure */
++		struct func_event event; /* *< report function event */
++	} req;
++	enum vmio_status status; /* *< when flr occurs, set status to error */
++	uint32_t flr_seq; /* *< check whether VMIO is from VF which FLR occurs */
++};
++
++typedef struct tag_nvme_cqe {
++	uint32_t cmd_spec;
++	uint32_t rsvd;
++
++	uint32_t sq_hd : 16;
++	uint32_t sq_id : 16;
++
++	uint32_t cmd_id : 16;
++	uint32_t p : 1;
++	uint32_t status : 15;
++} nvme_cqe_s;
++
++/**
++ * @brief NVMe response structure
++ */
++struct nvme_response {
++	nvme_cqe_s nvme_cqe;
++
++	uint32_t rsp_len; /* *< rsp length */
++	uint32_t iovcnt; /* *< rsp vector count */
++	struct iovec *iovs; /* *< rsp io vectors */
++	void *rsp; /* *< rsp data */
++};
++
++/**
++ * @brief VirtIO response structure
++ */
++struct virtio_response {
++	uint32_t used_len; /* *< length of data that has been uploaded to the VM */
++	uint32_t rsp_len; /* *< length of rsp */
++	uint32_t iovcnt; /* *< rsp vector count */
++	struct iovec *iovs; /* *< rsp io vectors */
++	void *rsp; /* *< data of rsp */
++};
++
++/**
++ * @brief VMIO response structure
++ */
++struct vmio_response {
++	uint16_t glb_function_id; /* *< global function id in chip */
++	uint16_t rsvd0; /* *< make sure nvme and virtio offset is 16B aligned */
++	uint32_t iocb_id; /* *< io control block id used by ucode */
++	enum vmio_type type; /* *< VMIO type */
++	uint32_t rsvd1; /* make sure nvme and virtio offset is 16B aligned */
++
++	union {
++		struct nvme_response nvme; /* *< nvme rsp structure */
++		struct virtio_response virtio; /* *< virtio rsp structure */
++	};
++
++	struct vmio_request *req; /* *< corresponding vmio_request */
++	enum vmio_status status; /* *< VMIO status, copy from vmio_request */
++	uint32_t flr_seq; /* *< copy from vmio_request */
++};
++
++/**
++ * @brief data structure for send action request.
++ */
++typedef struct hvio_send_action_req {
++	uint16_t glb_function_id; /**< global function id in chip */
++	uint16_t data_len; /**< length of request's payload */
++	void *data; /**< request's payload */
++	enum function_action action; /**< action type */
++} hvio_send_action_req_s;
++
++/**
++ * @brief data structure for VMIO send request (destination is virtio RQ).
++ */
++typedef struct hvio_vmio_send_req {
++	uint64_t cb; /**< callback info */
++	uint16_t glb_function_id; /**< global function id in chip */
++	uint16_t vqn; /**< function inner vq idx */
++	uint32_t sge_num; /**< data sge number */
++	struct iovec *data; /**< data buffer address, gpa mode, including virtio_hdr and payload */
++	uint32_t data_len; /**< data len, including virtio_hdr len and payload len. */
++	enum vmio_type type; /**< vmio type */
++} hvio_vmio_send_req_s;
++
++/**
++ * @brief data structure for ACK of VMIO send request (destination is virtio RQ).
++ */
++typedef struct hvio_vmio_send_rsp {
++	uint64_t cb; /**< callback info */
++	uint32_t status; /**< refer to enum vmio_status */
++} hvio_vmio_send_rsp_s;
++
++/**
++ * @brief data structure for rsp of vsock recovery. 
++ */ ++typedef struct hvio_vsock_recovery_rsp { ++ uint16_t tx_used_idx; /* *< virtio vsock txq used idx */ ++ uint16_t rx_used_idx; /* *< virtio vsock rxq used idx */ ++} hvio_vsock_recovery_rsp_s; ++ ++/** ++ * @brief host_dma direction. ++ */ ++enum hvio_host_dma_mode { ++ READ_HOST_MODE = 0, /**< read host data and write to SPU */ ++ WRITE_HOST_MODE = 1, /**< write data to host */ ++ HOST_DMA_MODE_MAX ++}; ++ ++/** ++ * @brief data structrue for host dma request. ++ */ ++typedef struct hvio_host_dma_req { ++ uint16_t glb_function_id; /**< VM global function id */ ++ uint16_t direction; /**< host dma direction, format is enum hvio_host_dma_mode */ ++ uint32_t flr_seq; /**< check whether the vmio copy request is a leaked request when flr occurs */ ++ uint32_t ssge_num; /**< source sge number */ ++ uint32_t dsge_num; /**< dest sge number */ ++ struct iovec *src; /**< source buffer address, gpa. host buf for read, ddr for write. */ ++ struct iovec *dst; /**< dest buffer address, gpa. ddr for read, host buf for write */ ++ uint32_t data_len; /**< length for load or store */ ++ void *cb; /**< callback info */ ++} hvio_host_dma_req_s; ++ ++/** ++ * @brief data structrue for ACK of host dma request. ++ */ ++typedef struct hvio_host_dma_rsp { ++ void *cb; /**< SPDK callback info */ ++ uint32_t status; /**< 0 OK, 1 ERROR */ ++ uint32_t last_flag; ++} hvio_host_dma_rsp_s; ++ ++/** ++ * @brief data structrue for hivio stats. ++ */ ++ ++typedef struct hvio_info_stats { ++ uint64_t vmio_req; ++ uint64_t vmio_rsp; ++ ++ uint64_t vsock_tx_req; ++ uint64_t vscok_tx_rsp; ++ uint64_t vsock_rx_req; ++ uint64_t vsock_rx_rsp; ++ ++ uint64_t host_dma_req; ++ uint64_t host_dma_sub_req; ++ uint64_t host_dma_rsp; ++ ++ uint64_t update_blk_cap; ++ uint64_t send_action; ++ ++ uint64_t rsvd[16]; ++} hvio_info_stats_s; ++ ++typedef struct hvio_warn_stats { ++ uint64_t invalid_vmio; ++ uint64_t vsock_rx_rsp_status_abnormal; ++ uint64_t host_dma_rsp_status_abnormal; ++ ++ uint64_t rsvd[16]; ++} hvio_warn_stats_s; ++ ++typedef struct hvio_error_stats { ++ uint64_t update_blk_cap_fail; ++ uint64_t send_action_fail; ++ uint64_t vmio_rsp_fail; ++ uint64_t vsock_tx_fail; ++ uint64_t vsock_rx_fail; ++ uint64_t host_dma_req_fail; ++ ++ uint64_t rsvd[16]; ++} hvio_error_stats_s; ++ ++typedef struct hivio_func_ctx_read_rsp { ++ uint8_t device_type; ++ uint8_t device_status; ++ uint16_t num_queues; ++ uint8_t flr_status; ++ uint8_t rsvd0[3]; ++ uint32_t device_feature_l; ++ uint32_t device_feature_h; ++ uint32_t driver_feature_l; ++ uint32_t driver_feature_h; ++ uint32_t rsvd1[26]; ++} hivio_func_ctx_read_rsp_s; ++ ++struct hvio_mount_para { ++ uint32_t algo_type; /* *< VBS:algorithm 0 or 1; IPU:0--dummy; 1--normal */ ++ uint32_t key[3]; /* *< 0 for rsvd. VBS:key[0] tree_id, key[1] pt_num, key[2] blk_size */ ++}; ++ ++/** ++ * @brief hivio initialization function ++ * @param args_in initialization parameters input ++ * @param eps_out host side ep info ouput ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_lib_init(hvio_lib_args_s *args_in, hvio_hostep_info_s *eps_out); ++ ++/** ++ * @brief hivbs de-initialize function. ++ * @param void ++ * @return ++ * - 0: success ++ */ ++int hvio_lib_deinit(void); ++ ++/** ++ * @brief update virtio blk capacity. 
++ * @param glb_function_id the global function index of the chip ++ * @param capacity new capacity ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_update_virtio_blk_capacity(uint16_t glb_function_id, uint64_t capacity); ++ ++/** ++ * @brief poll RQ for VMIO request. ++ * @param tid It's used as the L2NIC RQ id per SPU core. ++ * @param poll_num the number of msg rsp want to be polled ++ * @param req output for received request. The buffer is allocated by hivbs, and used by SPDK. Release when IO complete. ++ * @return ++ * - >=0: the number of vmio_request has been polled ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_vmio_req_poll_batch(uint16_t tid, uint16_t poll_num, struct vmio_request **req); ++ ++/** ++ * @brief hvio_vmio_req_poll_batch_ext extra poll options ++ */ ++typedef struct hvio_vmio_req_poll_opt { ++ struct iovec ++ *sge1_iov; /**< output for req->req.cmd.iovs[1] (per VMIO req). Actual data length set in iov_len */ ++ uint16_t queue_id; /**< (optional) poll a queue id instead of using 'tid' parameter to calculate the queue */ ++ uint8_t rsvd[54]; ++} hvio_vmio_req_poll_opt_s; ++ ++/** ++ * @brief poll RQ for VMIO request, together with the contents of req->req.cmd.iovs[1]. ++ * @param tid It's used as the L2NIC RQ id per SPU core. ++ * @param poll_num the number of msg rsp want to be polled, if the poll_num > 16, the actual poll num is 16. ++ * @param req output for received request. The buffer is allocated by hivbs, and used by SPDK. Release when IO complete. ++ * @param poll_opt (optional) extra poll options. ++ * @return ++ * - >=0: the number of vmio_request has been polled ++ * - <0: fail, refer to errno.h ++ * @note req->req.cmd.writable will be used to specify the first writable index in req->req.cmd.iovs. ++ */ ++int hvio_vmio_req_poll_batch_ext(uint16_t tid, uint16_t poll_num, struct vmio_request **req, ++ hvio_vmio_req_poll_opt_s *poll_opt); ++ ++/** ++ * @brief send VMIO complete to SQ. ++ * @param tid It's used as the L2NIC SQ id per SPU core. ++ * @param resp VMIO response ++ * @return ++ * - 0: success ++ * - others: fail, refer to errno.h ++ */ ++int hvio_vmio_complete(uint16_t tid, struct vmio_response *resp); ++ ++/** ++ * @brief create vmio rx queue ++ * @param queue_id_out id of the queue create ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_vmio_rxq_create(uint16_t *queue_id_out); ++ ++/** ++ * @brief initialize PF (include all VFs belong to this PF) to specific device type. For virtio device of the PF and VF ++ * can be set to different virtio device_type. The interface must be called with increasing pf_id. The function is not ++ * visible to host after init. ++ * @param pf_id PF id ++ * @param num_vf number of VFs of the PF, they use the same type ++ * @param pf_type pf device type ++ * @param vf_type vf device type ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_setup_function(uint16_t pf_id, uint16_t num_vf, enum device_type pf_type, ++ enum device_type vf_type); ++ ++/** ++ * @brief change specific device config. ++ * @param cfg new device configuration data ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_write_function_config(struct function_config *cfg); ++ ++/** ++ * @brief get global function index by pcie device dbdf info. 
++ * @param dbdf pcie device dbdf info(input para) ++ * @param glb_function_id the global function index of the chip(output para) ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_get_glb_function_id_by_dbdf(uint32_t dbdf, uint16_t *glb_function_id); ++ ++/** ++ * @brief send action to function, synchronous interface. ++ * @param req send action request ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_send_action(uint16_t glb_function_id, enum function_action action, const void *data, ++ uint16_t data_len); ++ ++/** ++ * @brief DMA request. hw will load or store data between X86 host and spu ddr, asynchronous interface. ++ * @param chnl_id is associated with L2NIC SQ ID. ++ * @param req host dma request ++ * @return ++ * - 0: success ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_host_dma_request(uint16_t chnl_id, hvio_host_dma_req_s *req); ++ ++/** ++ * @brief poll RQ for dma response status. device provides DMA response in the same order with DMA request. ++ * @param chnl_id is associated with L2NIC RQ ID. ++ * @param poll_num the number of rsp want to be polled. ++ * @param[out] rsp output for received response. ++ * @return ++ * - >=0: the number of host dma rsp has been polled ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_host_dma_rsp_poll(uint16_t chnl_id, uint16_t poll_num, hvio_host_dma_rsp_s *rsp); ++ ++union hvio_nvme_config_cmd_info { ++ uint32_t cmd[5]; ++}; ++ ++/** ++ * @brief get hot upgrade state ++ * @param void ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_get_hot_upgrade_state(void); ++ ++/** ++ * @brief check device ready ++ * @param role 0--old process; 1--new process ++ * @param proc_type enum proc_type, supoort PROC_TYPE_VBS and PROC_TYPE_BOOT ++ * @param ready output_para 0--not ready, 1--ready ++ * @return ++ * - 0: success ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_check_device_ready(uint8_t role, uint32_t proc_type, uint8_t *ready); ++ ++/** ++ * @brief mount VIO volume, synchronous interface. Invoked by VIO. ++ * @param glb_function_id the global function id of chip ++ * @param lun_id the lun id of this volume ++ * @param hash_paras hash item paras ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_volume_mount(uint16_t glb_function_id, uint32_t lun_id, ++ struct hvio_mount_para *hash_paras); ++ ++/** ++ * @brief umount VIO volume, synchronous interface. Invoked by VIO. ++ * @param glb_function_id the global function id of chip ++ * @param lun_id the lun id of this volume ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_volume_umount(uint16_t glb_function_id, uint32_t lun_id); ++ ++/** ++ * @brief update virtio device used or not. ++ * @param glb_function_id the global function index of the chip ++ * @param device_used virtio device is used or not ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_update_virtio_device_used(uint16_t glb_function_id, uint64_t device_used); ++ ++/** ++ * @brief release virtio blk vq resource. 
++ * @param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * @return ++ * - 0: success ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_virtio_blk_release_resource(uint16_t glb_function_id); ++ ++/** ++ * @brief query slave host cfg require and cfg global func to it ++ * @param null ++ * @return ++ */ ++void hvio_hotplug_cfg(void); ++ ++/** ++ * @brief port_id hot plug add ++ * @param port_id the global function index of the chip ++ * @return ++ * - 0: success ++ * - -1: invalid port_id ++ * - -2: repeat hot plug ++ * - others: fail, refer to errno.h ++ */ ++int hvio_hotplug_add(uint16_t port_id); ++ ++/** ++ * @brief port_id hot plug del ++ * @param port_id the global function index of the chip ++ * @return ++ * - 0: success ++ * - -1: invalid port_id ++ * - -2: repeat hot del ++ * - others: fail, refer to errno.h ++ */ ++int hvio_hotplug_del(uint16_t port_id); ++ ++/** ++ * @brief func_id hotplug del async api ++ * @param func_id the global function index of the chip ++ * @return ++ * - 0: success ++ * - 1: remove failed ++ * - 2: invalid func_id ++ * - 3: repeat hot del ++ * - others: fail, refer to errno.h ++ */ ++int hvio_hotplug_del_async(uint16_t port_id); ++ ++/** ++ * @brief check hotplug if enable ++ * @param null ++ * @return ++ * - true: enable ++ * - false: disable ++ */ ++bool hvio_hotplug_enable_check(void); ++ ++/** ++ * @brief get hot del state ++ * @param func_id the global function index of the chip ++ * @return ++ * - 0: success ++ * - 1: remove failed ++ * - 2: try again ++ */ ++int hvio_hotplug_del_async_check(uint16_t port_id); ++ ++/** ++ * @brief alloc virtio blk vq resource. ++ * @param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * @return ++ * - 0: success ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_virtio_blk_alloc_resource(uint16_t glb_function_id, uint16_t queue_num); ++ ++/** ++ * @brief vq bind core. ++ * @param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * @param queue_num the num of vqueue ++ * @return ++ * - 0: success ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_virtio_vq_bind_core(uint16_t glb_function_id, uint16_t queue_num); ++ ++/** ++ * @brief vq unbind core. ++ * @param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * @return ++ * - 0: success ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_virtio_vq_unbind_core(uint16_t glb_function_id); ++ ++#endif /* HIVIO_API_H */ +diff --git a/lib/ssam/ssam_driver/ssam_dbdf.c b/lib/ssam/ssam_driver/ssam_dbdf.c +new file mode 100644 +index 0000000..63a4d09 +--- /dev/null ++++ b/lib/ssam/ssam_driver/ssam_dbdf.c +@@ -0,0 +1,315 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. 
++ */ ++ ++#include "spdk/stdinc.h" ++ ++#include "spdk/log.h" ++#include "dpak_ssam.h" ++ ++#define SSAM_DBDF_DOMAIN_OFFSET 16 ++#define SSAM_DBDF_BUS_OFFSET 8 ++#define SSAM_DBDF_DEVICE_OFFSET 3 ++#define SSAM_DBDF_FUNC_OFFSET 0x7 ++#define SSAM_DBDF_DOMAIN_MAX 0xffff ++#define SSAM_DBDF_BUS_MAX 0xff ++#define SSAM_DBDF_DEVICE_MAX 0x1f ++#define SSAM_DBDF_FUNCTION_MAX 0x7 ++#define SSAM_DBDF_DOMAIN_MAX_LEN 4 ++#define SSAM_DBDF_BD_MAX_LEN 2 ++#define SSAM_DBDF_FUNCTION_MAX_LEN 1 ++#define SSAM_DBDF_MAX_STR_LEN 20 ++#define SSAM_STR_CONVERT_HEX 16 ++ ++ ++struct ssam_dbdf { ++ uint32_t domain; ++ uint32_t bus; ++ uint32_t device; ++ uint32_t function; ++}; ++ ++static int ++ssam_dbdf_cvt_str2num(char *input, uint16_t val_limit, uint32_t len_limit, ++ uint32_t *num_resolved) ++{ ++ char *end_ptr = NULL; ++ long int val = strtol(input, &end_ptr, SSAM_STR_CONVERT_HEX); ++ ++ if (strlen(input) > len_limit) { ++ return -EINVAL; ++ } ++ ++ if (end_ptr == NULL || end_ptr == input || *end_ptr != '\0') { ++ return -EINVAL; ++ } ++ if (val < 0 || val > val_limit) { ++ return -EINVAL; ++ } ++ ++ *num_resolved = (uint32_t)val; ++ return 0; ++} ++ ++/* resolve dbdf's domain */ ++static int ++ssam_dbdf_cvt_dom(char *str, struct ssam_dbdf *dbdf, ++ char **bus) ++{ ++ char *colon2 = NULL; ++ int rc; ++ ++ /* find second ":" from dbdf string */ ++ colon2 = strchr(str, ':'); ++ if (colon2 != NULL) { ++ *colon2++ = 0; ++ *bus = colon2; ++ if (str[0] != 0) { ++ /* convert domain number */ ++ rc = ssam_dbdf_cvt_str2num(str, SSAM_DBDF_DOMAIN_MAX, ++ SSAM_DBDF_DOMAIN_MAX_LEN, &dbdf->domain); ++ if (rc != 0) { ++ SPDK_ERRLOG("Invalid characters of domain number!\n"); ++ return rc; ++ } ++ } else { ++ SPDK_ERRLOG("domain number is blank!\n"); ++ return -EINVAL; ++ } ++ } else { ++ /* dbdf string does not contain domain number */ ++ *bus = str; ++ } ++ ++ return 0; ++} ++ ++/* resolve dbdf's bus */ ++static int ++ssam_dbdf_cvt_b(struct ssam_dbdf *dbdf, char *bus) ++{ ++ int rc; ++ ++ if (bus[0] != 0) { ++ /* convert bus number */ ++ rc = ssam_dbdf_cvt_str2num(bus, SSAM_DBDF_BUS_MAX, ++ SSAM_DBDF_BD_MAX_LEN, &dbdf->bus); ++ if (rc != 0) { ++ SPDK_ERRLOG("Invalid characters of bus number!\n"); ++ return rc; ++ } ++ } else { ++ SPDK_ERRLOG("bus number is blank!\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/* resolve dbdf's domain and bus part */ ++static int ++ssam_dbdf_cvt_domb(char *str, struct ssam_dbdf *dbdf, ++ char **colon_input, char **mid_input) ++{ ++ char *bus = NULL; ++ char *colon = *colon_input; ++ int rc; ++ ++ *colon++ = 0; ++ *mid_input = colon; ++ rc = ssam_dbdf_cvt_dom(str, dbdf, &bus); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ return ssam_dbdf_cvt_b(dbdf, bus); ++} ++ ++/* resolve dbdf's device */ ++static int ++ssam_dbdf_cvt_dev(struct ssam_dbdf *dbdf, char *mid) ++{ ++ int rc; ++ ++ if (mid[0] != 0) { ++ /* convert device number */ ++ rc = ssam_dbdf_cvt_str2num(mid, SSAM_DBDF_DEVICE_MAX, ++ SSAM_DBDF_BD_MAX_LEN, &dbdf->device); ++ if (rc != 0) { ++ SPDK_ERRLOG("Invalid characters of device number!\n"); ++ return rc; ++ } ++ } else { ++ SPDK_ERRLOG("device number is blank!\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_dbdf_cvt_f(struct ssam_dbdf *dbdf, char *dot) ++{ ++ int rc; ++ ++ if (dot != NULL && dot[0] != 0) { ++ /* convert function number */ ++ rc = ssam_dbdf_cvt_str2num(dot, SSAM_DBDF_FUNCTION_MAX, ++ SSAM_DBDF_FUNCTION_MAX_LEN, &dbdf->function); ++ if (rc != 0) { ++ SPDK_ERRLOG("Invalid characters of function number!\n"); ++ return rc; ++ } ++ } else { 
++ SPDK_ERRLOG("function number is blank!\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/* resolve dbdf's device and function part */ ++static int ++ssam_dbdf_cvt_devf(struct ssam_dbdf *dbdf, char **dot_input, char **mid_input) ++{ ++ char *dot = *dot_input; ++ int rc; ++ ++ if (dot != NULL) { ++ *dot++ = 0; ++ } else { ++ /* Input dbdf string does not contain "." */ ++ SPDK_ERRLOG("Invalid DBDF format\n"); ++ return -1; ++ } ++ ++ rc = ssam_dbdf_cvt_dev(dbdf, *mid_input); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ return ssam_dbdf_cvt_f(dbdf, dot); ++} ++ ++static uint32_t ++ssam_dbdf_assemble(const struct ssam_dbdf *dbdf) ++{ ++ return ((dbdf->domain << SSAM_DBDF_DOMAIN_OFFSET) | ++ (dbdf->bus << SSAM_DBDF_BUS_OFFSET) | ++ (dbdf->device << SSAM_DBDF_DEVICE_OFFSET) | ++ (dbdf->function & SSAM_DBDF_FUNC_OFFSET)); ++} ++ ++static int ++ssam_dbdf_cvt_dbdf(char *str, size_t len, uint32_t *dbdf) ++{ ++ if (dbdf == NULL) { ++ SPDK_ERRLOG("dbdf is null\n"); ++ return -1; ++ } ++ /* find ":" from dbdf string */ ++ char *colon = strrchr(str, ':'); ++ /* find "." from dbdf string */ ++ char *dot = NULL; ++ char *mid = str; ++ int rc; ++ struct ssam_dbdf st_dbdf = {0}; ++ ++ if (colon != NULL) { ++ rc = ssam_dbdf_cvt_domb(str, &st_dbdf, &colon, &mid); ++ if (rc != 0) { ++ return rc; ++ } ++ } else { ++ /* Input dbdf string does not contain ":" */ ++ SPDK_ERRLOG("Invalid DBDF format\n"); ++ return -EINVAL; ++ } ++ ++ dot = strchr((colon ? (colon + 1) : str), '.'); ++ rc = ssam_dbdf_cvt_devf(&st_dbdf, &dot, &mid); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ *dbdf = ssam_dbdf_assemble(&st_dbdf); ++ ++ return 0; ++} ++ ++/* convert dbdf from string to number */ ++int ++ssam_dbdf_str2num(char *str, uint32_t *dbdf) ++{ ++ int len; ++ char *dbdf_str = NULL; ++ int ret; ++ ++ if (str == NULL) { ++ SPDK_ERRLOG("dbdf str2num input str null!\n"); ++ return -EINVAL; ++ } ++ ++ if (dbdf == NULL) { ++ SPDK_ERRLOG("dbdf str2num output dbdf null!\n"); ++ return -EINVAL; ++ } ++ ++ len = strlen(str); ++ if (len == 0 || len > SSAM_DBDF_MAX_STR_LEN) { ++ SPDK_ERRLOG("dbdf str2num len %u error!\n", len); ++ return -ERANGE; ++ } ++ ++ dbdf_str = (char *)malloc(len + 1); ++ if (dbdf_str == NULL) { ++ return -ENOMEM; ++ } ++ ++ ret = snprintf(dbdf_str, len + 1, "%s", str); ++ if ((ret > len) || (ret <= 0)) { ++ SPDK_ERRLOG("dbdf str2num snprintf_s error\n"); ++ free(dbdf_str); ++ return -EINVAL; ++ } ++ ++ ret = ssam_dbdf_cvt_dbdf(dbdf_str, len, dbdf); ++ free(dbdf_str); ++ dbdf_str = NULL; ++ ++ return ret; ++} ++ ++static void ++ssam_dbdf_num2struct(uint32_t dbdf, struct ssam_dbdf *st_dbdf) ++{ ++ st_dbdf->domain = (dbdf >> SSAM_DBDF_DOMAIN_OFFSET) & SSAM_DBDF_DOMAIN_MAX; ++ st_dbdf->bus = (dbdf >> SSAM_DBDF_BUS_OFFSET) & SSAM_DBDF_BUS_MAX; ++ st_dbdf->device = (dbdf >> SSAM_DBDF_DEVICE_OFFSET) & SSAM_DBDF_DEVICE_MAX; ++ st_dbdf->function = dbdf & SSAM_DBDF_FUNCTION_MAX; ++ return; ++} ++ ++int ++ssam_dbdf_num2str(uint32_t dbdf, char *str, size_t len) ++{ ++ int ret; ++ struct ssam_dbdf st_dbdf = {0}; ++ ++ if (str == NULL) { ++ SPDK_ERRLOG("dbdf num2str output str null!\n"); ++ return -EINVAL; ++ } ++ ++ ssam_dbdf_num2struct(dbdf, &st_dbdf); ++ ++ ret = snprintf(str, len - 1, "%04x:%02x:%02x.%x", ++ st_dbdf.domain, st_dbdf.bus, st_dbdf.device, st_dbdf.function); ++ if ((ret >= (int)(len - 1)) || (ret <= 0)) { ++ SPDK_ERRLOG("dbdf num2str error\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} +diff --git a/lib/ssam/ssam_driver/ssam_driver.c b/lib/ssam/ssam_driver/ssam_driver.c +new file mode 100644 +index 
0000000..47fe6a1
+--- /dev/null
++++ b/lib/ssam/ssam_driver/ssam_driver.c
+@@ -0,0 +1,480 @@
++/* SPDX-License-Identifier: BSD-3-Clause
++ * Copyright (C) 2021-2025 Huawei Technologies Co.
++ * All rights reserved.
++ */
++
++#include "spdk/stdinc.h"
++#include "spdk/log.h"
++#include "ssam_driver_adapter.h"
++#include "dpak_ssam.h"
++
++#define SSAM_DRV_PRIORITY_LAST 65535
++#define VIRTIO_F_NOTIFICATION_DATA (1ULL << 38)
++#define SSAM_DPAK_DIR "/etc/dpak/"
++#define SSAM_CFG_DIR SSAM_DPAK_DIR SSAM_SERVER_NAME "/"
++#define SSAM_RECOVER_CFG_JSON SSAM_CFG_DIR "recover.json"
++#define SSAM_PARAM_CFG_JSON SSAM_CFG_DIR "parameter.json"
++#define SSAM_CONFIG_DIR_PERMIT 0750
++
++__attribute__((constructor(SSAM_DRV_PRIORITY_LAST))) int ssam_construct(void);
++
++__attribute__((destructor(SSAM_DRV_PRIORITY_LAST))) void ssam_destruct(void);
++
++int
++ssam_lib_init(struct ssam_lib_args *args_in, struct ssam_hostep_info *eps_out)
++{
++	hvio_lib_args_s hvio_args_in;
++	hvio_hostep_info_s *hostep_info = NULL;
++
++	if (args_in == NULL || eps_out == NULL) {
++		SPDK_ERRLOG("input parameter error, null pointer.\n");
++		return -EINVAL;
++	}
++
++	memset(&hvio_args_in, 0, sizeof(hvio_lib_args_s));
++	hvio_args_in.role = args_in->role;
++	hvio_args_in.core_num = args_in->core_num;
++	hvio_args_in.cb_ops.hvio_heap_malloc = (__typeof__(hvio_args_in.cb_ops.hvio_heap_malloc))
++			args_in->ssam_heap_malloc;
++	hvio_args_in.cb_ops.hvio_heap_free = args_in->ssam_heap_free;
++	hvio_args_in.host_dma_queue_per_chnl = args_in->dma_queue_num;
++	hvio_args_in.hash_mode = args_in->hash_mode;
++
++	hostep_info = (hvio_hostep_info_s *)(void *)eps_out;
++
++	return ssam_drv_lib_init(&hvio_args_in, hostep_info);
++}
++
++int
++ssam_lib_exit(void)
++{
++	return ssam_drv_lib_deinit();
++}
++
++int
++ssam_setup_function(uint16_t pf_id, uint16_t num_vf, enum ssam_device_type dev_type)
++{
++	enum device_type type;
++	switch (dev_type) {
++	case SSAM_DEVICE_VIRTIO_BLK:
++		type = DEVICE_VIRTIO_BLK;
++		break;
++	case SSAM_DEVICE_VIRTIO_SCSI:
++		type = DEVICE_VIRTIO_SCSI;
++		break;
++	case SSAM_DEVICE_VIRTIO_FS:
++		type = DEVICE_VIRTIO_FS;
++		break;
++	default:
++		type = DEVICE_VIRTIO_MAX;
++		break;
++	}
++
++	return ssam_drv_setup_function(pf_id, num_vf, type, type);
++}
++
++int
++ssam_write_function_config(struct ssam_function_config *cfg)
++{
++	struct function_config hvio_function_cfg;
++
++	if (cfg == NULL) {
++		SPDK_ERRLOG("libssam input parameter error, null pointer.\n");
++		return -EINVAL;
++	}
++
++	if ((cfg->virtio_config.device_feature & VIRTIO_F_NOTIFICATION_DATA) != 0) {
++		SPDK_ERRLOG("Virtio feature VIRTIO_F_NOTIFICATION_DATA is not supported.\n");
++		return -EINVAL;
++	}
++
++	memset(&hvio_function_cfg, 0x0, sizeof(struct function_config));
++
++	hvio_function_cfg.function_id = (uint32_t)cfg->gfunc_id;
++	switch (cfg->type) {
++	case SSAM_DEVICE_VIRTIO_BLK:
++		hvio_function_cfg.type = DEVICE_VIRTIO_BLK;
++		break;
++	case SSAM_DEVICE_VIRTIO_SCSI:
++		hvio_function_cfg.type = DEVICE_VIRTIO_SCSI;
++		break;
++	case SSAM_DEVICE_VIRTIO_FS:
++		hvio_function_cfg.type = DEVICE_VIRTIO_FS;
++		break;
++	default:
++		hvio_function_cfg.type = DEVICE_VIRTIO_MAX;
++		break;
++	}
++
++	memcpy(&hvio_function_cfg.config.virtio, &cfg->virtio_config, sizeof(struct ssam_virtio_config));
++	return ssam_drv_write_function_config(&hvio_function_cfg);
++}
++
++int
++ssam_send_action(uint16_t gfunc_id, enum ssam_function_action action, const void *data,
++		uint16_t data_len)
++{
++	enum function_action func_act;
++
++	if (data == NULL || data_len == 0) {
++		SPDK_ERRLOG("libssam input parameter error.\n");
++		return -EINVAL;
++	}
++
++	switch (action) {
++	case SSAM_FUNCTION_ACTION_START:
++		func_act = FUNCTION_ACTION_START;
++		break;
++
++	case SSAM_FUNCTION_ACTION_STOP:
++		func_act = FUNCTION_ACTION_STOP;
++		break;
++
++	case SSAM_FUNCTION_ACTION_RESET:
++		func_act = FUNCTION_ACTION_RESET;
++		break;
++
++	case SSAM_FUNCTION_ACTION_CONFIG_CHANGE:
++		func_act = FUNCTION_ACTION_CONFIG_CHANGE;
++		break;
++
++	case SSAM_FUNCTION_ACTION_SCSI_EVENT:
++		func_act = FUNCTION_ACTION_SCSI_EVENT;
++		break;
++
++	default:
++		func_act = FUNCTION_ACTION_MAX;
++		break;
++	}
++
++	return ssam_drv_send_action(gfunc_id, func_act, data, data_len);
++}
++
++int
++ssam_function_mount(uint16_t gfunc_id, uint32_t lun_id, enum ssam_mount_type type, uint16_t tid)
++{
++	struct hvio_mount_para hash_paras;
++
++	memset(&hash_paras, 0x0, sizeof(struct hvio_mount_para));
++
++	hash_paras.algo_type = type;
++	hash_paras.key[0] = tid;
++
++	return ssam_drv_volume_mount(gfunc_id, lun_id, &hash_paras);
++}
++
++int
++ssam_function_umount(uint16_t gfunc_id, uint32_t lun_id)
++{
++	return ssam_drv_volume_umount(gfunc_id, lun_id);
++}
++
++int
++ssam_request_poll(uint16_t tid, uint16_t poll_num, struct ssam_request **io_req)
++{
++	if (io_req == NULL || poll_num > SSAM_MAX_REQ_POLL_SIZE) {
++		SPDK_ERRLOG("ssam request poll input parameter error.\n");
++		return -EINVAL;
++	}
++
++	return ssam_drv_vmio_req_poll_batch(tid, poll_num, (struct vmio_request **)io_req);
++}
++
++int
++ssam_request_poll_ext(uint16_t tid, uint16_t poll_num, struct ssam_request **io_req,
++		struct ssam_request_poll_opt *poll_opt)
++{
++	if (io_req == NULL || poll_num > SSAM_MAX_REQ_POLL_SIZE || poll_opt == NULL) {
++		SPDK_ERRLOG("ssam request poll ext input parameter error.\n");
++		return -EINVAL;
++	}
++
++	hvio_vmio_req_poll_opt_s hvio_poll_opt = {
++		.sge1_iov = poll_opt->sge1_iov,
++		.queue_id = poll_opt->queue_id,
++	};
++
++	return ssam_drv_vmio_req_poll_batch_ext(tid, poll_num, (struct vmio_request **)io_req,
++			&hvio_poll_opt);
++}
++
++int
++ssam_dma_data_request(uint16_t tid, struct ssam_dma_request *dma_req)
++{
++	if (dma_req == NULL || dma_req->direction >= SSAM_REQUEST_DATA_MAX) {
++		SPDK_ERRLOG("ssam dma request input parameter error.\n");
++		return -EINVAL;
++	}
++
++	hvio_host_dma_req_s *mode_para = (hvio_host_dma_req_s *)dma_req;
++
++	return ssam_drv_host_dma_request(tid, mode_para);
++}
++
++int
++ssam_dma_rsp_poll(uint16_t tid, uint16_t poll_num, struct ssam_dma_rsp *dma_rsp)
++{
++	if (dma_rsp == NULL || poll_num > SSAM_MAX_RESP_POLL_SIZE) {
++		SPDK_ERRLOG("resp poll input parameter error.\n");
++		return -EINVAL;
++	}
++
++	return ssam_drv_host_dma_rsp_poll(tid, poll_num, (hvio_host_dma_rsp_s *)dma_rsp);
++}
++
++static enum vmio_type
++ssam_io_type_to_vmio(enum ssam_io_type io_type)
++{
++	enum vmio_type vmio_type;
++
++	switch (io_type) {
++	case SSAM_VIRTIO_BLK_IO:
++		vmio_type = VMIO_TYPE_VIRTIO_BLK_IO;
++		break;
++
++	case SSAM_VIRTIO_SCSI_IO:
++		vmio_type = VMIO_TYPE_VIRTIO_SCSI_IO;
++		break;
++
++	case SSAM_VIRTIO_SCSI_CTRL:
++		vmio_type = VMIO_TYPE_VIRTIO_SCSI_CTRL;
++		break;
++
++	case SSAM_VIRTIO_SCSI_EVT:
++		vmio_type = VMIO_TYPE_VIRTIO_SCSI_EVT;
++		break;
++
++	case SSAM_VIRTIO_FUNC_STATUS:
++		vmio_type = VMIO_TYPE_VIRTIO_FUNC_STATUS;
++		break;
++
++	case SSAM_VIRTIO_FS_IO:
++		vmio_type = VMIO_TYPE_VIRTIO_FS_IO;
++		break;
++
++	case SSAM_VIRTIO_FS_HIPRI:
++		vmio_type = VMIO_TYPE_VIRTIO_FS_HIPRI;
++		break;
++
++	default:
++		vmio_type = VMIO_TYPE_RSVD;
++		break;
++	}
++
++	return vmio_type;
++}
++
++int
++ssam_io_complete(uint16_t tid, struct ssam_io_response *resp) ++{ ++ struct vmio_response vmio_res; ++ struct virtio_response *virtio_res = NULL; ++ ++ if (resp == NULL) { ++ SPDK_ERRLOG("ssam io complete input paramter error, null pointer.\n"); ++ return -EINVAL; ++ } ++ ++ memset(&vmio_res, 0x0, sizeof(vmio_res)); ++ vmio_res.glb_function_id = resp->gfunc_id; ++ vmio_res.iocb_id = resp->iocb_id; ++ vmio_res.type = ssam_io_type_to_vmio(resp->req->type); ++ ++ switch (resp->status) { ++ case SSAM_IO_STATUS_OK: ++ vmio_res.status = VMIO_STATUS_OK; ++ break; ++ case SSAM_IO_STATUS_EMPTY: ++ vmio_res.status = VMIO_STATUS_VQ_EMPTY; ++ break; ++ default: ++ vmio_res.status = VMIO_STATUS_ERROR; ++ break; ++ } ++ ++ vmio_res.req = (struct vmio_request *)(void *)resp->req; ++ vmio_res.flr_seq = resp->flr_seq; ++ ++ virtio_res = (struct virtio_response *)&vmio_res.virtio; ++ virtio_res->used_len = 0; /* virtio-blk insensitive of this value, set 0 */ ++ virtio_res->rsp_len = resp->data.rsp_len; ++ virtio_res->iovcnt = resp->data.iovcnt; ++ virtio_res->iovs = resp->data.iovs; ++ virtio_res->rsp = resp->data.rsp; ++ ++ return ssam_drv_vmio_complete(tid, &vmio_res); ++} ++ ++int ++ssam_vmio_rxq_create(uint16_t *queue_id_out) ++{ ++ if (queue_id_out == NULL) { ++ return -EINVAL; ++ } ++ return ssam_drv_vmio_rxq_create(queue_id_out); ++} ++ ++int ++ssam_update_virtio_device_used(uint16_t glb_function_id, uint64_t device_used) ++{ ++ return ssam_drv_update_virtio_device_used(glb_function_id, device_used); ++} ++ ++int ++ssam_virtio_blk_resize(uint16_t gfunc_id, uint64_t capacity) ++{ ++ return ssam_drv_update_virtio_blk_capacity(gfunc_id, capacity); ++} ++ ++int ++ssam_get_funcid_by_dbdf(uint32_t dbdf, uint16_t *gfunc_id) ++{ ++ if (gfunc_id == NULL) { ++ SPDK_ERRLOG("libssam input paramter error, null pointer.\n"); ++ return -EINVAL; ++ } ++ ++ return ssam_drv_get_glb_function_id_by_dbdf(dbdf, gfunc_id); ++} ++ ++int ++ssam_check_device_ready(uint8_t role, uint32_t proc_type, uint8_t *ready) ++{ ++ if (ready == NULL) { ++ SPDK_ERRLOG("libssam input paramter error, null pointer.\n"); ++ return -EINVAL; ++ } ++ ++ return ssam_drv_check_device_ready(role, proc_type, ready); ++} ++ ++int ++ssam_get_hot_upgrade_state(void) ++{ ++ return ssam_drv_get_hot_upgrade_state(); ++} ++ ++void ++ssam_hotplug_cfg(void) ++{ ++ ssam_drv_hotplug_cfg(); ++} ++ ++int ++ssam_hotplug_add(uint16_t port_id) ++{ ++ return ssam_drv_hotplug_add(port_id); ++} ++ ++int ++ssam_hotplug_del(uint16_t port_id) ++{ ++ return ssam_drv_hotplug_del(port_id); ++} ++ ++int ++ssam_hotplug_del_async(uint16_t port_id) ++{ ++ return ssam_drv_hotplug_del_async(port_id); ++} ++ ++bool ++ssam_hotplug_enable_check(void) ++{ ++ return ssam_drv_hotplug_enable_check(); ++} ++ ++int ++ssam_hotplug_del_async_check(uint16_t port_id) ++{ ++ return ssam_drv_hotplug_del_async_check(port_id); ++} ++ ++int ++ssam_virtio_blk_release_resource(uint16_t glb_function_id) ++{ ++ return ssam_drv_virtio_blk_release_resource(glb_function_id); ++} ++ ++int ++ssam_virtio_blk_alloc_resource(uint16_t glb_function_id, uint16_t queue_num) ++{ ++ return ssam_drv_virtio_blk_alloc_resource(glb_function_id, queue_num); ++} ++ ++int ++ssam_virtio_vq_bind_core(uint16_t glb_function_id, uint16_t queue_num) ++{ ++ return ssam_drv_virtio_vq_bind_core(glb_function_id, queue_num); ++} ++ ++int ++ssam_virtio_vq_unbind_core(uint16_t glb_function_id) ++{ ++ return ssam_drv_virtio_vq_unbind_core(glb_function_id); ++} ++ ++static int ++ssam_try_mkdir(const char *dir, mode_t mode) ++{ ++ int 
rc;
++
++	rc = mkdir(dir, mode);
++	if (rc < 0 && errno != EEXIST) {
++		rc = -errno; /* capture errno before SPDK_ERRLOG can clobber it */
++		SPDK_ERRLOG("ssam try mkdir error, dir: '%s': %s\n", dir, strerror(-rc));
++		return rc;
++	}
++	return 0;
++}
++
++int
++spdk_ssam_rc_preinit(void)
++{
++	int rc;
++
++	rc = ssam_try_mkdir(SSAM_DPAK_DIR, SSAM_CONFIG_DIR_PERMIT);
++	if (rc != 0) {
++		return rc;
++	}
++
++	rc = ssam_try_mkdir(SSAM_CFG_DIR, SSAM_CONFIG_DIR_PERMIT);
++	if (rc != 0) {
++		return rc;
++	}
++
++	if (access(SSAM_RECOVER_CFG_JSON, F_OK) != 0) {
++		return 0;
++	}
++
++	return 1;
++}
++
++char *
++ssam_rc_get_recover_json_file_path(void)
++{
++	return (char *)SSAM_RECOVER_CFG_JSON;
++}
++
++char *
++ssam_rc_get_param_json_file_path(void)
++{
++	return (char *)SSAM_PARAM_CFG_JSON;
++}
++
++__attribute__((constructor(SSAM_DRV_PRIORITY_LAST))) int
++ssam_construct(void)
++{
++	int ret = ssam_drv_ops_init();
++	if (ret != 0) {
++		SPDK_ERRLOG("ssam drv ops init failed\n");
++		return -1;
++	}
++
++	SPDK_NOTICELOG("ssam construct finish\n");
++	return 0;
++}
++
++__attribute__((destructor(SSAM_DRV_PRIORITY_LAST))) void
++ssam_destruct(void)
++{
++	ssam_drv_ops_uninit();
++}
+diff --git a/lib/ssam/ssam_driver/ssam_driver_adapter.c b/lib/ssam/ssam_driver/ssam_driver_adapter.c
+new file mode 100644
+index 0000000..a87b58a
+--- /dev/null
++++ b/lib/ssam/ssam_driver/ssam_driver_adapter.c
+@@ -0,0 +1,626 @@
++/* SPDX-License-Identifier: BSD-3-Clause
++ * Copyright (C) 2021-2025 Huawei Technologies Co.
++ * All rights reserved.
++ */
++
++#include <dlfcn.h>
++#include "spdk/stdinc.h"
++#include "spdk/log.h"
++#include "ssam_driver_adapter.h"
++
++#define SSAM_DRV_SHARD_LIBRARY "/usr/lib64/libhivio.so"
++#define SSAM_DRV_FUNC_NO_PTR (-1)
++#define SSAM_DRV_ADD_FUNC(class, name) {#name, (void**)&(class).name}
++#define SSAM_FUNC_PTR_OR_ERR_RET(func, retval) do { \
++	if ((func) == NULL) \
++		return retval; \
++} while (0)
++
++struct ssam_drv_ops_map {
++	char *name;
++	void **func;
++};
++
++static void *g_ssam_drv_handler = NULL;
++static struct ssam_drv_ops g_ssam_drv_ops = { 0 };
++typedef void (*lib_dlsym_uninit_cb_t)(void);
++static lib_dlsym_uninit_cb_t g_lib_dlsym_uninit_cb = NULL;
++
++static struct ssam_drv_ops_map g_ssam_drv_ops_map[] = {
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_host_dma_request),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_vmio_req_poll_batch),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_vmio_req_poll_batch_ext),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_lib_deinit),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_volume_umount),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_lib_init),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_volume_mount),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_host_dma_rsp_poll),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_get_glb_function_id_by_dbdf),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_send_action),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_update_virtio_blk_capacity),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_setup_function),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_check_device_ready),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_write_function_config),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_get_hot_upgrade_state),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_vmio_complete),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_hotplug_cfg),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_hotplug_add),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_hotplug_del),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_hotplug_del_async),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_hotplug_enable_check),
++	SSAM_DRV_ADD_FUNC(g_ssam_drv_ops,
hvio_hotplug_del_async_check), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_vmio_rxq_create), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_update_virtio_device_used), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_virtio_blk_alloc_resource), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_virtio_blk_release_resource), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_virtio_vq_bind_core), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_virtio_vq_unbind_core), ++}; ++ ++void ssam_lib_dlsym_uninit_cb_register(lib_dlsym_uninit_cb_t cb); ++ ++ ++struct ssam_drv_ops * ++ssam_get_drv_ops(void) ++{ ++ return &g_ssam_drv_ops; ++} ++ ++static void ++ssam_drv_ops_cb_uninit(void) ++{ ++ if (g_ssam_drv_handler != NULL) { ++ memset(&g_ssam_drv_ops, 0, sizeof(struct ssam_drv_ops)); ++ dlclose(g_ssam_drv_handler); ++ } ++} ++ ++static int ++ssam_drv_ops_init_sub(void *handler, struct ssam_drv_ops_map driver_map[], int size) ++{ ++ for (int index = 0; index < size; index++) { ++ if (*driver_map[index].func != NULL) { ++ continue; ++ } ++ ++ *driver_map[index].func = dlsym(handler, driver_map[index].name); ++ if (*driver_map[index].func == NULL) { ++ SPDK_ERRLOG("%s load func %s fail: %s", SSAM_DRV_SHARD_LIBRARY, driver_map[index].name, dlerror()); ++ return -1; ++ } ++ } ++ return 0; ++} ++ ++void ++ssam_lib_dlsym_uninit_cb_register(lib_dlsym_uninit_cb_t cb) ++{ ++ g_lib_dlsym_uninit_cb = cb; ++} ++ ++int ++ssam_drv_ops_init(void) ++{ ++ int ret = 0; ++ void *handler = dlopen(SSAM_DRV_SHARD_LIBRARY, RTLD_NOW); ++ if (handler == NULL) { ++ SPDK_ERRLOG("%s load err %s\n", SSAM_DRV_SHARD_LIBRARY, dlerror()); ++ return -1; ++ } ++ ++ ret = ssam_drv_ops_init_sub(handler, g_ssam_drv_ops_map, ++ sizeof(g_ssam_drv_ops_map) / sizeof(g_ssam_drv_ops_map[0])); ++ if (ret != 0) { ++ SPDK_ERRLOG("hwoff drv ops init: common api load failed"); ++ dlclose(handler); ++ return -1; ++ } ++ ++ g_ssam_drv_handler = handler; ++ ssam_lib_dlsym_uninit_cb_register(ssam_drv_ops_cb_uninit); ++ ++ return 0; ++} ++ ++void ++ssam_drv_ops_uninit(void) ++{ ++ if (g_lib_dlsym_uninit_cb != NULL) { ++ g_lib_dlsym_uninit_cb(); ++ g_lib_dlsym_uninit_cb = NULL; ++ } ++} ++ ++int ++ssam_drv_host_dma_request(uint16_t chnl_id, hvio_host_dma_req_s *req) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_host_dma_request, SSAM_DRV_FUNC_NO_PTR); ++ ret = ops->hvio_host_dma_request(chnl_id, req); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_vmio_req_poll_batch(uint16_t tid, uint16_t poll_num, struct vmio_request **req) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_vmio_req_poll_batch, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_vmio_req_poll_batch(tid, poll_num, req); ++ if (ret < 0) { ++ SPDK_ERRLOG("hvio_vmio_req_poll_batch exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return ret; ++} ++ ++int ++ssam_drv_vmio_req_poll_batch_ext(uint16_t tid, uint16_t poll_num, struct vmio_request **req, ++ hvio_vmio_req_poll_opt_s *poll_opt) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_vmio_req_poll_batch_ext, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_vmio_req_poll_batch_ext(tid, poll_num, req, poll_opt); ++ if (ret < 0) { ++ SPDK_ERRLOG("hvio_vmio_req_poll_batch_ext exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return ret; ++} ++ ++int ++ssam_drv_lib_deinit(void) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops 
= ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_lib_deinit, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_lib_deinit(); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_lib_deinit exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_volume_umount(uint16_t glb_function_id, uint32_t lun_id) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_volume_umount, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_volume_umount(glb_function_id, lun_id); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_volume_umount exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_lib_init(hvio_lib_args_s *args_in, hvio_hostep_info_s *eps_out) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_lib_init, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_lib_init(args_in, eps_out); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_lib_init exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_volume_mount(uint16_t glb_function_id, uint32_t lun_id, struct hvio_mount_para *hash_paras) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_volume_mount, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_volume_mount(glb_function_id, lun_id, hash_paras); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_volume_mount exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_host_dma_rsp_poll(uint16_t chnl_id, uint16_t poll_num, hvio_host_dma_rsp_s *rsp) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_host_dma_rsp_poll, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_host_dma_rsp_poll(chnl_id, poll_num, rsp); ++ if (ret < 0) { ++ SPDK_ERRLOG("hvio_host_dma_rsp_poll exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return ret; ++} ++ ++int ++ssam_drv_get_glb_function_id_by_dbdf(uint32_t dbdf, uint16_t *glb_function_id) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_get_glb_function_id_by_dbdf, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_get_glb_function_id_by_dbdf(dbdf, glb_function_id); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_get_glb_function_id_by_dbdf exec fail, ret=%d", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_send_action(uint16_t glb_function_id, enum function_action action, const void *data, ++ uint16_t data_len) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_send_action, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_send_action(glb_function_id, action, data, data_len); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_send_action exec fail, ret=%d", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_update_virtio_blk_capacity(uint16_t glb_function_id, uint64_t capacity) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_update_virtio_blk_capacity, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_update_virtio_blk_capacity(glb_function_id, capacity); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_update_virtio_blk_capacity exec fail, ret=%d", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_setup_function(uint16_t pf_id, uint16_t num_vf, enum device_type pf_type, ++ enum device_type vf_type) ++{ ++ int 
ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_setup_function, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_setup_function(pf_id, num_vf, pf_type, vf_type); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_setup_function exec fail, ret=%d", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_check_device_ready(uint8_t role, uint32_t proc_type, uint8_t *ready) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_check_device_ready, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_check_device_ready(role, proc_type, ready); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_check_device_ready exec fail, ret=%d", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_write_function_config(struct function_config *cfg) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_write_function_config, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_write_function_config(cfg); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_write_function_config exec fail, ret=%d", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_get_hot_upgrade_state(void) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_get_hot_upgrade_state, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_get_hot_upgrade_state(); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_get_hot_upgrade_state exec fail, ret=%d", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_vmio_complete(uint16_t tid, struct vmio_response *resp) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_vmio_complete, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_vmio_complete(tid, resp); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_hotplug_cfg(void) ++{ ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_hotplug_cfg, SSAM_DRV_FUNC_NO_PTR); ++ ++ ops->hvio_hotplug_cfg(); ++ return 0; ++} ++ ++int ++ssam_drv_hotplug_add(uint16_t port_id) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_hotplug_add, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_hotplug_add(port_id); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_hotplug_add exec fail, ret=%d", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_hotplug_del(uint16_t port_id) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_hotplug_del, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_hotplug_del(port_id); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_hotplug_del exec fail, ret=%d", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_hotplug_del_async(uint16_t port_id) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_hotplug_del_async, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_hotplug_del_async(port_id); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_hotplug_del_async exec fail, ret=%d", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++bool ++ssam_drv_hotplug_enable_check(void) ++{ ++ bool ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_hotplug_enable_check, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = 
ops->hvio_hotplug_enable_check();
++	if (ret == false) {
++		SPDK_ERRLOG("hvio_hotplug_enable_check exec fail, ret=%d\n", ret);
++		return ret;
++	}
++
++	return true;
++}
++
++int
++ssam_drv_hotplug_del_async_check(uint16_t port_id)
++{
++	int ret;
++	struct ssam_drv_ops *ops = NULL;
++
++	ops = ssam_get_drv_ops();
++	SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_hotplug_del_async_check, SSAM_DRV_FUNC_NO_PTR);
++
++	ret = ops->hvio_hotplug_del_async_check(port_id);
++	if (ret != 0) {
++		SPDK_ERRLOG("hvio_hotplug_del_async_check exec fail, ret=%d\n", ret);
++		return ret;
++	}
++
++	return 0;
++}
++
++int
++ssam_drv_vmio_rxq_create(uint16_t *queue_id_out)
++{
++	int ret;
++	struct ssam_drv_ops *ops = NULL;
++
++	ops = ssam_get_drv_ops();
++	SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_vmio_rxq_create, SSAM_DRV_FUNC_NO_PTR);
++
++	ret = ops->hvio_vmio_rxq_create(queue_id_out);
++	if (ret != 0) {
++		return ret;
++	}
++
++	return 0;
++}
++
++int
++ssam_drv_update_virtio_device_used(uint16_t glb_function_id, uint64_t device_used)
++{
++	int ret;
++	struct ssam_drv_ops *ops = NULL;
++
++	ops = ssam_get_drv_ops();
++	SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_update_virtio_device_used, SSAM_DRV_FUNC_NO_PTR);
++
++	ret = ops->hvio_update_virtio_device_used(glb_function_id, device_used);
++	if (ret != 0) {
++		return ret;
++	}
++
++	return 0;
++}
++
++int
++ssam_drv_virtio_blk_release_resource(uint16_t glb_function_id)
++{
++	int ret;
++	struct ssam_drv_ops *ops = NULL;
++
++	ops = ssam_get_drv_ops();
++	SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_virtio_blk_release_resource, SSAM_DRV_FUNC_NO_PTR);
++
++	ret = ops->hvio_virtio_blk_release_resource(glb_function_id);
++	if (ret != 0) {
++		return ret;
++	}
++
++	return 0;
++}
++
++int
++ssam_drv_virtio_blk_alloc_resource(uint16_t glb_function_id, uint16_t queue_num)
++{
++	int ret;
++	struct ssam_drv_ops *ops = NULL;
++
++	ops = ssam_get_drv_ops();
++	SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_virtio_blk_alloc_resource, SSAM_DRV_FUNC_NO_PTR);
++
++	ret = ops->hvio_virtio_blk_alloc_resource(glb_function_id, queue_num);
++	if (ret != 0) {
++		return ret;
++	}
++
++	return 0;
++}
++
++int
++ssam_drv_virtio_vq_bind_core(uint16_t glb_function_id, uint16_t queue_num)
++{
++	int ret;
++	struct ssam_drv_ops *ops = NULL;
++
++	ops = ssam_get_drv_ops();
++	SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_virtio_vq_bind_core, SSAM_DRV_FUNC_NO_PTR);
++
++	ret = ops->hvio_virtio_vq_bind_core(glb_function_id, queue_num);
++	if (ret != 0) {
++		SPDK_ERRLOG("hvio_virtio_vq_bind_core exec fail, ret=%d\n", ret);
++		return ret;
++	}
++
++	return 0;
++}
++
++int
++ssam_drv_virtio_vq_unbind_core(uint16_t glb_function_id)
++{
++	int ret;
++	struct ssam_drv_ops *ops = NULL;
++
++	ops = ssam_get_drv_ops();
++	SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_virtio_vq_unbind_core, SSAM_DRV_FUNC_NO_PTR);
++
++	ret = ops->hvio_virtio_vq_unbind_core(glb_function_id);
++	if (ret != 0) {
++		SPDK_ERRLOG("hvio_virtio_vq_unbind_core exec fail, ret=%d\n", ret);
++		return ret;
++	}
++
++	return 0;
++}
+diff --git a/lib/ssam/ssam_driver/ssam_driver_adapter.h b/lib/ssam/ssam_driver/ssam_driver_adapter.h
+new file mode 100644
+index 0000000..ca95404
+--- /dev/null
++++ b/lib/ssam/ssam_driver/ssam_driver_adapter.h
+@@ -0,0 +1,81 @@
++/* SPDX-License-Identifier: BSD-3-Clause
++ * Copyright (C) 2021-2025 Huawei Technologies Co.
++ * All rights reserved.
++ */ ++ ++#ifndef SSAM_DRIVER_ADAPTER_H ++#define SSAM_DRIVER_ADAPTER_H ++ ++#include "hivio_api.h" ++ ++struct ssam_drv_ops { ++ int (*hvio_host_dma_request)(uint16_t chnl_id, hvio_host_dma_req_s *req); ++ int (*hvio_vmio_req_poll_batch)(uint16_t tid, uint16_t poll_num, struct vmio_request **req); ++ int (*hvio_vmio_req_poll_batch_ext)(uint16_t tid, uint16_t poll_num, struct vmio_request **req, ++ hvio_vmio_req_poll_opt_s *poll_opt); ++ int (*hvio_lib_deinit)(void); ++ int (*hvio_volume_umount)(uint16_t glb_function_id, uint32_t lun_id); ++ int (*hvio_lib_init)(hvio_lib_args_s *args_in, hvio_hostep_info_s *eps_out); ++ int (*hvio_volume_mount)(uint16_t glb_function_id, uint32_t lun_id, ++ struct hvio_mount_para *hash_paras); ++ int (*hvio_host_dma_rsp_poll)(uint16_t chnl_id, uint16_t poll_num, hvio_host_dma_rsp_s *rsp); ++ int (*hvio_get_glb_function_id_by_dbdf)(uint32_t dbdf, uint16_t *glb_function_id); ++ int (*hvio_send_action)(uint16_t glb_function_id, enum function_action action, const void *data, ++ uint16_t data_len); ++ int (*hvio_update_virtio_blk_capacity)(uint16_t glb_function_id, uint64_t capacity); ++ int (*hvio_setup_function)(uint16_t pf_id, uint16_t num_vf, enum device_type pf_type, ++ enum device_type vf_type); ++ int (*hvio_check_device_ready)(uint8_t role, uint32_t proc_type, uint8_t *ready); ++ int (*hvio_write_function_config)(struct function_config *cfg); ++ int (*hvio_get_hot_upgrade_state)(void); ++ int (*hvio_vmio_complete)(uint16_t tid, struct vmio_response *resp); ++ int (*hvio_vmio_rxq_create)(uint16_t *queue_id_out); ++ int (*hvio_update_virtio_device_used)(uint16_t glb_function_id, uint64_t device_used); ++ int (*hvio_virtio_blk_release_resource)(uint16_t glb_function_id); ++ int (*hvio_virtio_blk_alloc_resource)(uint16_t glb_function_id, uint16_t queue_num); ++ void (*hvio_hotplug_cfg)(void); ++ int (*hvio_hotplug_add)(uint16_t port_id); ++ int (*hvio_hotplug_del)(uint16_t port_id); ++ int (*hvio_hotplug_del_async)(uint16_t port_id); ++ bool (*hvio_hotplug_enable_check)(void); ++ int (*hvio_hotplug_del_async_check)(uint16_t port_id); ++ int (*hvio_virtio_vq_bind_core)(uint16_t glb_function_id, uint16_t queue_num); ++ int (*hvio_virtio_vq_unbind_core)(uint16_t glb_function_id); ++}; ++ ++int ssam_drv_ops_init(void); ++void ssam_drv_ops_uninit(void); ++struct ssam_drv_ops *ssam_get_drv_ops(void); ++int ssam_drv_host_dma_request(uint16_t chnl_id, hvio_host_dma_req_s *req); ++int ssam_drv_vmio_req_poll_batch(uint16_t tid, uint16_t poll_num, struct vmio_request **req); ++int ssam_drv_vmio_req_poll_batch_ext(uint16_t tid, uint16_t poll_num, struct vmio_request **req, ++ hvio_vmio_req_poll_opt_s *poll_opt); ++int ssam_drv_lib_deinit(void); ++int ssam_drv_volume_umount(uint16_t glb_function_id, uint32_t lun_id); ++int ssam_drv_lib_init(hvio_lib_args_s *args_in, hvio_hostep_info_s *eps_out); ++int ssam_drv_volume_mount(uint16_t glb_function_id, uint32_t lun_id, ++ struct hvio_mount_para *hash_paras); ++int ssam_drv_host_dma_rsp_poll(uint16_t chnl_id, uint16_t poll_num, hvio_host_dma_rsp_s *rsp); ++int ssam_drv_get_glb_function_id_by_dbdf(uint32_t dbdf, uint16_t *glb_function_id); ++int ssam_drv_send_action(uint16_t glb_function_id, enum function_action action, const void *data, ++ uint16_t data_len); ++int ssam_drv_update_virtio_blk_capacity(uint16_t glb_function_id, uint64_t capacity); ++int ssam_drv_setup_function(uint16_t pf_id, uint16_t num_vf, enum device_type pf_type, ++ enum device_type vf_type); ++int ssam_drv_check_device_ready(uint8_t role, uint32_t 
proc_type, uint8_t *ready); ++int ssam_drv_write_function_config(struct function_config *cfg); ++int ssam_drv_get_hot_upgrade_state(void); ++int ssam_drv_vmio_complete(uint16_t tid, struct vmio_response *resp); ++int ssam_drv_vmio_rxq_create(uint16_t *queue_id_out); ++int ssam_drv_update_virtio_device_used(uint16_t glb_function_id, uint64_t device_used); ++int ssam_drv_virtio_blk_release_resource(uint16_t glb_function_id); ++int ssam_drv_virtio_blk_alloc_resource(uint16_t glb_function_id, uint16_t queue_num); ++int ssam_drv_hotplug_cfg(void); ++int ssam_drv_hotplug_add(uint16_t port_id); ++int ssam_drv_hotplug_del(uint16_t port_id); ++int ssam_drv_hotplug_del_async(uint16_t port_id); ++bool ssam_drv_hotplug_enable_check(void); ++int ssam_drv_hotplug_del_async_check(uint16_t port_id); ++int ssam_drv_virtio_vq_bind_core(uint16_t glb_function_id, uint16_t queue_num); ++int ssam_drv_virtio_vq_unbind_core(uint16_t glb_function_id); ++#endif +diff --git a/lib/ssam/ssam_driver/ssam_mempool.c b/lib/ssam/ssam_driver/ssam_mempool.c +new file mode 100644 +index 0000000..c57f6dd +--- /dev/null ++++ b/lib/ssam/ssam_driver/ssam_mempool.c +@@ -0,0 +1,774 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. ++ */ ++ ++#include "spdk/stdinc.h" ++ ++#include "spdk/log.h" ++#include "spdk/env.h" ++#include "dpak_ssam.h" ++ ++#define MP_CK_HEADER_LEN sizeof(struct ssam_mp_chunk) ++#define MP_CK_END_LEN sizeof(struct ssam_mp_chunk*) ++#define MP_CK_CB_LEN (MP_CK_HEADER_LEN + MP_CK_END_LEN) ++ ++#define SHIFT_2MB 21 /* (1 << 21) == 2MB */ ++#define VALUE_2MB (1ULL << SHIFT_2MB) ++#define SHIFT_1GB 30 /* (1 << 30) == 1G */ ++#define VALUE_1GB (1ULL << SHIFT_1GB) ++#define SSAM_SPDK_VTOPHYS_ERROR (0xFFFFFFFFFFFFFFFFULL) ++#define SSAM_DMA_MEM_MAGIC (0xBABEFACEBABEFACE) ++ ++struct ssam_mp_dma_mem { ++ uint64_t magic; ++ uint64_t size; ++ char mem[0]; ++}; ++ ++struct ssam_mp_chunk { ++ struct ssam_mp_chunk *prev; ++ struct ssam_mp_chunk *next; ++ ++ /* Total size of the memory pool chunk, the chunk is in the memory block */ ++ uint64_t size; ++ ++ /* The chunk is free when true or in use when false */ ++ bool is_free; ++}; ++ ++struct ssam_mp_block { ++ struct ssam_mp_chunk *free_list; ++ struct ssam_mp_chunk *alloc_list; ++ struct ssam_mp_block *next; ++ ++ /* The memory pool block's start virtual address */ ++ char *virt_start; ++ ++ /* The memory pool block's start physical address */ ++ char *phys_start; ++ ++ /* Total size of the memory pool block */ ++ uint64_t size; ++ ++ /* Total size of the memory pool block that be allocated */ ++ uint64_t alloc_size; ++ ++ /* Total size of the memory pool block be allocated that program can be use */ ++ uint64_t alloc_prog_size; ++}; ++ ++struct ssam_mempool { ++ /* Total size of the memory pool */ ++ uint64_t size; ++ uint64_t extra_size; ++ uint64_t extra_size_limit; ++ struct ssam_mp_block *blk_list; ++ ++ /* The memory pool's start virtual address */ ++ char *virt; ++ pthread_mutex_t lock; ++}; ++ ++ ++static uint64_t ++ssam_mp_align_up(uint64_t size) ++{ ++ /* Aligin to sizeof long */ ++ return (size + sizeof(long) - 1) & (~(sizeof(long) - 1)); ++} ++ ++static inline void ++ssam_mp_lock(struct ssam_mempool *mp) ++{ ++ pthread_mutex_lock(&mp->lock); ++} ++ ++static inline void ++ssam_mp_unlock(struct ssam_mempool *mp) ++{ ++ pthread_mutex_unlock(&mp->lock); ++} ++ ++static void ++ssam_mp_init_block(struct ssam_mp_block *blk, uint64_t size) ++{ ++ blk->size = size; ++ blk->alloc_size = 0; ++ 
blk->alloc_prog_size = 0; ++ blk->free_list = (struct ssam_mp_chunk *)blk->virt_start; ++ blk->free_list->is_free = true; ++ blk->free_list->size = size; ++ blk->free_list->prev = NULL; ++ blk->free_list->next = NULL; ++ blk->alloc_list = NULL; ++} ++ ++static inline void ++ssam_mp_list_insert(struct ssam_mp_chunk **head, struct ssam_mp_chunk *ck) ++{ ++ struct ssam_mp_chunk *hd = *head; ++ ++ ck->prev = NULL; ++ ck->next = hd; ++ if (hd != NULL) { ++ hd->prev = ck; ++ } ++ *head = ck; ++} ++ ++static void ++ssam_mp_list_delete(struct ssam_mp_chunk **head, struct ssam_mp_chunk *ck) ++{ ++ if (ck->prev == NULL) { ++ *head = ck->next; ++ if (ck->next != NULL) { ++ ck->next->prev = NULL; ++ } ++ } else { ++ ck->prev->next = ck->next; ++ if (ck->next != NULL) { ++ ck->next->prev = ck->prev; ++ } ++ } ++} ++ ++static struct ++ ssam_mp_block * ++ssam_mp_find_block(struct ssam_mempool *mp, void *p) ++{ ++ struct ssam_mp_block *blk = mp->blk_list; ++ ++ while (blk != NULL) { ++ if ((blk->virt_start <= (char *)p) && ++ ((blk->virt_start + blk->size) > (char *)p)) { ++ break; ++ } ++ blk = blk->next; ++ } ++ ++ return blk; ++} ++ ++static void ++ssam_mp_merge_chunk(struct ssam_mp_block *blk, struct ssam_mp_chunk *ck) ++{ ++ struct ssam_mp_chunk *free_mem = ck; ++ struct ssam_mp_chunk *next = ck; ++ ++ /* Traversal free memory backward */ ++ while (next->is_free) { ++ free_mem = next; ++ if (((char *)next - MP_CK_CB_LEN) <= blk->virt_start) { ++ break; ++ } ++ next = *(struct ssam_mp_chunk **)((char *)next - MP_CK_END_LEN); ++ } ++ ++ /* Traverse free memory forward */ ++ next = (struct ssam_mp_chunk *)((char *)free_mem + free_mem->size); ++ while (((char *)next <= blk->virt_start + blk->size - MP_CK_HEADER_LEN) && next->is_free) { ++ ssam_mp_list_delete(&blk->free_list, next); ++ free_mem->size += next->size; ++ next = (struct ssam_mp_chunk *)((char *)next + next->size); ++ } ++ ++ /* Merge free memory */ ++ *(struct ssam_mp_chunk **)((char *)free_mem + free_mem->size - MP_CK_END_LEN) = free_mem; ++ ++ return; ++} ++ ++static int ++ssam_mp_get_mem_block(uint64_t start_virt_addr, uint64_t len, uint64_t *phys_addr, ++ uint64_t *blk_size) ++{ ++ uint64_t virt0, virt1, phys0, phys1; ++ uint64_t phys_len; ++ ++ if ((len % VALUE_2MB) != 0) { ++ SPDK_ERRLOG("Memory len %lu not align to %llu\n", len, VALUE_2MB); ++ return -EINVAL; ++ } ++ ++ virt0 = start_virt_addr; ++ virt1 = start_virt_addr; ++ phys0 = spdk_vtophys((void *)virt0, NULL); ++ if (phys0 == SSAM_SPDK_VTOPHYS_ERROR) { ++ SPDK_ERRLOG("Error translating virt0 address %lu\n", virt0); ++ return -EINVAL; ++ } ++ ++ /* ++ * Find a piece of memory with consecutive physical address, ++ * the memory got by spdk_dma_malloc is aligned by VALUE_2MB, ++ * this ensures that the physical addresses are consecutive ++ * within the VALUE_2MB length range. 
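++	 * The loop below walks forward in VALUE_2MB steps and stops at the
++	 * first page whose physical address is no longer contiguous.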
++ */ ++ for (phys_len = VALUE_2MB; phys_len < len; phys_len += VALUE_2MB) { ++ virt1 += VALUE_2MB; ++ phys1 = spdk_vtophys((void *)virt1, NULL); ++ if (phys1 == SSAM_SPDK_VTOPHYS_ERROR) { ++ SPDK_ERRLOG("Error translating virt1 address %lu\n", virt1); ++ break; ++ } ++ if ((long)(phys1 - phys0) != (long)(virt1 - virt0)) { ++ SPDK_DEBUGLOG(ssam_mempool, "End of consecutive physical addresses\n"); ++ break; ++ } ++ } ++ ++ *phys_addr = spdk_vtophys((void *)virt0, NULL); ++ *blk_size = phys_len; ++ ++ return 0; ++} ++ ++static void ++ssam_mp_free_blk_heads(struct ssam_mp_block *blk) ++{ ++ struct ssam_mp_block *blk_head = blk; ++ struct ssam_mp_block *l_mp = NULL; ++ ++ while (blk_head != NULL) { ++ l_mp = blk_head; ++ blk_head = blk_head->next; ++ free(l_mp); ++ l_mp = NULL; ++ } ++} ++ ++static int ++ssam_mp_insert_blocks(struct ssam_mempool *mp, uint64_t size) ++{ ++ struct ssam_mp_block *blk_head = NULL; ++ uint64_t blk_size = 0; ++ uint64_t remain_size = size; ++ uint64_t phys = 0; ++ char *virt_addr = mp->virt; ++ int rc; ++ ++ /* Find memory blocks and insert them to memory pool list */ ++ while (remain_size > 0) { ++ rc = ssam_mp_get_mem_block((uint64_t)virt_addr, remain_size, &phys, &blk_size); ++ if (rc != 0) { ++ ssam_mp_free_blk_heads(mp->blk_list); ++ return -ENOMEM; ++ } ++ blk_head = (struct ssam_mp_block *)malloc(sizeof(struct ssam_mp_block)); ++ if (blk_head == NULL) { ++ SPDK_ERRLOG("mempool block head malloc failed, mempool create failed\n"); ++ ssam_mp_free_blk_heads(mp->blk_list); ++ return -ENOMEM; ++ } ++ blk_head->virt_start = virt_addr; ++ blk_head->phys_start = (char *)phys; ++ ssam_mp_init_block(blk_head, blk_size); ++ blk_head->next = mp->blk_list; ++ mp->blk_list = blk_head; ++ mp->size += blk_size; ++ virt_addr += blk_size; ++ remain_size -= blk_size; ++ } ++ ++ if (mp->size != size) { ++ SPDK_ERRLOG("mempool size lost, mempool create failed\n"); ++ ssam_mp_free_blk_heads(mp->blk_list); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_check_mempool_size(uint64_t size, uint64_t extra_size_limit) ++{ ++ if (size == 0) { ++ SPDK_ERRLOG("Memory pool size can not be %lu, mempool create failed\n", size); ++ return -EINVAL; ++ } ++ ++ if (size < VALUE_2MB) { ++ SPDK_ERRLOG("Memory pool size can not less than %llu, actually %lu, mempool create failed\n", ++ VALUE_2MB, size); ++ return -EINVAL; ++ } ++ ++ if (extra_size_limit > VALUE_1GB) { ++ SPDK_ERRLOG("Memory pool extra size can not greater than %llu, actually %lu, mempool create failed\n", ++ VALUE_1GB, extra_size_limit); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++ssam_mempool_t * ++ssam_mempool_create(uint64_t size, uint64_t extra_size_limit) ++{ ++ struct ssam_mempool *mp = NULL; ++ uint64_t mp_size = size; ++ uint64_t mp_extra_size_limit = extra_size_limit; ++ void *virt = NULL; ++ int rc; ++ ++ rc = ssam_check_mempool_size(mp_size, mp_extra_size_limit); ++ if (rc != 0) { ++ return NULL; ++ } ++ ++ if ((mp_size % VALUE_2MB) != 0) { ++ SPDK_NOTICELOG("Memory pool size %lu not align to %llu, Align down memory pool size to %llu\n", ++ mp_size, ++ VALUE_2MB, mp_size & ~(VALUE_2MB - 1)); ++ mp_size = mp_size & ~(VALUE_2MB - 1); ++ } ++ ++ if ((mp_extra_size_limit % VALUE_2MB) != 0) { ++ SPDK_NOTICELOG("Memory pool extra size %lu not align to %llu, Align down memory pool size to %llu\n", ++ mp_extra_size_limit, VALUE_2MB, mp_extra_size_limit & ~(VALUE_2MB - 1)); ++ mp_extra_size_limit = mp_extra_size_limit & ~(VALUE_2MB - 1); ++ } ++ ++ mp = (struct ssam_mempool *)calloc(1, sizeof(struct 
ssam_mempool)); ++ if (mp == NULL) { ++ SPDK_ERRLOG("mempool head malloc failed, mempool create failed\n"); ++ return NULL; ++ } ++ ++ virt = spdk_dma_malloc(mp_size, VALUE_2MB, NULL); ++ if (virt == NULL) { ++ SPDK_ERRLOG("spdk_dma_malloc failed, mempool create failed\n"); ++ free(mp); ++ mp = NULL; ++ return NULL; ++ } ++ mp->virt = (char *)virt; ++ ++ rc = ssam_mp_insert_blocks(mp, mp_size); ++ if (rc != 0) { ++ free(mp); ++ mp = NULL; ++ spdk_dma_free(virt); ++ return NULL; ++ } ++ ++ mp->extra_size = 0; ++ mp->extra_size_limit = mp_extra_size_limit; ++ pthread_mutex_init(&mp->lock, NULL); ++ ++ return (ssam_mempool_t *)mp; ++} ++ ++static void ++ssam_mp_split_block(struct ssam_mp_block *blk, struct ssam_mp_chunk *free_mem, ++ struct ssam_mp_chunk *allocated, uint64_t size) ++{ ++ *free_mem = *allocated; ++ free_mem->size -= size; ++ *(struct ssam_mp_chunk **)((char *)free_mem + free_mem->size - MP_CK_END_LEN) = free_mem; ++ ++ if (free_mem->prev == NULL) { ++ blk->free_list = free_mem; ++ } else { ++ free_mem->prev->next = free_mem; ++ } ++ ++ if (free_mem->next != NULL) { ++ free_mem->next->prev = free_mem; ++ } ++ ++ allocated->is_free = false; ++ allocated->size = size; ++ ++ *(struct ssam_mp_chunk **)((char *)allocated + size - MP_CK_END_LEN) = allocated; ++} ++ ++static void * ++ssam_mp_alloc_mem_from_block(struct ssam_mp_block *blk, uint64_t size, ++ uint64_t *phys_addr) ++{ ++ struct ssam_mp_chunk *free_mem = NULL; ++ struct ssam_mp_chunk *allocated = NULL; ++ char *alloc = NULL; ++ ++ free_mem = blk->free_list; ++ while (free_mem != NULL) { ++ if (free_mem->size < size) { ++ free_mem = free_mem->next; ++ continue; ++ } ++ ++ allocated = free_mem; ++ if ((free_mem->size - size) > MP_CK_CB_LEN) { ++ /* If enough mem in free chunk, split it */ ++ free_mem = (struct ssam_mp_chunk *)((char *)allocated + size); ++ ssam_mp_split_block(blk, free_mem, allocated, size); ++ } else { ++ /* If no enough mem in free chunk, all will be allocated */ ++ ssam_mp_list_delete(&blk->free_list, allocated); ++ allocated->is_free = false; ++ } ++ ssam_mp_list_insert(&blk->alloc_list, allocated); ++ ++ blk->alloc_size += allocated->size; ++ blk->alloc_prog_size += allocated->size - (uint64_t)MP_CK_CB_LEN; ++ alloc = (char *)allocated + MP_CK_HEADER_LEN; ++ if (phys_addr != NULL) { ++ *phys_addr = (uint64_t)blk->phys_start + (uint64_t)(alloc - blk->virt_start); ++ } ++ ++ return (void *)alloc; ++ } ++ ++ return NULL; ++} ++ ++static bool ++ssam_mp_check_consecutive_mem(void *start_addr, uint64_t len) ++{ ++ uint64_t phys_start; ++ uint64_t phys_end; ++ ++ phys_start = spdk_vtophys(start_addr, NULL); ++ phys_end = spdk_vtophys((void *)((uint64_t)start_addr + len - 1), NULL); ++ if ((phys_end - phys_start) == (len - 1)) { ++ return true; ++ } ++ ++ return false; ++} ++ ++/* alloc dma memory from hugepage directly */ ++static void * ++ssam_mp_dma_alloc(struct ssam_mempool *mp, uint64_t size, uint64_t *phys) ++{ ++ struct ssam_mp_dma_mem *alloc; ++ size_t len = size + sizeof(struct ssam_mp_dma_mem); ++ uint64_t phys_addr = 0; ++ ++ if (mp->extra_size + len > mp->extra_size_limit) { ++ SPDK_INFOLOG(ssam_mempool, "spdk_dma_malloc alloc failed, extra_size(%lu) size(%zu) limit(%lu).\n", ++ mp->extra_size, len, mp->extra_size_limit); ++ return NULL; ++ } ++ ++ alloc = (struct ssam_mp_dma_mem *)spdk_dma_malloc(len, 0, NULL); ++ if (alloc == NULL) { ++ SPDK_INFOLOG(ssam_mempool, "spdk_dma_malloc alloc failed, len %zu.\n", len); ++ return NULL; ++ } ++ if (!ssam_mp_check_consecutive_mem((void *)alloc->mem, size)) 
{ ++ SPDK_ERRLOG("spdk_dma_malloc alloc failed, no consecutive mem, len %lu.\n", size); ++ spdk_dma_free(alloc); ++ return NULL; ++ } ++ phys_addr = spdk_vtophys((const void *)alloc->mem, NULL); ++ if (phys_addr == SSAM_SPDK_VTOPHYS_ERROR) { ++ SPDK_ERRLOG("Error translating spdk_dma_malloc address %lu\n", phys_addr); ++ spdk_dma_free(alloc); ++ return NULL; ++ } ++ *phys = phys_addr; ++ alloc->magic = SSAM_DMA_MEM_MAGIC; ++ alloc->size = len; ++ mp->extra_size += len; ++ ++ return (void *)alloc->mem; ++} ++ ++static void ++ssam_mp_dma_free(struct ssam_mempool *mp, const void *ptr) ++{ ++ struct ssam_mp_dma_mem *free_mem; ++ uint64_t addr = (uint64_t)ptr; ++ ++ if (addr <= sizeof(struct ssam_mp_dma_mem)) { ++ SPDK_ERRLOG("ssam_mp_dma_free mem address err\n"); ++ return; ++ } ++ ++ free_mem = (struct ssam_mp_dma_mem *)(addr - sizeof(struct ssam_mp_dma_mem)); ++ if (free_mem->magic == SSAM_DMA_MEM_MAGIC) { ++ mp->extra_size -= free_mem->size; ++ spdk_dma_free(free_mem); ++ } else { ++ SPDK_ERRLOG("ssam_mp_dma_free magic err, magic is %lx\n", free_mem->magic); ++ } ++ return; ++} ++ ++static void * ++ssam_mp_alloc_mem_from_blocks(struct ssam_mempool *mp, uint64_t size, ++ uint64_t *phys_addr) ++{ ++ struct ssam_mp_block *blk = mp->blk_list; ++ void *alloc = NULL; ++ ++ while (blk != NULL) { ++ if (size > (blk->size - blk->alloc_size)) { ++ blk = blk->next; ++ continue; ++ } ++ ++ alloc = ssam_mp_alloc_mem_from_block(blk, size, phys_addr); ++ if (alloc != NULL) { ++ return alloc; ++ } ++ ++ blk = blk->next; ++ } ++ SPDK_INFOLOG(ssam_mempool, "ssam mempool no enough memory, alloc size %lu\n", size); ++ alloc = ssam_mp_dma_alloc(mp, size, phys_addr); ++ ++ return alloc; ++} ++ ++void * ++ssam_mempool_alloc(ssam_mempool_t *mp, uint64_t size, uint64_t *phys_addr) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ void *alloc = NULL; ++ uint64_t need_size; ++ ++ if (phys_addr == NULL) { ++ SPDK_ERRLOG("alloc phys_addr pointer is NULL\n"); ++ return NULL; ++ } ++ ++ if (l_mp == NULL) { ++ SPDK_ERRLOG("alloc mp pointer is NULL\n"); ++ return NULL; ++ } ++ ++ if (size == 0) { ++ SPDK_ERRLOG("Memory pool size can not be %lu, mempool alloc failed\n", size); ++ return NULL; ++ } ++ ++ need_size = ssam_mp_align_up(size + MP_CK_CB_LEN); ++ ++ ssam_mp_lock(l_mp); ++ if (need_size > l_mp->size) { ++ SPDK_INFOLOG(ssam_mempool, "No enough memory in mempool, need %lu, actually %lu\n", ++ need_size, l_mp->size); ++ alloc = ssam_mp_dma_alloc(l_mp, size, phys_addr); ++ ssam_mp_unlock(l_mp); ++ return alloc; ++ } ++ ++ alloc = ssam_mp_alloc_mem_from_blocks(l_mp, need_size, phys_addr); ++ ++ ssam_mp_unlock(l_mp); ++ ++ return alloc; ++} ++ ++void ++ssam_mempool_free(ssam_mempool_t *mp, void *ptr) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ struct ssam_mp_block *blk = NULL; ++ struct ssam_mp_chunk *ck = NULL; ++ ++ if (l_mp == NULL) { ++ SPDK_ERRLOG("free mp pointer is NULL\n"); ++ return; ++ } ++ ++ if (ptr == NULL) { ++ SPDK_ERRLOG("free ptr pointer is NULL\n"); ++ return; ++ } ++ ++ ssam_mp_lock(l_mp); ++ ++ blk = ssam_mp_find_block(l_mp, ptr); ++ if (blk == NULL) { ++ ssam_mp_dma_free(l_mp, ptr); ++ ssam_mp_unlock(l_mp); ++ return; ++ } ++ ++ ck = (struct ssam_mp_chunk *)((char *)ptr - MP_CK_HEADER_LEN); ++ ++ ssam_mp_list_delete(&blk->alloc_list, ck); ++ ssam_mp_list_insert(&blk->free_list, ck); ++ ck->is_free = true; ++ ++ blk->alloc_size -= ck->size; ++ blk->alloc_prog_size -= ck->size - (uint64_t)MP_CK_CB_LEN; ++ ++ ssam_mp_merge_chunk(blk, ck); ++ ++ ssam_mp_unlock(l_mp); 
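++	/* The chunk is now back on its block's free list and merged with any adjacent free chunks. */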
++ ++ return; ++} ++ ++void ++ssam_mempool_destroy(ssam_mempool_t *mp) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ ++ if (l_mp == NULL) { ++ SPDK_ERRLOG("destroy mp pointer is NULL\n"); ++ return; ++ } ++ ++ if (l_mp->virt == NULL) { ++ SPDK_ERRLOG("destroy mp->virt pointer is NULL\n"); ++ return; ++ } ++ ++ ssam_mp_lock(l_mp); ++ ssam_mp_free_blk_heads(l_mp->blk_list); ++ spdk_dma_free(l_mp->virt); ++ ssam_mp_unlock(l_mp); ++ pthread_mutex_destroy(&l_mp->lock); ++ free(l_mp); ++ l_mp = NULL; ++ ++ return; ++} ++ ++static uint64_t ++ssam_mp_total_memory(ssam_mempool_t *mp) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ uint64_t size; ++ ++ ssam_mp_lock(l_mp); ++ size = l_mp->size; ++ ssam_mp_unlock(l_mp); ++ ++ return size; ++} ++ ++static uint64_t ++ssam_mp_total_used_memory(ssam_mempool_t *mp) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ struct ssam_mp_block *blk = NULL; ++ uint64_t total = 0; ++ ++ ssam_mp_lock(l_mp); ++ ++ blk = l_mp->blk_list; ++ while (blk != NULL) { ++ total += blk->alloc_size; ++ blk = blk->next; ++ } ++ ++ ssam_mp_unlock(l_mp); ++ ++ return total; ++} ++ ++static uint32_t ++ssam_mp_alloc_num(ssam_mempool_t *mp) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ struct ssam_mp_block *blk = NULL; ++ struct ssam_mp_chunk *alloc = NULL; ++ uint32_t total = 0; ++ ++ ssam_mp_lock(l_mp); ++ ++ blk = l_mp->blk_list; ++ while (blk != NULL) { ++ alloc = blk->alloc_list; ++ while (alloc != NULL) { ++ if (total == UINT32_MAX) { ++ SPDK_ERRLOG("mp alloc num out of bound\n"); ++ ssam_mp_unlock(l_mp); ++ return total; ++ } ++ total++; ++ alloc = alloc->next; ++ } ++ blk = blk->next; ++ } ++ ++ ssam_mp_unlock(l_mp); ++ ++ return total; ++} ++ ++static uint32_t ++ssam_mp_free_num(ssam_mempool_t *mp) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ struct ssam_mp_block *blk = NULL; ++ struct ssam_mp_chunk *free_mem = NULL; ++ uint32_t total = 0; ++ ++ ssam_mp_lock(l_mp); ++ ++ blk = l_mp->blk_list; ++ while (blk != NULL) { ++ free_mem = blk->free_list; ++ while (free_mem != NULL) { ++ if (total == UINT32_MAX) { ++ SPDK_ERRLOG("mp free num out of bound\n"); ++ ssam_mp_unlock(l_mp); ++ return total; ++ } ++ total++; ++ free_mem = free_mem->next; ++ } ++ blk = blk->next; ++ } ++ ++ ssam_mp_unlock(l_mp); ++ ++ return total; ++} ++ ++static uint64_t ++ssam_mp_get_greatest_free_size(ssam_mempool_t *mp) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ struct ssam_mp_block *blk = NULL; ++ struct ssam_mp_chunk *free_mem = NULL; ++ uint64_t max_size = 0; ++ ++ ssam_mp_lock(l_mp); ++ ++ blk = l_mp->blk_list; ++ while (blk != NULL) { ++ free_mem = blk->free_list; ++ while (free_mem != NULL) { ++ if (max_size < free_mem->size) { ++ max_size = free_mem->size; ++ } ++ free_mem = free_mem->next; ++ } ++ blk = blk->next; ++ } ++ ++ ssam_mp_unlock(l_mp); ++ ++ return max_size; ++} ++ ++int ++ssam_get_mempool_info(ssam_mempool_t *mp, struct memory_info_stats *info) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ ++ if (l_mp == NULL || info == NULL) { ++ SPDK_ERRLOG("ssam get mempool info mp or info pointer is NULL\n"); ++ return -EINVAL; ++ } ++ ++ info->total_size = ssam_mp_total_memory(l_mp); ++ info->used_size = ssam_mp_total_used_memory(l_mp); ++ info->free_size = info->total_size - info->used_size; ++ info->greatest_free_size = ssam_mp_get_greatest_free_size(l_mp); ++ info->alloc_count = ssam_mp_alloc_num(l_mp); ++ info->free_count = ssam_mp_free_num(l_mp); ++ ++ return 0; 
++} ++SPDK_LOG_REGISTER_COMPONENT(ssam_mempool) +diff --git a/lib/ssam/ssam_internal.h b/lib/ssam/ssam_internal.h +new file mode 100644 +index 0000000..bbcd047 +--- /dev/null ++++ b/lib/ssam/ssam_internal.h +@@ -0,0 +1,520 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. ++ */ ++ ++#ifndef SSAM_INTERNAL_H ++#define SSAM_INTERNAL_H ++ ++#include "stdint.h" ++ ++#include ++#include "ssam_driver/dpak_ssam.h" ++ ++#include "spdk_internal/thread.h" ++#include "spdk/log.h" ++#include "spdk/util.h" ++#include "spdk/rpc.h" ++#include "spdk/bdev.h" ++#include "spdk/ssam.h" ++#include "ssam_config.h" ++ ++#define SPDK_SSAM_FEATURES ((1ULL << VHOST_F_LOG_ALL) | \ ++ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \ ++ (1ULL << VIRTIO_F_VERSION_1) | \ ++ (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \ ++ (1ULL << VIRTIO_RING_F_EVENT_IDX) | \ ++ (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \ ++ (1ULL << VIRTIO_F_RING_PACKED)) ++ ++#define VIRITO_DEFAULT_QUEUE_SIZE 256 ++ ++#define SPDK_SSAM_VQ_MAX_SUBMISSIONS 16 ++#define SPDK_SSAM_MAX_VQUEUES 32 ++#define SPDK_SSAM_MAX_VQ_SIZE 256 ++#define SPDK_SSAM_VF_DEFAULTE_VQUEUES 1 ++#define SPDK_SSAM_BLK_MAX_VQ_SIZE 32 ++#define SSAM_JSON_DEFAULT_QUEUES_NUM 16 ++ ++/* ssam not support config vq size so far */ ++#define SPDK_SSAM_DEFAULT_VQ_SIZE SPDK_SSAM_MAX_VQ_SIZE ++#define SPDK_SSAM_DEFAULT_VQUEUES 16 ++#define SPDK_SSAM_IOVS_MAX 32 ++#define SPDK_SSAM_MAX_SEG_SIZE (32 * 1024) ++ ++#define SPDK_INVALID_GFUNC_ID UINT16_MAX ++#define SPDK_INVALID_CORE_ID UINT16_MAX ++#define SPDK_INVALID_VQUEUE_NUM UINT16_MAX ++#define SPDK_INVALID_ID UINT16_MAX ++ ++#define SSAM_PF_MAX_NUM 32 ++#define SPDK_SSAM_SCSI_CTRLR_MAX_DEVS 255 ++#define SSAM_VIRTIO_SCSI_LUN_ID 0x400001 ++#define SPDK_SSAM_SCSI_DEFAULT_VQUEUES 128 ++#define SSAM_MAX_SESSION_PER_DEV UINT16_MAX ++#define SSAM_DEFAULT_MEMPOOL_EXTRA_SIZE 0 ++#define SSAM_MAX_CORE_NUM 16 ++#define SSAM_MAX_CORE_NUM_WITH_LARGE_IO 10 ++ ++#define SPDK_LIMIT_LOG_MAX_INTERNEL_IN_MS 3000 ++#define SPDK_CONVERT_MS_TO_US 1000 ++ ++#define SPDK_SSAM_VIRTIO_BLK_DEFAULT_FEATURE 0x3f11001046 ++#define SPDK_SSAM_VIRTIO_SCSI_DEFAULT_FEATURE 0x3f11000007 ++ ++enum spdk_ssam_iostat_mode { ++ SSAM_IOSTAT_NORMAL, ++ SSAM_IOSTAT_SUM, ++ SSAM_IOSTAT_DUMP_VQ, ++ SSAM_IOSTAT_SPARSE, ++}; ++ ++typedef void (*spdk_ssam_session_io_wait_cb)(void *cb_arg); ++ ++struct spdk_ssam_session_io_wait { ++ spdk_ssam_session_io_wait_cb cb_fn; ++ void *cb_arg; ++ TAILQ_ENTRY(spdk_ssam_session_io_wait) link; ++}; ++ ++typedef void (*spdk_ssam_session_io_wait_r_cb)(void *cb_arg); ++ ++struct spdk_ssam_session_io_wait_r { ++ spdk_ssam_session_io_wait_r_cb cb_fn; ++ void *cb_arg; ++ TAILQ_ENTRY(spdk_ssam_session_io_wait_r) link; ++}; ++ ++struct spdk_ssam_virtqueue { ++ void *tasks; ++ struct spdk_ssam_session *smsession; ++ uint32_t *index; ++ int num; ++ int use_num; ++ int index_l; ++ int index_r; ++}; ++ ++struct spdk_ssam_show_iostat_args { ++ /* vq_idx for blk; tgt_id for scsi */ ++ uint32_t id; ++ enum spdk_ssam_iostat_mode mode; ++}; ++ ++struct spdk_ssam_session_backend { ++ enum virtio_type type; ++ int (*remove_session)(struct spdk_ssam_session *smsession); ++ void (*remove_self)(struct spdk_ssam_session *smsession); ++ void (*request_worker)(struct spdk_ssam_session *smsession, void *arg); ++ void (*destroy_bdev_device)(struct spdk_ssam_session *smsession, void *args); ++ void (*response_worker)(struct spdk_ssam_session *smsession, void *arg); ++ void (*no_data_req_worker)(struct 
spdk_ssam_session *smsession); ++ ++ int (*ssam_get_config)(struct spdk_ssam_session *smsession, ++ uint8_t *config, uint32_t len, uint16_t queues); ++ int (*ssam_set_config)(struct spdk_ssam_session *smsession, ++ uint8_t *config, uint32_t offset, uint32_t size, uint32_t flags); ++ ++ void (*print_stuck_io_info)(struct spdk_ssam_session *smsession); ++ ++ void (*dump_info_json)(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w); ++ void (*write_config_json)(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w); ++ void (*show_iostat_json)(struct spdk_ssam_session *smsession, ++ struct spdk_ssam_show_iostat_args *args, ++ struct spdk_json_write_ctx *w); ++ void (*clear_iostat_json)(struct spdk_ssam_session *smsession); ++ struct spdk_bdev *(*get_bdev)(struct spdk_ssam_session *smsession, uint32_t id); ++}; ++ ++struct spdk_ssam_session { ++ /* Unique session name, format as ssam.tid.gfunc_id. */ ++ char *name; ++ ++ struct spdk_ssam_dev *smdev; ++ ++ /* Session poller thread, same as ssam dev poller thread */ ++ struct spdk_thread *thread; ++ struct ssam_mempool *mp; ++ const struct spdk_ssam_session_backend *backend; ++ spdk_ssam_session_rsp_fn rsp_fn; ++ void *rsp_ctx; ++ struct spdk_ssam_virtqueue virtqueue[SPDK_SSAM_MAX_VQUEUES]; ++ ++ /* Number of processing tasks, can not remove session when task_cnt > 0 */ ++ int task_cnt; ++ ++ /* Number of pending asynchronous operations */ ++ uint32_t pending_async_op_num; ++ ++ /* ssam global virtual function id */ ++ uint16_t gfunc_id; ++ ++ /* Depth of virtio-blk virtqueue */ ++ uint16_t queue_size; ++ ++ /* Number of virtio-blk virtqueue */ ++ uint16_t max_queues; ++ bool started; ++ bool initialized; ++ ++ /* spdk_ssam_session_fn process finish flag */ ++ bool async_done; ++ ++ bool registered; ++ ++ TAILQ_ENTRY(spdk_ssam_session) tailq; ++}; ++ ++struct ssam_iovs { ++ struct iovec sges[SPDK_SSAM_IOVS_MAX]; ++}; ++ ++struct ssam_iovec { ++ struct ssam_iovs virt; /* virt's iov_base is virtual address */ ++ struct ssam_iovs phys; /* phys's iov_base is physical address */ ++}; ++ ++struct ssam_stat { ++ uint64_t poll_cur_tsc; ++ uint64_t poll_tsc; ++ uint64_t poll_count; ++}; ++ ++struct spdk_ssam_dev { ++ /* ssam device name, format as ssam.tid */ ++ char *name; ++ /* virtio type */ ++ enum virtio_type type; ++ ++ /* ssam device poller thread, same as session poller thread */ ++ struct spdk_thread *thread; ++ struct spdk_poller *requestq_poller; ++ struct spdk_poller *responseq_poller; ++ struct spdk_poller *stop_poller; ++ ++ /* Store sessions of this dev, max number is SSAM_MAX_SESSION_PER_DEV */ ++ struct spdk_ssam_session **smsessions; ++ ++ TAILQ_ENTRY(spdk_ssam_dev) tailq; ++ ++ /* IO num that is on flight */ ++ uint64_t io_num; ++ ++ uint64_t discard_io_num; ++ ++ /* IO stuck ticks in dma process */ ++ uint64_t io_stuck_tsc; ++ struct ssam_stat stat; ++ ++ uint64_t io_wait_cnt; ++ uint64_t io_wait_r_cnt; ++ ++ /* Number of started and actively polled sessions */ ++ uint32_t active_session_num; ++ ++ /* Information of tid, indicate from which ssam queue to receive or send data */ ++ uint16_t tid; ++ TAILQ_HEAD(, spdk_ssam_session_io_wait) io_wait_queue; ++ TAILQ_HEAD(, spdk_ssam_session_io_wait_r) io_wait_queue_r; ++}; ++ ++struct spdk_ssam_dma_cb { ++ uint8_t status; ++ uint8_t req_dir; ++ uint16_t vq_idx; ++ uint16_t task_idx; ++ uint16_t gfunc_id; ++}; ++ ++struct spdk_ssam_send_event_flag { ++ bool need_async; ++ bool need_rsp; ++}; ++ ++/** ++ * Remove a session from sessions array. 
++ * ++ * \param smsessions sessions array. ++ * \param smsession the session to be removed. ++ */ ++void ssam_sessions_remove(struct spdk_ssam_session **smsessions, ++ struct spdk_ssam_session *smsession); ++
++/** ++ * Check whether the sessions array is empty. ++ * ++ * \param smsessions sessions array. ++ * \return true if the sessions array is empty, false otherwise. ++ */ ++bool ssam_sessions_empty(struct spdk_ssam_session **smsessions); ++
++/** ++ * Get the next session in the sessions array, starting from the given session. ++ * ++ * \param smsessions sessions array. ++ * \param smsession the session to start from. ++ * \return the next session found, or NULL if none is found. ++ */ ++struct spdk_ssam_session *ssam_sessions_next(struct spdk_ssam_session **smsessions, ++ struct spdk_ssam_session *smsession); ++
++/** ++ * Insert an I/O wait task into a session. ++ * ++ * \param smsession the session to insert the I/O wait into. ++ * \param io_wait the I/O wait to be inserted. ++ */ ++void ssam_session_insert_io_wait(struct spdk_ssam_session *smsession, ++ struct spdk_ssam_session_io_wait *io_wait); ++
++/** ++ * Insert an I/O wait complete or DMA task into a smdev. ++ * ++ * \param smdev the smdev to insert the I/O wait into. ++ * \param io_wait_r the I/O wait to be inserted. ++ */ ++void ssam_session_insert_io_wait_r(struct spdk_ssam_dev *smdev, ++ struct spdk_ssam_session_io_wait_r *io_wait_r); ++
++/** ++ * Remove a session from the sessions array and then stop the session dev poller. ++ * ++ * \param smsession the session to be removed. ++ */ ++void ssam_session_destroy(struct spdk_ssam_session *smsession); ++
++/** ++ * Show ssam device info in JSON format. ++ * ++ * \param smdev ssam device. ++ * \param gfunc_id ssam global vf id. ++ * \param w JSON write context. ++ */ ++void ssam_dump_info_json(struct spdk_ssam_dev *smdev, uint16_t gfunc_id, ++ struct spdk_json_write_ctx *w); ++
++/** ++ * Get a ssam device name. ++ * ++ * \param smdev ssam device. ++ * \return ssam device name or NULL. ++ */ ++const char *ssam_dev_get_name(const struct spdk_ssam_dev *smdev); ++
++/** ++ * Get a ssam session name. ++ * ++ * \param smsession ssam session. ++ * \return ssam session name or NULL. ++ */ ++const char *ssam_session_get_name(const struct spdk_ssam_session *smsession); ++
++/** ++ * Call a function of the provided ssam session. ++ * The function will be called on this session's thread. ++ * ++ * \param smsession ssam session. ++ * \param fn function to call on the session's thread. ++ * \param cpl_fn function to be called at the end, on the ssam management thread. ++ * Optional, can be NULL. ++ * \param send_event_flag whether an asynchronous operation or response is required. ++ * \param ctx additional argument to both callbacks. ++ * \return error code ++ */ ++int ssam_send_event_to_session(struct spdk_ssam_session *smsession, spdk_ssam_session_fn fn, ++ spdk_ssam_session_cpl_fn cpl_fn, struct spdk_ssam_send_event_flag send_event_flag, void *ctx); ++
++/** ++ * Finish a blocking ssam_send_event_to_session() call and finally ++ * start the session. This must be called on the target lcore, which ++ * will now receive all session-related messages (e.g. from ++ * ssam_send_event_to_session()). ++ * ++ * Must be called under the global ssam lock. ++ * ++ * \param smsession ssam session ++ * \param response return code ++ */ ++void ssam_session_start_done(struct spdk_ssam_session *smsession, int response); ++
++/** ++ * Finish a blocking ssam_send_event_to_session() call and finally ++ * stop the session.
This must be called on the session's lcore, which ++ * used to receive all session-related messages (e.g. from ++ * ssam_send_event_to_session()). After this call, the session- ++ * related messages will once again be processed by any arbitrary thread. ++ * ++ * Must be called under the global ssam lock. ++ * ++ * \param smsession ssam session ++ * \param rsp return code ++ * \param ctx user context ++ */ ++void ssam_session_stop_done(struct spdk_ssam_session *smsession, int rsp, void **ctx); ++
++/** ++ * Mark the session as freed, so that it is not accessed any more. ++ * ++ * \param ctx user context ++ */ ++void ssam_set_session_be_freed(void **ctx); ++
++/** ++ * Find a ssam device in the global g_ssam_devices list by gfunc_id. ++ * If the ssam device is found, register a session in the existing ssam device's ++ * sessions list; if it is not found, first add a new ssam device to the global ++ * g_ssam_devices list and then register a session in the new ssam device's ++ * sessions list. ++ * ++ * Must be called under the global ssam lock. ++ * ++ * \param info ssam session register info. ++ * \param smsession ssam session created. ++ * \return 0 on success or a negative value on failure. ++ */ ++int ssam_session_register(struct spdk_ssam_session_reg_info *info, ++ struct spdk_ssam_session **smsession); ++
++/** ++ * Unregister the smsession response callback function. ++ * ++ * \param smsession ssam session ++ */ ++void ssam_session_unreg_response_cb(struct spdk_ssam_session *smsession); ++
++void ssam_dev_unregister(struct spdk_ssam_dev **dev); ++
++void ssam_send_event_async_done(void **ctx); ++
++void ssam_send_dev_destroy_msg(struct spdk_ssam_session *smsession, void *args); ++
++/** ++ * Get ssam config. ++ * ++ * \param smsession ssam session ++ * \param config a memory region to store the config. ++ * \param len length of the config memory region. ++ * \param queues number of queues. ++ * \return 0 on success or -1 on failure. ++ */ ++int ssam_get_config(struct spdk_ssam_session *smsession, uint8_t *config, ++ uint32_t len, uint16_t queues); ++
++/** ++ * Mount the gfunc_id volume to the ssam normal queue. ++ * ++ * \param smsession ssam session ++ * \param lun_id lun id of gfunc_id. ++ * ++ * \return 0 on success or non-zero on failure. ++ */ ++int ssam_mount_normal(struct spdk_ssam_session *smsession, uint32_t lun_id); ++
++/** ++ * Unmount the gfunc_id volume from the ssam normal queue. ++ * ++ * \param smsession ssam session ++ * \param lun_id lun id of gfunc_id. ++ * ++ * \return 0 on success or non-zero on failure. ++ */ ++int ssam_umount_normal(struct spdk_ssam_session *smsession, uint32_t lun_id); ++
++/** ++ * Mount the gfunc_id volume to the ssam normal queue again. ++ * ++ * \param smsession ssam session ++ * \param lun_id lun id of gfunc_id. ++ * ++ * \return 0 on success or non-zero on failure. ++ */ ++int ssam_remount_normal(struct spdk_ssam_session *smsession, uint32_t lun_id); ++
++/** ++ * Register the worker poller to a dev. ++ * ++ * \param smdev the dev to register the worker poller for. ++ * \return 0 on success or non-zero on failure. ++ */ ++int ssam_dev_register_worker_poller(struct spdk_ssam_dev *smdev); ++
++/** ++ * Unregister the worker poller of a dev. ++ * ++ * \param smdev the dev to unregister the worker poller for. ++ */ ++void ssam_dev_unregister_worker_poller(struct spdk_ssam_dev *smdev); ++
++/** ++ * Get the difference between the current tsc and the given tsc. ++ * ++ * \param tsc the reference tsc. ++ * \return the tsc difference. ++ */ ++uint64_t ssam_get_diff_tsc(uint64_t tsc); ++
++/** ++ * Get the bdev name of the specific gfunc_id. ++ * ++ * \param gfunc_id ssam global vf id.
++ * ++ * \return the bdev name of gfunc_id ++ */ ++const char *ssam_get_bdev_name_by_gfunc_id(uint16_t gfunc_id); ++
++/** ++ * Remove a ssam session. Remove the session associated with the unique gfunc_id, ++ * then remove the ssam device if it no longer has any sessions. ++ * ++ * Note that this interface is not reentrant, so ssam_lock must be called first. ++ * ++ * \param smsession ssam session ++ * \param blk_force_delete whether to force delete a blk session. ++ * ++ * \return 0 on success, negative errno on error. ++ */ ++int ssam_session_unregister(struct spdk_ssam_session *smsession, bool blk_force_delete); ++
++/** ++ * Get ssam iostat. ++ * ++ * \param smsession ssam session ++ * \param stat a memory region to store the iostat. ++ */ ++void spdk_ssam_get_iostat(struct spdk_ssam_session *smsession, ++ struct spdk_bdev_io_stat *stat); ++
++/** ++ * Decrease the dev in-flight IO number. ++ * ++ * \param smdev ssam device. ++ */ ++void ssam_dev_io_dec(struct spdk_ssam_dev *smdev); ++
++/** ++ * Get the ssam session bdev. ++ * ++ * \param smsession ssam session ++ * ++ * \return the session bdev. ++ */ ++struct spdk_bdev *ssam_get_session_bdev(struct spdk_ssam_session *smsession); ++
++/** ++ * Free memory allocated from the SPDK (rte) allocator. ++ * ++ * \param addr address of the memory to free. ++ * ++ * \return 0 on success. ++ */ ++int ssam_free_ex(void *addr); ++
++/** ++ * Get element info (page size and socket id) from a memory address. ++ * ++ * \param data memory address. ++ * \param pg_size output page size. ++ * \param socket_id output socket id. ++ * ++ * \return 0 on success or -1 on failure. ++ */ ++int ssam_malloc_elem_from_addr(const void *data, unsigned long long *pg_size, int *socket_id); ++
++#endif /* SSAM_INTERNAL_H */
+diff --git a/lib/ssam/ssam_malloc.c b/lib/ssam/ssam_malloc.c
+new file mode 100644
+index 0000000..2ac8160
+--- /dev/null
++++ b/lib/ssam/ssam_malloc.c
+@@ -0,0 +1,31 @@
++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. ++ */ ++
++#include ++#include "spdk/env.h" ++
++#include "ssam_internal.h" ++
++int ++ssam_free_ex(void *addr) ++{ ++ spdk_free(addr); ++ return 0; ++} ++
++int ++ssam_malloc_elem_from_addr(const void *data, unsigned long long *pg_size, int *socket_id) ++{ ++ struct rte_memseg_list *msl = NULL; ++ ++ msl = rte_mem_virt2memseg_list(data); ++ if (msl == NULL) { ++ return -1; ++ } ++ ++ *socket_id = msl->socket_id; ++ *pg_size = msl->page_sz; ++ return 0; ++}
+diff --git a/lib/ssam/ssam_rpc.c b/lib/ssam/ssam_rpc.c
+new file mode 100644
+index 0000000..67cf6d3
+--- /dev/null
++++ b/lib/ssam/ssam_rpc.c
+@@ -0,0 +1,1965 @@
++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved.
++ */ ++ ++#include ++#include "spdk/string.h" ++#include "spdk/env.h" ++#include "spdk/bdev_module.h" ++#include "spdk/ssam.h" ++#include "spdk/bdev.h" ++ ++#include "ssam_internal.h" ++#include "ssam_config.h" ++#include "rte_malloc.h" ++ ++static int ssam_rpc_get_gfunc_id_by_dbdf(char *dbdf, uint16_t *gfunc_id); ++ ++int g_delete_flag = 0; ++int session_delete_times = 0; ++uint8_t g_hpd_delete_session_times[2000] = {0}; ++ ++struct rpc_ssam_blk_ctrlr { ++ char *dev_name; ++ char *index; ++ bool readonly; ++ char *serial; ++ uint16_t vqueue; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_construct_ssam_blk_ctrlr[] = { ++ {"dev_name", offsetof(struct rpc_ssam_blk_ctrlr, dev_name), spdk_json_decode_string}, ++ {"index", offsetof(struct rpc_ssam_blk_ctrlr, index), spdk_json_decode_string}, ++ {"readonly", offsetof(struct rpc_ssam_blk_ctrlr, readonly), spdk_json_decode_bool, true}, ++ {"serial", offsetof(struct rpc_ssam_blk_ctrlr, serial), spdk_json_decode_string, true}, ++ {"vqueue", offsetof(struct rpc_ssam_blk_ctrlr, vqueue), spdk_json_decode_uint16, true}, ++}; ++ ++static void ++free_rpc_ssam_blk_ctrlr(struct rpc_ssam_blk_ctrlr *req) ++{ ++ if (req->dev_name != NULL) { ++ free(req->dev_name); ++ req->dev_name = NULL; ++ } ++ ++ if (req->index != NULL) { ++ free(req->index); ++ req->index = NULL; ++ } ++ ++ if (req->serial != NULL) { ++ free(req->serial); ++ req->serial = NULL; ++ } ++} ++ ++static int ++ssam_rpc_para_check(uint16_t gfunc_id) ++{ ++ int rc; ++ ++ rc = ssam_check_gfunc_id(gfunc_id); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_rpc_para_check_type(uint16_t gfunc_id, enum ssam_device_type target_type) ++{ ++ int rc; ++ enum ssam_device_type type; ++ ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ type = ssam_get_virtio_type(gfunc_id); ++ if (type == target_type) { ++ return 0; ++ } ++ SPDK_ERRLOG("Invalid virtio type, need type %d, actually %d\n", target_type, type); ++ ++ return -EINVAL; ++} ++ ++static void ++rpc_ssam_send_response_cb(void *arg, int rsp) ++{ ++ struct spdk_jsonrpc_request *request = arg; ++ ++ if (rsp != 0) { ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rsp)); ++ } else { ++ spdk_jsonrpc_send_bool_response(request, true); ++ } ++ return; ++} ++ ++struct ssam_log_command_info { ++ char *user_name; ++ char *event; ++ char *src_addr; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_construct_log_command_info[] = { ++ {"user_name", offsetof(struct ssam_log_command_info, user_name), spdk_json_decode_string}, ++ {"event", offsetof(struct ssam_log_command_info, event), spdk_json_decode_string}, ++ {"src_addr", offsetof(struct ssam_log_command_info, src_addr), spdk_json_decode_string}, ++}; ++ ++static void ++free_rpc_ssam_log_command_info(struct ssam_log_command_info *req) ++{ ++ if (req->user_name != NULL) { ++ free(req->user_name); ++ req->user_name = NULL; ++ } ++ if (req->event != NULL) { ++ free(req->event); ++ req->event = NULL; ++ } ++ if (req->src_addr != NULL) { ++ free(req->src_addr); ++ req->src_addr = NULL; ++ } ++} ++ ++static void ++rpc_ssam_log_command_info(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct ssam_log_command_info req = {0}; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("log info params error, skip\n"); ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_construct_log_command_info, ++ 
SPDK_COUNTOF(g_rpc_construct_log_command_info), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("decode cmd info failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ SPDK_NOTICELOG("log event: from %s user %s event %s\n", req.src_addr, req.user_name, req.event); ++ ++invalid: ++ free_rpc_ssam_log_command_info(&req); ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++} ++SPDK_RPC_REGISTER("log_command_info", rpc_ssam_log_command_info, ++ SPDK_RPC_RUNTIME) ++ ++static int ++rpc_ssam_session_reg_response_cb(struct spdk_ssam_session *smsession, ++ struct spdk_jsonrpc_request *request) ++{ ++ if (smsession->rsp_fn != NULL) { ++ return -1; ++ } ++ smsession->rsp_fn = rpc_ssam_send_response_cb; ++ smsession->rsp_ctx = request; ++ return 0; ++} ++ ++static void ++rpc_init_session_reg_info(struct spdk_ssam_session_reg_info *info, ++ uint16_t queues, uint16_t gfunc_id, struct spdk_jsonrpc_request *request) ++{ ++ info->queues = queues; ++ info->gfunc_id = gfunc_id; ++ info->rsp_ctx = (void *)request; ++ info->rsp_fn = rpc_ssam_send_response_cb; ++} ++ ++static void ++free_rpc_ssam_session_reg_info(struct spdk_ssam_session_reg_info *info) ++{ ++ if (info->name != NULL) { ++ free(info->name); ++ info->name = NULL; ++ } ++ if (info->dbdf != NULL) { ++ free(info->dbdf); ++ info->dbdf = NULL; ++ } ++} ++ ++static uint16_t ++rpc_ssam_get_gfunc_id_by_index(char *index) ++{ ++ uint16_t gfunc_id, i; ++ int rc; ++ if (strlen(index) <= 0x5) { ++ for (i = 0; i < strlen(index); i++) { ++ if (!isdigit(index[i])) { ++ return SPDK_INVALID_GFUNC_ID; ++ } ++ } ++ gfunc_id = spdk_strtol(index, 10) > SPDK_INVALID_GFUNC_ID ? SPDK_INVALID_GFUNC_ID : spdk_strtol( ++ index, 10); ++ } else { ++ rc = ssam_rpc_get_gfunc_id_by_dbdf(index, &gfunc_id); ++ if (rc != 0) { ++ return SPDK_INVALID_GFUNC_ID; ++ } ++ } ++ return gfunc_id; ++} ++ ++static void ++ssam_set_virtio_blk_config(struct ssam_virtio_config *cfg, uint16_t queues) ++{ ++ struct virtio_blk_config *dev_cfg = (struct virtio_blk_config *)cfg->device_config; ++ ++ cfg->device_feature = SPDK_SSAM_VIRTIO_BLK_DEFAULT_FEATURE; ++ cfg->queue_num = queues; ++ cfg->config_len = sizeof(struct virtio_blk_config); ++ ++ memset(dev_cfg, 0, cfg->config_len); ++ dev_cfg->blk_size = 0x200; ++ dev_cfg->min_io_size = 0; ++ dev_cfg->capacity = 0; ++ dev_cfg->num_queues = cfg->queue_num; ++ dev_cfg->seg_max = 0x7d; ++ dev_cfg->size_max = 0x200000; ++ cfg->queue_size = VIRITO_DEFAULT_QUEUE_SIZE; ++ ++ return; ++} ++ ++static int ++ssam_get_vqueue(struct rpc_ssam_blk_ctrlr *req, uint16_t gfunc_id, uint16_t *queues) ++{ ++ if (gfunc_id <= SSAM_PF_MAX_NUM && (ssam_get_en_hpd() == false)) { ++ if (req->vqueue != SPDK_INVALID_VQUEUE_NUM) { ++ SPDK_ERRLOG("The PF does not allow dynamic modification of the vqueue(%d).\n", req->vqueue); ++ return -1; ++ } ++ *queues = ssam_get_queues(); ++ return 0; ++ } ++ ++ if (req->vqueue == SPDK_INVALID_VQUEUE_NUM) { ++ if (gfunc_id <= SSAM_PF_MAX_NUM) { ++ *queues = ssam_get_queues(); ++ } else { ++ *queues = SPDK_SSAM_VF_DEFAULTE_VQUEUES; ++ } ++ return 0; ++ } ++ ++ if (req->vqueue > SPDK_SSAM_BLK_MAX_VQ_SIZE || req->vqueue == 0) { ++ SPDK_ERRLOG("The queue number is out of range. 
Currently (%u) .\n", req->vqueue); ++ return -1; ++ } ++ ++ *queues = req->vqueue; ++ return 0; ++} ++ ++static int ++ssam_blk_controller_set_vqueue(uint16_t gfunc_id, uint16_t queues) ++{ ++ int rc; ++ struct ssam_function_config cfg = { 0 }; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ ssam_lock(); ++ smsession = ssam_session_find(gfunc_id); ++ ssam_unlock(); ++ if (smsession != NULL) { ++ SPDK_ERRLOG("Session with function id %d already exists.\n", gfunc_id); ++ return -EEXIST; ++ } ++ ++ if (gfunc_id <= SSAM_PF_MAX_NUM && (ssam_get_en_hpd() == false)) { ++ if (ssam_get_hash_mode() == SSAM_VQ_HASH_MODE) { ++ rc = ssam_virtio_vq_bind_core(gfunc_id, queues); ++ if (rc != 0) { ++ SPDK_ERRLOG("virtio blk vq(%u) bind core failed.\n", queues); ++ return rc; ++ } ++ } ++ return 0; ++ } ++ ++ cfg.gfunc_id = gfunc_id; ++ cfg.type = SSAM_DEVICE_VIRTIO_BLK; ++ ssam_set_virtio_blk_config(&cfg.virtio_config, queues); ++ ++ if (spdk_ssam_is_starting() == false) { ++ rc = ssam_write_function_config(&cfg); ++ if (rc != 0) { ++ SPDK_ERRLOG("ssam write function(%d) config failed:%s\n", cfg.gfunc_id, spdk_strerror(-rc)); ++ return rc; ++ } ++ } else { ++ rc = ssam_virtio_blk_alloc_resource(gfunc_id, queues); ++ if (rc != 0) { ++ SPDK_ERRLOG("virtio blk alloc vq(%u) failed.\n", queues); ++ return rc; ++ } ++ } ++ ++ if (ssam_get_hash_mode() == SSAM_VQ_HASH_MODE) { ++ rc = ssam_virtio_vq_bind_core(gfunc_id, queues); ++ if (rc != 0) { ++ SPDK_ERRLOG("virtio blk vq(%u) bind core failed.\n", queues); ++ return rc; ++ } ++ } ++ ++ return 0; ++} ++ ++static void ++rpc_ssam_create_blk_controller(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct spdk_ssam_session_reg_info info = {0}; ++ struct rpc_ssam_blk_ctrlr req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ uint16_t queues = 0; ++ struct spdk_bdev *bdev; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_create_blk_controller params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ req.vqueue = SPDK_INVALID_VQUEUE_NUM; ++ rc = spdk_json_decode_object(params, g_rpc_construct_ssam_blk_ctrlr, ++ SPDK_COUNTOF(g_rpc_construct_ssam_blk_ctrlr), &req); ++ if (rc != 0) { ++ SPDK_DEBUGLOG(ssam_rpc, "spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = rpc_ssam_get_gfunc_id_by_index(req.index); ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_BLK); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ if (req.dev_name == NULL) { ++ rc = -ENODEV; ++ goto invalid; ++ } ++ ++ if (ssam_get_en_hpd() == true) { ++ bdev = spdk_bdev_get_by_name(req.dev_name); ++ if (bdev == NULL) { ++ SPDK_NOTICELOG("Currently unable to find bdev with name: %s\n", req.dev_name); ++ rc = -ENODEV; ++ goto invalid; ++ } ++ } ++ ++ rc = ssam_get_vqueue(&req, gfunc_id, &queues); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ rc = ssam_blk_controller_set_vqueue(gfunc_id, queues); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ rpc_init_session_reg_info(&info, queues, gfunc_id, request); ++ ++ rc = ssam_blk_construct(&info, req.dev_name, req.readonly, req.serial); ++ if (rc < 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_blk_ctrlr(&req); ++ free_rpc_ssam_session_reg_info(&info); ++ return; ++ ++invalid: ++ free_rpc_ssam_blk_ctrlr(&req); ++ free_rpc_ssam_session_reg_info(&info); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("create_blk_controller", rpc_ssam_create_blk_controller, ++ 
SPDK_RPC_RUNTIME) ++ ++struct rpc_delete_ssam_ctrlr { ++ char *index; ++ bool force; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_delete_ssam_ctrlr_decoder[] = { ++ {"index", offsetof(struct rpc_delete_ssam_ctrlr, index), spdk_json_decode_string}, ++ {"force", offsetof(struct rpc_delete_ssam_ctrlr, force), spdk_json_decode_bool}, ++}; ++ ++static void ++free_rpc_delete_ssam_ctrlr(struct rpc_delete_ssam_ctrlr *req) ++{ ++ if (req->index != NULL) { ++ free(req->index); ++ req->index = NULL; ++ } ++} ++ ++static void ++rpc_ssam_delete_controller(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_delete_ssam_ctrlr req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ struct spdk_ssam_session *smsession; ++ struct spdk_ssam_dev *smdev = NULL; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_delete_controller params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_delete_ssam_ctrlr_decoder, ++ SPDK_COUNTOF(g_rpc_delete_ssam_ctrlr_decoder), &req); ++ if (rc != 0) { ++ SPDK_DEBUGLOG(ssam_rpc, "spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = rpc_ssam_get_gfunc_id_by_index(req.index); ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_lock(); ++ session_delete_times = 0; ++ g_delete_flag = 0; ++ g_hpd_delete_session_times[gfunc_id] = 0; ++ ++ smdev = ssam_dev_next(NULL); ++ if ((smdev->type == VIRTIO_TYPE_BLK) && (req.force != true)) { ++ while (smdev != NULL) { ++ smsession = smdev->smsessions[gfunc_id]; ++ if ((smsession != NULL) && (smsession->task_cnt != 0)) { ++ SPDK_ERRLOG("The controller is busy.\n"); ++ rc = -EBUSY; ++ ssam_unlock(); ++ goto invalid; ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ } ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ smsession = smdev->smsessions[gfunc_id]; ++ if (smsession == NULL && session_delete_times == 0) { ++ SPDK_ERRLOG("Couldn't find session with function id %d.\n", gfunc_id); ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ if (smsession == NULL && session_delete_times == 1) { ++ break; ++ } ++ ++ if (smsession == NULL) { ++ continue; ++ } ++ ++ rc = rpc_ssam_session_reg_response_cb(smsession, request); ++ if (rc != 0) { ++ SPDK_ERRLOG("The controller is being operated.\n"); ++ rc = -EALREADY; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = ssam_session_unregister(smsession, req.force); ++ if (rc != 0) { ++ /* ++ * Unregitster response cb to avoid use request in the cb function, ++ * because if error happend, request will be responsed immediately ++ */ ++ ssam_session_unreg_response_cb(smsession); ++ ssam_unlock(); ++ goto invalid; ++ } ++ session_delete_times++; ++ smdev = ssam_dev_next(smdev); ++ } ++ ssam_unlock(); ++ ++ free_rpc_delete_ssam_ctrlr(&req); ++ return; ++ ++invalid: ++ free_rpc_delete_ssam_ctrlr(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("delete_controller", rpc_ssam_delete_controller, SPDK_RPC_RUNTIME) ++ ++struct rpc_delete_ssam_scsi_ctrlr { ++ char *name; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_delete_ssam_scsi_ctrlr_decoder[] = { ++ {"name", offsetof(struct rpc_delete_ssam_scsi_ctrlr, name), spdk_json_decode_string}, ++}; ++ ++static void ++free_rpc_delete_ssam_scsi_ctrlrs(struct rpc_delete_ssam_scsi_ctrlr *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ 
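The create/delete controller handlers in this file all follow the same SPDK JSON-RPC shape: decode the params through an spdk_json_object_decoder table, validate, then answer with spdk_jsonrpc_send_bool_response() or spdk_jsonrpc_send_error_response(). A minimal, self-contained sketch of that pattern follows; the method name "example_method" and its fields are hypothetical and not part of this patch.

/* Sketch of the decode/validate/respond RPC pattern used throughout ssam_rpc.c. */
#include "spdk/stdinc.h"
#include "spdk/rpc.h"
#include "spdk/json.h"
#include "spdk/util.h"
#include "spdk/string.h"

struct rpc_example {
	char *name;
	uint32_t id;
};

static const struct spdk_json_object_decoder g_rpc_example_decoder[] = {
	{"name", offsetof(struct rpc_example, name), spdk_json_decode_string},
	{"id", offsetof(struct rpc_example, id), spdk_json_decode_uint32, true},
};

static void
rpc_example_method(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params)
{
	struct rpc_example req = {0};

	if (params == NULL ||
	    spdk_json_decode_object(params, g_rpc_example_decoder,
				    SPDK_COUNTOF(g_rpc_example_decoder), &req) != 0) {
		/* Decode failure: report EINVAL back to the RPC client. */
		spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS,
						 spdk_strerror(EINVAL));
		goto out;
	}

	/* ... validate req and carry out the operation here ... */
	spdk_jsonrpc_send_bool_response(request, true);

out:
	free(req.name);
}
SPDK_RPC_REGISTER("example_method", rpc_example_method, SPDK_RPC_RUNTIME)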
req->name = NULL; ++ } ++} ++ ++static void ++rpc_ssam_delete_scsi_controller(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_delete_ssam_scsi_ctrlr req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ struct spdk_ssam_session *smsession; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_delete_controller params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_delete_ssam_scsi_ctrlr_decoder, ++ SPDK_COUNTOF(g_rpc_delete_ssam_scsi_ctrlr_decoder), &req); ++ if (rc != 0) { ++ SPDK_DEBUGLOG(ssam_rpc, "spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_lock(); ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ SPDK_ERRLOG("Couldn't find session with function id %d.\n", gfunc_id); ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = rpc_ssam_session_reg_response_cb(smsession, request); ++ if (rc != 0) { ++ SPDK_ERRLOG("The controller is being operated.\n"); ++ rc = -EALREADY; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ if (smsession->task_cnt > 0) { ++ SPDK_ERRLOG("%s is processing I/O(%d) and cannot be deleted.\n", ++ smsession->name, smsession->task_cnt); ++ rc = -EBUSY; ++ ssam_session_unreg_response_cb(smsession); ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = ssam_session_unregister(smsession, false); ++ if (rc != 0) { ++ /* ++ * Unregitster response cb to avoid use request in the cb function, ++ * because if error happend, request will be responsed immediately ++ */ ++ ssam_session_unreg_response_cb(smsession); ++ ssam_unlock(); ++ goto invalid; ++ } ++ ssam_unlock(); ++ ++ free_rpc_delete_ssam_scsi_ctrlrs(&req); ++ return; ++ ++invalid: ++ free_rpc_delete_ssam_scsi_ctrlrs(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("delete_scsi_controller", rpc_ssam_delete_scsi_controller, SPDK_RPC_RUNTIME) ++ ++struct rpc_get_ssam_ctrlrs { ++ uint32_t function_id; ++ char *dbdf; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_get_ssam_ctrlrs_decoder[] = { ++ {"function_id", offsetof(struct rpc_get_ssam_ctrlrs, function_id), spdk_json_decode_uint32, true}, ++ {"dbdf", offsetof(struct rpc_get_ssam_ctrlrs, dbdf), spdk_json_decode_string, true}, ++}; ++ ++static void ++free_rpc_get_ssam_ctrlrs(struct rpc_get_ssam_ctrlrs *req) ++{ ++ if (req->dbdf != NULL) { ++ free(req->dbdf); ++ req->dbdf = NULL; ++ } ++} ++ ++static void ++_rpc_get_ssam_controller(struct spdk_json_write_ctx *w, ++ struct spdk_ssam_dev *smdev, uint16_t gfunc_id) ++{ ++ spdk_json_write_object_begin(w); ++ ++ spdk_json_write_named_string(w, "ctrlr", ssam_dev_get_name(smdev)); ++ spdk_json_write_named_string_fmt(w, "cpumask", "0x%s", ++ spdk_cpuset_fmt(spdk_thread_get_cpumask(smdev->thread))); ++ spdk_json_write_named_uint32(w, "session_num", (uint32_t)smdev->active_session_num); ++ ++ spdk_json_write_named_object_begin(w, "backend_specific"); ++ ssam_dump_info_json(smdev, gfunc_id, w); ++ spdk_json_write_object_end(w); ++ ++ spdk_json_write_object_end(w); ++} ++ ++static int ++rpc_ssam_show_controllers(struct spdk_jsonrpc_request *request, uint16_t gfunc_id) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_json_write_ctx *w = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ ssam_lock(); 
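The get_controllers handler below, like the iostat handlers later in this file, builds its reply with SPDK's JSON write API by nesting begin/end pairs around each controller object. A standalone sketch of that nesting, with purely illustrative field values, is:

#include "spdk/rpc.h"
#include "spdk/json.h"

/* Illustrative only: emit one controller object in the same shape that
 * rpc_ssam_show_controllers produces, with made-up values. */
static void
example_write_one_controller(struct spdk_jsonrpc_request *request)
{
	struct spdk_json_write_ctx *w = spdk_jsonrpc_begin_result(request);

	spdk_json_write_array_begin(w);

	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "ctrlr", "ssam.0");
	spdk_json_write_named_uint32(w, "session_num", 1);
	spdk_json_write_named_object_begin(w, "backend_specific");
	/* backend-specific details (dump_info_json output) would go here */
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);

	spdk_json_write_array_end(w);
	spdk_jsonrpc_end_result(request, w);
}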
++ if (gfunc_id != SPDK_INVALID_GFUNC_ID) { ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ ssam_unlock(); ++ return -ENODEV; ++ } ++ } ++ w = spdk_jsonrpc_begin_result(request); ++ spdk_json_write_array_begin(w); ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ if (gfunc_id != SPDK_INVALID_GFUNC_ID && smdev->smsessions[gfunc_id] == NULL) { ++ smdev = ssam_dev_next(smdev); ++ continue; ++ } ++ _rpc_get_ssam_controller(w, smdev, gfunc_id); ++ smdev = ssam_dev_next(smdev); ++ } ++ ssam_unlock(); ++ spdk_json_write_array_end(w); ++ spdk_jsonrpc_end_result(request, w); ++ ++ return 0; ++} ++ ++static int ++rpc_ssam_show_scsi_controllers(struct spdk_jsonrpc_request *request, uint16_t gfunc_id) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_json_write_ctx *w = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ ssam_lock(); ++ if (gfunc_id != SPDK_INVALID_GFUNC_ID) { ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ ssam_unlock(); ++ return -ENODEV; ++ } else if (smsession->backend->type != VIRTIO_TYPE_SCSI) { ++ ssam_unlock(); ++ return -EINVAL; ++ } ++ ++ smdev = smsession->smdev; ++ ++ w = spdk_jsonrpc_begin_result(request); ++ spdk_json_write_array_begin(w); ++ ++ smsession = smdev->smsessions[gfunc_id]; ++ smsession->backend->dump_info_json(smsession, w); ++ ssam_unlock(); ++ ++ spdk_json_write_array_end(w); ++ spdk_jsonrpc_end_result(request, w); ++ return 0; ++ } ++ ++ w = spdk_jsonrpc_begin_result(request); ++ spdk_json_write_array_begin(w); ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ while (smsession != NULL) { ++ if (smsession->backend->type == VIRTIO_TYPE_SCSI) { ++ smsession->backend->dump_info_json(smsession, w); ++ } ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ ssam_unlock(); ++ spdk_json_write_array_end(w); ++ spdk_jsonrpc_end_result(request, w); ++ ++ return 0; ++} ++ ++static void ++rpc_ssam_get_controllers(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_get_ssam_ctrlrs req = { ++ .function_id = SPDK_INVALID_GFUNC_ID, ++ .dbdf = NULL, ++ }; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ ++ if (params != NULL) { ++ rc = spdk_json_decode_object(params, g_rpc_get_ssam_ctrlrs_decoder, ++ SPDK_COUNTOF(g_rpc_get_ssam_ctrlrs_decoder), &req); ++ if (rc != 0) { ++ SPDK_DEBUGLOG(ssam_rpc, "spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ } ++ ++ if (req.function_id != SPDK_INVALID_GFUNC_ID && req.dbdf != NULL) { ++ SPDK_ERRLOG("get_controllers can have at most one parameter\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ if (req.function_id != SPDK_INVALID_GFUNC_ID) { ++ gfunc_id = req.function_id; ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ } ++ ++ if (req.dbdf != NULL) { ++ rc = ssam_rpc_get_gfunc_id_by_dbdf(req.dbdf, &gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ } ++ ++ rc = rpc_ssam_show_controllers(request, gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_get_ssam_ctrlrs(&req); ++ return; ++ ++invalid: ++ free_rpc_get_ssam_ctrlrs(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("get_controllers", rpc_ssam_get_controllers, 
SPDK_RPC_RUNTIME) ++ ++struct rpc_get_ssam_scsi_ctrlrs { ++ char *name; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_get_ssam_scsi_ctrlrs_decoder[] = { ++ {"name", offsetof(struct rpc_get_ssam_scsi_ctrlrs, name), spdk_json_decode_string, true}, ++}; ++ ++static void ++free_rpc_ssam_ctrlrs(struct rpc_get_ssam_scsi_ctrlrs *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static void ++rpc_ssam_get_scsi_controllers(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_get_ssam_scsi_ctrlrs req = { ++ .name = NULL, ++ }; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ ++ if (params != NULL) { ++ rc = spdk_json_decode_object(params, g_rpc_get_ssam_scsi_ctrlrs_decoder, ++ SPDK_COUNTOF(g_rpc_get_ssam_scsi_ctrlrs_decoder), &req); ++ if (rc != 0) { ++ SPDK_DEBUGLOG(ssam_rpc, "spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ } ++ ++ if (req.name != NULL) { ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ } ++ ++ rc = rpc_ssam_show_scsi_controllers(request, gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_ctrlrs(&req); ++ return; ++ ++invalid: ++ free_rpc_ssam_ctrlrs(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("get_scsi_controllers", rpc_ssam_get_scsi_controllers, SPDK_RPC_RUNTIME) ++ ++struct rpc_ssam_controller_get_iostat { ++ uint32_t function_id; ++ char *dbdf; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_ssam_controller_get_iostat_decoder[] = { ++ {"function_id", offsetof(struct rpc_ssam_controller_get_iostat, function_id), spdk_json_decode_uint32, true}, ++ {"dbdf", offsetof(struct rpc_ssam_controller_get_iostat, dbdf), spdk_json_decode_string, true}, ++}; ++ ++static void ++free_rpc_ssam_controller_get_iostat(struct rpc_ssam_controller_get_iostat *req) ++{ ++ if (req->dbdf != NULL) { ++ free(req->dbdf); ++ req->dbdf = NULL; ++ } ++} ++ ++struct rpc_ssam_show_iostat_args { ++ uint16_t gfunc_id; ++ uint16_t tid; ++ /* vq_idx for blk; tgt_id for scsi */ ++ uint16_t id; ++ enum spdk_ssam_iostat_mode mode; ++}; ++ ++static int ++rpc_ssam_show_iostat(struct spdk_jsonrpc_request *request, struct rpc_ssam_show_iostat_args *args) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_json_write_ctx *w = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ struct spdk_ssam_show_iostat_args iostat_args = { ++ .id = args->id, ++ .mode = args->mode, ++ }; ++ ++ ssam_lock(); ++ if (args->gfunc_id != SPDK_INVALID_GFUNC_ID) { ++ smsession = ssam_session_find(args->gfunc_id); ++ if (smsession == NULL) { ++ ssam_unlock(); ++ return -ENODEV; ++ } ++ } ++ ++ w = spdk_jsonrpc_begin_result(request); ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_uint64(w, "tick_rate", spdk_get_ticks_hz()); ++ spdk_json_write_named_array_begin(w, "dbdfs"); ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ if (args->gfunc_id == SPDK_INVALID_GFUNC_ID) { ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "name", smdev->name); ++ spdk_json_write_named_uint64(w, "flight_io", smdev->io_num); ++ spdk_json_write_named_uint64(w, "discard_io_num", smdev->discard_io_num); ++ spdk_json_write_named_uint64(w, "wait_io", smdev->io_wait_cnt); ++ spdk_json_write_named_uint64(w, "wait_io_r", smdev->io_wait_r_cnt); ++ 
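The iostat handler below reports "tick_rate" alongside its tsc-based counters precisely so that callers can convert them to wall-clock time. A small sketch of that conversion using SPDK's tick helpers (an illustrative helper, not part of the patch):

#include "spdk/env.h"

/* Convert a tsc delta to microseconds using the same tick rate the handler
 * reports as "tick_rate". Assumes the delta is small enough that the
 * multiplication does not overflow. */
static uint64_t
example_tsc_to_usec(uint64_t tsc_delta)
{
	uint64_t ticks_hz = spdk_get_ticks_hz();

	if (ticks_hz == 0) {
		return 0;
	}
	return tsc_delta * SPDK_SEC_TO_USEC / ticks_hz;
}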
spdk_json_write_object_end(w); ++ } ++ if (smdev->active_session_num == 0 || (args->tid != SPDK_INVALID_CORE_ID && ++ smdev->tid != args->tid)) { ++ smdev = ssam_dev_next(smdev); ++ continue; ++ } ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ while (smsession != NULL) { ++ if (args->gfunc_id != SPDK_INVALID_GFUNC_ID && args->gfunc_id != smsession->gfunc_id) { ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ continue; ++ } ++ if (smsession->backend->show_iostat_json != NULL) { ++ smsession->backend->show_iostat_json(smsession, &iostat_args, w); ++ } ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ ++ ssam_unlock(); ++ spdk_json_write_array_end(w); ++ spdk_json_write_object_end(w); ++ spdk_jsonrpc_end_result(request, w); ++ return 0; ++} ++ ++static void ++rpc_ssam_controller_get_iostat(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_ssam_controller_get_iostat req = { ++ .function_id = SPDK_INVALID_GFUNC_ID, ++ .dbdf = NULL, ++ }; ++ struct rpc_ssam_show_iostat_args iostat_args = { ++ .gfunc_id = SPDK_INVALID_GFUNC_ID, ++ .tid = SPDK_INVALID_CORE_ID, ++ .id = SPDK_INVALID_ID, ++ .mode = SSAM_IOSTAT_NORMAL, ++ }; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ ++ if (params != NULL) { ++ rc = spdk_json_decode_object(params, g_rpc_ssam_controller_get_iostat_decoder, ++ SPDK_COUNTOF(g_rpc_ssam_controller_get_iostat_decoder), &req); ++ if (rc != 0) { ++ SPDK_DEBUGLOG(ssam_rpc, "spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ } ++ ++ if (req.function_id != SPDK_INVALID_GFUNC_ID && req.dbdf != NULL) { ++ SPDK_ERRLOG("controller_get_iostat can have at most one parameter\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ if (req.function_id != SPDK_INVALID_GFUNC_ID) { ++ gfunc_id = req.function_id; ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ } ++ ++ if (req.dbdf != NULL) { ++ rc = ssam_rpc_get_gfunc_id_by_dbdf(req.dbdf, &gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ } ++ ++ iostat_args.gfunc_id = gfunc_id; ++ rc = rpc_ssam_show_iostat(request, &iostat_args); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_controller_get_iostat(&req); ++ return; ++ ++invalid: ++ free_rpc_ssam_controller_get_iostat(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("controller_get_iostat", rpc_ssam_controller_get_iostat, SPDK_RPC_RUNTIME) ++ ++struct rpc_ssam_blk_device_iostat { ++ char *index; ++ uint16_t tid; ++ uint16_t vq_idx; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_ssam_blk_device_iostat[] = { ++ {"index", offsetof(struct rpc_ssam_blk_device_iostat, index), spdk_json_decode_string}, ++ {"tid", offsetof(struct rpc_ssam_blk_device_iostat, tid), spdk_json_decode_uint16, true}, ++ {"vq_idx", offsetof(struct rpc_ssam_blk_device_iostat, vq_idx), spdk_json_decode_uint16, true}, ++}; ++ ++static void ++free_rpc_ssam_blk_device_iostat(struct rpc_ssam_blk_device_iostat *req) ++{ ++ if (req->index != NULL) { ++ free(req->index); ++ req->index = NULL; ++ } ++} ++ ++static int ++ssam_rpc_set_blk_device_iostat_args(struct rpc_ssam_show_iostat_args *iostat_args, ++ struct rpc_ssam_blk_device_iostat *req, uint16_t gfunc_id) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ ++ iostat_args->gfunc_id = 
gfunc_id; ++ iostat_args->tid = req->tid; ++ iostat_args->id = req->vq_idx; ++ if (iostat_args->tid == SPDK_INVALID_CORE_ID && iostat_args->id == SPDK_INVALID_VQUEUE_NUM) { ++ iostat_args->mode = SSAM_IOSTAT_SUM; ++ return 0; ++ } else if (iostat_args->tid == SPDK_INVALID_CORE_ID) { ++ iostat_args->mode = SSAM_IOSTAT_SPARSE; ++ } else if (iostat_args->id == SPDK_INVALID_VQUEUE_NUM) { ++ iostat_args->mode = SSAM_IOSTAT_DUMP_VQ; ++ } else { ++ iostat_args->mode = SSAM_IOSTAT_NORMAL; ++ } ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ if ((iostat_args->tid != SPDK_INVALID_CORE_ID && smdev->tid != iostat_args->tid) || ++ smdev->smsessions[gfunc_id] == NULL) { ++ smdev = ssam_dev_next(smdev); ++ continue; ++ } else if (iostat_args->id != SPDK_INVALID_VQUEUE_NUM && ++ iostat_args->id >= smdev->smsessions[gfunc_id]->max_queues) { ++ SPDK_ERRLOG("vq_index(%u) should less then max_queues(%u)\n", iostat_args->id, ++ smdev->smsessions[gfunc_id]->max_queues); ++ return -ENODEV; ++ } ++ return 0; ++ } ++ SPDK_ERRLOG("cannot find blk device(%u)\n", gfunc_id); ++ return -ENODEV; ++} ++ ++static void ++rpc_ssam_blk_device_iostat(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_ssam_blk_device_iostat req = { ++ .tid = SPDK_INVALID_CORE_ID, ++ .vq_idx = SPDK_INVALID_VQUEUE_NUM, ++ }; ++ struct rpc_ssam_show_iostat_args iostat_args = { 0 }; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_scsi_device_iostat params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_ssam_blk_device_iostat, ++ SPDK_COUNTOF(g_rpc_ssam_blk_device_iostat), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = rpc_ssam_get_gfunc_id_by_index(req.index); ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_BLK); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ rc = ssam_rpc_set_blk_device_iostat_args(&iostat_args, &req, gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ rc = rpc_ssam_show_iostat(request, &iostat_args); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_blk_device_iostat(&req); ++ return; ++ ++invalid: ++ free_rpc_ssam_blk_device_iostat(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("blk_device_iostat", rpc_ssam_blk_device_iostat, SPDK_RPC_RUNTIME) ++ ++static void ++rpc_ssam_clear_iostat(void) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ ssam_lock(); ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ while (smsession != NULL) { ++ if (smsession->backend->clear_iostat_json != NULL) { ++ smsession->backend->clear_iostat_json(smsession); ++ } ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ ssam_unlock(); ++} ++ ++static void ++rpc_ssam_controller_clear_iostat(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ rpc_ssam_clear_iostat(); ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++} ++SPDK_RPC_REGISTER("controller_clear_iostat", rpc_ssam_controller_clear_iostat, SPDK_RPC_RUNTIME) ++ ++struct rpc_bdev_resize { ++ uint32_t function_id; ++ uint64_t new_size_in_mb; ++}; ++ ++static const struct spdk_json_object_decoder 
g_rpc_bdev_resize[] = { ++ {"function_id", offsetof(struct rpc_bdev_resize, function_id), spdk_json_decode_uint32}, ++ {"new_size_in_mb", offsetof(struct rpc_bdev_resize, new_size_in_mb), spdk_json_decode_uint64}, ++}; ++ ++static int ++ssam_bdev_resize(struct spdk_bdev *bdev, uint64_t new_size_in_mb) ++{ ++ char *bdev_name = bdev->name; ++ int rc; ++ uint64_t current_size_in_mb; ++ uint64_t new_size_in_byte; ++ ++ if (bdev->blocklen == 0) { ++ SPDK_ERRLOG("The blocklen of bdev %s is zero\n", bdev_name); ++ return -EINVAL; ++ } ++ ++ if (UINT64_MAX / bdev->blockcnt < bdev->blocklen) { ++ SPDK_ERRLOG("The old size of bdev is too large, blockcnt: %lu, blocklen: %u\n", ++ bdev->blockcnt, bdev->blocklen); ++ return -EINVAL; ++ } ++ ++ if (new_size_in_mb == 0) { ++ goto end; ++ } ++ ++ current_size_in_mb = bdev->blocklen * bdev->blockcnt / SSAM_MB; ++ if (new_size_in_mb < current_size_in_mb) { ++ SPDK_ERRLOG("The new bdev size must not be smaller than current bdev size\n"); ++ return -EINVAL; ++ } ++ ++ if (UINT64_MAX / new_size_in_mb < SSAM_MB) { ++ SPDK_ERRLOG("The new bdev size is too large\n"); ++ return -EINVAL; ++ } ++ ++end: ++ new_size_in_byte = new_size_in_mb * SSAM_MB; ++ ++ rc = spdk_bdev_notify_blockcnt_change(bdev, new_size_in_byte / bdev->blocklen); ++ if (rc != 0) { ++ SPDK_ERRLOG("failed to notify block cnt change\n"); ++ return -EINVAL; ++ } ++ SPDK_NOTICELOG("bdev %s resize %lu(mb) done.\n", bdev->name, new_size_in_mb); ++ ++ return 0; ++} ++ ++static void ++rpc_ssam_bdev_resize(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_bdev_resize req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ struct spdk_ssam_session *smsession = NULL; ++ struct spdk_bdev *bdev = NULL; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_bdev_resize params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_bdev_resize, ++ SPDK_COUNTOF(g_rpc_bdev_resize), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = req.function_id; ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_BLK); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_lock(); ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ SPDK_ERRLOG("Before resize target, there need to create controller.\n"); ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ if (smsession->backend->get_bdev != NULL) { ++ bdev = smsession->backend->get_bdev(smsession, 0); ++ } ++ if (bdev == NULL) { ++ SPDK_ERRLOG("The controller hasn't correlated to a bdev.\n"); ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ ssam_unlock(); ++ ++ rc = ssam_bdev_resize(bdev, req.new_size_in_mb); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++ ++invalid: ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("bdev_resize", rpc_ssam_bdev_resize, SPDK_RPC_RUNTIME) ++ ++struct rpc_scsi_bdev_resize { ++ char *name; ++ uint32_t tgt_id; ++ uint64_t new_size_in_mb; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_scsi_bdev_resize[] = { ++ {"name", offsetof(struct rpc_scsi_bdev_resize, name), spdk_json_decode_string}, ++ {"tgt_id", offsetof(struct rpc_scsi_bdev_resize, tgt_id), spdk_json_decode_uint32}, ++ {"new_size_in_mb", offsetof(struct rpc_scsi_bdev_resize, new_size_in_mb), 
spdk_json_decode_uint64}, ++}; ++ ++static void ++free_rpc_scsi_bdev_resize(struct rpc_scsi_bdev_resize *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static void ++rpc_ssam_scsi_bdev_resize(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_scsi_bdev_resize req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ struct spdk_ssam_session *smsession = NULL; ++ struct spdk_bdev *bdev = NULL; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_bdev_resize params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_scsi_bdev_resize, ++ SPDK_COUNTOF(g_rpc_scsi_bdev_resize), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_SCSI); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_lock(); ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ SPDK_ERRLOG("Before resize target, there need to create controller.\n"); ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ if (smsession->backend->get_bdev != NULL) { ++ bdev = smsession->backend->get_bdev(smsession, req.tgt_id); ++ } ++ if (bdev == NULL) { ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ ssam_unlock(); ++ ++ rc = ssam_bdev_resize(bdev, req.new_size_in_mb); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_scsi_bdev_resize(&req); ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++ ++invalid: ++ free_rpc_scsi_bdev_resize(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("scsi_bdev_resize", rpc_ssam_scsi_bdev_resize, SPDK_RPC_RUNTIME) ++ ++struct rpc_bdev_aio_resize { ++ char *name; ++ uint64_t new_size_in_mb; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_bdev_aio_resize[] = { ++ {"name", offsetof(struct rpc_bdev_aio_resize, name), spdk_json_decode_string}, ++ {"new_size_in_mb", offsetof(struct rpc_bdev_aio_resize, new_size_in_mb), spdk_json_decode_uint64}, ++}; ++ ++static void ++free_rpc_ssam_bdev_aio_resize(struct rpc_bdev_aio_resize *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static void ++rpc_ssam_bdev_aio_resize(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_bdev_aio_resize req = {0}; ++ struct spdk_bdev *bdev = NULL; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_bdev_resize params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_bdev_aio_resize, ++ SPDK_COUNTOF(g_rpc_bdev_aio_resize), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ if (req.name) { ++ bdev = spdk_bdev_get_by_name(req.name); ++ if (bdev == NULL) { ++ SPDK_ERRLOG("bdev '%s' does not exist\n", req.name); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ } ++ ++ rc = ssam_bdev_resize(bdev, req.new_size_in_mb); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_bdev_aio_resize(&req); ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++ ++invalid: ++ free_rpc_ssam_bdev_aio_resize(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("bdev_aio_resize", 
rpc_ssam_bdev_aio_resize, SPDK_RPC_RUNTIME) ++ ++static void ++rpc_os_ready(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params) ++{ ++ int rc = 0; ++ int fd; ++ char *enable = "1"; ++ ++ fd = open(SSAM_STORAGE_READY_FILE, O_RDWR); ++ if (fd < 0) { ++ SPDK_ERRLOG("Open storage ready file failed.\n"); ++ rc = EPERM; ++ goto invalid; ++ } ++ ++ rc = write(fd, enable, strlen(enable)); ++ if (rc < 0) { ++ SPDK_ERRLOG("Write storage ready file failed.\n"); ++ close(fd); ++ goto invalid; ++ } ++ ++ close(fd); ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++ ++invalid: ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("os_ready", rpc_os_ready, SPDK_RPC_RUNTIME) ++ ++static void ++rpc_set_os_status(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params) ++{ ++ int rc = 0; ++ int fd; ++ char *disable = "0"; ++ ++ fd = open(SSAM_STORAGE_READY_FILE, O_RDWR); ++ if (fd < 0) { ++ SPDK_ERRLOG("Open storage ready file failed.\n"); ++ rc = -EPERM; ++ goto invalid; ++ } ++ ++ rc = write(fd, disable, strlen(disable)); ++ if (rc < 0) { ++ SPDK_ERRLOG("Write storage ready file failed.\n"); ++ close(fd); ++ goto invalid; ++ } ++ ++ close(fd); ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++ ++invalid: ++ spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("os_not_ready", rpc_set_os_status, SPDK_RPC_RUNTIME) ++ ++struct rpc_create_scsi_controller { ++ char *dbdf; ++ char *name; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_create_scsi_controller[] = { ++ {"dbdf", offsetof(struct rpc_create_scsi_controller, dbdf), spdk_json_decode_string}, ++ {"name", offsetof(struct rpc_create_scsi_controller, name), spdk_json_decode_string}, ++}; ++ ++static void ++free_rpc_ssam_create_scsi_controller(struct rpc_create_scsi_controller *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++ if (req->dbdf != NULL) { ++ free(req->dbdf); ++ req->dbdf = NULL; ++ } ++} ++ ++static int ++ssam_rpc_get_gfunc_id_by_dbdf(char *dbdf, uint16_t *gfunc_id) ++{ ++ int rc; ++ uint32_t dbdf_num; ++ ++ rc = ssam_dbdf_str2num(dbdf, &dbdf_num); ++ if (rc != 0) { ++ SPDK_ERRLOG("convert dbdf(%s) to num failed, rc: %d.\n", dbdf, rc); ++ return -EINVAL; ++ } ++ ++ rc = ssam_get_funcid_by_dbdf(dbdf_num, gfunc_id); ++ if (rc != 0) { ++ SPDK_ERRLOG("find gfuncid by dbdf(%u) failed, rc: %d.\n", dbdf_num, rc); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_rpc_para_check_name(char *name) ++{ ++ uint16_t gfunc_id = ssam_get_gfunc_id_by_name(name); ++ if (gfunc_id == SPDK_INVALID_GFUNC_ID) { ++ return 0; ++ } ++ ++ return -EINVAL; ++} ++ ++static void ++rpc_ssam_create_scsi_controller(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct spdk_ssam_session_reg_info info = {0}; ++ struct rpc_create_scsi_controller req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ uint16_t queues; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_create_scsi_controller params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_create_scsi_controller, ++ SPDK_COUNTOF(g_rpc_create_scsi_controller), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = ssam_rpc_para_check_name(req.name); ++ if (rc != 0) { ++ SPDK_ERRLOG("controller name(%s) is 
existed\n", req.name); ++ goto invalid; ++ } ++ ++ rc = ssam_rpc_get_gfunc_id_by_dbdf(req.dbdf, &gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_SCSI); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ queues = ssam_get_queues(); ++ if (queues > SPDK_SSAM_MAX_VQUEUES) { ++ SPDK_ERRLOG("Queue number out of range, need less or equal than %u, actually %u.\n", ++ SPDK_SSAM_MAX_VQUEUES, queues); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rpc_init_session_reg_info(&info, queues, gfunc_id, request); ++ ++ info.name = strdup(req.name); ++ if (info.name == NULL) { ++ SPDK_ERRLOG("Failed to create name(%s) for ssam session reg info.\n", req.name); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ info.dbdf = strdup(req.dbdf); ++ if (info.dbdf == NULL) { ++ SPDK_ERRLOG("Failed to create dbdf(%s) for ssam session reg info.\n", req.dbdf); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = ssam_scsi_construct(&info); ++ if (rc < 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_create_scsi_controller(&req); ++ free_rpc_ssam_session_reg_info(&info); ++ return; ++ ++invalid: ++ free_rpc_ssam_create_scsi_controller(&req); ++ free_rpc_ssam_session_reg_info(&info); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++ return; ++} ++ ++SPDK_RPC_REGISTER("create_scsi_controller", rpc_ssam_create_scsi_controller, SPDK_RPC_RUNTIME) ++ ++struct rpc_scsi_controller_add_target { ++ char *name; ++ int32_t scsi_tgt_num; ++ char *bdev_name; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_scsi_controller_add_target[] = { ++ {"name", offsetof(struct rpc_scsi_controller_add_target, name), spdk_json_decode_string}, ++ {"scsi_tgt_num", offsetof(struct rpc_scsi_controller_add_target, scsi_tgt_num), spdk_json_decode_uint32}, ++ {"bdev_name", offsetof(struct rpc_scsi_controller_add_target, bdev_name), spdk_json_decode_string}, ++}; ++ ++static void ++free_rpc_ssam_scsi_ctrlr_add_target(struct rpc_scsi_controller_add_target *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++ if (req->bdev_name != NULL) { ++ free(req->bdev_name); ++ req->bdev_name = NULL; ++ } ++} ++ ++static void ++rpc_ssam_scsi_controller_add_target(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_scsi_controller_add_target req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ struct spdk_ssam_session *smsession; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_scsi_controller_add_target params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_scsi_controller_add_target, ++ SPDK_COUNTOF(g_rpc_scsi_controller_add_target), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_SCSI); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_lock(); ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ SPDK_ERRLOG("Before adding a SCSI target, there should be a SCSI controller.\n"); ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = rpc_ssam_session_reg_response_cb(smsession, request); ++ if (rc != 0) { ++ SPDK_ERRLOG("The controller is being operated.\n"); ++ rc = -EALREADY; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = ssam_scsi_dev_add_tgt(smsession, req.scsi_tgt_num, 
req.bdev_name); ++ if (rc != 0) { ++ /* ++ * Unregitster response cb to avoid use request in the cb function, ++ * because if error happend, request will be responsed immediately ++ */ ++ ssam_session_unreg_response_cb(smsession); ++ ssam_unlock(); ++ goto invalid; ++ } ++ ssam_unlock(); ++ ++ free_rpc_ssam_scsi_ctrlr_add_target(&req); ++ return; ++ ++invalid: ++ free_rpc_ssam_scsi_ctrlr_add_target(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("scsi_controller_add_target", rpc_ssam_scsi_controller_add_target, ++ SPDK_RPC_RUNTIME) ++ ++struct rpc_scsi_controller_remove_target { ++ char *name; ++ int32_t scsi_tgt_num; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_scsi_controller_remove_target[] = { ++ {"name", offsetof(struct rpc_scsi_controller_remove_target, name), spdk_json_decode_string}, ++ {"scsi_tgt_num", offsetof(struct rpc_scsi_controller_remove_target, scsi_tgt_num), spdk_json_decode_int32}, ++}; ++ ++static void ++free_rpc_scsi_controller_remove_target(struct rpc_scsi_controller_remove_target *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static void ++rpc_ssam_scsi_controller_remove_target(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_scsi_controller_remove_target req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_scsi_controller_remove_target params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_scsi_controller_remove_target, ++ SPDK_COUNTOF(g_rpc_scsi_controller_remove_target), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_SCSI); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_lock(); ++ ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = rpc_ssam_session_reg_response_cb(smsession, request); ++ if (rc != 0) { ++ SPDK_ERRLOG("The controller is being operated.\n"); ++ rc = -EALREADY; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = ssam_scsi_dev_remove_tgt(smsession, req.scsi_tgt_num, ++ rpc_ssam_send_response_cb, request); ++ if (rc != 0) { ++ /* ++ * Unregitster response cb to avoid use request in the cb function, ++ * because if error happend, request will be responsed immediately ++ */ ++ ssam_session_unreg_response_cb(smsession); ++ ssam_unlock(); ++ goto invalid; ++ } ++ ssam_unlock(); ++ free_rpc_scsi_controller_remove_target(&req); ++ return; ++ ++invalid: ++ free_rpc_scsi_controller_remove_target(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("scsi_controller_remove_target", rpc_ssam_scsi_controller_remove_target, ++ SPDK_RPC_RUNTIME) ++ ++struct rpc_ssam_scsi_device_iostat { ++ char *name; ++ int32_t scsi_tgt_num; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_ssam_scsi_device_iostat[] = { ++ {"name", offsetof(struct rpc_ssam_scsi_device_iostat, name), spdk_json_decode_string}, ++ {"scsi_tgt_num", offsetof(struct rpc_ssam_scsi_device_iostat, scsi_tgt_num), spdk_json_decode_int32}, ++}; ++ ++static void 
++free_rpc_ssam_scsi_device_iostat(struct rpc_ssam_scsi_device_iostat *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static int ++rpc_ssam_show_scsi_iostat(struct spdk_jsonrpc_request *request, uint16_t gfunc_id, ++ uint16_t scsi_tgt_num) ++{ ++ struct spdk_json_write_ctx *w = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ struct spdk_ssam_show_iostat_args iostat_args = { ++ .id = scsi_tgt_num, ++ .mode = SSAM_IOSTAT_NORMAL, ++ }; ++ ++ ssam_lock(); ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ ssam_unlock(); ++ return -ENODEV; ++ } else if (smsession->backend->type != VIRTIO_TYPE_SCSI) { ++ ssam_unlock(); ++ return -EINVAL; ++ } ++ ++ w = spdk_jsonrpc_begin_result(request); ++ ++ if (smsession->backend->show_iostat_json != NULL) { ++ smsession->backend->show_iostat_json(smsession, &iostat_args, w); ++ } ++ ++ ssam_unlock(); ++ ++ spdk_jsonrpc_end_result(request, w); ++ return 0; ++} ++ ++static void ++rpc_ssam_scsi_device_iostat(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_ssam_scsi_device_iostat req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_scsi_device_iostat params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_ssam_scsi_device_iostat, ++ SPDK_COUNTOF(g_rpc_ssam_scsi_device_iostat), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ if (req.scsi_tgt_num < 0 || req.scsi_tgt_num > SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("scsi_tgt_num is out of range\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ rc = rpc_ssam_show_scsi_iostat(request, gfunc_id, req.scsi_tgt_num); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_scsi_device_iostat(&req); ++ return; ++ ++invalid: ++ free_rpc_ssam_scsi_device_iostat(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("scsi_device_iostat", rpc_ssam_scsi_device_iostat, SPDK_RPC_RUNTIME) ++ ++struct rpc_limit_log_interval { ++ int interval; ++}; ++ ++static void ++rpc_ssam_device_pcie_list(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct spdk_json_write_ctx *w = NULL; ++ int rc; ++ uint32_t size = ssam_get_device_pcie_list_size(); ++ if (size == 0) { ++ rc = ssam_init_device_pcie_list(); ++ if (rc != 0) { ++ SPDK_ERRLOG("init device_pcie_list failed\n"); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, ++ spdk_strerror(-rc)); ++ return; ++ } ++ } ++ ++ w = spdk_jsonrpc_begin_result(request); ++ spdk_json_write_object_begin(w); ++ ++ ssam_dump_device_pcie_list(w); ++ ++ spdk_json_write_object_end(w); ++ spdk_jsonrpc_end_result(request, w); ++ return; ++} ++ ++SPDK_RPC_REGISTER("device_pcie_list", rpc_ssam_device_pcie_list, SPDK_RPC_RUNTIME) ++ ++SPDK_LOG_REGISTER_COMPONENT(ssam_rpc) +diff --git a/lib/ssam/ssam_scsi.c b/lib/ssam/ssam_scsi.c +new file mode 100644 +index 0000000..9c52bad +--- /dev/null ++++ b/lib/ssam/ssam_scsi.c +@@ -0,0 +1,2421 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. 
++ */ ++ ++#include "linux/virtio_scsi.h" ++ ++#include "spdk/stdinc.h" ++ ++ ++#include "spdk/likely.h" ++#include "spdk/scsi_spec.h" ++#include "spdk/env.h" ++#include "spdk/scsi.h" ++#include "spdk/ssam.h" ++#include "spdk/string.h" ++#include "spdk/bdev_module.h" ++ ++#include "ssam_internal.h" ++ ++#define SESSION_STOP_POLLER_PERIOD 1000 ++#define IOV_HEADER_TAIL_NUM 2 ++#define PAYLOAD_SIZE_MAX (2048U * 2048) ++#define VMIO_TYPE_VIRTIO_SCSI_CTRL 4 ++#define SSAM_SPDK_SCSI_DEV_MAX_LUN 1 ++#define SSAM_SENSE_DATE_LEN 32 ++#define PERF_STAT ++ ++/* Features supported by virtio-scsi lib. */ ++#define SPDK_SSAM_SCSI_FEATURES (SPDK_SSAM_FEATURES | \ ++ (1ULL << VIRTIO_SCSI_F_INOUT) | \ ++ (1ULL << VIRTIO_SCSI_F_HOTPLUG) | \ ++ (1ULL << VIRTIO_SCSI_F_CHANGE) | \ ++ (1ULL << VIRTIO_SCSI_F_T10_PI)) ++ ++/* Features that are specified in VIRTIO SCSI but currently not supported: ++ * - Live migration not supported yet ++ * - T10 PI ++ */ ++#define SPDK_SSAM_SCSI_DISABLED_FEATURES (SPDK_SSAM_DISABLED_FEATURES | \ ++ (1ULL << VIRTIO_SCSI_F_T10_PI)) ++ ++/* ssam-user-scsi support protocol features */ ++#define SPDK_SSAM_SCSI_PROTOCOL_FEATURES (1ULL << SSAM_USER_PROTOCOL_F_INFLIGHT_SHMFD) ++ ++enum spdk_scsi_dev_ssam_status { ++ /* Target ID is empty. */ ++ SSAM_SCSI_DEV_EMPTY, ++ ++ /* Target is still being added. */ ++ SSAM_SCSI_DEV_ADDING, ++ ++ /* Target ID occupied. */ ++ SSAM_SCSI_DEV_PRESENT, ++ ++ /* Target ID is occupied but removal is in progress. */ ++ SSAM_SCSI_DEV_REMOVING, ++ ++ /* In session - device (SCSI target) seen but removed. */ ++ SSAM_SCSI_DEV_REMOVED, ++}; ++ ++struct ssam_scsi_stat { ++ uint64_t count; ++ uint64_t total_tsc; /* pre_dma <- -> post_return */ ++ uint64_t dma_tsc; /* pre_dma <- -> post_dma */ ++ uint64_t bdev_tsc; /* pre_bdev <- -> post_bdev */ ++ uint64_t bdev_submit_tsc; /* <- spdk_bdev_xxx -> */ ++ uint64_t complete_tsc; /* pre_return <- -> post_return */ ++ uint64_t internel_tsc; /* total_tsc - dma_tsc - bdev_tsc - complete_tsc */ ++ ++ uint64_t complete_read_ios; /* Number of successfully completed read requests */ ++ uint64_t err_read_ios; /* Number of failed completed read requests */ ++ uint64_t complete_write_ios; /* Number of successfully completed write requests */ ++ uint64_t err_write_ios; /* Number of failed completed write requests */ ++ uint64_t flush_ios; /* Total number of flush requests */ ++ uint64_t complete_flush_ios; /* Number of successfully completed flush requests */ ++ uint64_t err_flush_ios; /* Number of failed completed flush requests */ ++ uint64_t fatal_ios; ++ uint64_t io_retry; ++ ++ uint64_t start_count; ++ uint64_t dma_count; ++ uint64_t dma_complete_count; ++ uint64_t bdev_count; ++ uint64_t bdev_complete_count; ++}; ++ ++struct spdk_scsi_dev_io_state { ++ struct spdk_bdev_io_stat stat; ++ uint64_t submit_tsc; ++ struct ssam_scsi_stat scsi_stat; ++}; ++ ++/** Context for a SCSI target in a ssam device */ ++struct spdk_scsi_dev_ssam_state { ++ struct spdk_scsi_dev_io_state *io_stat[SSAM_SPDK_SCSI_DEV_MAX_LUN]; ++ struct spdk_scsi_dev *dev; ++ ++ enum spdk_scsi_dev_ssam_status status; ++ ++ uint64_t flight_io; ++}; ++ ++struct ssam_scsi_tgt_hotplug_ctx { ++ unsigned scsi_tgt_num; ++}; ++ ++struct spdk_ssam_scsi_session { ++ struct spdk_ssam_session smsession; ++ int ref; ++ bool registered; ++ struct spdk_poller *stop_poller; ++ struct spdk_scsi_dev_ssam_state scsi_dev_state[SPDK_SSAM_SCSI_CTRLR_MAX_DEVS]; ++ char *dbdf; ++}; ++ ++struct ssam_scsi_session_ctx { ++ struct spdk_ssam_scsi_session *ssmsession; ++ void **user_ctx; 
++}; ++ ++struct ssam_scsi_task_stat { ++ uint64_t start_tsc; ++ uint64_t dma_start_tsc; ++ uint64_t dma_end_tsc; ++ uint64_t bdev_start_tsc; ++ uint64_t bdev_func_tsc; ++ uint64_t bdev_end_tsc; ++ uint64_t complete_start_tsc; ++ uint64_t complete_end_tsc; ++}; ++ ++struct spdk_ssam_scsi_task { ++ struct spdk_scsi_task scsi_task; ++ /* Response returned for I/O processing: a virtio_scsi_cmd_resp for I/O ++ * requests or a virtio_scsi_ctrl_tmf_resp for control (TMF) requests ++ */ ++ union { ++ struct virtio_scsi_cmd_resp resp; ++ struct virtio_scsi_ctrl_tmf_resp tmf_resp; ++ }; ++ ++ /* Number of bytes processed successfully */ ++ uint32_t used_len; ++ ++ /* Records the amount of valid data in the struct iovec iovs array. */ ++ uint32_t iovcnt; ++ struct ssam_iovec iovs; ++ ++ /* If set, the task is currently used for I/O processing. */ ++ bool used; ++ ++ /* For bdev io wait */ ++ struct spdk_ssam_scsi_session *ssmsession; ++ struct spdk_ssam_session_io_wait session_io_wait; ++ ++ /* ssam request data */ ++ struct ssam_request *io_req; ++ ++ uint16_t vq_idx; ++ uint16_t task_idx; ++ int32_t tgt_id; ++ struct spdk_ssam_session *smsession; ++ struct spdk_scsi_dev *scsi_dev; ++ struct ssam_scsi_task_stat task_stat; ++}; ++ ++struct ssam_add_tgt_ev_ctx { ++ char *bdev_name; ++ int tgt_num; ++}; ++ ++static void ssam_scsi_request_worker(struct spdk_ssam_session *smsession, void *arg); ++static void ssam_scsi_destroy_bdev_device(struct spdk_ssam_session *smsession, void *args); ++static void ssam_scsi_response_worker(struct spdk_ssam_session *smsession, void *arg); ++static int ssam_scsi_remove_session(struct spdk_ssam_session *smsession); ++static void ssam_scsi_remove_self(struct spdk_ssam_session *smsession); ++static void ssam_scsi_dump_info_json(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w); ++static void ssam_scsi_write_config_json(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w); ++static int ssam_scsi_get_config(struct spdk_ssam_session *smsession, uint8_t *config, ++ uint32_t len, uint16_t queues); ++static void ssam_scsi_show_iostat_json(struct spdk_ssam_session *smsession, ++ struct spdk_ssam_show_iostat_args *args, ++ struct spdk_json_write_ctx *w); ++static void ssam_scsi_clear_iostat_json(struct spdk_ssam_session *smsession); ++static void ssam_scsi_print_stuck_io_info(struct spdk_ssam_session *smsession); ++static void ssam_scsi_req_complete(struct spdk_ssam_dev *smdev, struct ssam_request *io_req, ++ uint8_t status); ++static struct spdk_bdev *ssam_scsi_get_bdev(struct spdk_ssam_session *smsession, uint32_t id); ++ ++static void ssam_free_scsi_task_pool(struct spdk_ssam_scsi_session *ssmsession); ++static int ssam_scsi_dev_hot_remove_tgt(struct spdk_ssam_session *smsession, void **_ctx); ++static void ssam_scsi_process_io_task(struct spdk_ssam_session *smsession, ++ struct spdk_ssam_scsi_task *task); ++static int ssam_scsi_task_iovs_memory_get(struct spdk_ssam_scsi_task *task, uint32_t payload_size); ++static void ssam_scsi_submit_io_task(struct spdk_ssam_scsi_task *task); ++static void ssam_scsi_destruct_tgt(struct spdk_ssam_scsi_session *ssmsession, int scsi_tgt_num); ++ ++static const struct spdk_ssam_session_backend g_ssam_scsi_session_backend = { ++ .type = VIRTIO_TYPE_SCSI, ++ .request_worker = ssam_scsi_request_worker, ++ .destroy_bdev_device = ssam_scsi_destroy_bdev_device, ++ .response_worker = ssam_scsi_response_worker, ++ .remove_session = ssam_scsi_remove_session, ++ .remove_self = ssam_scsi_remove_self, ++ .print_stuck_io_info = 
ssam_scsi_print_stuck_io_info, ++ .dump_info_json = ssam_scsi_dump_info_json, ++ .write_config_json = ssam_scsi_write_config_json, ++ .ssam_get_config = ssam_scsi_get_config, ++ .show_iostat_json = ssam_scsi_show_iostat_json, ++ .clear_iostat_json = ssam_scsi_clear_iostat_json, ++ .get_bdev = ssam_scsi_get_bdev, ++}; ++ ++static void ++ssam_scsi_task_stat_tick(uint64_t *tsc) ++{ ++#ifdef PERF_STAT ++ *tsc = spdk_get_ticks(); ++#endif ++ return; ++} ++ ++static void ++ssam_scsi_stat_statistics(struct spdk_ssam_scsi_task *task) ++{ ++#ifdef PERF_STAT ++ if (task->scsi_task.lun == NULL || task->io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL || ++ task->task_stat.bdev_func_tsc == 0 || task->task_stat.bdev_end_tsc == 0) { ++ return; ++ } ++ ++ int32_t lun_id = spdk_scsi_lun_get_id(task->scsi_task.lun); ++ struct ssam_scsi_stat *scsi_stat = ++ &task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[lun_id]->scsi_stat; ++ ++ uint64_t dma_tsc = task->task_stat.dma_end_tsc - task->task_stat.dma_start_tsc; ++ uint64_t bdev_tsc = task->task_stat.bdev_end_tsc - task->task_stat.bdev_start_tsc; ++ uint64_t bdev_submit_tsc = task->task_stat.bdev_func_tsc - task->task_stat.bdev_start_tsc; ++ uint64_t complete_tsc = task->task_stat.complete_end_tsc - task->task_stat.complete_start_tsc; ++ uint64_t total_tsc = task->task_stat.complete_end_tsc - task->task_stat.start_tsc; ++ ++ struct ssam_io_message *io_cmd = &task->io_req->req.cmd; ++ if (io_cmd->writable) { /* read io */ ++ if (task->scsi_task.status == SPDK_SCSI_STATUS_GOOD) { ++ scsi_stat->complete_read_ios++; ++ } else { ++ scsi_stat->err_read_ios++; ++ } ++ } else { ++ if (task->scsi_task.status == SPDK_SCSI_STATUS_GOOD) { ++ scsi_stat->complete_write_ios++; ++ } else { ++ scsi_stat->err_write_ios++; ++ } ++ } ++ ++ scsi_stat->dma_tsc += dma_tsc; ++ scsi_stat->bdev_tsc += bdev_tsc; ++ scsi_stat->bdev_submit_tsc += bdev_submit_tsc; ++ scsi_stat->complete_tsc += complete_tsc; ++ scsi_stat->total_tsc += total_tsc; ++ scsi_stat->internel_tsc += total_tsc - complete_tsc - bdev_tsc - dma_tsc; ++ scsi_stat->count += 1; ++#endif ++} ++ ++static uint32_t ++ssam_scsi_tgtid_to_lunid(uint32_t tgt_id) ++{ ++ return (((tgt_id) << 0x8) | SSAM_VIRTIO_SCSI_LUN_ID); ++} ++ ++static int ++ssam_scsi_get_config(struct spdk_ssam_session *smsession, uint8_t *config, ++ uint32_t len, uint16_t queues) ++{ ++ struct virtio_scsi_config scsi_cfg; ++ scsi_cfg.num_queues = 0x80; ++ scsi_cfg.seg_max = 0x6f; ++ scsi_cfg.max_sectors = 0x1ff; ++ scsi_cfg.cmd_per_lun = 0x80; ++ scsi_cfg.event_info_size = 0; ++ scsi_cfg.sense_size = 0x60; ++ scsi_cfg.cdb_size = 0x20; ++ scsi_cfg.max_channel = 0; ++ scsi_cfg.max_target = SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; ++ scsi_cfg.max_lun = 0xff; ++ ++ memcpy(config, (void *)&scsi_cfg, sizeof(struct virtio_scsi_config)); ++ return 0; ++} ++ ++static int ++ssam_scsi_send_event(struct spdk_ssam_session *smsession, unsigned scsi_dev_num, ++ uint32_t event, uint32_t reason) ++{ ++ struct virtio_scsi_event vscsi_event = {0}; ++ int ret; ++ ++ vscsi_event.event = event; ++ vscsi_event.reason = reason; ++ ++ vscsi_event.lun[0] = 1; ++ vscsi_event.lun[0x1] = (uint8_t)scsi_dev_num; ++ vscsi_event.lun[0x2] = 0; ++ vscsi_event.lun[0x3] = 0; ++ memset(&vscsi_event.lun[0x4], 0, 0x4); ++ ++ ret = ssam_send_action(smsession->gfunc_id, SSAM_FUNCTION_ACTION_SCSI_EVENT, ++ (const void *)&vscsi_event, sizeof(struct virtio_scsi_event)); ++ if (ret < 0) { ++ SPDK_ERRLOG("%s: SCSI target %d send event %u(reason %u) failed: %s.\n", ++ smsession->name, scsi_dev_num, event, reason, 
strerror(-ret)); ++ } ++ return ret; ++} ++ ++static void ++ssam_scsi_stop_cpl_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ spdk_ssam_session_rsp_fn rsp_fn = smsession->rsp_fn; ++ void *rsp_ctx = smsession->rsp_ctx; ++ ++ SPDK_NOTICELOG("SCSI controller %s deleted\n", smsession->name); ++ ++ if (smsession->name != NULL) { ++ free(smsession->name); ++ smsession->name = NULL; ++ } ++ ++ if (ssmsession->dbdf != NULL) { ++ free(ssmsession->dbdf); ++ ssmsession->dbdf = NULL; ++ } ++ ++ ssam_set_session_be_freed(ctx); ++ memset(ssmsession, 0, sizeof(*ssmsession)); ++ free(ssmsession); ++ ++ if (rsp_fn != NULL) { ++ rsp_fn(rsp_ctx, 0); ++ rsp_fn = NULL; ++ } ++} ++ ++static void ++ssam_scsi_destroy_session(struct ssam_scsi_session_ctx *ctx) ++{ ++ struct spdk_ssam_session *smsession = &ctx->ssmsession->smsession; ++ struct spdk_ssam_scsi_session *ssmsession = ctx->ssmsession; ++ ++ if (smsession->task_cnt > 0) { ++ return; ++ } ++ ++ if (ssmsession->ref > 0) { ++ return; ++ } ++ ++ ssam_session_destroy(smsession); ++ ++ ssmsession->registered = false; ++ spdk_poller_unregister(&ssmsession->stop_poller); ++ ssam_free_scsi_task_pool(ssmsession); ++ ssam_session_stop_done(&ssmsession->smsession, 0, ctx->user_ctx); ++ free(ctx); ++ ++ return; ++} ++ ++static int ++ssam_scsi_destroy_session_poller_cb(void *arg) ++{ ++ struct ssam_scsi_session_ctx *ctx = arg; ++ ++ if (ssam_trylock() != 0) { ++ return SPDK_POLLER_BUSY; ++ } ++ ++ ssam_scsi_destroy_session(ctx); ++ ++ ssam_unlock(); ++ ++ return SPDK_POLLER_BUSY; ++} ++ ++static int ++ssam_scsi_stop_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct ssam_scsi_session_ctx *_ctx = ++ (struct ssam_scsi_session_ctx *)calloc(1, sizeof(struct ssam_scsi_session_ctx)); ++ ++ if (_ctx == NULL) { ++ SPDK_ERRLOG("%s: calloc scsi session ctx error.\n", smsession->name); ++ return -ENOMEM; ++ } ++ ++ _ctx->ssmsession = ssmsession; ++ _ctx->user_ctx = ctx; ++ ++ ssmsession->stop_poller = SPDK_POLLER_REGISTER(ssam_scsi_destroy_session_poller_cb, ++ _ctx, SESSION_STOP_POLLER_PERIOD); ++ if (ssmsession->stop_poller == NULL) { ++ SPDK_ERRLOG("%s: ssam_destroy_session_poller_cb start failed.\n", smsession->name); ++ ssam_session_stop_done(smsession, -EBUSY, ctx); ++ free(_ctx); ++ return -EBUSY; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_scsi_stop(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = true, ++ .need_rsp = true, ++ }; ++ return ssam_send_event_to_session(smsession, ssam_scsi_stop_cb, ssam_scsi_stop_cpl_cb, ++ send_event_flag, NULL); ++} ++ ++/* sync interface for hot-remove */ ++static void ++ssam_scsi_remove_self(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ /* no need error */ ++ if (ssmsession->ref > 0) { ++ return; /* still have targets */ ++ } ++ ++ SPDK_NOTICELOG("%s: is being freed\n", smsession->name); ++ ++ ssmsession->registered = false; ++ ssam_free_scsi_task_pool(ssmsession); ++ ++ ssam_sessions_remove(smsession->smdev->smsessions, smsession); ++ ++ if (smsession->smdev->active_session_num > 0) { ++ smsession->smdev->active_session_num--; ++ } ++ smsession->smdev = NULL; ++ /* free smsession */ ++ free(smsession->name); ++ free(ssmsession->dbdf); ++ free(ssmsession); ++} ++ ++/* async 
interface */ ++static int ++ssam_scsi_remove_session(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ int ret; ++ ++ if (smsession->registered && ssmsession->ref != 0) { ++ SPDK_ERRLOG("%s: SCSI target %d is still present.\n", smsession->name, ssmsession->ref); ++ return -EBUSY; ++ } ++ ++ ret = ssam_scsi_stop(smsession); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static struct spdk_scsi_dev * ++ssam_scsi_dev_get_tgt(struct spdk_ssam_scsi_session *ssmsession, uint8_t num) ++{ ++ if (ssmsession == NULL) { ++ SPDK_ERRLOG("ssmsession is null.\n"); ++ return NULL; ++ } ++ if (num >= SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("%s: tgt num %u over %u.\n", ssmsession->smsession.name, num, ++ SPDK_SSAM_SCSI_CTRLR_MAX_DEVS); ++ return NULL; ++ } ++ if (ssmsession->scsi_dev_state[num].status != SSAM_SCSI_DEV_PRESENT) { ++ return NULL; ++ } ++ ++ if (ssmsession->scsi_dev_state[num].dev == NULL) { ++ SPDK_ERRLOG("%s: no tgt num %u device.\n", ssmsession->smsession.name, num); ++ return NULL; ++ } ++ return ssmsession->scsi_dev_state[num].dev; ++} ++ ++static void ++ssam_scsi_dump_device_info(struct spdk_ssam_session *smsession, struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev *sdev; ++ struct spdk_scsi_lun *lun; ++ int32_t tgt_id; ++ ++ spdk_json_write_named_array_begin(w, "scsi_targets"); ++ for (tgt_id = 0; tgt_id < SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; tgt_id++) { ++ sdev = ssam_scsi_dev_get_tgt(ssmsession, tgt_id); ++ if (!sdev) { ++ continue; ++ } ++ ++ spdk_json_write_object_begin(w); ++ ++ spdk_json_write_named_uint32(w, "scsi_target_num", tgt_id); ++ spdk_json_write_named_uint32(w, "id", spdk_scsi_dev_get_id(sdev)); ++ spdk_json_write_named_string(w, "target_name", spdk_scsi_dev_get_name(sdev)); ++ lun = spdk_scsi_dev_get_lun(sdev, 0); ++ if (!lun) { ++ continue; ++ } ++ spdk_json_write_named_string(w, "bdev_name", spdk_scsi_lun_get_bdev_name(lun)); ++ ++ spdk_json_write_object_end(w); ++ } ++ ++ spdk_json_write_array_end(w); ++} ++ ++static void ++ssam_scsi_dump_info_json(struct spdk_ssam_session *smsession, struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ spdk_json_write_object_begin(w); ++ ++ spdk_json_write_named_string(w, "dbdf", ssmsession->dbdf); ++ spdk_json_write_named_string(w, "name", ssam_session_get_name(smsession)); ++ spdk_json_write_named_uint32(w, "function_id", (uint32_t)smsession->gfunc_id); ++ spdk_json_write_named_uint32(w, "queues", (uint32_t)smsession->max_queues); ++ spdk_json_write_named_string(w, "ctrlr", ssam_dev_get_name(smsession->smdev)); ++ spdk_json_write_named_string_fmt(w, "cpumask", "0x%s", ++ spdk_cpuset_fmt(spdk_thread_get_cpumask(smsession->smdev->thread))); ++ ++ ssam_scsi_dump_device_info(smsession, w); ++ ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_scsi_write_config_json(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev *sdev; ++ struct spdk_scsi_lun *lun; ++ int32_t tgt_id; ++ ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "method", "create_scsi_controller"); ++ ++ spdk_json_write_named_object_begin(w, "params"); ++ spdk_json_write_named_string(w, "dbdf", ssmsession->dbdf); ++ 
spdk_json_write_named_string(w, "name", smsession->name); ++ spdk_json_write_object_end(w); ++ ++ spdk_json_write_object_end(w); ++ ++ for (tgt_id = 0; tgt_id < SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; tgt_id++) { ++ sdev = ssam_scsi_dev_get_tgt(ssmsession, tgt_id); ++ if (!sdev) { ++ continue; ++ } ++ ++ lun = spdk_scsi_dev_get_lun(sdev, 0); ++ if (!lun) { ++ SPDK_ERRLOG("%s: no lun, continue.\n", smsession->name); ++ continue; ++ } ++ ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "method", "scsi_controller_add_target"); ++ ++ spdk_json_write_named_object_begin(w, "params"); ++ spdk_json_write_named_string(w, "name", smsession->name); ++ spdk_json_write_named_uint32(w, "scsi_tgt_num", tgt_id); ++ ++ spdk_json_write_named_string(w, "bdev_name", spdk_scsi_lun_get_bdev_name(lun)); ++ spdk_json_write_object_end(w); ++ ++ spdk_json_write_object_end(w); ++ } ++} ++ ++static void ++ssam_scsi_show_tgt_iostat_json(struct spdk_ssam_scsi_session *ssmsession, ++ struct spdk_json_write_ctx *w, int32_t tgt_id, struct spdk_scsi_dev *sdev) ++{ ++ struct spdk_scsi_dev_io_state *io_stat; ++ struct spdk_scsi_lun *lun; ++ struct ssam_scsi_stat scsi_stat; ++ uint64_t ticks_hz = spdk_get_ticks_hz(); ++ uint64_t count; ++ uint64_t poll_count; ++ ++ lun = spdk_scsi_dev_get_lun(sdev, 0); ++ if (lun == NULL) { ++ return; ++ } ++ ++ io_stat = ssmsession->scsi_dev_state[tgt_id].io_stat[0]; ++ if (io_stat == NULL) { ++ SPDK_ERRLOG("No scsi iostat, tgt_id %d\n", tgt_id); ++ return; ++ } ++ ++ spdk_json_write_object_begin(w); ++ ++ spdk_json_write_named_uint32(w, "scsi_dev_num", tgt_id); ++ spdk_json_write_named_uint32(w, "id", spdk_scsi_dev_get_id(sdev)); ++ spdk_json_write_named_string(w, "target_name", spdk_scsi_dev_get_name(sdev)); ++ ++ memcpy(&scsi_stat, &io_stat->scsi_stat, sizeof(struct ssam_scsi_stat)); ++ ++ spdk_json_write_named_int32(w, "id", spdk_scsi_lun_get_id(lun)); ++ spdk_json_write_named_string(w, "bdev_name", spdk_scsi_lun_get_bdev_name(lun)); ++ spdk_json_write_named_uint64(w, "bytes_read", io_stat->stat.bytes_read); ++ spdk_json_write_named_uint64(w, "num_read_ops", io_stat->stat.num_read_ops); ++ spdk_json_write_named_uint64(w, "bytes_written", io_stat->stat.bytes_written); ++ spdk_json_write_named_uint64(w, "num_write_ops", io_stat->stat.num_write_ops); ++ spdk_json_write_named_uint64(w, "read_latency_ticks", io_stat->stat.read_latency_ticks); ++ spdk_json_write_named_uint64(w, "write_latency_ticks", io_stat->stat.write_latency_ticks); ++ ++ spdk_json_write_named_uint64(w, "complete_read_ios", scsi_stat.complete_read_ios); ++ spdk_json_write_named_uint64(w, "err_read_ios", scsi_stat.err_read_ios); ++ spdk_json_write_named_uint64(w, "complete_write_ios", scsi_stat.complete_write_ios); ++ spdk_json_write_named_uint64(w, "err_write_ios", scsi_stat.err_write_ios); ++ spdk_json_write_named_uint64(w, "flush_ios", scsi_stat.flush_ios); ++ spdk_json_write_named_uint64(w, "complete_flush_ios", scsi_stat.complete_flush_ios); ++ spdk_json_write_named_uint64(w, "err_flush_ios", scsi_stat.err_flush_ios); ++ spdk_json_write_named_uint64(w, "fatal_ios", scsi_stat.fatal_ios); ++ spdk_json_write_named_uint64(w, "io_retry", scsi_stat.io_retry); ++ ++ spdk_json_write_named_uint64(w, "start_count", scsi_stat.start_count); ++ spdk_json_write_named_uint64(w, "dma_count", scsi_stat.dma_count); ++ spdk_json_write_named_uint64(w, "dma_complete_count", scsi_stat.dma_complete_count); ++ spdk_json_write_named_uint64(w, "bdev_count", scsi_stat.bdev_count); ++ spdk_json_write_named_uint64(w, 
"bdev_complete_count", scsi_stat.bdev_complete_count); ++ spdk_json_write_named_uint64(w, "flight_io", ssmsession->scsi_dev_state[tgt_id].flight_io); ++ ++ if (scsi_stat.count == 0) { ++ count = 1; ++ } else { ++ count = scsi_stat.count; ++ } ++ ++ if (ssmsession->smsession.smdev->stat.poll_count == 0) { ++ poll_count = 1; ++ } else { ++ poll_count = ssmsession->smsession.smdev->stat.poll_count; ++ } ++ ++ spdk_json_write_named_string_fmt(w, "poll_lat", "%.9f", ++ (float)ssmsession->smsession.smdev->stat.poll_tsc / poll_count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "total_lat", "%.9f", ++ (float)scsi_stat.total_tsc / count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "dma_lat", "%.9f", (float)scsi_stat.dma_tsc / count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "bdev_lat", "%.9f", ++ (float)scsi_stat.bdev_tsc / count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "bdev_submit_lat", "%.9f", ++ (float)scsi_stat.bdev_submit_tsc / count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "complete_lat", "%.9f", ++ (float)scsi_stat.complete_tsc / count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "internal_lat", "%.9f", ++ (float)scsi_stat.internel_tsc / count / ticks_hz); ++ ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_scsi_show_iostat_json(struct spdk_ssam_session *smsession, ++ struct spdk_ssam_show_iostat_args *args, ++ struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev *sdev; ++ int32_t tgt_id; ++ ++ if (args->id != SPDK_INVALID_ID) { ++ sdev = ssam_scsi_dev_get_tgt(ssmsession, args->id); ++ if (sdev != NULL) { ++ ssam_scsi_show_tgt_iostat_json(ssmsession, w, args->id, sdev); ++ } else { ++ spdk_json_write_object_begin(w); ++ spdk_json_write_object_end(w); ++ } ++ return; ++ } ++ ++ spdk_json_write_object_begin(w); ++ ++ spdk_json_write_named_uint32(w, "function_id", smsession->gfunc_id); ++ ++ spdk_json_write_named_array_begin(w, "scsi_target"); ++ ++ for (tgt_id = 0; tgt_id < SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; tgt_id++) { ++ sdev = ssam_scsi_dev_get_tgt(ssmsession, tgt_id); ++ if (!sdev) { ++ continue; ++ } ++ ssam_scsi_show_tgt_iostat_json(ssmsession, w, tgt_id, sdev); ++ } ++ ++ spdk_json_write_array_end(w); ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_scsi_clear_iostat_json(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev_io_state *io_stat; ++ int32_t tgt_id; ++ int32_t lun_id; ++ for (tgt_id = 0; tgt_id < SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; tgt_id++) { ++ for (lun_id = 0; lun_id < SSAM_SPDK_SCSI_DEV_MAX_LUN; lun_id++) { ++ io_stat = ssmsession->scsi_dev_state[tgt_id].io_stat[lun_id]; ++ if (io_stat == NULL) { ++ continue; ++ } ++ memset(io_stat, 0, sizeof(struct spdk_scsi_dev_io_state)); ++ } ++ } ++ return; ++} ++ ++static struct spdk_bdev * ++ssam_scsi_get_bdev(struct spdk_ssam_session *smsession, uint32_t tgt_id) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev *scsi_dev; ++ struct spdk_scsi_lun *scsi_lun = NULL; ++ const char *bdev_name = NULL; ++ if (tgt_id >= SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("%s: tgt %d invalid\n", smsession->name, tgt_id); ++ return NULL; ++ } ++ if (ssmsession->scsi_dev_state[tgt_id].dev == NULL) { ++ SPDK_ERRLOG("%s: tgt %d not be created\n", smsession->name, tgt_id); ++ return NULL; ++ } ++ ++ scsi_dev = 
ssmsession->scsi_dev_state[tgt_id].dev; ++ /* LUN id 0 is used */ ++ scsi_lun = spdk_scsi_dev_get_lun(scsi_dev, 0); ++ if (scsi_lun == NULL) { ++ return NULL; ++ } ++ bdev_name = spdk_scsi_lun_get_bdev_name(scsi_lun); ++ if (bdev_name == NULL) { ++ return NULL; ++ } ++ return spdk_bdev_get_by_name(bdev_name); ++} ++ ++static int ++ssam_scsi_iostat_construct(struct spdk_ssam_scsi_session *ssmsession, int32_t tgt_id, ++ int *lun_id_list, int num_luns) ++{ ++ struct spdk_scsi_dev_io_state *io_stat; ++ int32_t lun_id; ++ int i; ++ ++ for (i = 0; i < num_luns; i++) { ++ lun_id = lun_id_list[i]; ++ io_stat = ssmsession->scsi_dev_state[tgt_id].io_stat[lun_id]; ++ if (io_stat != NULL) { ++ SPDK_ERRLOG("io_stat with tgt %d lun %d already exists\n", tgt_id, lun_id); ++ return -EEXIST; ++ } ++ ++ io_stat = calloc(1, sizeof(*io_stat)); ++ if (io_stat == NULL) { ++ SPDK_ERRLOG("Could not allocate io_stat for tgt %d lun %d\n", tgt_id, lun_id); ++ return -ENOMEM; ++ } ++ ssmsession->scsi_dev_state[tgt_id].io_stat[lun_id] = io_stat; ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_scsi_iostat_destruct(struct spdk_scsi_dev_ssam_state *state) ++{ ++ int32_t lun_id; ++ ++ for (lun_id = 0; lun_id < SSAM_SPDK_SCSI_DEV_MAX_LUN; lun_id++) { ++ if (state->io_stat[lun_id] != NULL) { ++ free(state->io_stat[lun_id]); ++ state->io_stat[lun_id] = NULL; ++ } ++ } ++ ++ return; ++} ++ ++static void ++ssam_remove_scsi_tgt(struct spdk_ssam_scsi_session *ssmsession, unsigned scsi_tgt_num) ++{ ++ struct spdk_scsi_dev_ssam_state *state = &ssmsession->scsi_dev_state[scsi_tgt_num]; ++ struct spdk_ssam_session *smsession = &ssmsession->smsession; ++ spdk_ssam_session_rsp_fn rsp_fn = smsession->rsp_fn; ++ void *rsp_ctx = smsession->rsp_ctx; ++ ++ smsession->rsp_fn = NULL; ++ smsession->rsp_ctx = NULL; ++ ++ /* delete scsi port */ ++ spdk_scsi_dev_delete_port(state->dev, 0); ++ ++ /* destruct scsi dev */ ++ spdk_scsi_dev_destruct(state->dev, NULL, NULL); ++ state->dev = NULL; ++ ++ /* free iostat */ ++ ssam_scsi_iostat_destruct(state); ++ state->status = SSAM_SCSI_DEV_EMPTY; ++ ++ /* ref-- */ ++ if (ssmsession->ref > 0) { ++ ssmsession->ref--; ++ } else { ++ SPDK_ERRLOG("%s: ref internal error\n", smsession->name); ++ } ++ if (rsp_fn != NULL) { ++ rsp_fn(rsp_ctx, 0); ++ rsp_fn = NULL; ++ } ++ SPDK_NOTICELOG("%s: target %u is removed\n", smsession->name, scsi_tgt_num); ++} ++ ++static int ++ssam_scsi_get_payload_size(struct ssam_request *io_req, uint32_t *payload_size) ++{ ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ uint32_t payload = 0; ++ uint32_t first_vec; ++ uint32_t end_vec; ++ uint32_t loop; ++ ++ if (io_cmd->writable) { /* read io */ ++ /* FROM_DEV: [req][resp][write_buf]...[write_buf ]*, write_buf start at index 2 */ ++ first_vec = 2; ++ end_vec = io_cmd->iovcnt - 1; ++ } else { /* write io */ ++ first_vec = 1; ++ /* TO_DEV: [req][read_buf]...[read_buf][resp], read_buf last index is iovcnt-2 */ ++ end_vec = io_cmd->iovcnt - 2; ++ } ++ ++ for (loop = first_vec; loop <= end_vec; loop++) { ++ if (spdk_unlikely((UINT32_MAX - io_cmd->iovs[loop].iov_len) < payload)) { ++ SPDK_ERRLOG("payload size overflow\n"); ++ return -1; ++ } ++ payload += io_cmd->iovs[loop].iov_len; ++ } ++ ++ if (spdk_unlikely(payload > PAYLOAD_SIZE_MAX)) { ++ SPDK_ERRLOG("payload size larger than %u, payload_size = %u\n", ++ PAYLOAD_SIZE_MAX, payload); ++ return -1; ++ } ++ ++ *payload_size = payload; ++ ++ return 0; ++} ++ ++static void ++ssam_session_io_resubmit(void *arg) ++{ ++ struct spdk_ssam_scsi_task *task = (struct spdk_ssam_scsi_task 
*)arg; ++ struct spdk_ssam_session *smsession = &task->ssmsession->smsession; ++ uint32_t payload_size = task->scsi_task.transfer_len; ++ int rc; ++ ++ rc = ssam_scsi_task_iovs_memory_get(task, payload_size); ++ if (rc != 0) { ++ ssam_session_insert_io_wait(smsession, &task->session_io_wait); ++ return; ++ } ++ ssam_scsi_process_io_task(smsession, task); ++} ++ ++static void ++ssam_scsi_task_init(struct spdk_ssam_scsi_task *task) ++{ ++ memset(&task->scsi_task, 0, sizeof(struct spdk_scsi_task)); ++ ++ task->used = true; ++ task->iovcnt = 0; ++ task->io_req = NULL; ++ task->session_io_wait.cb_fn = ssam_session_io_resubmit; ++ task->session_io_wait.cb_arg = task; ++} ++ ++static void ++ssam_scsi_task_dma_request_para(struct ssam_dma_request *data_request, ++ struct spdk_ssam_scsi_task *task, ++ uint32_t type, uint8_t status) ++{ ++ struct spdk_scsi_task *scsi_task = &task->scsi_task; ++ struct ssam_io_message *io_cmd = NULL; ++ struct spdk_ssam_dma_cb dma_cb = { ++ .status = status, ++ .req_dir = type, ++ .gfunc_id = task->io_req->gfunc_id, ++ .vq_idx = task->vq_idx, ++ .task_idx = task->task_idx ++ }; ++ ++ io_cmd = &task->io_req->req.cmd; ++ data_request->cb = (void *) * (uint64_t *)&dma_cb; ++ data_request->gfunc_id = task->io_req->gfunc_id; ++ data_request->flr_seq = task->io_req->flr_seq; ++ data_request->direction = type; ++ data_request->data_len = scsi_task->transfer_len; ++ if (type == SSAM_REQUEST_DATA_STORE) { ++ data_request->src = task->iovs.phys.sges; ++ data_request->src_num = task->iovcnt; ++ /* FROM_DEV: [req][resp][write_buf]...[write_buf ]*, write_buf start at index 2 */ ++ data_request->dst = &io_cmd->iovs[2]; ++ /* dma data iovs does not contain header and tail */ ++ data_request->dst_num = io_cmd->iovcnt - IOV_HEADER_TAIL_NUM; ++ } else if (type == SSAM_REQUEST_DATA_LOAD) { ++ data_request->src = &io_cmd->iovs[1]; ++ /* dma data iovs does not contain header and tail */ ++ data_request->src_num = io_cmd->iovcnt - IOV_HEADER_TAIL_NUM; ++ data_request->dst = task->iovs.phys.sges; ++ data_request->dst_num = task->iovcnt; ++ } ++} ++ ++static void ++ssam_scsi_task_finish(struct spdk_ssam_scsi_task *task) ++{ ++ struct spdk_ssam_session *smsession = task->smsession; ++ struct spdk_ssam_virtqueue *vq = &smsession->virtqueue[task->vq_idx]; ++ ++ if (smsession->task_cnt == 0) { ++ SPDK_ERRLOG("%s: task count internel error\n", smsession->name); ++ return; ++ } ++ ++ task->io_req = NULL; ++ ++ if (task->iovs.virt.sges[0].iov_base != NULL) { ++ ssam_mempool_free(smsession->mp, task->iovs.virt.sges[0].iov_base); ++ task->iovs.virt.sges[0].iov_base = NULL; ++ } ++ ++ memset(&task->iovs, 0, sizeof(task->iovs)); ++ ++ task->iovcnt = 0; ++ smsession->task_cnt--; ++ task->used = false; ++ vq->index[vq->index_l] = task->task_idx; ++ vq->index_l = (vq->index_l + 1) & 0xFF; ++ vq->use_num--; ++} ++ ++static int ++ssam_scsi_io_complete(struct spdk_ssam_dev *smdev, struct ssam_request *io_req, void *rsp_buf, ++ uint32_t rsp_len) ++{ ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ struct ssam_virtio_res *virtio_res = NULL; ++ struct ssam_io_response io_resp; ++ struct iovec io_vec; ++ int rc; ++ ++ memset(&io_resp, 0, sizeof(io_resp)); ++ io_resp.gfunc_id = io_req->gfunc_id; ++ io_resp.iocb_id = io_req->iocb_id; ++ io_resp.status = io_req->status; ++ io_resp.req = io_req; ++ io_resp.flr_seq = io_req->flr_seq; ++ ++ virtio_res = (struct ssam_virtio_res *)&io_resp.data; ++ virtio_res->iovs = &io_vec; ++ if (io_cmd->writable) { /* FROM_DEV: [req][resp][write_buf]...[write_buf ] */ ++ 
virtio_res->iovs->iov_base = io_cmd->iovs[1].iov_base; ++ virtio_res->iovs->iov_len = io_cmd->iovs[1].iov_len; ++ } else { /* TO_DEV: [req][read_buf]...[read_buf][resp] */ ++ virtio_res->iovs->iov_base = io_cmd->iovs[io_cmd->iovcnt - 1].iov_base; ++ virtio_res->iovs->iov_len = io_cmd->iovs[io_cmd->iovcnt - 1].iov_len; ++ } ++ virtio_res->iovcnt = 1; ++ virtio_res->rsp = rsp_buf; ++ virtio_res->rsp_len = rsp_len; ++ ++ rc = ssam_io_complete(smdev->tid, &io_resp); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ ssam_dev_io_dec(smdev); ++ return 0; ++} ++ ++struct ssam_scsi_req_complete_arg { ++ struct spdk_ssam_dev *smdev; ++ struct ssam_request *io_req; ++ uint8_t status; ++}; ++ ++static void ++ssam_scsi_req_complete_cb(void *arg) ++{ ++ struct ssam_scsi_req_complete_arg *cb_arg = (struct ssam_scsi_req_complete_arg *)arg; ++ struct virtio_scsi_cmd_resp resp = {0}; ++ struct virtio_scsi_ctrl_tmf_resp tmf_resp = {0}; ++ int rc; ++ ++ if (spdk_unlikely(cb_arg->io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL)) { ++ tmf_resp.response = cb_arg->status; ++ rc = ssam_scsi_io_complete(cb_arg->smdev, cb_arg->io_req, &tmf_resp, ++ sizeof(struct virtio_scsi_ctrl_tmf_resp)); ++ } else { ++ resp.response = cb_arg->status; ++ rc = ssam_scsi_io_complete(cb_arg->smdev, cb_arg->io_req, &resp, ++ sizeof(struct virtio_scsi_cmd_resp)); ++ } ++ ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_scsi_req_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(cb_arg->smdev, io_wait_r); ++ return; ++ } ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ++ssam_scsi_req_complete(struct spdk_ssam_dev *smdev, struct ssam_request *io_req, uint8_t status) ++{ ++ struct virtio_scsi_cmd_resp resp = {0}; ++ struct virtio_scsi_ctrl_tmf_resp tmf_resp = {0}; ++ int rc; ++ ++ if (spdk_unlikely(io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL)) { ++ tmf_resp.response = status; ++ rc = ssam_scsi_io_complete(smdev, io_req, &tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp)); ++ } else { ++ resp.response = status; ++ rc = ssam_scsi_io_complete(smdev, io_req, &resp, sizeof(struct virtio_scsi_cmd_resp)); ++ } ++ ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_scsi_req_complete_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_scsi_req_complete_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smdev; ++ cb_arg->io_req = io_req; ++ cb_arg->status = status; ++ io_wait_r->cb_fn = ssam_scsi_req_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smdev, io_wait_r); ++ } ++} ++ ++static void ++ssam_scsi_task_put(struct spdk_ssam_scsi_task *task) ++{ ++ memset(&task->resp, 0, sizeof(task->resp)); ++ if (task->io_req->type != VMIO_TYPE_VIRTIO_SCSI_CTRL) { ++ task->ssmsession->scsi_dev_state[task->tgt_id].flight_io--; ++ } ++ spdk_scsi_task_put(&task->scsi_task); ++} ++ ++static void ++ssam_scsi_submit_completion_cb(void *arg) ++{ ++ struct spdk_ssam_scsi_task *task = (struct spdk_ssam_scsi_task *)arg; ++ struct spdk_ssam_session *smsession = task->smsession; ++ int rc; ++ ++ if (spdk_unlikely(task->io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL)) { ++ rc = ssam_scsi_io_complete(smsession->smdev, 
task->io_req, &task->tmf_resp, ++ sizeof(struct virtio_scsi_ctrl_tmf_resp)); ++ } else { ++ rc = ssam_scsi_io_complete(smsession->smdev, task->io_req, &task->resp, ++ sizeof(struct virtio_scsi_cmd_resp)); ++ } ++ ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_scsi_submit_completion_cb; ++ io_wait_r->cb_arg = task; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ ssam_scsi_task_stat_tick(&task->task_stat.complete_end_tsc); ++ ssam_scsi_stat_statistics(task); ++ ++ /* after spdk_task_construct called, put task */ ++ ssam_scsi_task_put(task); ++} ++ ++static void ++ssam_scsi_submit_completion(struct spdk_ssam_scsi_task *task) ++{ ++ struct spdk_ssam_session *smsession = task->smsession; ++ struct ssam_request *io_req = task->io_req; ++ int rc; ++ ++ ssam_scsi_task_stat_tick(&task->task_stat.complete_start_tsc); ++ if (spdk_unlikely(io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL)) { ++ rc = ssam_scsi_io_complete(smsession->smdev, io_req, &task->tmf_resp, ++ sizeof(struct virtio_scsi_ctrl_tmf_resp)); ++ } else { ++ rc = ssam_scsi_io_complete(smsession->smdev, io_req, &task->resp, ++ sizeof(struct virtio_scsi_cmd_resp)); ++ } ++ ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_scsi_submit_completion_cb; ++ io_wait_r->cb_arg = task; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ ssam_scsi_task_stat_tick(&task->task_stat.complete_end_tsc); ++ ssam_scsi_stat_statistics(task); ++ ++ /* after spdk_task_construct called, put task */ ++ ssam_scsi_task_put(task); ++} ++ ++struct ssam_scsi_dma_data_request_arg { ++ struct spdk_ssam_dev *smdev; ++ struct spdk_ssam_scsi_task *task; ++ struct ssam_dma_request dma_req; ++}; ++ ++static void ++ssam_scsi_dma_data_request_cb(void *arg) ++{ ++ struct ssam_scsi_dma_data_request_arg *cb_arg = (struct ssam_scsi_dma_data_request_arg *)arg; ++ int ret = ssam_dma_data_request(cb_arg->smdev->tid, &cb_arg->dma_req); ++ if (ret == -ENOMEM || ret == -EIO) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_scsi_dma_data_request_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(cb_arg->smdev, io_wait_r); ++ return; ++ } ++ if (ret < 0) { ++ SPDK_ERRLOG("ssam dma data request failed(%d)\n", ret); ++ cb_arg->task->resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssam_scsi_submit_completion(cb_arg->task); ++ } ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ++ssam_scsi_task_dma_request(struct spdk_ssam_scsi_task *task, enum data_request_dma_type data_dir) ++{ ++ struct spdk_ssam_session *smsession = task->smsession; ++ struct ssam_dma_request data_request = {0}; ++ int ret = 0; ++ ++ ssam_scsi_task_stat_tick(&task->task_stat.dma_start_tsc); ++ task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[0]->scsi_stat.dma_count++; ++ ++ switch (data_dir) { ++ case SSAM_REQUEST_DATA_STORE: ++ ssam_scsi_task_dma_request_para(&data_request, task, SSAM_REQUEST_DATA_STORE, 0); 
++ ++ /* dma request: ipu -> Host */ ++ ret = ssam_dma_data_request(smsession->smdev->tid, &data_request); ++ break; ++ ++ case SSAM_REQUEST_DATA_LOAD: ++ ssam_scsi_task_dma_request_para(&data_request, task, SSAM_REQUEST_DATA_LOAD, 0); ++ ++ /* dma request: Host -> ipu */ ++ ret = ssam_dma_data_request(smsession->smdev->tid, &data_request); ++ break; ++ ++ default: ++ SPDK_ERRLOG("Invalid data dir: %u.\n", data_dir); ++ break; ++ } ++ ++ if (ret == -ENOMEM || ret == -EIO) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_scsi_dma_data_request_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_scsi_dma_data_request_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r or cb_arg failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smsession->smdev; ++ cb_arg->dma_req = data_request; ++ cb_arg->task = task; ++ io_wait_r->cb_fn = ssam_scsi_dma_data_request_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ ++ if (ret < 0) { ++ SPDK_ERRLOG("ssam dma data request failed(%d)\n", ret); ++ task->resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssam_scsi_submit_completion(task); ++ } ++} ++ ++static void ++ssam_scsi_task_copy_resp(struct spdk_ssam_scsi_task *task) ++{ ++ struct spdk_scsi_task *scsi_task = &task->scsi_task; ++ ++ if (spdk_unlikely(task->io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL)) { ++ task->tmf_resp.response = scsi_task->status; ++ } else { ++ task->resp.status = scsi_task->status; ++ if (spdk_unlikely(scsi_task->sense_data_len > SSAM_SENSE_DATE_LEN)) { ++ return; ++ } ++ if (scsi_task->status != SPDK_SCSI_STATUS_GOOD) { ++ memcpy(task->resp.sense, scsi_task->sense_data, scsi_task->sense_data_len); ++ task->resp.sense_len = scsi_task->sense_data_len; ++ } ++ ++ if (scsi_task->transfer_len != scsi_task->length) { ++ SPDK_ERRLOG("task transfer_len(%u) not equal to length(%u), internal error.\n", ++ scsi_task->transfer_len, scsi_task->length); ++ } ++ ++ task->resp.resid = scsi_task->length - scsi_task->data_transferred; ++ } ++} ++ ++static void ++ssam_scsi_read_task_cpl_cb(struct spdk_scsi_task *scsi_task) ++{ ++ if (spdk_unlikely(spdk_get_shutdown_sig_received())) { ++ /* ++ * In the hot restart process, when this callback is triggered, ++ * the task and bdev_io memory may have been released. ++ * Therefore, task and bdev_io are not released in this scenario. ++ */ ++ return; ++ } ++ struct spdk_ssam_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_ssam_scsi_task, ++ scsi_task); ++ int32_t tgt_id = task->tgt_id; ++ int32_t lun_id = spdk_scsi_lun_get_id(scsi_task->lun); ++ struct spdk_scsi_dev_io_state *io_stat = task->ssmsession->scsi_dev_state[tgt_id].io_stat[lun_id]; ++ ++ /* Second part start of read */ ++ io_stat->submit_tsc = spdk_get_ticks(); ++ ++ ssam_scsi_task_copy_resp(task); ++ ++ ssam_scsi_task_stat_tick(&task->task_stat.bdev_end_tsc); ++ task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[0]->scsi_stat.bdev_complete_count++; ++ ++ /* 1) A read request without data does not need DMA; ++ 2) A failed read request is just completed. 
++ */ ++ if (scsi_task->length == 0 || scsi_task->status != SPDK_SCSI_STATUS_GOOD) { ++ ssam_scsi_submit_completion(task); ++ return; ++ } ++ ++ /* Dma data from IPU to HOST */ ++ ssam_scsi_task_dma_request(task, SSAM_REQUEST_DATA_STORE); ++ ++ return; ++} ++ ++static void ++ssam_scsi_write_task_cpl_cb(struct spdk_scsi_task *scsi_task) ++{ ++ if (spdk_unlikely(spdk_get_shutdown_sig_received())) { ++ /* ++ * In the hot restart process, when this callback is triggered, ++ * the task and bdev_io memory may have been released. ++ * Therefore, task and bdev_io are not released in this scenario. ++ */ ++ return; ++ } ++ struct spdk_ssam_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_ssam_scsi_task, ++ scsi_task); ++ int32_t tgt_id = task->tgt_id; ++ int32_t lun_id = spdk_scsi_lun_get_id(scsi_task->lun); ++ struct spdk_scsi_dev_io_state *io_stat = task->ssmsession->scsi_dev_state[tgt_id].io_stat[lun_id]; ++ uint32_t payload_size = task->scsi_task.transfer_len; ++ ++ /* Second part start of write */ ++ io_stat->submit_tsc = spdk_get_ticks(); ++ ++ /* copy result from spdk_scsi_task to spdk_ssam_scsi_task->resp */ ++ ssam_scsi_task_copy_resp(task); ++ ++ ssam_scsi_task_stat_tick(&task->task_stat.bdev_end_tsc); ++ task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[0]->scsi_stat.bdev_complete_count++; ++ ++ ssam_scsi_submit_completion(task); ++ /* Second part end of write */ ++ io_stat->stat.write_latency_ticks += ssam_get_diff_tsc(io_stat->submit_tsc); ++ io_stat->stat.bytes_written += payload_size; ++ io_stat->stat.num_write_ops++; ++ ++ return; ++} ++ ++static void ++ssam_scsi_ctl_task_cpl_cb(struct spdk_scsi_task *scsi_task) ++{ ++ struct spdk_ssam_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_ssam_scsi_task, ++ scsi_task); ++ ++ ssam_scsi_task_copy_resp(task); ++ ++ ssam_scsi_submit_completion(task); ++} ++ ++static void ++ssam_scsi_task_free_cb(struct spdk_scsi_task *scsi_task) ++{ ++ struct spdk_ssam_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_ssam_scsi_task, ++ scsi_task); ++ ++ ssam_scsi_task_finish(task); ++} ++ ++static int ++ssam_scsi_task_init_target(struct spdk_ssam_scsi_task *task, const __u8 *lun) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = task->ssmsession; ++ struct spdk_scsi_dev_ssam_state *state = NULL; ++ int32_t lun_id = (((uint16_t)lun[2] << 8) | lun[3]) & 0x3FFF; ++ int32_t tgt_id = lun[1]; ++ ++ if (lun[0] != 1 || tgt_id >= SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("First byte must be 1 and second is target\n"); ++ ssmsession->smsession.smdev->discard_io_num++; ++ return -1; ++ } ++ ++ state = &ssmsession->scsi_dev_state[tgt_id]; ++ task->scsi_dev = state->dev; ++ if (state->dev == NULL || state->status != SSAM_SCSI_DEV_PRESENT) { ++ return -1; ++ } ++ ++ task->tgt_id = tgt_id; ++ task->scsi_task.target_port = spdk_scsi_dev_find_port_by_id(task->scsi_dev, 0); ++ task->scsi_task.lun = spdk_scsi_dev_get_lun(state->dev, lun_id); ++ if (task->scsi_task.lun == NULL) { ++ SPDK_ERRLOG("Failed to init scsi task lun by lun_id(%d)\n", lun_id); ++ return -1; ++ } ++ return 0; ++} ++ ++static void ++ssam_scsi_submit_io_task(struct spdk_ssam_scsi_task *task) ++{ ++ task->resp.response = VIRTIO_SCSI_S_OK; ++ ++ ssam_scsi_task_stat_tick(&task->task_stat.bdev_start_tsc); ++ spdk_scsi_dev_queue_task(task->scsi_dev, &task->scsi_task); ++ task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[0]->scsi_stat.bdev_count++; ++ ssam_scsi_task_stat_tick(&task->task_stat.bdev_func_tsc); ++ ++ SPDK_DEBUGLOG(ssam_blk_data, "====== Task: task_idx %u 
submitted ======\n", task->task_idx); ++} ++ ++static int ++ssam_scsi_task_iovs_memory_get(struct spdk_ssam_scsi_task *task, uint32_t payload_size) ++{ ++ struct ssam_mempool *mp = task->smsession->mp; ++ void *buffer = NULL; ++ uint64_t phys_addr = 0; ++ uint32_t alloc_size; ++ ++ if (payload_size == 0) { /* A little strange */ ++ alloc_size = 1; /* Alloc one iov at least */ ++ } else { ++ alloc_size = payload_size; ++ } ++ ++ buffer = ssam_mempool_alloc(mp, alloc_size, &phys_addr); ++ if (spdk_unlikely(buffer == NULL)) { ++ return -ENOMEM; ++ } ++ ++ /* ssam request max IO size is PAYLOAD_SIZE_MAX, only use one iov to save data */ ++ task->iovs.virt.sges[0].iov_base = buffer; ++ task->iovs.phys.sges[0].iov_base = (void *)phys_addr; ++ task->iovs.virt.sges[0].iov_len = payload_size; ++ task->iovs.phys.sges[0].iov_len = payload_size; ++ task->iovcnt = 1; ++ ++ return 0; ++} ++ ++static void ++scsi_mgmt_task_submit(struct spdk_ssam_scsi_task *task, enum spdk_scsi_task_func func) ++{ ++ task->tmf_resp.response = VIRTIO_SCSI_S_OK; ++ task->scsi_task.function = func; ++ spdk_scsi_dev_queue_mgmt_task(task->scsi_dev, &task->scsi_task); ++} ++ ++static void ++ssam_scsi_process_ctl_task(struct spdk_ssam_session *smsession, struct spdk_ssam_scsi_task *task) ++{ ++ struct virtio_scsi_ctrl_tmf_req *ctrl_req = (struct virtio_scsi_ctrl_tmf_req *) ++ task->io_req->req.cmd.header; ++ int32_t lun_id = spdk_scsi_lun_get_id(task->scsi_task.lun); ++ struct spdk_scsi_dev_io_state *io_stat = ++ task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[lun_id]; ++ int ret = 0; ++ ++ spdk_scsi_task_construct(&task->scsi_task, ssam_scsi_ctl_task_cpl_cb, ssam_scsi_task_free_cb); ++ ret = ssam_scsi_task_init_target(task, ctrl_req->lun); ++ if (ret < 0) { ++ task->tmf_resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssam_scsi_submit_completion(task); ++ return; ++ } ++ ++ switch (ctrl_req->type) { ++ case VIRTIO_SCSI_T_TMF: ++ /* Check if we are processing a valid request */ ++ if (task->scsi_dev == NULL) { ++ task->tmf_resp.response = VIRTIO_SCSI_S_BAD_TARGET; ++ ssam_scsi_submit_completion(task); ++ break; ++ } ++ ++ switch (ctrl_req->subtype) { ++ case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET: ++ /* Handle LUN reset */ ++ SPDK_DEBUGLOG(ssam_scsi, "%s: LUN reset\n", smsession->name); ++ ++ scsi_mgmt_task_submit(task, SPDK_SCSI_TASK_FUNC_LUN_RESET); ++ return; ++ default: ++ task->tmf_resp.response = VIRTIO_SCSI_S_ABORTED; ++ ssam_scsi_submit_completion(task); ++ /* Unsupported command */ ++ SPDK_DEBUGLOG(ssam_scsi, "%s: unsupported TMF command %x\n", ++ smsession->name, ctrl_req->subtype); ++ break; ++ } ++ break; ++ ++ case VIRTIO_SCSI_T_AN_QUERY: ++ case VIRTIO_SCSI_T_AN_SUBSCRIBE: ++ task->tmf_resp.response = VIRTIO_SCSI_S_ABORTED; ++ ssam_scsi_submit_completion(task); ++ break; ++ ++ default: ++ SPDK_DEBUGLOG(ssam_scsi, "%s: Unsupported control command %x\n", ++ smsession->name, ctrl_req->type); ++ io_stat->scsi_stat.fatal_ios++; ++ break; ++ } ++} ++ ++static void ++ssam_scsi_io_task_construct(struct spdk_ssam_scsi_task *task) ++{ ++ struct spdk_scsi_task *scsi_task = &task->scsi_task; ++ struct ssam_io_message *io_cmd = &task->io_req->req.cmd; ++ ++ if (io_cmd->writable) { /* read io */ ++ spdk_scsi_task_construct(scsi_task, ssam_scsi_read_task_cpl_cb, ssam_scsi_task_free_cb); ++ } else { /* write io */ ++ spdk_scsi_task_construct(scsi_task, ssam_scsi_write_task_cpl_cb, ssam_scsi_task_free_cb); ++ } ++} ++ ++static int32_t ++ssam_scsi_io_task_setup(struct spdk_ssam_scsi_task *task) ++{ ++ struct spdk_scsi_task *scsi_task 
= &task->scsi_task; ++ struct ssam_io_message *io_cmd = &task->io_req->req.cmd; ++ struct virtio_scsi_cmd_req *req = (struct virtio_scsi_cmd_req *)io_cmd->header; ++ uint32_t payload_size; ++ int ret; ++ ++ ssam_scsi_io_task_construct(task); ++ ++ ret = ssam_scsi_get_payload_size(task->io_req, &payload_size); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ ret = ssam_scsi_task_init_target(task, req->lun); ++ if (ret < 0) { ++ return ret; ++ } ++ ++ scsi_task->dxfer_dir = (io_cmd->writable ? SPDK_SCSI_DIR_FROM_DEV : SPDK_SCSI_DIR_TO_DEV); ++ scsi_task->iovs = task->iovs.virt.sges; ++ scsi_task->cdb = req->cdb; ++ scsi_task->transfer_len = payload_size; ++ scsi_task->length = payload_size; ++ ++ ret = ssam_scsi_task_iovs_memory_get(task, payload_size); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_scsi_process_io_task(struct spdk_ssam_session *smsession, struct spdk_ssam_scsi_task *task) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev_io_state *io_stat; ++ uint64_t cur_tsc; ++ int32_t lun_id; ++ ++ ssmsession->scsi_dev_state[task->tgt_id].flight_io++; ++ ++ if (spdk_unlikely(task->scsi_task.lun == NULL)) { ++ spdk_scsi_task_process_null_lun(&task->scsi_task); ++ task->resp.response = VIRTIO_SCSI_S_OK; ++ ssam_scsi_submit_completion(task); ++ return; ++ } ++ ++ lun_id = spdk_scsi_lun_get_id(task->scsi_task.lun); ++ io_stat = ssmsession->scsi_dev_state[task->tgt_id].io_stat[lun_id]; ++ if (io_stat == NULL) { ++ SPDK_ERRLOG("No io_stat with tgt %d lun %d\n", task->tgt_id, lun_id); ++ task->resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssam_scsi_submit_completion(task); ++ return; ++ } ++ /* First part start of read and write */ ++ cur_tsc = spdk_get_ticks(); ++ io_stat->submit_tsc = cur_tsc; ++ memset(&task->task_stat, 0, sizeof(task->task_stat)); ++ task->task_stat.start_tsc = cur_tsc; ++ io_stat->scsi_stat.start_count++; ++ ++ switch (task->scsi_task.dxfer_dir) { ++ case SPDK_SCSI_DIR_FROM_DEV: /* read: read data from backend to ipu, then dma to host */ ++ ssam_scsi_submit_io_task(task); ++ /* First part end of read */ ++ uint8_t rw_type = task->scsi_task.cdb[0]; ++ if (rw_type == SPDK_SBC_READ_6 || rw_type == SPDK_SBC_READ_10 || ++ rw_type == SPDK_SBC_READ_12 || rw_type == SPDK_SBC_READ_16) { ++ io_stat->stat.read_latency_ticks += ssam_get_diff_tsc(io_stat->submit_tsc); ++ io_stat->stat.bytes_read += task->scsi_task.transfer_len; ++ io_stat->stat.num_read_ops++; ++ } ++ break; ++ ++ case SPDK_SCSI_DIR_TO_DEV: /* write: dma data from host to ipu, then submit to backend */ ++ ssam_scsi_task_dma_request(task, SSAM_REQUEST_DATA_LOAD); ++ break; ++ ++ default: ++ SPDK_ERRLOG("scsi task dxfer dir error, dir is %u.\n", task->scsi_task.dxfer_dir); ++ io_stat->scsi_stat.fatal_ios++; ++ break; ++ } ++} ++ ++static void ++ssam_scsi_pre_process_io_task(struct spdk_ssam_session *smsession, struct spdk_ssam_scsi_task *task) ++{ ++ int ret; ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ ++ ret = ssam_scsi_io_task_setup(task); ++ if (ret != 0) { ++ if (ret == -ENOMEM) { ++ ssam_session_insert_io_wait(smsession, &task->session_io_wait); ++ return; ++ } ++ task->resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssmsession->scsi_dev_state[task->tgt_id].flight_io++; ++ ssam_scsi_submit_completion(task); ++ return; ++ } ++ ++ ssam_scsi_process_io_task(smsession, task); ++} ++ ++static void ++ssam_scsi_process_request(struct spdk_ssam_session *smsession, struct ssam_request 
*io_req, ++ uint16_t vq_idx) ++{ ++ struct spdk_ssam_scsi_task *task = NULL; ++ struct spdk_ssam_virtqueue *vq = &smsession->virtqueue[vq_idx]; ++ ++ if (spdk_unlikely(vq->use_num >= vq->num)) { ++ SPDK_ERRLOG("Session:%s vq(%hu) task_cnt(%u) limit(%u).\n", smsession->name, vq_idx, vq->use_num, ++ vq->num); ++ ssam_scsi_req_complete(smsession->smdev, io_req, VIRTIO_SCSI_S_FAILURE); ++ return; ++ } ++ ++ uint32_t index = vq->index[vq->index_r]; ++ task = &((struct spdk_ssam_scsi_task *)vq->tasks)[index]; ++ if (spdk_unlikely(task->used)) { ++ SPDK_ERRLOG("%s: vq(%hu) task_idx(%hu) is already pending.\n", smsession->name, vq_idx, ++ task->task_idx); ++ ssam_scsi_req_complete(smsession->smdev, io_req, VIRTIO_SCSI_S_FAILURE); ++ return; ++ } ++ ++ smsession->task_cnt++; ++ vq->index_r = (vq->index_r + 1) & 0xFF; ++ vq->use_num++; ++ ssam_scsi_task_init(task); ++ task->io_req = io_req; ++ ++ if (spdk_unlikely(io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL)) { ++ ssam_scsi_process_ctl_task(smsession, task); ++ } else { ++ ssam_scsi_pre_process_io_task(smsession, task); ++ } ++ ++ return; ++} ++ ++static void ++ssam_scsi_request_worker(struct spdk_ssam_session *smsession, void *arg) ++{ ++ struct ssam_request *io_req = (struct ssam_request *)arg; ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ struct spdk_ssam_dev *smdev = smsession->smdev; ++ struct virtio_scsi_cmd_req *req = (struct virtio_scsi_cmd_req *)io_cmd->header; ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ uint16_t vq_idx = io_cmd->virtio.vq_idx; ++ uint32_t tgt_id = req->lun[1]; ++ ++ smdev->io_num++; ++ ++ if (vq_idx >= smsession->max_queues) { ++ SPDK_ERRLOG("vq_idx out of range, need less than %u, actually %u\n", ++ smsession->max_queues, vq_idx); ++ goto err; ++ } ++ ++ if (io_req->status != SSAM_IO_STATUS_OK) { ++ SPDK_WARNLOG("%s: ssam request status invalid, but still process, status=%d\n", ++ smsession->name, io_req->status); ++ goto err; ++ } ++ ++ if (ssmsession->scsi_dev_state[tgt_id].status != SSAM_SCSI_DEV_PRESENT) { ++ /* If dev has been deleted, return io err */ ++ goto err; ++ } ++ ++ ssam_scsi_process_request(smsession, io_req, vq_idx); ++ ++ return; ++ ++err: ++ ssam_scsi_req_complete(smsession->smdev, io_req, VIRTIO_SCSI_S_FAILURE); ++ return; ++} ++ ++static void ++ssam_scsi_response_worker(struct spdk_ssam_session *smsession, void *arg) ++{ ++ struct ssam_dma_rsp *dma_rsp = (struct ssam_dma_rsp *)arg; ++ const struct spdk_ssam_dma_cb *dma_cb = (const struct spdk_ssam_dma_cb *)&dma_rsp->cb; ++ struct spdk_ssam_scsi_task *task = NULL; ++ uint16_t vq_idx = dma_cb->vq_idx; ++ uint16_t task_idx = dma_cb->task_idx; ++ uint8_t req_dir = dma_cb->req_dir; ++ ++ if (spdk_unlikely(vq_idx >= smsession->max_queues)) { ++ smsession->smdev->discard_io_num++; ++ SPDK_ERRLOG("vq_idx out of range, need less than %u, actually %u\n", ++ smsession->max_queues, vq_idx); ++ return; ++ } ++ ++ task = &((struct spdk_ssam_scsi_task *)smsession->virtqueue[vq_idx].tasks)[task_idx]; ++ if (dma_rsp->status != 0) { ++ task->resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssam_scsi_submit_completion(task); ++ SPDK_ERRLOG("dma data process failed!\n"); ++ return; ++ } ++ if (dma_rsp->last_flag == 0) { ++ task->resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssam_scsi_submit_completion(task); ++ SPDK_ERRLOG("last_flag should not equal 0!\n"); ++ return; ++ } ++ int32_t tgt_id = task->tgt_id; ++ int32_t lun_id = spdk_scsi_lun_get_id(task->scsi_task.lun); ++ struct spdk_scsi_dev_io_state *io_stat = 
task->ssmsession->scsi_dev_state[tgt_id].io_stat[lun_id]; ++ ++ ssam_scsi_task_stat_tick(&task->task_stat.dma_end_tsc); ++ task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[0]->scsi_stat.dma_complete_count++; ++ ++ if (req_dir == SSAM_REQUEST_DATA_LOAD) { ++ /* Write: write data ready, submit task to backend */ ++ ssam_scsi_submit_io_task(task); ++ /* First part end of write */ ++ io_stat->stat.write_latency_ticks += ssam_get_diff_tsc(io_stat->submit_tsc); ++ } else if (req_dir == SSAM_REQUEST_DATA_STORE) { ++ /* Read: data have been read by user, complete the task */ ++ task->resp.response = VIRTIO_SCSI_S_OK; ++ ssam_scsi_submit_completion(task); ++ /* Second part end of read */ ++ io_stat->stat.read_latency_ticks += ssam_get_diff_tsc(io_stat->submit_tsc); ++ } else { ++ io_stat->scsi_stat.fatal_ios++; ++ } ++} ++ ++static void ++ssam_scsi_destroy_bdev_device(struct spdk_ssam_session *smsession, void *args) ++{ ++ unsigned scsi_tgt_num = (unsigned)(uintptr_t)(args); ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ ++ ssam_remove_scsi_tgt(ssmsession, scsi_tgt_num); ++} ++ ++static void ++ssam_free_scsi_task_pool(struct spdk_ssam_scsi_session *ssmsession) ++{ ++ struct spdk_ssam_session *smsession = &ssmsession->smsession; ++ struct spdk_ssam_virtqueue *vq = NULL; ++ uint16_t max_queues = smsession->max_queues; ++ uint16_t i; ++ ++ if (max_queues > SPDK_SSAM_MAX_VQUEUES) { ++ return; ++ } ++ ++ for (i = 0; i < max_queues; i++) { ++ vq = &smsession->virtqueue[i]; ++ if (vq->tasks != NULL) { ++ spdk_free(vq->tasks); ++ vq->tasks = NULL; ++ } ++ ++ if (vq->index != NULL) { ++ spdk_free(vq->index); ++ vq->index = NULL; ++ } ++ } ++} ++ ++static int ++ssam_alloc_scsi_task_pool(struct spdk_ssam_scsi_session *ssmsession) ++{ ++ struct spdk_ssam_session *smsession = &ssmsession->smsession; ++ struct spdk_ssam_virtqueue *vq = NULL; ++ struct spdk_ssam_scsi_task *task = NULL; ++ uint16_t max_queues = smsession->max_queues; ++ uint32_t task_cnt = smsession->queue_size; ++ uint16_t i; ++ uint32_t j; ++ ++ if ((max_queues > SPDK_SSAM_MAX_VQUEUES) || (max_queues == 0)) { ++ SPDK_ERRLOG("%s: max_queues %u invalid\n", smsession->name, max_queues); ++ return -EINVAL; ++ } ++ ++ if ((task_cnt == 0) || (task_cnt > SPDK_SSAM_MAX_VQ_SIZE)) { ++ SPDK_ERRLOG("%s: virtuque size %u invalid\n", smsession->name, task_cnt); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < max_queues; i++) { ++ vq = &smsession->virtqueue[i]; ++ vq->smsession = smsession; ++ vq->num = task_cnt; ++ vq->use_num = 0; ++ vq->index_l = 0; ++ vq->index_r = 0; ++ vq->tasks = spdk_zmalloc(sizeof(struct spdk_ssam_scsi_task) * task_cnt, ++ SPDK_CACHE_LINE_SIZE, NULL, ++ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); ++ vq->index = spdk_zmalloc(sizeof(uint32_t) * task_cnt, ++ SPDK_CACHE_LINE_SIZE, NULL, ++ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); ++ if (vq->tasks == NULL || vq->index == NULL) { ++ SPDK_ERRLOG("%s: failed to allocate %"PRIu32" tasks for virtqueue %"PRIu16"\n", ++ smsession->name, task_cnt, i); ++ ssam_free_scsi_task_pool(ssmsession); ++ return -ENOMEM; ++ } ++ ++ for (j = 0; j < task_cnt; j++) { ++ task = &((struct spdk_ssam_scsi_task *)vq->tasks)[j]; ++ task->ssmsession = ssmsession; ++ task->smsession = &ssmsession->smsession; ++ task->vq_idx = i; ++ task->task_idx = j; ++ vq->index[j] = j; ++ } ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_scsi_print_stuck_io_info(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_scsi_task *tasks; ++ struct spdk_ssam_scsi_task *task; ++ int 
i, j; ++ ++ for (i = 0; i < smsession->max_queues; i++) { ++ for (j = 0; j < smsession->queue_size; j++) { ++ tasks = (struct spdk_ssam_scsi_task *)smsession->virtqueue[i].tasks; ++ task = &tasks[j]; ++ if (task == NULL) { ++ continue; ++ } ++ if (task->used) { ++ SPDK_INFOLOG(ssam_scsi, "%s: stuck io payload_size %u, vq_idx %u, task_idx %u\n", ++ smsession->name, task->scsi_task.length, task->vq_idx, task->task_idx); ++ } ++ } ++ } ++} ++ ++static int ++ssam_scsi_start_cb(struct spdk_ssam_session *smsession, void **unused) ++{ ++ SPDK_NOTICELOG("SCSI controller %s created with queues %u\n", ++ smsession->name, smsession->max_queues); ++ ++ ssam_session_start_done(smsession, 0); ++ ++ return 0; ++} ++ ++static int ++ssam_scsi_start(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = false, ++ .need_rsp = true, ++ }; ++ int rc = ssam_alloc_scsi_task_pool(ssmsession); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: failed to alloc task pool.\n", smsession->name); ++ return rc; ++ } ++ return ssam_send_event_to_session(smsession, ssam_scsi_start_cb, NULL, send_event_flag, NULL); ++} ++ ++static int ++ssam_scsi_session_connect(struct spdk_ssam_session *smsession, uint16_t queues) ++{ ++ uint16_t queue_cnt = queues; ++ ++ if (queue_cnt == 0) { ++ queue_cnt = SPDK_SSAM_SCSI_DEFAULT_VQUEUES; ++ } ++ ++ smsession->max_queues = queue_cnt; ++ smsession->queue_size = SPDK_SSAM_DEFAULT_VQ_SIZE; ++ ++ return ssam_scsi_start(smsession); ++} ++ ++int ++ssam_scsi_construct(struct spdk_ssam_session_reg_info *info) ++{ ++ struct spdk_ssam_session *smsession = NULL; ++ struct spdk_ssam_scsi_session *ssmsession = NULL; ++ uint32_t session_ctx_size = sizeof(struct spdk_ssam_scsi_session) - sizeof( ++ struct spdk_ssam_session); ++ uint16_t tid; ++ int rc = 0; ++ ++ ssam_lock(); ++ ++ tid = ssam_get_tid(); ++ if (tid == SPDK_INVALID_TID) { ++ ssam_unlock(); ++ return -EINVAL; ++ } ++ ++ info->tid = tid; ++ info->backend = &g_ssam_scsi_session_backend; ++ info->session_ctx_size = session_ctx_size; ++ snprintf(info->type_name, SPDK_SESSION_TYPE_MAX_LEN, "%s", SPDK_SESSION_TYPE_SCSI); ++ rc = ssam_session_register(info, &smsession); ++ if (rc != 0) { ++ ssam_unlock(); ++ return rc; ++ } ++ smsession->started = true; ++ ++ ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ ssmsession->registered = true; ++ ssmsession->dbdf = strdup(info->dbdf); ++ if (ssmsession->dbdf == NULL) { ++ ssam_session_unregister(smsession, false); ++ ssam_unlock(); ++ return -EINVAL; ++ } ++ ++ rc = ssam_scsi_session_connect(smsession, info->queues); ++ if (rc != 0) { ++ ssam_session_unreg_response_cb(smsession); ++ ssam_session_unregister(smsession, false); ++ ssam_unlock(); ++ return -EINVAL; ++ } ++ ++ ssam_unlock(); ++ ++ return 0; ++} ++ ++static int ++ssam_get_scsi_tgt_num(struct spdk_ssam_scsi_session *ssmsession, int *scsi_tgt_num_out) ++{ ++ int scsi_tgt_num = *scsi_tgt_num_out; ++ if (scsi_tgt_num < 0) { ++ for (scsi_tgt_num = 0; scsi_tgt_num < SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; scsi_tgt_num++) { ++ if (ssmsession->scsi_dev_state[scsi_tgt_num].dev == NULL) { ++ break; ++ } ++ } ++ ++ if (scsi_tgt_num == SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("%s: all SCSI target slots are already in use.\n", ssmsession->smsession.name); ++ return -ENOSPC; ++ } ++ } else { ++ if (scsi_tgt_num >= SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("%s: SCSI target number is too big (got %d, max 
%d)\n", ++ ssmsession->smsession.name, scsi_tgt_num, SPDK_SSAM_SCSI_CTRLR_MAX_DEVS - 1); ++ return -EINVAL; ++ } ++ } ++ *scsi_tgt_num_out = scsi_tgt_num; ++ return 0; ++} ++ ++static int ++ssam_scsi_dev_param_changed(struct spdk_ssam_scsi_session *ssmsession, ++ unsigned scsi_tgt_num) ++{ ++ struct spdk_scsi_dev_ssam_state *state = &ssmsession->scsi_dev_state[scsi_tgt_num]; ++ ++ if (state->dev == NULL) { ++ return 0; ++ } ++ int rc = ssam_scsi_send_event(&ssmsession->smsession, scsi_tgt_num, VIRTIO_SCSI_T_PARAM_CHANGE, ++ 0x2a | (0x09 << 0x8)); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: tgt %d resize send action failed\n", ssmsession->smsession.name, scsi_tgt_num); ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static unsigned ++ssam_get_scsi_dev_num(const struct spdk_ssam_scsi_session *ssmsession, ++ const struct spdk_scsi_lun *lun) ++{ ++ const struct spdk_scsi_dev *scsi_dev; ++ unsigned scsi_dev_num; ++ ++ scsi_dev = spdk_scsi_lun_get_dev(lun); ++ for (scsi_dev_num = 0; scsi_dev_num < SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; scsi_dev_num++) { ++ if (ssmsession->scsi_dev_state[scsi_dev_num].dev == scsi_dev) { ++ break; ++ } ++ } ++ return scsi_dev_num; ++} ++ ++static void ++ssam_scsi_lun_resize(const struct spdk_scsi_lun *lun, void *arg) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = arg; ++ unsigned scsi_dev_num; ++ ++ scsi_dev_num = ssam_get_scsi_dev_num(ssmsession, lun); ++ if (scsi_dev_num == SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ /* The entire device has been already removed. */ ++ return; ++ } ++ ++ (void)ssam_scsi_dev_param_changed(ssmsession, scsi_dev_num); ++} ++ ++static void ++ssam_scsi_lun_hotremove(const struct spdk_scsi_lun *lun, void *arg) ++{ ++ struct ssam_scsi_tgt_hotplug_ctx *ctx; ++ struct spdk_ssam_scsi_session *ssmsession = arg; ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = false, ++ .need_rsp = false, ++ }; ++ unsigned scsi_dev_num; ++ ++ scsi_dev_num = ssam_get_scsi_dev_num(ssmsession, lun); ++ if (scsi_dev_num == SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ /* The entire device has been already removed. 
*/ ++ return; ++ } ++ ++ ctx = calloc(1, sizeof(*ctx)); ++ if (ctx == NULL) { ++ SPDK_ERRLOG("calloc failed\n"); ++ return; ++ } ++ ++ ctx->scsi_tgt_num = scsi_dev_num; ++ ssam_send_event_to_session(&ssmsession->smsession, ssam_scsi_dev_hot_remove_tgt, ++ NULL, send_event_flag, ctx); ++} ++ ++static int ++ssam_scsi_session_add_tgt(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct ssam_add_tgt_ev_ctx *args = (struct ssam_add_tgt_ev_ctx *)(*ctx); ++ unsigned scsi_tgt_num = args->tgt_num; ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ int rc; ++ ++ rc = spdk_scsi_dev_allocate_io_channels(ssmsession->scsi_dev_state[scsi_tgt_num].dev); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: Couldn't allocate io channnel for SCSI target %u.\n", ++ smsession->name, scsi_tgt_num); ++ } ++ ++ rc = ssam_scsi_send_event(smsession, scsi_tgt_num, VIRTIO_SCSI_T_TRANSPORT_RESET, ++ VIRTIO_SCSI_EVT_RESET_RESCAN); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: send event %d(reason %d) to target %hu failed, ret: %d, host maynot boot.\n", ++ smsession->name, VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_EVT_RESET_RESCAN, scsi_tgt_num, rc); ++ if (rc == -ENOSPC) { ++ spdk_scsi_dev_free_io_channels(ssmsession->scsi_dev_state[scsi_tgt_num].dev); ++ ssam_scsi_destruct_tgt(ssmsession, scsi_tgt_num); ++ return rc; ++ } ++ } ++ ++ ssmsession->scsi_dev_state[scsi_tgt_num].status = SSAM_SCSI_DEV_PRESENT; ++ ssmsession->scsi_dev_state[scsi_tgt_num].flight_io = 0; ++ ++ return 0; ++} ++ ++static void ++ssam_scsi_dev_add_tgt_cpl_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct ssam_add_tgt_ev_ctx *args = (struct ssam_add_tgt_ev_ctx *)(*ctx); ++ unsigned scsi_tgt_num = args->tgt_num; ++ ssmsession->ref++; ++ ++ SPDK_NOTICELOG("SCSI controller %s target %u added with bdev %s\n", ++ smsession->name, scsi_tgt_num, args->bdev_name); ++ ++ free(args->bdev_name); ++ args->bdev_name = NULL; ++ free(args); ++} ++ ++struct ssam_scsi_session_remove_tgt_arg { ++ struct spdk_ssam_session *smsession; ++ unsigned scsi_tgt_num; ++}; ++ ++static void ++ssam_scsi_session_remove_tgt_cpl(struct spdk_ssam_session *smsession, void **_ctx) ++{ ++ struct ssam_scsi_tgt_hotplug_ctx *ctx = *_ctx; ++ unsigned scsi_tgt_num = ctx->scsi_tgt_num; ++ int rc; ++ rc = ssam_umount_normal(smsession, ssam_scsi_tgtid_to_lunid(scsi_tgt_num)); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: function umount failed when remove scsi tgt:%s.\n", ++ smsession->name, strerror(-rc)); ++ } ++ free(ctx); ++} ++ ++static int ++ssam_scsi_session_remove_tgt(struct spdk_ssam_session *smsession, void **_ctx) ++{ ++ struct ssam_scsi_tgt_hotplug_ctx *ctx = *_ctx; ++ unsigned scsi_tgt_num = ctx->scsi_tgt_num; ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev_ssam_state *state = &ssmsession->scsi_dev_state[scsi_tgt_num]; ++ int rc = 0; ++ ++ if (state->status != SSAM_SCSI_DEV_PRESENT) { ++ SPDK_WARNLOG("%s: SCSI target %u is not present, skip.\n", smsession->name, scsi_tgt_num); ++ rc = -ENODEV; ++ goto out; ++ } ++ ++ if (ssmsession->scsi_dev_state[scsi_tgt_num].flight_io != 0) { ++ SPDK_ERRLOG("%s: SCSI target %u is busy.\n", smsession->name, scsi_tgt_num); ++ rc = -EBUSY; ++ goto out; ++ } ++ ++ state->status = SSAM_SCSI_DEV_REMOVING; ++ ++ SPDK_NOTICELOG("%s: target %d is removing\n", smsession->name, scsi_tgt_num); ++ ++ rc = ssam_scsi_send_event(smsession, scsi_tgt_num, 
VIRTIO_SCSI_T_TRANSPORT_RESET, ++ VIRTIO_SCSI_EVT_RESET_REMOVED); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: scsi send remove event failed\n", smsession->name); ++ if (rc == -ENOSPC) { ++ state->status = SSAM_SCSI_DEV_PRESENT; ++ goto out; ++ } ++ } ++ ++ spdk_scsi_dev_free_io_channels(state->dev); ++ ++ ssam_send_dev_destroy_msg(smsession, (void *)(uintptr_t)scsi_tgt_num); ++ ++ /* free ctx see ssam_scsi_session_remove_tgt_cpl() */ ++ return rc; ++ ++out: ++ free(ctx); ++ ++ return rc; ++} ++ ++static int ++ssam_scsi_construct_tgt(struct spdk_ssam_scsi_session *ssmsession, int scsi_tgt_num, ++ const char *bdev_name) ++{ ++ struct spdk_scsi_dev_ssam_state *state = NULL; ++ char target_name[SPDK_SCSI_DEV_MAX_NAME] = {0}; ++ int lun_id_list[SSAM_SPDK_SCSI_DEV_MAX_LUN]; ++ const char *bdev_names_list[SSAM_SPDK_SCSI_DEV_MAX_LUN]; ++ int rc; ++ ++ state = &ssmsession->scsi_dev_state[scsi_tgt_num]; ++ if (state->dev != NULL) { ++ SPDK_ERRLOG("%s: SCSI target %u already occupied\n", ssmsession->smsession.name, scsi_tgt_num); ++ return -EEXIST; ++ } ++ ++ (void)snprintf(target_name, sizeof(target_name), "Target %u", scsi_tgt_num); ++ lun_id_list[0] = 0; ++ bdev_names_list[0] = (char *)bdev_name; ++ ++ state->status = SSAM_SCSI_DEV_ADDING; ++ rc = ssam_scsi_iostat_construct(ssmsession, scsi_tgt_num, lun_id_list, SSAM_SPDK_SCSI_DEV_MAX_LUN); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ state->dev = spdk_scsi_dev_construct_ext(target_name, bdev_names_list, lun_id_list, ++ SSAM_SPDK_SCSI_DEV_MAX_LUN, ++ SPDK_SPC_PROTOCOL_IDENTIFIER_SAS, ++ ssam_scsi_lun_resize, ssmsession, ++ ssam_scsi_lun_hotremove, ssmsession); ++ if (state->dev == NULL) { ++ SPDK_ERRLOG("%s: couldn't create SCSI target %u using bdev '%s'\n", ++ ssmsession->smsession.name, scsi_tgt_num, bdev_name); ++ rc = -EINVAL; ++ goto dev_fail; ++ } ++ ++ rc = spdk_scsi_dev_add_port(state->dev, 0, "ssam"); ++ if (rc != 0) { ++ goto port_fail; ++ } ++ ++ return rc; ++ ++port_fail: ++ spdk_scsi_dev_destruct(state->dev, NULL, NULL); ++ ++dev_fail: ++ ssam_scsi_iostat_destruct(state); ++ ++ return rc; ++} ++ ++static void ++ssam_scsi_destruct_tgt(struct spdk_ssam_scsi_session *ssmsession, int scsi_tgt_num) ++{ ++ struct spdk_scsi_dev_ssam_state *state = NULL; ++ state = &ssmsession->scsi_dev_state[scsi_tgt_num]; ++ ++ if (state->dev) { ++ spdk_scsi_dev_delete_port(state->dev, 0); ++ spdk_scsi_dev_destruct(state->dev, NULL, NULL); ++ state->dev = NULL; ++ } ++ ssam_scsi_iostat_destruct(state); ++ ++ state->status = SSAM_SCSI_DEV_EMPTY; ++} ++ ++int ++ssam_scsi_dev_add_tgt(struct spdk_ssam_session *smsession, int scsi_tgt_num, ++ const char *bdev_name) ++{ ++ int rc; ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct ssam_add_tgt_ev_ctx *ctx = calloc(1, sizeof(struct ssam_add_tgt_ev_ctx)); ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = false, ++ .need_rsp = true, ++ }; ++ ++ if (ctx == NULL) { ++ SPDK_ERRLOG("calloc ssam_add_tgt_ev_ctx failed\n"); ++ return -ENOMEM; ++ } ++ ++ if (bdev_name == NULL) { ++ SPDK_ERRLOG("No lun name specified\n"); ++ free(ctx); ++ return -EINVAL; ++ } ++ ++ ctx->bdev_name = spdk_sprintf_alloc("%s", bdev_name); ++ if (ctx->bdev_name == NULL) { ++ SPDK_ERRLOG("calloc ssam_add_tgt_ev_ctx bdev_name failed\n"); ++ free(ctx); ++ return -ENOMEM; ++ } ++ ++ rc = ssam_get_scsi_tgt_num(ssmsession, &scsi_tgt_num); ++ if (rc < 0) { ++ free(ctx->bdev_name); ++ free(ctx); ++ return rc; ++ } ++ ++ rc = ssam_mount_normal(smsession, 
ssam_scsi_tgtid_to_lunid(scsi_tgt_num)); ++ if (rc != SSAM_MOUNT_OK) { ++ SPDK_ERRLOG("%s: mount ssam volume failed, tgt id %d\n", smsession->name, scsi_tgt_num); ++ free(ctx->bdev_name); ++ free(ctx); ++ return rc; ++ } ++ ++ rc = ssam_scsi_construct_tgt(ssmsession, scsi_tgt_num, bdev_name); ++ if (rc != 0) { ++ free(ctx->bdev_name); ++ free(ctx); ++ return rc; ++ } ++ ++ ctx->tgt_num = scsi_tgt_num; ++ rc = ssam_send_event_to_session(&ssmsession->smsession, ssam_scsi_session_add_tgt, ++ ssam_scsi_dev_add_tgt_cpl_cb, send_event_flag, (void *)ctx); ++ if (rc != 0) { ++ ssam_scsi_destruct_tgt(ssmsession, scsi_tgt_num); ++ free(ctx->bdev_name); ++ free(ctx); ++ return rc; ++ } ++ ++ SPDK_INFOLOG(ssam_scsi, "%s: added SCSI target %u using bdev '%s'\n", ++ ssmsession->smsession.name, scsi_tgt_num, bdev_name); ++ ++ return 0; ++} ++ ++static int ++ssam_scsi_dev_hot_remove_tgt(struct spdk_ssam_session *smsession, void **_ctx) ++{ ++ int rc = 0; ++ struct ssam_scsi_tgt_hotplug_ctx *ctx = *_ctx; ++ struct spdk_ssam_scsi_session *ssmsession; ++ ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ unsigned scsi_tgt_num = ctx->scsi_tgt_num; ++ if (!ssmsession) { ++ SPDK_ERRLOG("invalid SCSI device"); ++ rc = -EINVAL; ++ goto out; ++ } ++ ++ struct spdk_scsi_dev_ssam_state *scsi_dev_state = &ssmsession->scsi_dev_state[scsi_tgt_num]; ++ if (scsi_dev_state->dev == NULL) { ++ /* Nothing to do */ ++ SPDK_WARNLOG("%s: There is no need to remove scsi target\n", smsession->name); ++ rc = -ENODEV; ++ goto out; ++ } ++ ++ if (scsi_dev_state->status != SSAM_SCSI_DEV_PRESENT) { ++ SPDK_INFOLOG(ssam_scsi, "%s: SCSI target %u is being removed\n", smsession->name, scsi_tgt_num); ++ rc = 0; ++ goto out; ++ } ++ ++ scsi_dev_state->status = SSAM_SCSI_DEV_REMOVING; ++ ++ SPDK_NOTICELOG("%s: target %d is hot removing\n", smsession->name, scsi_tgt_num); ++ ++ rc = ssam_scsi_send_event(smsession, scsi_tgt_num, VIRTIO_SCSI_T_TRANSPORT_RESET, ++ VIRTIO_SCSI_EVT_RESET_REMOVED); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: scsi send remove event failed\n", smsession->name); ++ if (rc == -ENOSPC) { ++ scsi_dev_state->status = SSAM_SCSI_DEV_PRESENT; ++ goto out; ++ } ++ } ++ ++ spdk_scsi_dev_free_io_channels(scsi_dev_state->dev); ++ ++ ssam_send_dev_destroy_msg(smsession, (void *)(uintptr_t)scsi_tgt_num); ++ ++out: ++ free(ctx); ++ return rc; ++} ++ ++int ++ssam_scsi_dev_remove_tgt(struct spdk_ssam_session *smsession, unsigned scsi_tgt_num, ++ spdk_ssam_session_rsp_fn cb_fn, void *cb_arg) ++{ ++ struct spdk_ssam_scsi_session *ssmsession; ++ struct ssam_scsi_tgt_hotplug_ctx *ctx; ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = false, ++ .need_rsp = true, ++ }; ++ ++ if (scsi_tgt_num >= SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("%s: invalid SCSI target number %d\n", smsession->name, scsi_tgt_num); ++ return -EINVAL; ++ } ++ ++ ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ if (!ssmsession) { ++ SPDK_ERRLOG("An invalid SCSI device that removing from a SCSI target."); ++ return -EINVAL; ++ } ++ ++ ctx = calloc(1, sizeof(*ctx)); ++ if (ctx == NULL) { ++ SPDK_ERRLOG("calloc failed\n"); ++ return -ENOMEM; ++ } ++ ++ ctx->scsi_tgt_num = scsi_tgt_num; ++ ++ ssam_send_event_to_session(smsession, ssam_scsi_session_remove_tgt, ++ ssam_scsi_session_remove_tgt_cpl, send_event_flag, ctx); ++ ++ return 0; ++} ++ ++SPDK_LOG_REGISTER_COMPONENT(ssam_scsi) +diff --git a/mk/spdk.lib_deps.mk b/mk/spdk.lib_deps.mk +index 834675e..bb79830 100644 +--- a/mk/spdk.lib_deps.mk ++++ b/mk/spdk.lib_deps.mk +@@ 
-179,6 +179,7 @@ DEPDIRS-event_vhost_scsi := init vhost event_scheduler event_scsi + DEPDIRS-event_sock := init sock + DEPDIRS-event_vfu_tgt := init vfu_tgt + DEPDIRS-event_iobuf := init log thread util $(JSON_LIBS) ++DEPDIRS-event_ssam := init ssam event_scsi + + # module/vfu_device + +diff --git a/module/event/subsystems/Makefile b/module/event/subsystems/Makefile +index 8242329..e881fe9 100644 +--- a/module/event/subsystems/Makefile ++++ b/module/event/subsystems/Makefile +@@ -16,6 +16,7 @@ endif + endif + + DIRS-$(CONFIG_VHOST) += vhost_blk vhost_scsi ++DIRS-$(CONFIG_SSAM) += ssam + DIRS-$(CONFIG_VFIO_USER) += vfu_tgt + + # These dependencies are not based specifically on symbols, but rather +@@ -30,6 +31,7 @@ DEPDIRS-ublk := bdev + DEPDIRS-nvmf := bdev + DEPDIRS-scsi := bdev + DEPDIRS-vhost_scsi := scsi ++DEPDIRS-ssam := scsi + + .PHONY: all clean $(DIRS-y) + +diff --git a/module/event/subsystems/ssam/Makefile b/module/event/subsystems/ssam/Makefile +new file mode 100644 +index 0000000..77f74a8 +--- /dev/null ++++ b/module/event/subsystems/ssam/Makefile +@@ -0,0 +1,17 @@ ++# SPDX-License-Identifier: BSD-3-Clause ++# Copyright (C) 2021-2025 Huawei Technologies Co. ++# All rights reserved. ++# ++ ++SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..) ++include $(SPDK_ROOT_DIR)/mk/spdk.common.mk ++ ++SO_VER := 3 ++SO_MINOR := 0 ++ ++C_SRCS = ssam.c ++LIBNAME = event_ssam ++ ++SPDK_MAP_FILE = $(SPDK_ROOT_DIR)/mk/spdk_blank.map ++ ++include $(SPDK_ROOT_DIR)/mk/spdk.lib.mk +diff --git a/module/event/subsystems/ssam/ssam.c b/module/event/subsystems/ssam/ssam.c +new file mode 100644 +index 0000000..5291b34 +--- /dev/null ++++ b/module/event/subsystems/ssam/ssam.c +@@ -0,0 +1,45 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. ++ */ ++ ++#include "spdk/stdinc.h" ++ ++#include "spdk/ssam.h" ++ ++#include "spdk_internal/event.h" ++#include "spdk_internal/init.h" ++ ++static void ++ssam_subsystem_init_done(int rc) ++{ ++ spdk_subsystem_init_next(rc); ++} ++ ++static void ++ssam_subsystem_init(void) ++{ ++ spdk_ssam_subsystem_init(ssam_subsystem_init_done); ++} ++ ++static void ++ssam_subsystem_fini_done(void) ++{ ++ spdk_subsystem_fini_next(); ++} ++ ++static void ++ssam_subsystem_fini(void) ++{ ++ spdk_ssam_subsystem_fini(ssam_subsystem_fini_done); ++} ++ ++static struct spdk_subsystem g_spdk_subsystem_ssam = { ++ .name = SSAM_SERVER_NAME, ++ .init = ssam_subsystem_init, ++ .fini = ssam_subsystem_fini, ++ .write_config_json = spdk_ssam_config_json, ++}; ++ ++SPDK_SUBSYSTEM_REGISTER(g_spdk_subsystem_ssam); ++SPDK_SUBSYSTEM_DEPEND(ssam, scsi) +diff --git a/python/spdk/rpc/__init__.py b/python/spdk/rpc/__init__.py +index 0541544..2407983 100644 +--- a/python/spdk/rpc/__init__.py ++++ b/python/spdk/rpc/__init__.py +@@ -29,6 +29,7 @@ from . import pmem + from . import subsystem + from . import trace + from . import vhost ++from . import ssam + from . import vmd + from . import sock + from . import vfio_user +diff --git a/python/spdk/rpc/ssam.py b/python/spdk/rpc/ssam.py +new file mode 100644 +index 0000000..9ff173e +--- /dev/null ++++ b/python/spdk/rpc/ssam.py +@@ -0,0 +1,274 @@ ++# SPDX-License-Identifier: BSD-3-Clause ++# Copyright (C) 2021-2025 Huawei Technologies Co. ++# All rights reserved. ++ ++from .helpers import deprecated_alias ++from getpass import getuser ++ ++ ++def log_command_info(client, event): ++ """log event info. 
++    Args:
++        user_name: name of the local user issuing the command
++        event: name of the RPC method being logged
++        src_addr: source address of the request
++    """
++    params = {
++        'user_name': getuser(),
++        'event': event,
++        'src_addr': "localhost",
++    }
++    return client.call('log_command_info', params)
++
++
++def log_info(func):
++    def wrapper_log_info(arg, *args, **kw):
++        log_command_info(arg.client, func.__name__)
++        return func(arg, *args, **kw)
++    return wrapper_log_info
++
++
++def create_blk_controller(client, dev_name, index, readonly=None, serial=None, vqueue=None):
++    """Create ssam BLK controller.
++    Args:
++        dev_name: device name to add to controller
++        index: function id or dbdf of PCI device
++        readonly: set controller as read-only
++        serial: set volume id
++        vqueue: set virtio queue num
++    """
++    params = {
++        'dev_name': dev_name,
++        'index': index,
++    }
++    if readonly:
++        params['readonly'] = readonly
++    if serial:
++        params['serial'] = serial
++    if vqueue is not None:
++        params['vqueue'] = vqueue
++    return client.call('create_blk_controller', params)
++
++
++def get_controllers(client, function_id=None, dbdf=None):
++    """Get information about configured ssam controllers.
++
++    Args:
++        function_id: function id of PCI device
++        dbdf: dbdf of PCI device
++
++    Returns:
++        List of ssam controllers.
++    """
++    params = {}
++    if function_id is not None:
++        params['function_id'] = function_id
++    if dbdf is not None:
++        params['dbdf'] = dbdf
++    return client.call('get_controllers', params)
++
++
++def get_scsi_controllers(client, name=None):
++    """Get information about configured ssam scsi controllers.
++
++    Args:
++        name: name of scsi controller
++
++    Returns:
++        List of ssam scsi controllers.
++    """
++    params = {}
++    if name is not None:
++        params['name'] = name
++    return client.call('get_scsi_controllers', params)
++
++
++def delete_controller(client, index, force):
++    """Delete ssam controller from configuration.
++    Args:
++        index: function id or dbdf of PCI device
++        force: force the deletion even if io exists
++    """
++    params = {
++        'index': index,
++        'force': force,
++    }
++    return client.call('delete_controller', params)
++
++
++def delete_scsi_controller(client, name):
++    """Delete ssam scsi controller from configuration.
++    Args:
++        name: scsi controller name to be deleted
++    """
++    params = {'name': name}
++    return client.call('delete_scsi_controller', params)
++
++
++def controller_get_iostat(client, function_id=None, dbdf=None):
++    """Get iostat about configured ssam controllers.
++
++    Args:
++        function_id: function id of PCI device
++        dbdf: dbdf of PCI device
++
++    Returns:
++        List of iostat of ssam controllers.
++    """
++    params = {}
++    if function_id is not None:
++        params['function_id'] = function_id
++    if dbdf is not None:
++        params['dbdf'] = dbdf
++    return client.call('controller_get_iostat', params)
++
++
++def blk_device_iostat(client, index, tid=None, vq_idx=None):
++    """Get iostat about blk device.
++
++    Args:
++        index: function id or dbdf of PCI device
++        tid: tid to query (optional)
++        vq_idx: vqueue index to query (optional)
++
++    Returns:
++        List of iostat of ssam blk controllers.
++    """
++    params = {
++        'index': index,
++    }
++    if tid is not None:
++        params['tid'] = tid
++    if vq_idx is not None:
++        params['vq_idx'] = vq_idx
++    return client.call('blk_device_iostat', params)
++
++
++def controller_clear_iostat(client):
++    """Clear iostat about configured ssam controllers.
++    """
++    return client.call('controller_clear_iostat')
++
++
++def bdev_resize(client, function_id, new_size_in_mb):
++    """Resize bdev in the system.
++ Args: ++ function_id: function id of PCI device ++ new_size_in_mb: new bdev size for resize operation. The unit is MiB ++ """ ++ params = { ++ 'function_id': function_id, ++ 'new_size_in_mb': new_size_in_mb, ++ } ++ return client.call('bdev_resize', params) ++ ++ ++def scsi_bdev_resize(client, name, tgt_id, new_size_in_mb): ++ """Resize scsi bdev in the system. ++ Args: ++ name: controller name of PCI device ++ tgt_id: tgt id of bdev ++ new_size_in_mb: new bdev size for resize operation. The unit is MiB ++ """ ++ params = { ++ 'name': name, ++ 'tgt_id': tgt_id, ++ 'new_size_in_mb': new_size_in_mb, ++ } ++ return client.call('scsi_bdev_resize', params) ++ ++ ++def bdev_aio_resize(client, name, new_size_in_mb): ++ """Resize aio bdev in the system. ++ Args: ++ name: aio bdev name ++ new_size_in_mb: new bdev size for resize operation. The unit is MiB ++ """ ++ params = { ++ 'name': name, ++ 'new_size_in_mb': new_size_in_mb, ++ } ++ return client.call('bdev_aio_resize', params) ++ ++ ++def os_ready(client): ++ """Write ready flag for booting OS. ++ ++ """ ++ return client.call('os_ready') ++ ++ ++def os_not_ready(client): ++ """Write not ready flag for booting OS. ++ ++ """ ++ return client.call('os_not_ready') ++ ++ ++def create_scsi_controller(client, dbdf, name): ++ """Create ssam scsi controller. ++ Args: ++ dbdf: the pci dbdf of virtio scsi controller ++ name: controller name to be create ++ """ ++ params = { ++ 'dbdf': dbdf, ++ 'name': name, ++ } ++ ++ return client.call('create_scsi_controller', params) ++ ++ ++def scsi_controller_add_target(client, name, scsi_tgt_num, bdev_name): ++ """Add LUN to ssam scsi controller target. ++ Args: ++ name: controller name where add lun ++ scsi_tgt_num: target number to use ++ bdev_name: name of bdev to add to target ++ """ ++ params = { ++ 'name': name, ++ 'scsi_tgt_num': scsi_tgt_num, ++ 'bdev_name': bdev_name, ++ } ++ return client.call('scsi_controller_add_target', params) ++ ++ ++def scsi_controller_remove_target(client, name, scsi_tgt_num): ++ """Remove LUN from ssam scsi controller target. ++ Args: ++ name: controller name to remove lun ++ scsi_tgt_num: target number to use ++ """ ++ params = { ++ 'name': name, ++ 'scsi_tgt_num': scsi_tgt_num, ++ } ++ return client.call('scsi_controller_remove_target', params) ++ ++ ++def scsi_device_iostat(client, name, scsi_tgt_num): ++ """Get iostat about scsi device. ++ ++ Args: ++ name: controller name ++ scsi_tgt_num: target number ++ ++ Returns: ++ List of iostat of ssam controllers. ++ """ ++ params = { ++ 'name': name, ++ 'scsi_tgt_num': scsi_tgt_num, ++ } ++ return client.call('scsi_device_iostat', params) ++ ++ ++def device_pcie_list(client): ++ """Show storage device pcie list. ++ ++ Returns: ++ List of storage device pcie. ++ """ ++ ++ return client.call('device_pcie_list') +diff --git a/scripts/hw_dpu_rpc.py b/scripts/hw_dpu_rpc.py +new file mode 100644 +index 0000000..b47151f +--- /dev/null ++++ b/scripts/hw_dpu_rpc.py +@@ -0,0 +1,328 @@ ++#!/usr/bin/env python3 ++# SPDX-License-Identifier: BSD-3-Clause ++# Copyright (C) 2021-2025 Huawei Technologies Co. ++# All rights reserved. 
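The `python/spdk/rpc/ssam.py` helpers above are thin wrappers: each one builds a params dict and forwards it over the JSON-RPC socket with `client.call()`. As a rough usage sketch (editorial illustration, not part of the patch; the socket path, port, function id 16 and the "Malloc0" bdev are assumptions), a management script could drive them like this:

~~~python
import spdk.rpc as rpc
from spdk.rpc.client import JSONRPCClient, print_dict

# Assumed defaults; hw_dpu_rpc.py below connects with the same parameters.
client = JSONRPCClient('/var/tmp/spdk.sock', 5260, 60.0)

# Audit-log the operation, then create a virtio-blk controller on function 16
# backed by an (assumed) existing bdev called "Malloc0".
rpc.ssam.log_command_info(client, 'create_blk_controller')
rpc.ssam.create_blk_controller(client, dev_name='Malloc0', index='16', vqueue=8)

# Inspect what was created and its I/O statistics.
print_dict(rpc.ssam.get_controllers(client, function_id=16))
print_dict(rpc.ssam.controller_get_iostat(client, function_id=16))
~~~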
++# ++ ++import argparse ++import logging ++import sys ++import os ++import stat ++import pwd ++import grp ++import json ++ ++sys.path.append(os.path.dirname(__file__) + '/../python') ++ ++import spdk.rpc as rpc # noqa ++from spdk.rpc.client import print_dict, JSONRPCException # noqa ++from spdk.rpc.helpers import deprecated_aliases # noqa ++ ++ ++def get_parser(): ++ parser = argparse.ArgumentParser( ++ description='SPDK RPC command line interface', usage='%(prog)s [options]', add_help=False) ++ ++ parser.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ parser.add_argument('-r', dest='conn_retries', ++ help='Retry connecting to the RPC server N times with 0.2s interval. Default: 0', ++ default=0, type=int) ++ parser.add_argument('-t', dest='timeout', ++ help='Timeout as a floating point number expressed in seconds, waiting for response. Default: 60.0', ++ default=60.0, type=float) ++ ++ parser.set_defaults(is_server=False) ++ parser.set_defaults(dry_run=False) ++ parser.set_defaults(port=5260) ++ parser.set_defaults(verbose="ERROR") ++ parser.set_defaults(server_addr='/var/tmp/spdk.sock') ++ return parser ++ ++ ++def change_queues_num(client, number): ++ if not (1 <= number <= 32): ++ print("the number is not legal, it should be 1 <= number <= 32") ++ return ++ path = "/etc/dpak/ssam/parameter.json" ++ with open(path, 'r') as file: ++ try: ++ data = json.load(file) ++ except json.JSONDecodeError: ++ print("JSON file is wrong") ++ return ++ ++ if "queues" not in data: ++ print("JSON file do not have 'queues'") ++ return ++ ++ data["queues"] = number ++ with open(path, 'w') as file: ++ json.dump(data, file, indent=4) ++ ++ ++def init_rpc_func(): ++ parser = get_parser() ++ subparsers = parser.add_subparsers(help='RPC methods', dest='called_rpc_name', metavar='') ++ ++ @rpc.ssam.log_info ++ def create_blk_controller(args): ++ rpc.ssam.create_blk_controller(args.client, ++ dev_name=args.dev_name, ++ index=args.index, ++ readonly=args.readonly, ++ serial=args.serial, ++ vqueue=args.vqueue) ++ ++ p = subparsers.add_parser('create_blk_controller', ++ help='Add a new block controller', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('dev_name', help='Name of block device') ++ p.add_argument('index', help='Function ID or dbdf') ++ p.add_argument("-r", "--readonly", action='store_true', help='Set controller as read-only') ++ p.add_argument("-s", "--serial", help='Set volume ID') ++ p.add_argument("-q", "--vqueue", help='Set virtio queue num with a range of [1, 32]', type=int, required=False) ++ p.set_defaults(func=create_blk_controller) ++ ++ @rpc.ssam.log_info ++ def get_controllers(args): ++ print_dict(rpc.ssam.get_controllers(args.client, args.function_id, args.dbdf)) ++ ++ p = subparsers.add_parser('get_controllers', ++ help='List all or specific controller(s)', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('-f', '--function_id', help="Function ID of PCI device", type=int, required=False) ++ p.add_argument('-d', '--dbdf', help="Dbdf of PCI device", required=False) ++ p.set_defaults(func=get_controllers) ++ ++ @rpc.ssam.log_info ++ def get_scsi_controllers(args): ++ print_dict(rpc.ssam.get_scsi_controllers(args.client, args.name)) ++ ++ p = subparsers.add_parser('get_scsi_controllers', aliases=['scsi_controller_list'], ++ help='List all or specific scsi controller(s)', add_help=False) ++ p.add_argument('-h', 
'--help', action='help', help='Show this help message and exit') ++ p.add_argument('-n', '--name', help="Name of controller", required=False) ++ p.set_defaults(func=get_scsi_controllers) ++ ++ @rpc.ssam.log_info ++ def delete_controller(args): ++ rpc.ssam.delete_controller(args.client, ++ index=args.index, ++ force=args.force) ++ ++ p = subparsers.add_parser('delete_controller', ++ help='Delete a controller', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('index', help='Function ID or dbdf of PCI device') ++ p.add_argument('-f', '--force', dest='force', action='store_true', help="Force to delete when io exists") ++ p.set_defaults(force=False) ++ p.set_defaults(func=delete_controller) ++ ++ @rpc.ssam.log_info ++ def delete_scsi_controller(args): ++ rpc.ssam.delete_scsi_controller(args.client, name=args.name) ++ ++ p = subparsers.add_parser('delete_scsi_controller', aliases=['scsi_controller_delete'], ++ help='Delete a scsi controller', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('name', help='Name of controller to be deleted', type=str) ++ p.set_defaults(func=delete_scsi_controller) ++ ++ @rpc.ssam.log_info ++ def bdev_resize(args): ++ rpc.ssam.bdev_resize(args.client, ++ function_id=args.function_id, ++ new_size_in_mb=args.new_size_in_mb) ++ ++ p = subparsers.add_parser('bdev_resize', ++ help='Resize a blk bdev by blk controller', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('function_id', help='Function ID of PCI device', type=int) ++ p.add_argument('new_size_in_mb', help='New size of bdev for resize operation. The unit is MiB', type=int) ++ p.set_defaults(func=bdev_resize) ++ ++ @rpc.ssam.log_info ++ def scsi_bdev_resize(args): ++ rpc.ssam.scsi_bdev_resize(args.client, ++ name=args.name, ++ tgt_id=args.tgt_id, ++ new_size_in_mb=args.new_size_in_mb) ++ ++ p = subparsers.add_parser('scsi_bdev_resize', ++ help='Resize a scsi bdev by scsi controller', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('name', help='Name of controller for the PCI device', type=str) ++ p.add_argument('tgt_id', help='Tgt ID of bdev', type=int) ++ p.add_argument('new_size_in_mb', help='New size of bdev for resize operation. The unit is MiB', type=int) ++ p.set_defaults(func=scsi_bdev_resize) ++ ++ @rpc.ssam.log_info ++ def bdev_aio_resize(args): ++ rpc.ssam.bdev_aio_resize(args.client, ++ name=args.name, ++ new_size_in_mb=args.new_size_in_mb) ++ ++ p = subparsers.add_parser('bdev_aio_resize', ++ help='Resize a bdev by bdev name', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('name', help='Name of aio bdev', type=str) ++ p.add_argument('new_size_in_mb', help='New size of bdev for resize operation. 
The unit is MiB', type=int) ++ p.set_defaults(func=bdev_aio_resize) ++ ++ @rpc.ssam.log_info ++ def set_queues_num(args): ++ change_queues_num(args.client, ++ number=args.number) ++ p = subparsers.add_parser('set_queues_num', ++ help='Set the queues of translate', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('number', help='Number of queues', type=int) ++ p.set_defaults(func=set_queues_num) ++ ++ @rpc.ssam.log_info ++ def os_ready(args): ++ rpc.ssam.os_ready(args.client) ++ ++ p = subparsers.add_parser('os_ready', ++ help='Write ready flag for booting OS', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.set_defaults(func=os_ready) ++ ++ @rpc.ssam.log_info ++ def os_not_ready(args): ++ rpc.ssam.os_not_ready(args.client) ++ ++ p = subparsers.add_parser('os_not_ready', ++ help='Write not ready flag for booting OS', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.set_defaults(func=os_not_ready) ++ ++ @rpc.ssam.log_info ++ def controller_get_iostat(args): ++ print_dict(rpc.ssam.controller_get_iostat(args.client, args.function_id, args.dbdf)) ++ ++ p = subparsers.add_parser('controller_get_iostat', ++ help='Show all or specific controller(s) iostat', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('-f', '--function_id', help="Function ID of PCI device", type=int, required=False) ++ p.add_argument('-d', '--dbdf', help="Dbdf of PCI device", required=False) ++ p.set_defaults(func=controller_get_iostat) ++ ++ @rpc.ssam.log_info ++ def blk_device_iostat(args): ++ print_dict(rpc.ssam.blk_device_iostat(args.client, ++ index=args.index, ++ tid=args.tid, ++ vq_idx=args.vq_idx)) ++ ++ p = subparsers.add_parser('blk_device_iostat', ++ help='Show iostat of blk device', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('index', help='Function ID or dbdf') ++ p.add_argument('-t', "--tid", help='Tid', type=int, required=False) ++ p.add_argument("-q", "--vq_idx", help='Index of vqueue', type=int, required=False) ++ p.set_defaults(func=blk_device_iostat) ++ ++ @rpc.ssam.log_info ++ def controller_clear_iostat(args): ++ rpc.ssam.controller_clear_iostat(args.client) ++ ++ p = subparsers.add_parser('controller_clear_iostat', ++ help='Clear all controllers iostat', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.set_defaults(func=controller_clear_iostat) ++ ++ @rpc.ssam.log_info ++ def create_scsi_controller(args): ++ rpc.ssam.create_scsi_controller(args.client, ++ dbdf=args.dbdf, ++ name=args.name) ++ ++ p = subparsers.add_parser('create_scsi_controller', aliases=['scsi_controller_create'], ++ help='Add a new scsi controller', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('dbdf', help='The pci dbdf of virtio scsi controller, which is obtained by \'device_pcie_list\'', type=str) ++ p.add_argument('name', help='Name of controller to be created', type=str) ++ p.set_defaults(func=create_scsi_controller) ++ ++ @rpc.ssam.log_info ++ def scsi_controller_add_target(args): ++ rpc.ssam.scsi_controller_add_target(args.client, ++ name=args.name, ++ scsi_tgt_num=int(args.scsi_tgt_num), ++ bdev_name=args.bdev_name) ++ ++ p = 
subparsers.add_parser('scsi_controller_add_target', ++ help='Add LUN to ssam scsi controller target', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('name', help='Name of controller where lun is added', type=str) ++ p.add_argument('scsi_tgt_num', help='ID of target to use') ++ p.add_argument('bdev_name', help='Name of bdev to be added to target') ++ p.set_defaults(func=scsi_controller_add_target) ++ ++ @rpc.ssam.log_info ++ def scsi_controller_remove_target(args): ++ rpc.ssam.scsi_controller_remove_target(args.client, ++ name=args.name, ++ scsi_tgt_num=int(args.scsi_tgt_num)) ++ ++ p = subparsers.add_parser('scsi_controller_remove_target', ++ help='Remove LUN from ssam scsi controller target', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('name', help='Name of controller to remove lun', type=str) ++ p.add_argument('scsi_tgt_num', help='ID of target to use') ++ p.set_defaults(func=scsi_controller_remove_target) ++ ++ @rpc.ssam.log_info ++ def scsi_device_iostat(args): ++ print_dict(rpc.ssam.scsi_device_iostat(args.client, ++ name=args.name, ++ scsi_tgt_num=int(args.scsi_tgt_num))) ++ ++ p = subparsers.add_parser('scsi_device_iostat', ++ help='Show iostat of scsi device', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.add_argument('name', help='Name of controller', type=str) ++ p.add_argument('scsi_tgt_num', help='ID of target', type=int) ++ p.set_defaults(func=scsi_device_iostat) ++ ++ @rpc.ssam.log_info ++ def device_pcie_list(args): ++ print_dict(rpc.ssam.device_pcie_list(args.client)) ++ ++ p = subparsers.add_parser('device_pcie_list', ++ help='Show storage device pcie list', add_help=False) ++ p.add_argument('-h', '--help', action='help', help='Show this help message and exit') ++ p.set_defaults(func=device_pcie_list) ++ ++ return parser ++ ++ ++if __name__ == "__main__": ++ def call_rpc_func(args): ++ args.func(args) ++ check_called_name(args.called_rpc_name) ++ ++ def check_called_name(name): ++ if name in deprecated_aliases: ++ print("{} is deprecated, use {} instead.".format(name, deprecated_aliases[name]), file=sys.stderr) ++ ++ parser = init_rpc_func() ++ args = parser.parse_args() ++ ++ if sys.stdin.isatty() and not hasattr(args, 'func'): ++ # No arguments and no data piped through stdin ++ parser.print_help() ++ exit(1) ++ ++ if args.called_rpc_name != "get_version": ++ args.client = rpc.client.JSONRPCClient(args.server_addr, args.port, args.timeout, ++ log_level=getattr(logging, args.verbose.upper()), ++ conn_retries=args.conn_retries) ++ ++ try: ++ call_rpc_func(args) ++ except JSONRPCException as ex: ++ print(ex.message) ++ exit(1) +diff --git a/scripts/parameter.json b/scripts/parameter.json +new file mode 100644 +index 0000000..b6958a6 +--- /dev/null ++++ b/scripts/parameter.json +@@ -0,0 +1,5 @@ ++{ ++ "mempool_size_mb": 1024, ++ "queues": 16, ++ "mode": "default" ++} diff --git a/spdk-24.09.patch b/spdk-24.09.patch new file mode 100644 index 0000000000000000000000000000000000000000..e36b72fd0ac8e73ebb5b279c9333d48345d65baf --- /dev/null +++ b/spdk-24.09.patch @@ -0,0 +1,15796 @@ +diff --git a/CONFIG b/CONFIG +index 89c34e9..93258e7 100644 +--- a/CONFIG ++++ b/CONFIG +@@ -122,6 +122,9 @@ CONFIG_UBLK=n + # Build vhost library. + CONFIG_VHOST=y + ++# Build ssam library. ++CONFIG_SSAM=y ++ + # Build vhost initiator (Virtio) driver. 
+ CONFIG_VIRTIO=y + +diff --git a/app/Makefile b/app/Makefile +index e4fefe8..02b2778 100644 +--- a/app/Makefile ++++ b/app/Makefile +@@ -13,6 +13,7 @@ DIRS-y += nvmf_tgt + DIRS-y += iscsi_tgt + DIRS-y += spdk_tgt + DIRS-y += spdk_lspci ++DIRS-y += ssam + DIRS-y += spdk_nvme_perf + DIRS-y += spdk_nvme_identify + DIRS-y += spdk_nvme_discover +diff --git a/app/fio/bdev/fio_plugin.c b/app/fio/bdev/fio_plugin.c +index 8f6d51d..63f6650 100644 +--- a/app/fio/bdev/fio_plugin.c ++++ b/app/fio/bdev/fio_plugin.c +@@ -297,7 +297,7 @@ spdk_fio_bdev_startup_done(int rc, void *cb_arg) + } + + if (g_rpc_listen_addr != NULL) { +- if (spdk_rpc_initialize(g_rpc_listen_addr, NULL) != 0) { ++ if (spdk_rpc_initialize(g_rpc_listen_addr, NULL, RPC_SELECT_INTERVAL) != 0) { + SPDK_ERRLOG("could not initialize RPC address %s\n", g_rpc_listen_addr); + exit(1); + } +diff --git a/app/ssam/Makefile b/app/ssam/Makefile +new file mode 100644 +index 0000000..639c62b +--- /dev/null ++++ b/app/ssam/Makefile +@@ -0,0 +1,31 @@ ++# SPDX-License-Identifier: BSD-3-Clause ++# Copyright (C) 2021-2025 Huawei Technologies Co. ++# All rights reserved. ++# ++ ++SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..) ++include $(SPDK_ROOT_DIR)/mk/spdk.common.mk ++include $(SPDK_ROOT_DIR)/mk/spdk.modules.mk ++ ++APP = ssam ++ ++C_SRCS := ssam.c ++ ++SYS_LIBS += -lcap ++SPDK_LIB_LIST = $(ALL_MODULES_LIST) event_ssam event ssam ++ ++ifeq ($(OS),Linux) ++SPDK_LIB_LIST += event_nbd ++endif ++ ++ifeq ($(SPDK_ROOT_DIR)/lib/env_dpdk,$(CONFIG_ENV)) ++SPDK_LIB_LIST += env_dpdk_rpc ++endif ++ ++include $(SPDK_ROOT_DIR)/mk/spdk.app.mk ++ ++install: $(APP) ++ $(INSTALL_APP) ++ ++uninstall: ++ $(UNINSTALL_APP) +diff --git a/app/ssam/ssam.c b/app/ssam/ssam.c +new file mode 100644 +index 0000000..7ad7cab +--- /dev/null ++++ b/app/ssam/ssam.c +@@ -0,0 +1,77 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. 
++ */ ++ ++#include "spdk/ssam.h" ++#include "spdk/string.h" ++ ++#define IOVA_MODE_PA "pa" ++ ++static bool g_start_flag = false; ++ ++bool ++spdk_ssam_is_starting(void) ++{ ++ return g_start_flag; ++} ++ ++static void ++ssam_started(void *ctx) ++{ ++ spdk_ssam_poller_start(); ++ SPDK_NOTICELOG("hot restart %d\n", spdk_ssam_get_hot_restart()); ++ spdk_ssam_set_hot_restart(false); ++ g_start_flag = false; ++ SPDK_NOTICELOG("%s server started.\n", SSAM_SERVER_NAME); ++} ++ ++int ++main(int argc, char *argv[]) ++{ ++ struct spdk_app_opts opts = {}; ++ int rc; ++ int shm_id; ++ ++ spdk_app_opts_init(&opts, sizeof(opts)); ++ opts.name = SSAM_SERVER_NAME; ++ opts.iova_mode = IOVA_MODE_PA; ++ opts.num_entries = 0; ++ g_start_flag = true; ++ ++ rc = spdk_ssam_user_config_init(); ++ if (rc != 0) { ++ SPDK_ERRLOG("ssam user config init failed: %s\n", spdk_strerror(-rc)); ++ exit(rc); ++ } ++ ++ shm_id = shm_open(SSAM_SHM, O_RDWR, SSAM_SHM_PERMIT); ++ if (shm_id < 0) { ++ SPDK_NOTICELOG("ssam share memory hasn't been created.\n"); ++ g_start_flag = false; ++ } else { ++ spdk_ssam_set_shm_created(true); ++ SPDK_NOTICELOG("ssam share memory has been created.\n"); ++ } ++ ++ rc = spdk_ssam_rc_preinit(); ++ if (rc < 0) { ++ exit(rc); ++ } ++ ++ rc = spdk_app_parse_args(argc, argv, &opts, NULL, NULL, NULL, NULL); ++ if (rc != SPDK_APP_PARSE_ARGS_SUCCESS) { ++ SPDK_ERRLOG("spdk app parse args fail: %d\n", rc); ++ exit(rc); ++ } ++ spdk_ssam_set_hot_restart(opts.hot_restart); ++ ++ /* Blocks until the application is exiting */ ++ rc = spdk_app_start(&opts, ssam_started, NULL); ++ spdk_ssam_exit(); ++ ++ spdk_app_fini(); ++ SPDK_NOTICELOG("%s server exited.\n", SSAM_SERVER_NAME); ++ ++ return rc; ++} +diff --git a/configure b/configure +index 26c9b0f..81e9c6f 100644 +--- a/configure ++++ b/configure +@@ -134,6 +134,8 @@ function usage() { + echo " --without-golang No path required." + echo " --with-aio-fsdev Build aio FSDEV component." + echo " --without-aio-fsdev No path required." ++ echo " --with-ssam Support to build ssam for DPU storage accel." ++ echo " --without-ssam No path required." + echo "" + echo "Environment variables:" + echo "" +@@ -628,6 +630,15 @@ for i in "$@"; do + --without-fuse) + CONFIG[FUSE]=n + ;; ++ --with-ssam) ++ CONFIG[SSAM]=y ++ ;; ++ --without-ssam) ++ CONFIG[SSAM]=n ++ ;; ++ --with-ssam-only) ++ CONFIG[SSAM_ONLY]=y ++ ;; + --with-nvme-cuse) + CONFIG[NVME_CUSE]=y + ;; +@@ -1215,6 +1226,13 @@ if [[ "${CONFIG[FUSE]}" = "y" ]]; then + fi + fi + ++if [[ "${CONFIG[SSAM_ONLY]}" = "y" ]]; then ++ if [[ "${CONFIG[SSAM]}" = "n" ]]; then ++ echo "--with-ssam-only requires --with-ssam." ++ exit 1 ++ fi ++fi ++ + if [ "${CONFIG[CET]}" = "y" ]; then + if ! echo -e 'int main(void) { return 0; }\n' | "${BUILD_CMD[@]}" -fcf-protection - 2> /dev/null; then + echo "--enable-cet requires compiler/linker that supports CET." 
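The `scripts/hw_dpu_rpc.py` front end shown earlier repeats one pattern for every subcommand: an argparse subparser, a handler decorated with `@rpc.ssam.log_info` so the call is audit-logged through `log_command_info` before the real RPC runs, and `args.func(args)` dispatch once a `JSONRPCClient` has been attached to `args.client`. A minimal sketch of that wiring (the `example_cmd` handler is hypothetical and not part of the patch; socket path and port are assumed defaults):

~~~python
import argparse
import spdk.rpc as rpc
from spdk.rpc.client import JSONRPCClient, print_dict

parser = argparse.ArgumentParser(description='ssam RPC example')
subparsers = parser.add_subparsers(dest='called_rpc_name')

@rpc.ssam.log_info          # logs the method name via log_command_info first
def example_cmd(args):
    # Hypothetical handler: list controllers, mirroring get_controllers above.
    print_dict(rpc.ssam.get_controllers(args.client))

p = subparsers.add_parser('example_cmd', help='List ssam controllers')
p.set_defaults(func=example_cmd)

args = parser.parse_args(['example_cmd'])
args.client = JSONRPCClient('/var/tmp/spdk.sock', 5260, 60.0)
args.func(args)
~~~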
+diff --git a/doc/jsonrpc.md b/doc/jsonrpc.md +index 70addf6..1eefa0e 100644 +--- a/doc/jsonrpc.md ++++ b/doc/jsonrpc.md +@@ -505,7 +505,25 @@ Example response: + "bdev_lvol_set_parent_bdev", + "bdev_daos_delete", + "bdev_daos_create", +- "bdev_daos_resize" ++ "bdev_daos_resize", ++ "create_blk_controller", ++ "delete_controller", ++ "delete_scsi_controller", ++ "get_controllers", ++ "get_scsi_controllers", ++ "controller_get_iostat", ++ "blk_device_iostat", ++ "controller_clear_iostat", ++ "bdev_resize", ++ "scsi_bdev_resize", ++ "bdev_aio_resize", ++ "os_ready", ++ "os_not_ready", ++ "create_scsi_controller", ++ "scsi_controller_add_target", ++ "scsi_controller_remove_target", ++ "scsi_device_iostat", ++ "device_pcie_list" + ] + } + ~~~ +@@ -13868,3 +13886,845 @@ Example response: + "result": true + } + ~~~ ++ ++### log_command_info {#rpc_ssam_log_command_info} ++ ++Record operation logs ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++event | Required | string | Function id of PCI device ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "event": "create_blk_controller" ++ }, ++ "jsonrpc": "2.0", ++ "method": "log_command_info", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### create_blk_controller {#rpc_ssam_create_blk_controller} ++ ++Create ssam blk controller ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++dev_name | Required | string | Device name to add to controller ++index | Required | string | Function id or dbdf of PCI device ++readonly | Optional | bool | Set controller as read-only ++serial | Optional | string | Set volume id ++vqueue | Optional | number | Set virtio queue num ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "dev_name": "aio0", ++ "index": "16", ++ "readonly": true, ++ "serial": "blk_disk", ++ "vqueue": 16 ++ }, ++ "jsonrpc": "2.0", ++ "method": "create_blk_controller", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### delete_controller {#rpc_ssam_delete_controller} ++ ++Delete ssam controller from configuration ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++index | Required | string | Function id or dbdf of PCI device ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "index": "16" ++ }, ++ "jsonrpc": "2.0", ++ "method": "delete_controller", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### delete_scsi_controller {#rpc_ssam_delete_scsi_controller} ++ ++Delete ssam scsi controller from configuration ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++name | Required | string | Scsi controller name to be delete ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "name": "scsi0" ++ }, ++ "jsonrpc": "2.0", ++ "method": "delete_scsi_controller", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### get_controllers {#rpc_ssam_get_controllers} ++ ++Get information about configured ssam controllers ++ ++#### Parameters ++ ++Name 
| Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++function_id | Optional | number | Function id of PCI device ++dbdf | Optional | string | Dbdf of PCI device ++ ++#### Result ++ ++List of ssam controllers. ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "function_id": null, ++ "dbdf": null ++ }, ++ "jsonrpc": "2.0", ++ "method": "get_controllers", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": [ ++ { ++ "ctrlr": "ssam.0", ++ "cpumask": "0x1", ++ "session_num": 1, ++ "backend_specific": { ++ "session": [ ++ { ++ "name": "ssam.0_blk_16", ++ "function_id": 16, ++ "queues": 8, ++ "block": { ++ "readonly": false, ++ "bdev": "Malloc0" ++ } ++ } ++ ] ++ } ++ } ++ ] ++} ++~~~ ++ ++### get_scsi_controllers {#rpc_ssam_get_scsi_controllers} ++ ++Get information about configured ssam scsi controllers ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++name | Optional | string | Name of scsi controller ++ ++#### Result ++ ++List of ssam scsi controllers. ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "name": "scsi0" ++ }, ++ "jsonrpc": "2.0", ++ "method": "get_scsi_controllers", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": [] ++} ++~~~ ++ ++### controller_get_iostat {#rpc_ssam_controller_get_iostat} ++ ++Get iostat about configured ssam controllers ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++function_id | Optional | number | Function id of PCI device ++dbdf | Optional | string | Dbdf of PCI device ++ ++#### Result ++ ++List of iostat of ssam controllers. 
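The latency fields in this result are reported in timestamp ticks; dividing by the returned `tick_rate` (ticks per second) and by the matching op count should give an average latency in seconds. A hedged post-processing sketch using the `controller_get_iostat` wrapper from `python/spdk/rpc/ssam.py` (socket path, port and function id 16 are assumptions; field names follow the example below):

~~~python
import spdk.rpc as rpc
from spdk.rpc.client import JSONRPCClient

client = JSONRPCClient('/var/tmp/spdk.sock', 5260, 60.0)   # assumed defaults
stats = rpc.ssam.controller_get_iostat(client, function_id=16)

tick_rate = stats['tick_rate']
for dev in stats['dbdfs']:
    if 'num_read_ops' not in dev:       # skip the per-controller summary entry
        continue
    reads = dev['num_read_ops']
    if reads:
        avg_read_lat_s = dev['read_latency_ticks'] / tick_rate / reads
        print(f"fn {dev['function_id']}: avg read latency {avg_read_lat_s * 1e6:.1f} us")
~~~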
++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "function_id": null, ++ "dbdf": null ++ }, ++ "jsonrpc": "2.0", ++ "method": "controller_get_iostat", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": { ++ "tick_rate": 100000000, ++ "dbdfs": [ ++ { ++ "name": "ssam.0", ++ "flight_io": 0, ++ "discard_io_num": 0, ++ "wait_io": 0, ++ "wait_io_r": 0 ++ }, ++ { ++ "function_id": 16, ++ "poll_lat": "0.000000268", ++ "bdev_name": "Malloc0", ++ "bytes_read": 0, ++ "num_read_ops": 0, ++ "bytes_written": 0, ++ "num_write_ops": 0, ++ "read_latency_ticks": 0, ++ "write_latency_ticks": 0, ++ "complete_read_ios": 0, ++ "err_read_ios": 0, ++ "complete_write_ios": 0, ++ "err_write_ios": 0, ++ "flush_ios": 0, ++ "complete_flush_ios": 0, ++ "err_flush_ios": 0, ++ "other_ios": 0, ++ "complete_other_ios": 0, ++ "err_other_ios": 0, ++ "fatal_ios": 0, ++ "io_retry": 0, ++ "counters": { ++ "start_count": 0, ++ "dma_count": 0, ++ "dma_complete_count": 0, ++ "bdev_count": 0, ++ "bdev_complete_count": 0 ++ }, ++ "details": { ++ "count": 0, ++ "total_lat": "0.000000000", ++ "dma_lat": "0.000000000", ++ "bdev_lat": "0.000000000", ++ "bdev_submit_lat": "0.000000000", ++ "complete_lat": "0.000000000", ++ "internal_lat": "0.000000000" ++ } ++ } ++ ] ++ } ++} ++~~~ ++ ++### blk_device_iostat {#rpc_ssam_blk_device_iostat} ++ ++Get iostat about blk device ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++index | Required | number | Function id or dbdf of PCI device ++tid | Optional | number | Tid ++vq_idx | Optional | number | Index of vqueue ++ ++#### Result ++ ++List of iostat of ssam blk controllers. ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "index": "16", ++ "tid": null, ++ "vq_idx": null ++ }, ++ "jsonrpc": "2.0", ++ "method": "blk_device_iostat", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": { ++ "tick_rate": 100000000, ++ "dbdfs": [ ++ { ++ "function_id": 16, ++ "poll_lat": "0.000000267", ++ "bdev_name": "Malloc0", ++ "bytes_read": 0, ++ "num_read_ops": 0, ++ "bytes_written": 0, ++ "num_write_ops": 0, ++ "read_latency_ticks": 0, ++ "write_latency_ticks": 0, ++ "complete_read_ios": 0, ++ "err_read_ios": 0, ++ "complete_write_ios": 0, ++ "err_write_ios": 0, ++ "flush_ios": 0, ++ "complete_flush_ios": 0, ++ "err_flush_ios": 0, ++ "other_ios": 0, ++ "complete_other_ios": 0, ++ "err_other_ios": 0, ++ "fatal_ios": 0, ++ "io_retry": 0, ++ "counters": { ++ "start_count": 0, ++ "dma_count": 0, ++ "dma_complete_count": 0, ++ "bdev_count": 0, ++ "bdev_complete_count": 0 ++ }, ++ "details": { ++ "count": 0, ++ "total_lat": "0.000000000", ++ "dma_lat": "0.000000000", ++ "bdev_lat": "0.000000000", ++ "bdev_submit_lat": "0.000000000", ++ "complete_lat": "0.000000000", ++ "internal_lat": "0.000000000" ++ } ++ } ++ ] ++ } ++} ++~~~ ++ ++### controller_clear_iostat {#rpc_ssam_controller_clear_iostat} ++ ++Clear iostat about configured ssam controllers ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "method": "controller_clear_iostat", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### bdev_resize {#rpc_ssam_bdev_resize} ++ ++Resize bdev in the system ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | 
----------- ++function_id | Required | number | Function id of PCI device ++new_size_in_mb | Required | number | New bdev size for resize operation. The unit is MiB ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "function_id": "16", ++ "new_size_in_mb": 1024 ++ }, ++ "jsonrpc": "2.0", ++ "method": "bdev_resize", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### scsi_bdev_resize {#rpc_ssam_scsi_bdev_resize} ++ ++Resize scsi bdev in the system ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++name | Required | string | Controller name of PCI device ++tgt_id | Required | number | Tgt id of bdev ++new_size_in_mb | Required | number | New bdev size for resize operation. The unit is MiB ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "name": "scsi0", ++ "function_id": "0", ++ "new_size_in_mb": 1024 ++ }, ++ "jsonrpc": "2.0", ++ "method": "scsi_bdev_resize", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### bdev_aio_resize {#rpc_ssam_bdev_aio_resize} ++ ++Resize aio bdev in the system ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++name | Required | string | Aio bdev name ++new_size_in_mb | Required | number | New bdev size for resize operation. The unit is MiB ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "name": "aio0", ++ "new_size_in_mb": 1024 ++ }, ++ "jsonrpc": "2.0", ++ "method": "bdev_aio_resize", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### os_ready {#rpc_os_ready} ++ ++Write ready flag for booting OS ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "method": "os_ready", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### os_not_ready {#rpc_set_os_status} ++ ++Write not ready flag for booting OS ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "method": "os_not_ready", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### create_scsi_controller {#rpc_ssam_create_scsi_controller} ++ ++Create ssam scsi controller ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++dbdf | Required | string | The pci dbdf of virtio scsi controller ++name | Required | string | Controller name to be create ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "dbdf": "0000:01:02.0", ++ "name": "scsi0" ++ }, ++ "jsonrpc": "2.0", ++ "method": "create_scsi_controller", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### scsi_controller_add_target {#rpc_ssam_scsi_controller_add_target} ++ ++Add LUN to ssam scsi controller target ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++name | Required | string | Controller name where add lun ++scsi_tgt_num | Required | number | Target number to use ++bdev_name | Required | string | Name of bdev to add to 
target ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "name": "scsi0", ++ "scsi_tgt_num": 0, ++ "bdev_name": "aio0" ++ }, ++ "jsonrpc": "2.0", ++ "method": "scsi_controller_add_target", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### scsi_controller_remove_target {#rpc_ssam_scsi_controller_remove_target} ++ ++Remove LUN from ssam scsi controller target ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++name | Required | string | Controller name where remove lun ++scsi_tgt_num | Required | number | Target number to use ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "name": "scsi0", ++ "scsi_tgt_num": 0 ++ }, ++ "jsonrpc": "2.0", ++ "method": "scsi_controller_remove_target", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": "true" ++} ++~~~ ++ ++### scsi_device_iostat {#rpc_ssam_scsi_device_iostat} ++ ++Get iostat about scsi device ++ ++#### Parameters ++ ++Name | Optional | Type | Description ++----------------------- | -------- | ----------- | ----------- ++name | Required | string | Controller name ++scsi_tgt_num | Required | number | Target number ++ ++#### Result ++ ++List of iostat of ssam scsi controllers. ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "params": { ++ "name": "scsi0", ++ "scsi_tgt_num": 0 ++ }, ++ "jsonrpc": "2.0", ++ "method": "scsi_device_iostat", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": [] ++} ++~~~ ++ ++### device_pcie_list {#rpc_ssam_device_pcie_list} ++ ++Show storage device pcie list ++ ++#### Result ++ ++List of storage device pcie. ++ ++#### Example ++ ++Example request: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "method": "device_pcie_list", ++ "id": 1 ++} ++~~~ ++ ++Example response: ++ ++~~~json ++{ ++ "jsonrpc": "2.0", ++ "id": 1, ++ "result": { ++ "device_pcie_list": [ ++ { ++ "index": 16, ++ "dbdf": "0001:75:02.0", ++ "type": "virtio-blk" ++ }, ++ { ++ "index": 17, ++ "dbdf": "0001:75:02.1", ++ "type": "virtio-blk" ++ }, ++ { ++ "index": 18, ++ "dbdf": "0001:75:02.2", ++ "type": "virtio-blk" ++ }, ++ { ++ "index": 19, ++ "dbdf": "0001:75:02.3", ++ "type": "virtio-blk" ++ }, ++ { ++ "index": 20, ++ "dbdf": "0001:75:02.4", ++ "type": "virtio-blk" ++ }, ++ { ++ "index": 21, ++ "dbdf": "0001:75:02.5", ++ "type": "virtio-blk" ++ }, ++ { ++ "index": 22, ++ "dbdf": "0001:75:02.6", ++ "type": "virtio-blk" ++ }, ++ { ++ "index": 23, ++ "dbdf": "0001:75:02.7", ++ "type": "virtio-blk" ++ } ++ ] ++ } ++} ++~~~ +diff --git a/examples/nvmf/nvmf/nvmf.c b/examples/nvmf/nvmf/nvmf.c +index 408e333..d39065f 100644 +--- a/examples/nvmf/nvmf/nvmf.c ++++ b/examples/nvmf/nvmf/nvmf.c +@@ -688,7 +688,7 @@ nvmf_subsystem_init_done(int rc, void *cb_arg) + { + fprintf(stdout, "bdev subsystem init successfully\n"); + +- rc = spdk_rpc_initialize(g_rpc_addr, NULL); ++ rc = spdk_rpc_initialize(g_rpc_addr, NULL, RPC_SELECT_INTERVAL); + if (rc) { + spdk_app_stop(rc); + return; +diff --git a/include/spdk/env.h b/include/spdk/env.h +index 359ccd6..cc4a3f4 100644 +--- a/include/spdk/env.h ++++ b/include/spdk/env.h +@@ -79,7 +79,8 @@ struct spdk_env_opts { + size_t opts_size; + + bool enforce_numa; +- uint8_t reserved2[7]; ++ bool hot_restart; ++ uint8_t reserved2[6]; + + /* All new fields must be added at the end of this structure. 
*/ + }; +diff --git a/include/spdk/event.h b/include/spdk/event.h +index fb2f33c..0ce3d48 100644 +--- a/include/spdk/event.h ++++ b/include/spdk/event.h +@@ -151,8 +151,8 @@ struct spdk_app_opts { + + bool enforce_numa; + +- /* Hole at bytes 187-191. */ +- uint8_t reserved187[5]; ++ /* Hole at bytes 187-190. */ ++ uint8_t reserved187[4]; + + /** + * The allocated size for the message pool used by the threading library. +@@ -161,6 +161,7 @@ struct spdk_app_opts { + */ + size_t msg_mempool_size; + ++ bool hot_restart; + /* + * If non-NULL, a string array of allowed RPC methods. + */ +@@ -274,6 +275,8 @@ void spdk_app_stop(int rc); + */ + int spdk_app_get_shm_id(void); + ++bool spdk_get_shutdown_sig_received(void); ++ + /** + * Convert a string containing a CPU core mask into a bitmask + * +diff --git a/include/spdk/init.h b/include/spdk/init.h +index 1206c9c..899a773 100644 +--- a/include/spdk/init.h ++++ b/include/spdk/init.h +@@ -21,6 +21,7 @@ extern "C" { + #endif + + #define SPDK_DEFAULT_RPC_ADDR "/var/tmp/spdk.sock" ++#define RPC_SELECT_INTERVAL 4000 /* 4ms */ + + /** + * Structure with optional parameters for the JSON-RPC server initialization. +@@ -53,7 +54,7 @@ SPDK_STATIC_ASSERT(sizeof(struct spdk_rpc_opts) == 24, "Incorrect size"); + * \return Negated errno on failure. 0 on success. + */ + int spdk_rpc_initialize(const char *listen_addr, +- const struct spdk_rpc_opts *opts); ++ const struct spdk_rpc_opts *opts, int internval); + + /** + * Stop SPDK JSON-RPC servers and stop polling for new connections on all addresses. +@@ -127,6 +128,10 @@ void spdk_rpc_server_pause(const char *listen_addr); + */ + void spdk_rpc_server_resume(const char *listen_addr); + ++void spdk_ssam_set_hot_restart(bool value); ++ ++bool spdk_ssam_get_hot_restart(void); ++ + #ifdef __cplusplus + } + #endif +diff --git a/include/spdk/ssam.h b/include/spdk/ssam.h +new file mode 100644 +index 0000000..091d539 +--- /dev/null ++++ b/include/spdk/ssam.h +@@ -0,0 +1,220 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. ++ */ ++ ++#ifndef SSAM_H ++#define SSAM_H ++ ++#include ++ ++#include "spdk/stdinc.h" ++#include "spdk/cpuset.h" ++#include "spdk/json.h" ++#include "spdk/thread.h" ++#include "spdk/event.h" ++ ++#include "../../lib/ssam/ssam_driver/dpak_ssam.h" ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++#ifdef DEBUG ++#define ASSERT(f) assert(f) ++#else ++#define ASSERT(f) ((void)0) ++#endif ++ ++#define SPDK_INVALID_TID UINT16_MAX ++#define SPDK_SESSION_TYPE_MAX_LEN 64 ++ ++#define SPDK_SESSION_TYPE_BLK "blk" ++#define SPDK_SESSION_TYPE_SCSI "scsi" ++ ++#define SSAM_SHM "ssam_shm" ++#define SSAM_SHM_PERMIT 0640 ++#define SSAM_STORAGE_READY_FILE "/proc/sdi_storage/storage_ready" ++ ++enum spdk_virtio_type { ++ VIRTIO_TYPE_UNKNOWN, ++ VIRTIO_TYPE_BLK, ++ VIRTIO_TYPE_SCSI, ++}; ++ ++/** ++ * ssam subsystem init callback ++ * ++ * \param rc The preceding processing result, ++ * 0 on success, negative errno on error. ++ */ ++typedef void (*spdk_ssam_init_cb)(int rc); ++ ++/** ++ * ssam subsystem fini callback ++ */ ++typedef void (*spdk_ssam_fini_cb)(void); ++ ++/** ++ * ssam dump config json ++ */ ++void spdk_ssam_config_json(struct spdk_json_write_ctx *w); ++ ++/** ++ * Check if ssam support the global vf id. ++ * ++ * \param gfunc_id ssam global vf id. ++ * ++ * \return -EINVAL indicate gfunc_id invalid, -ENODEV indicate no such vf or ++ * 0 indicate gfunc_id valid. 
++ */ ++int ssam_check_gfunc_id(uint16_t gfunc_id); ++ ++/** ++ * Find a ssam session by global vf id. ++ * ++ * \param gfunc_id ssam global vf id. ++ * ++ * \return ssam session or NULL indicate not find. ++ */ ++struct spdk_ssam_session *ssam_session_find(uint16_t gfunc_id); ++ ++/** ++ * Get gfunc id by controller name. ++ * ++ * \param name controller name. ++ * ++ * \return gfunc id or SPDK_INVALID_GFUNC_ID gfunc id not find. ++ */ ++uint16_t ssam_get_gfunc_id_by_name(char *name); ++ ++/** ++ * Get the next ssam device. If there's no more devices to iterate ++ * through, NULL will be returned. ++ * ++ * \param smdev ssam device. If NULL, this function will return the ++ * very first device. ++ * ++ * \return smdev ssam device or NULL indicate no more devices ++ */ ++struct spdk_ssam_dev *ssam_dev_next(const struct spdk_ssam_dev *smdev); ++ ++/** ++ * Lock the global ssam mutex synchronizing all the ssam device accesses. ++ */ ++void ssam_lock(void); ++ ++/** ++ * Lock the global ssam mutex synchronizing all the ssam device accesses. ++ * ++ * \return 0 if the mutex could be locked immediately, negative errno otherwise. ++ */ ++int ssam_trylock(void); ++ ++/** ++ * Unlock the global ssam mutex. ++ */ ++void ssam_unlock(void); ++ ++/** ++ * \param smsession ssam session. ++ * \param arg user-provided parameter. ++ * ++ * \return 0 on success, negative if failed ++ */ ++typedef int (*spdk_ssam_session_fn)(struct spdk_ssam_session *smsession, void **arg); ++ ++/** ++ * \param smsession ssam session. ++ * \param arg user-provided parameter. ++ */ ++typedef void (*spdk_ssam_session_cpl_fn)(struct spdk_ssam_session *smsession, void **arg); ++ ++/** ++ * \param arg user-provided parameter. ++ * \param rsp spdk_ssam_session_fn call back response value, 0 success, negative if failed. ++ */ ++typedef void (*spdk_ssam_session_rsp_fn)(void *arg, int rsp); ++ ++struct spdk_ssam_session_reg_info { ++ char type_name[SPDK_SESSION_TYPE_MAX_LEN]; ++ spdk_ssam_session_rsp_fn rsp_fn; ++ void *rsp_ctx; ++ uint16_t gfunc_id; ++ uint16_t tid; ++ uint16_t queues; ++ const struct spdk_ssam_session_backend *backend; ++ uint32_t session_ctx_size; ++ char *name; ++ char *dbdf; ++}; ++ ++/** ++ * Construct a ssam blk device. This will create a ssam ++ * blk device and then create a session. Creating the smdev will ++ * start an I/O poller and hog a CPU. If already exist a ssam ++ * blk device, then it will only create a session to this device. ++ * All sessions in the same device share one I/O poller and one CPU. ++ * ssam blk device is tightly associated with given SPDK bdev. ++ * Given bdev can not be changed, unless it has been hotremoved. This ++ * would result in all I/O failing with virtio VIRTIO_BLK_S_IOERR ++ * error code. ++ * ++ * This function is thread-safe. ++ * ++ * \param info session register information. ++ * \param dev_name bdev name to associate with this vhost device ++ * \param readonly if set, all writes to the device will fail with ++ * VIRTIO_BLK_S_IOERR error code. ++ * \param serial means volume id. ++ * ++ * \return 0 on success, negative errno on error. ++ */ ++int ssam_blk_construct(struct spdk_ssam_session_reg_info *info, ++ const char *dev_name, bool readonly, char *serial); ++ ++/** ++ * ssam user config init. ++ */ ++int spdk_ssam_user_config_init(void); ++ ++/** ++ * ssam get tid which has minimum device. 
++ */ ++uint16_t ssam_get_tid(void); ++ ++void spdk_ssam_exit(void); ++ ++void spdk_ssam_subsystem_fini(spdk_ssam_fini_cb fini_cb); ++ ++void spdk_ssam_subsystem_init(spdk_ssam_init_cb init_cb); ++ ++int ssam_scsi_construct(struct spdk_ssam_session_reg_info *info); ++ ++int ssam_scsi_dev_add_tgt(struct spdk_ssam_session *smsession, int target_num, ++ const char *bdev_name); ++ ++int ssam_scsi_dev_remove_tgt(struct spdk_ssam_session *smsession, ++ unsigned scsi_tgt_num, spdk_ssam_session_rsp_fn cb_fn, void *cb_arg); ++ ++void spdk_ssam_set_shm_created(bool shm_created); ++ ++bool spdk_ssam_get_shm_created(void); ++ ++void spdk_ssam_poller_start(void); ++ ++void ssam_deinit_device_pcie_list(void); ++ ++int ssam_init_device_pcie_list(void); ++ ++bool spdk_ssam_is_starting(void); ++ ++void ssam_dump_device_pcie_list(struct spdk_json_write_ctx *w); ++ ++uint32_t ssam_get_device_pcie_list_size(void); ++ ++#ifdef __cplusplus ++} ++#endif ++ ++#endif /* SSAM_H */ +diff --git a/lib/Makefile b/lib/Makefile +index b079104..5405fe0 100644 +--- a/lib/Makefile ++++ b/lib/Makefile +@@ -27,6 +27,7 @@ endif + DIRS-$(CONFIG_OCF) += env_ocf + DIRS-$(CONFIG_IDXD) += idxd + DIRS-$(CONFIG_VHOST) += vhost ++DIRS-$(CONFIG_SSAM) += ssam + DIRS-$(CONFIG_VIRTIO) += virtio + DIRS-$(CONFIG_VBDEV_COMPRESS) += reduce + DIRS-$(CONFIG_RDMA) += rdma_provider +diff --git a/lib/bdev/bdev.c b/lib/bdev/bdev.c +index 623cfd6..8879833 100644 +--- a/lib/bdev/bdev.c ++++ b/lib/bdev/bdev.c +@@ -24,6 +24,7 @@ + #include "spdk/bdev_module.h" + #include "spdk/log.h" + #include "spdk/string.h" ++#include "spdk/event.h" + + #include "bdev_internal.h" + #include "spdk_internal/trace_defs.h" +@@ -705,6 +706,9 @@ bdev_ok_to_examine(struct spdk_bdev *bdev) + static void + bdev_examine(struct spdk_bdev *bdev) + { ++ if (spdk_ssam_get_hot_restart() == true) { ++ return; ++ } + struct spdk_bdev_module *module; + struct spdk_bdev_module_claim *claim, *tmpclaim; + uint32_t action; +@@ -3945,6 +3949,7 @@ bdev_channel_destroy_resource(struct spdk_bdev_channel *ch) + { + struct spdk_bdev_shared_resource *shared_resource; + struct lba_range *range; ++ struct spdk_bdev_io *bdev_io, *tmp; + + bdev_free_io_stat(ch->stat); + #ifdef SPDK_CONFIG_VTUNE +@@ -3961,6 +3966,11 @@ bdev_channel_destroy_resource(struct spdk_bdev_channel *ch) + spdk_put_io_channel(ch->accel_channel); + + shared_resource = ch->shared_resource; ++ ch->shared_resource = NULL; ++ ++ TAILQ_FOREACH_SAFE(bdev_io, &ch->io_submitted, internal.ch_link, tmp) { ++ spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_ABORTED); ++ } + + assert(TAILQ_EMPTY(&ch->io_locked)); + assert(TAILQ_EMPTY(&ch->io_submitted)); +@@ -7491,6 +7501,15 @@ spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status sta + struct spdk_bdev_channel *bdev_ch = bdev_io->internal.ch; + struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource; + ++ if (spdk_unlikely(spdk_get_shutdown_sig_received())) { ++ /* ++ * In the hot restart process, when this callback is triggered, ++ * the bdev buf memory may have been released. ++ * Therefore, do not need to continue. 
++ */ ++ return; ++ } ++ + if (spdk_unlikely(bdev_io->internal.status != SPDK_BDEV_IO_STATUS_PENDING)) { + SPDK_ERRLOG("Unexpected completion on IO from %s module, status was %s\n", + spdk_bdev_get_module_name(bdev), +diff --git a/lib/event/app.c b/lib/event/app.c +index 07d9a3b..fed1f4b 100644 +--- a/lib/event/app.c ++++ b/lib/event/app.c +@@ -78,6 +78,12 @@ spdk_app_get_shm_id(void) + return g_spdk_app.shm_id; + } + ++bool ++spdk_get_shutdown_sig_received(void) ++{ ++ return g_shutdown_sig_received; ++} ++ + /* append one empty option to indicate the end of the array */ + static const struct option g_cmdline_options[] = { + #define CONFIG_FILE_OPT_IDX 'c' +@@ -150,6 +156,8 @@ static const struct option g_cmdline_options[] = { + {"no-rpc-server", no_argument, NULL, NO_RPC_SERVER_OPT_IDX}, + #define ENFORCE_NUMA_OPT_IDX 274 + {"enforce-numa", no_argument, NULL, ENFORCE_NUMA_OPT_IDX}, ++#define HOT_RESTART_OPT_IDX 275 ++ {"hot-restart", no_argument, NULL, HOT_RESTART_OPT_IDX}, + }; + + static int +@@ -429,7 +437,7 @@ app_do_spdk_subsystem_init(int rc, void *arg1) + opts.log_file = g_spdk_app.rpc_log_file; + opts.log_level = g_spdk_app.rpc_log_level; + +- rc = spdk_rpc_initialize(g_spdk_app.rpc_addr, &opts); ++ rc = spdk_rpc_initialize(g_spdk_app.rpc_addr, &opts, RPC_SELECT_INTERVAL); + if (rc) { + spdk_app_stop(rc); + return; +@@ -504,6 +512,7 @@ app_setup_env(struct spdk_app_opts *opts) + env_opts.vf_token = opts->vf_token; + env_opts.no_huge = opts->no_huge; + env_opts.enforce_numa = opts->enforce_numa; ++ env_opts.vf_token = opts->vf_token; + + rc = spdk_env_init(&env_opts); + free(env_opts.pci_blocked); +@@ -689,6 +698,7 @@ app_copy_opts(struct spdk_app_opts *opts, struct spdk_app_opts *opts_user, size_ + SET_FIELD(json_data); + SET_FIELD(json_data_size); + SET_FIELD(disable_cpumask_locks); ++ SET_FIELD(hot_restart); + + /* You should not remove this statement, but need to update the assert statement + * if you add a new field, and also add a corresponding SET_FIELD statement */ +@@ -1146,6 +1156,7 @@ usage(void (*app_usage)(void)) + printf(" -v, --version print SPDK version\n"); + printf(" -d, --limit-coredump do not set max coredump size to RLIM_INFINITY\n"); + printf(" --env-context Opaque context for use of the env implementation\n"); ++ printf(" --hot-restart enable hot restart\n"); + + if (app_usage) { + printf("\nApplication specific:\n"); +@@ -1444,6 +1455,9 @@ spdk_app_parse_args(int argc, char **argv, struct spdk_app_opts *opts, + printf(SPDK_VERSION_STRING"\n"); + retval = SPDK_APP_PARSE_ARGS_HELP; + goto out; ++ case HOT_RESTART_OPT_IDX: ++ opts->hot_restart = true; ++ break; + case '?': + /* + * In the event getopt() above detects an option +diff --git a/lib/event/spdk_event.map b/lib/event/spdk_event.map +index 1eaa301..bacd170 100644 +--- a/lib/event/spdk_event.map ++++ b/lib/event/spdk_event.map +@@ -16,6 +16,7 @@ + spdk_event_call; + spdk_framework_enable_context_switch_monitor; + spdk_framework_context_switch_monitor_enabled; ++ spdk_get_shutdown_sig_received; + + # Public scheduler functions + spdk_scheduler_set; +diff --git a/lib/init/json_config.c b/lib/init/json_config.c +index 787fd99..11d5f16 100644 +--- a/lib/init/json_config.c ++++ b/lib/init/json_config.c +@@ -19,6 +19,8 @@ + #include "spdk_internal/event.h" + + #define SPDK_DEBUG_APP_CFG(...) 
SPDK_DEBUGLOG(app_config, __VA_ARGS__) ++#define SPDK_JSON_CONFIG_HOT_RESTART_INTERVAL 4 /* 4us */ ++#define SPDK_JSON_CONFIG_SELECT_INTERNAL 4000 /* 4ms */ + + /* JSON configuration format is as follows + * +@@ -335,6 +337,15 @@ app_json_config_load_subsystem_config_entry(void *_ctx) + uint32_t state_mask = 0, cur_state_mask, startup_runtime = SPDK_RPC_STARTUP | SPDK_RPC_RUNTIME; + int rc; + ++ if (spdk_get_shutdown_sig_received()) { ++ /* ++ * In the hot restart process, when this callback is triggered, ++ * rpc and thread may have been released. ++ * Therefore, dont continue. ++ */ ++ return; ++ } ++ + if (ctx->config_it == NULL) { + SPDK_DEBUG_APP_CFG("Subsystem '%.*s': configuration done.\n", ctx->subsystem_name->len, + (char *)ctx->subsystem_name->start); +@@ -571,12 +582,26 @@ err: + return -EINVAL; + } + ++static bool g_hot_restart_flag = false; ++bool ++spdk_ssam_get_hot_restart(void) ++{ ++ return g_hot_restart_flag; ++} ++ ++void ++spdk_ssam_set_hot_restart(bool value) ++{ ++ g_hot_restart_flag = value; ++} ++ + static void + json_config_prepare_ctx(spdk_subsystem_init_fn cb_fn, void *cb_arg, bool stop_on_error, void *json, + ssize_t json_size, bool initalize_subsystems) + { + struct load_json_config_ctx *ctx = calloc(1, sizeof(*ctx)); + int rc; ++ int internal; + + if (!ctx) { + cb_fn(-ENOMEM, cb_arg); +@@ -626,7 +651,13 @@ json_config_prepare_ctx(spdk_subsystem_init_fn cb_fn, void *cb_arg, bool stop_on + goto fail; + } + +- rc = spdk_rpc_initialize(ctx->rpc_socket_path_temp, NULL); ++ if (spdk_ssam_get_hot_restart() == true) { ++ internal = SPDK_JSON_CONFIG_HOT_RESTART_INTERVAL; ++ } else { ++ internal = SPDK_JSON_CONFIG_SELECT_INTERNAL; ++ } ++ ++ rc = spdk_rpc_initialize(ctx->rpc_socket_path_temp, NULL, internal); + if (rc) { + goto fail; + } +diff --git a/lib/init/rpc.c b/lib/init/rpc.c +index f56edf9..30dbe13 100644 +--- a/lib/init/rpc.c ++++ b/lib/init/rpc.c +@@ -11,8 +11,6 @@ + #include "spdk/log.h" + #include "spdk/rpc.h" + +-#define RPC_SELECT_INTERVAL 4000 /* 4ms */ +- + static struct spdk_poller *g_rpc_poller = NULL; + + struct init_rpc_server { +@@ -127,7 +125,7 @@ get_server_by_addr(const char *listen_addr) + } + + int +-spdk_rpc_initialize(const char *listen_addr, const struct spdk_rpc_opts *opts) ++spdk_rpc_initialize(const char *listen_addr, const struct spdk_rpc_opts *opts, int internval) + { + struct init_rpc_server *init_server; + int rc; +@@ -174,7 +172,7 @@ spdk_rpc_initialize(const char *listen_addr, const struct spdk_rpc_opts *opts) + STAILQ_INSERT_TAIL(&g_init_rpc_servers, init_server, link); + if (g_rpc_poller == NULL) { + /* Register a poller to periodically check for RPCs */ +- g_rpc_poller = SPDK_POLLER_REGISTER(rpc_subsystem_poll_servers, NULL, RPC_SELECT_INTERVAL); ++ g_rpc_poller = SPDK_POLLER_REGISTER(rpc_subsystem_poll_servers, NULL, internval); + } + + return 0; +diff --git a/lib/init/spdk_init.map b/lib/init/spdk_init.map +index 9066458..4ef1ed0 100644 +--- a/lib/init/spdk_init.map ++++ b/lib/init/spdk_init.map +@@ -16,6 +16,8 @@ + spdk_rpc_server_pause; + spdk_rpc_server_resume; + spdk_subsystem_load_config; ++ spdk_ssam_get_hot_restart; ++ spdk_ssam_set_hot_restart; + + local: *; + }; +diff --git a/lib/scsi/lun.c b/lib/scsi/lun.c +index f4e32e4..66963cc 100644 +--- a/lib/scsi/lun.c ++++ b/lib/scsi/lun.c +@@ -10,6 +10,8 @@ + #include "spdk/thread.h" + #include "spdk/util.h" + #include "spdk/likely.h" ++#include "spdk/event.h" ++#include "spdk/bdev_module.h" + + static void scsi_lun_execute_tasks(struct spdk_scsi_lun *lun); + static void 
_scsi_lun_execute_mgmt_task(struct spdk_scsi_lun *lun); +@@ -347,6 +349,16 @@ _scsi_lun_hot_remove(void *arg1) + { + struct spdk_scsi_lun *lun = arg1; + ++ if (spdk_unlikely(spdk_get_shutdown_sig_received())) { ++ /* ++ * In the hot restart process, when this callback is triggered, ++ * the task and bdev_io memory may have been released. ++ * Therefore, outstanding task are not executed in this scenario. ++ */ ++ scsi_lun_notify_hot_remove(lun); ++ return; ++ } ++ + /* If lun->removed is set, no new task can be submitted to the LUN. + * Execute previously queued tasks, which will be immediately aborted. + */ +diff --git a/lib/ssam/Makefile b/lib/ssam/Makefile +new file mode 100644 +index 0000000..bf76d14 +--- /dev/null ++++ b/lib/ssam/Makefile +@@ -0,0 +1,26 @@ ++# SPDX-License-Identifier: BSD-3-Clause ++# Copyright (C) 2021-2025 Huawei Technologies Co. ++# All rights reserved. ++# ++ ++SPDK_ROOT_DIR := $(abspath $(CURDIR)/../..) ++include $(SPDK_ROOT_DIR)/mk/spdk.common.mk ++ ++SO_VER := 1 ++SO_MINOR := 0 ++ ++CFLAGS += -I. -I../../dpdk/lib/eal/common ++CFLAGS += $(ENV_CFLAGS) ++ ++C_SRCS = ssam.c ssam_blk.c ssam_rpc.c \ ++ ssam_config.c ssam_scsi.c ssam_malloc.c ssam_device_pcie.c ++C_SRCS += ssam_driver/ssam_driver.c ++C_SRCS += ssam_driver/ssam_dbdf.c ++C_SRCS += ssam_driver/ssam_mempool.c ++C_SRCS += ssam_driver/ssam_driver_adapter.c ++ ++LIBNAME = ssam ++ ++SPDK_MAP_FILE = $(abspath $(CURDIR)/spdk_ssam.map) ++ ++include $(SPDK_ROOT_DIR)/mk/spdk.lib.mk +diff --git a/lib/ssam/spdk_ssam.map b/lib/ssam/spdk_ssam.map +new file mode 100644 +index 0000000..9bef6f9 +--- /dev/null ++++ b/lib/ssam/spdk_ssam.map +@@ -0,0 +1,16 @@ ++{ ++ global: ++ ++ # public functions ++ spdk_ssam_user_config_init; ++ spdk_ssam_exit; ++ spdk_ssam_subsystem_fini; ++ spdk_ssam_subsystem_init; ++ spdk_ssam_config_json; ++ spdk_ssam_set_shm_created; ++ spdk_ssam_get_shm_created; ++ spdk_ssam_poller_start; ++ spdk_ssam_rc_preinit; ++ ++ local: *; ++}; +diff --git a/lib/ssam/ssam.c b/lib/ssam/ssam.c +new file mode 100644 +index 0000000..3532236 +--- /dev/null ++++ b/lib/ssam/ssam.c +@@ -0,0 +1,1712 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. ++ */ ++ ++#include ++#include ++ ++#include "spdk/scsi_spec.h" ++#include "spdk/scsi.h" ++#include "spdk/stdinc.h" ++#include "spdk/env.h" ++#include "spdk/likely.h" ++#include "spdk/string.h" ++#include "spdk/util.h" ++#include "spdk/memory.h" ++#include "spdk/barrier.h" ++#include "spdk/bdev_module.h" ++#include "spdk/bdev.h" ++#include "spdk/endian.h" ++ ++#include "ssam_internal.h" ++ ++#define SSAM_PF_NUM_MAX_VAL 31 ++#define SSAM_PF_PLUS_VF_NUM_MAX_VAL 4096 ++ ++#define INQUIRY_OFFSET(field) \ ++ offsetof(struct spdk_scsi_cdb_inquiry_data, field) + \ ++ sizeof(((struct spdk_scsi_cdb_inquiry_data *)0x0)->field) ++ ++#define IO_STUCK_TIMEOUT 120 ++#define SEND_EVENT_WAIT_TIME 10 ++#define VMIO_TYPE_VIRTIO_SCSI_CTRL 4 ++#define DEVICE_READY_TIMEOUT 15 ++#define DEVICE_READY_WAIT_TIME 100000 ++ ++bool g_ssam_subsystem_exit = false; ++ ++struct ssam_event_user_ctx { ++ bool session_freed; /* true if session has been freed */ ++ bool async_done; /* true if session event done */ ++ void *ctx; /* store user context pointer */ ++}; ++ ++struct ssam_session_fn_ctx { ++ /* Device session pointer obtained before enqueuing the event */ ++ struct spdk_ssam_session *smsession; ++ ++ spdk_ssam_session_rsp_fn *rsp_fn; ++ ++ void *rsp_ctx; ++ ++ /* User provided function to be executed on session's thread. 
*/ ++ spdk_ssam_session_fn cb_fn; ++ /** ++ * User provided function to be called on the init thread ++ * after iterating through all sessions. ++ */ ++ spdk_ssam_session_cpl_fn cpl_fn; ++ ++ /* Custom user context */ ++ struct ssam_event_user_ctx user_ctx; ++ ++ /* Session start event time */ ++ uint64_t start_tsc; ++ ++ bool need_async; ++ ++ int rsp; ++}; ++ ++/* ssam total infomation */ ++struct spdk_ssam_info { ++ ssam_mempool_t *mp[SSAM_MAX_CORE_NUM]; ++}; ++ ++static struct spdk_ssam_info g_ssam_info; ++ ++/* Thread performing all ssam management operations */ ++static struct spdk_thread *g_ssam_init_thread; ++ ++static TAILQ_HEAD(, spdk_ssam_dev) g_ssam_devices = ++ TAILQ_HEAD_INITIALIZER(g_ssam_devices); ++ ++static pthread_mutex_t g_ssam_mutex = PTHREAD_MUTEX_INITIALIZER; ++ ++/* Save cpu mask when ssam management thread started */ ++static struct spdk_cpuset g_ssam_core_mask; ++ ++/* Call back when ssam_fini complete */ ++static spdk_ssam_fini_cb g_ssam_fini_cpl_cb; ++ ++static int ssam_init(void); ++ ++static int ++ssam_sessions_init(struct spdk_ssam_session ***smsession) ++{ ++ *smsession = (struct spdk_ssam_session **)calloc( ++ SSAM_MAX_SESSION_PER_DEV, sizeof(struct spdk_ssam_session *)); ++ if (*smsession == NULL) { ++ SPDK_ERRLOG("calloc sessions failed\n"); ++ return -ENOMEM; ++ } ++ return 0; ++} ++ ++static int ++ssam_sessions_insert(struct spdk_ssam_session **smsessions, struct spdk_ssam_session *smsession) ++{ ++ uint16_t i = smsession->gfunc_id; ++ ++ if (smsessions[i] != NULL) { ++ SPDK_ERRLOG("smsessions already have such sesseion\n"); ++ return -ENOSPC; ++ } ++ ++ smsessions[i] = smsession; ++ ++ return 0; ++} ++ ++void ++ssam_sessions_remove(struct spdk_ssam_session **smsessions, struct spdk_ssam_session *smsession) ++{ ++ uint16_t i = smsession->gfunc_id; ++ ++ if (smsessions[i] == NULL) { ++ SPDK_WARNLOG("smsessions no such sesseion\n"); ++ return; ++ } ++ ++ smsessions[i] = NULL; ++ return; ++} ++ ++static struct spdk_ssam_session * ++ssam_sessions_first(int begin, struct spdk_ssam_session **smsessions) ++{ ++ int i; ++ ++ for (i = begin; i < SSAM_MAX_SESSION_PER_DEV; i++) { ++ if (smsessions[i] != NULL) { ++ return smsessions[i]; ++ } ++ } ++ return NULL; ++} ++ ++bool ++ssam_sessions_empty(struct spdk_ssam_session **smsessions) ++{ ++ struct spdk_ssam_session *session; ++ ++ session = ssam_sessions_first(0, smsessions); ++ if (session == NULL) { ++ return true; ++ } ++ ++ return false; ++} ++ ++struct spdk_ssam_session * ++ssam_sessions_next(struct spdk_ssam_session **smsessions, struct spdk_ssam_session *smsession) ++{ ++ if (smsession == NULL) { ++ return ssam_sessions_first(0, smsessions); ++ } ++ if (smsession->gfunc_id == SSAM_MAX_SESSION_PER_DEV) { ++ return NULL; ++ } ++ return ssam_sessions_first(smsession->gfunc_id + 1, smsessions); ++} ++ ++void ++ssam_session_insert_io_wait(struct spdk_ssam_session *smsession, ++ struct spdk_ssam_session_io_wait *io_wait) ++{ ++ TAILQ_INSERT_TAIL(&smsession->smdev->io_wait_queue, io_wait, link); ++ smsession->smdev->io_wait_cnt++; ++} ++ ++static void ++ssam_session_remove_io_wait(struct spdk_ssam_dev *smdev, ++ struct spdk_ssam_session_io_wait *session_io_wait) ++{ ++ TAILQ_REMOVE(&smdev->io_wait_queue, session_io_wait, link); ++ smdev->io_wait_cnt--; ++} ++ ++void ++ssam_session_insert_io_wait_r(struct spdk_ssam_dev *smdev, ++ struct spdk_ssam_session_io_wait_r *io_wait_r) ++{ ++ TAILQ_INSERT_TAIL(&smdev->io_wait_queue_r, io_wait_r, link); ++ smdev->io_wait_r_cnt++; ++} ++ ++static void 
++ssam_session_remove_io_wait_r(struct spdk_ssam_dev *smdev, ++ struct spdk_ssam_session_io_wait_r *session_io_wait_r) ++{ ++ TAILQ_REMOVE(&smdev->io_wait_queue_r, session_io_wait_r, link); ++ smdev->io_wait_r_cnt--; ++} ++ ++void ++ssam_session_destroy(struct spdk_ssam_session *smsession) ++{ ++ if (smsession == NULL || smsession->smdev == NULL) { ++ return; ++ } ++ /* Remove smsession from the queue in advance to prevent access by the poller thread. */ ++ if (!ssam_sessions_empty(smsession->smdev->smsessions)) { ++ ssam_sessions_remove(smsession->smdev->smsessions, smsession); ++ } ++ /* The smdev poller is not deleted here, but at the end of the app. */ ++} ++ ++uint64_t ++ssam_get_diff_tsc(uint64_t tsc) ++{ ++ return spdk_get_ticks() - tsc; ++} ++ ++int ++ssam_check_gfunc_id(uint16_t gfunc_id) ++{ ++ enum ssam_device_type type; ++ ++ if (gfunc_id == SPDK_INVALID_GFUNC_ID) { ++ SPDK_ERRLOG("Check gfunc_id(%u) error\n", gfunc_id); ++ return -EINVAL; ++ } ++ ++ type = ssam_get_virtio_type(gfunc_id); ++ if (type >= SSAM_DEVICE_VIRTIO_MAX) { ++ SPDK_ERRLOG("Check gfunc_id(%u) virtio type(%d) error\n", gfunc_id, type); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ ++/* Find a tid which has minimum device */ ++static uint16_t ++ssam_get_min_payload_tid(uint16_t cpu_num) ++{ ++ if (cpu_num == 0) { ++ return SPDK_INVALID_TID; ++ } ++ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_dev *tmp = NULL; ++ /* All tid have been used, find a tid which has minimum device */ ++ uint32_t min = UINT32_MAX; ++ uint16_t tid = 0; ++ ++ TAILQ_FOREACH_SAFE(smdev, &g_ssam_devices, tailq, tmp) { ++ if (smdev->active_session_num < min) { ++ min = smdev->active_session_num; ++ tid = smdev->tid; ++ } ++ } ++ ++ return tid; ++} ++ ++/* Get a tid number */ ++uint16_t ++ssam_get_tid(void) ++{ ++ uint32_t cpu_num; ++ ++ cpu_num = spdk_cpuset_count(&g_ssam_core_mask); ++ if ((cpu_num == 0) || (cpu_num > UINT16_MAX)) { ++ /* If cpu_num > UINT16_MAX, the result of tid will overflow */ ++ SPDK_ERRLOG("CPU num %u not valid.\n", cpu_num); ++ return SPDK_INVALID_TID; ++ } ++ ++ return ssam_get_min_payload_tid((uint16_t)cpu_num); ++} ++ ++void ++ssam_lock(void) ++{ ++ pthread_mutex_lock(&g_ssam_mutex); ++} ++ ++int ++ssam_trylock(void) ++{ ++ return pthread_mutex_trylock(&g_ssam_mutex); ++} ++ ++void ++ssam_unlock(void) ++{ ++ pthread_mutex_unlock(&g_ssam_mutex); ++} ++ ++static struct spdk_ssam_session * ++ssam_session_find_in_dev(const struct spdk_ssam_dev *smdev, ++ uint16_t gfunc_id) ++{ ++ return smdev->smsessions[gfunc_id]; ++} ++ ++void ++ssam_dump_info_json(struct spdk_ssam_dev *smdev, uint16_t gfunc_id, ++ struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_session *smsession = NULL; ++ ++ spdk_json_write_named_array_begin(w, "session"); ++ if (gfunc_id == UINT16_MAX) { ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ while (smsession != NULL) { ++ smsession->backend->dump_info_json(smsession, w); ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ } else { ++ smsession = ssam_session_find_in_dev(smdev, gfunc_id); ++ smsession->backend->dump_info_json(smsession, w); ++ } ++ ++ spdk_json_write_array_end(w); ++} ++ ++const char * ++ssam_dev_get_name(const struct spdk_ssam_dev *smdev) ++{ ++ if (!smdev) { ++ return ""; ++ } ++ return smdev->name; ++} ++ ++const char * ++ssam_session_get_name(const struct spdk_ssam_session *smsession) ++{ ++ if (!smsession) { ++ return ""; ++ } ++ return smsession->name; ++} ++ ++struct spdk_ssam_dev * ++ssam_dev_next(const struct 
spdk_ssam_dev *smdev) ++{ ++ if (smdev == NULL) { ++ return TAILQ_FIRST(&g_ssam_devices); ++ } ++ ++ return TAILQ_NEXT(smdev, tailq); ++} ++ ++struct spdk_ssam_session * ++ssam_session_find(uint16_t gfunc_id) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_dev *tmp = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ TAILQ_FOREACH_SAFE(smdev, &g_ssam_devices, tailq, tmp) { ++ smsession = ssam_session_find_in_dev(smdev, gfunc_id); ++ if (smsession != NULL) { ++ return smsession; ++ } ++ } ++ ++ return NULL; ++} ++ ++uint16_t ++ssam_get_gfunc_id_by_name(char *name) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_dev *tmp = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ uint16_t gfunc_id; ++ TAILQ_FOREACH_SAFE(smdev, &g_ssam_devices, tailq, tmp) { ++ if (smdev != NULL && smdev->active_session_num > 0) { ++ for (gfunc_id = 0; gfunc_id <= SSAM_PF_NUM_MAX_VAL; gfunc_id++) { ++ smsession = ssam_session_find_in_dev(smdev, gfunc_id); ++ if (smsession != NULL && strcmp(name, smsession->name) == 0) { ++ return gfunc_id; ++ } ++ } ++ } ++ } ++ ++ SPDK_WARNLOG("controller(%s) is not existed\n", name); ++ return SPDK_INVALID_GFUNC_ID; ++} ++ ++static struct spdk_ssam_dev * ++ssam_dev_find(uint16_t tid) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_dev *tmp = NULL; ++ ++ TAILQ_FOREACH_SAFE(smdev, &g_ssam_devices, tailq, tmp) { ++ if (smdev->tid == tid) { ++ return smdev; ++ } ++ } ++ ++ return NULL; ++} ++ ++int ++ssam_mount_normal(struct spdk_ssam_session *smsession, uint32_t lun_id) ++{ ++ uint16_t gfunc_id = smsession->gfunc_id; ++ uint16_t tid = gfunc_id % ssam_get_core_num(); ++ ++ return ssam_function_mount(gfunc_id, lun_id, SSAM_MOUNT_NORMAL, tid); ++} ++ ++int ++ssam_umount_normal(struct spdk_ssam_session *smsession, uint32_t lun_id) ++{ ++ int rc; ++ ++ rc = ssam_function_umount(smsession->gfunc_id, lun_id); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: function umount failed when add scsi tgt, %d.\n", smsession->name, rc); ++ return rc; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_remount_normal(struct spdk_ssam_session *smsession, uint32_t lun_id) ++{ ++ return ssam_function_mount(smsession->gfunc_id, lun_id, SSAM_MOUNT_NORMAL, smsession->smdev->tid); ++} ++ ++static int ++ssam_remove_session(struct spdk_ssam_session *smsession) ++{ ++ int rc; ++ ++ if (smsession->backend->remove_session != NULL) { ++ rc = smsession->backend->remove_session(smsession); ++ if (rc != 0) { ++ SPDK_ERRLOG("session: %s can not be removed, task cnt %d.\n", ++ smsession->name, smsession->task_cnt); ++ return rc; ++ } ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_dev_thread_exit(void *unused) ++{ ++ (void)unused; ++ spdk_thread_exit(spdk_get_thread()); ++} ++ ++static int ++ssam_tid_to_cpumask(uint16_t tid, struct spdk_cpuset *cpumask) ++{ ++ uint32_t core; ++ uint32_t lcore; ++ uint32_t cnt; ++ ++ for (lcore = 0, cnt = 0; lcore < SPDK_CPUSET_SIZE - 1; lcore++) { ++ if (spdk_cpuset_get_cpu(&g_ssam_core_mask, lcore)) { ++ if (cnt == tid) { ++ core = lcore; ++ spdk_cpuset_set_cpu(cpumask, core, true); ++ return 0; ++ } ++ cnt++; ++ } ++ } ++ ++ return -1; ++} ++ ++void ++ssam_session_start_done(struct spdk_ssam_session *smsession, int response) ++{ ++ if (response == 0) { ++ if (smsession->smdev->active_session_num == UINT32_MAX) { ++ SPDK_ERRLOG("smsession %s: active session num reached upper limit %u\n", ++ smsession->name, smsession->smdev->active_session_num); ++ return; ++ } ++ smsession->smdev->active_session_num++; ++ } ++} ++ ++void 
++ssam_set_session_be_freed(void **ctx) ++{ ++ struct ssam_event_user_ctx *_ctx; ++ ++ if (ctx == NULL) { ++ return; ++ } ++ ++ _ctx = SPDK_CONTAINEROF(ctx, struct ssam_event_user_ctx, ctx); ++ _ctx->session_freed = true; ++} ++ ++void ++ssam_send_event_async_done(void **ctx) ++{ ++ struct ssam_event_user_ctx *_ctx; ++ ++ if (ctx == NULL) { ++ return; ++ } ++ ++ _ctx = SPDK_CONTAINEROF(ctx, struct ssam_event_user_ctx, ctx); ++ _ctx->async_done = true; ++} ++ ++void ++ssam_session_stop_done(struct spdk_ssam_session *smsession, int rsp, void **ctx) ++{ ++ if (rsp == 0) { ++ if (smsession->smdev->active_session_num > 0) { ++ smsession->smdev->active_session_num--; ++ } else { ++ SPDK_ERRLOG("smsession %s: active session num reached lower limit %u\n", ++ smsession->name, smsession->smdev->active_session_num); ++ } ++ } ++ /* Smdev cannot be free here */ ++ ++ /* Stop process need async */ ++ ssam_send_event_async_done(ctx); ++} ++ ++void ++ssam_session_unreg_response_cb(struct spdk_ssam_session *smsession) ++{ ++ smsession->rsp_fn = NULL; ++ smsession->rsp_ctx = NULL; ++} ++ ++static int ++ssam_dev_create_register(struct spdk_ssam_dev *smdev, uint16_t tid) ++{ ++ char name[NAME_MAX]; ++ struct spdk_cpuset cpumask; ++ int rc; ++ ++ smdev->tid = tid; ++ ++ rc = snprintf(name, NAME_MAX, "%s%u", "ssam.", smdev->tid); ++ if (rc < 0 || rc >= NAME_MAX) { ++ SPDK_ERRLOG("ssam dev name is too long, tid %u\n", tid); ++ return -EINVAL; ++ } ++ ++ spdk_cpuset_zero(&cpumask); ++ if (ssam_tid_to_cpumask(tid, &cpumask)) { ++ SPDK_ERRLOG("Can not find cpu for tid %u\n", tid); ++ return -EINVAL; ++ } ++ ++ smdev->name = strdup(name); ++ if (smdev->name == NULL) { ++ SPDK_ERRLOG("Failed to create name for ssam controller %s.\n", name); ++ return -EIO; ++ } ++ ++ smdev->thread = spdk_thread_create(smdev->name, &cpumask); ++ if (smdev->thread == NULL) { ++ SPDK_ERRLOG("Failed to create thread for ssam controller %s.\n", name); ++ free(smdev->name); ++ smdev->name = NULL; ++ return -EIO; ++ } ++ ++ rc = ssam_sessions_init(&smdev->smsessions); ++ if (rc != 0) { ++ return rc; ++ } ++ TAILQ_INSERT_TAIL(&g_ssam_devices, smdev, tailq); ++ TAILQ_INIT(&smdev->io_wait_queue); ++ TAILQ_INIT(&smdev->io_wait_queue_r); ++ ++ SPDK_NOTICELOG("Controller %s: new controller added, tid %u\n", smdev->name, tid); ++ ++ return 0; ++} ++ ++void ++ssam_dev_unregister(struct spdk_ssam_dev **dev) ++{ ++ struct spdk_ssam_dev *smdev = *dev; ++ struct spdk_thread *thread = smdev->thread; ++ ++ if (!ssam_sessions_empty(smdev->smsessions)) { ++ SPDK_NOTICELOG("Controller %s still has valid session.\n", ++ smdev->name); ++ return; ++ } ++ memset(smdev->smsessions, 0, SSAM_MAX_SESSION_PER_DEV * sizeof(struct spdk_ssam_session *)); ++ free(smdev->smsessions); ++ smdev->smsessions = NULL; ++ ++ /* Used for hot restart. 
*/ ++ if (smdev->stop_poller != NULL) { ++ spdk_poller_unregister(&smdev->stop_poller); ++ smdev->stop_poller = NULL; ++ } ++ ++ SPDK_NOTICELOG("Controller %s: removed\n", smdev->name); ++ ++ free(smdev->name); ++ smdev->name = NULL; ++ ssam_lock(); ++ TAILQ_REMOVE(&g_ssam_devices, smdev, tailq); ++ ssam_unlock(); ++ ++ free(smdev); ++ smdev = NULL; ++ *dev = NULL; ++ ++ spdk_thread_send_msg(thread, ssam_dev_thread_exit, NULL); ++ ++ return; ++} ++ ++static int ++ssam_init_session_fields(struct spdk_ssam_session_reg_info *info, ++ struct spdk_ssam_dev *smdev, struct spdk_ssam_session *smsession) ++{ ++ smsession->mp = g_ssam_info.mp[smdev->tid % ssam_get_core_num()]; ++ smsession->initialized = true; ++ smsession->registered = true; ++ smsession->thread = smdev->thread; ++ smsession->backend = info->backend; ++ smsession->smdev = smdev; ++ smsession->gfunc_id = info->gfunc_id; ++ smsession->started = false; ++ smsession->rsp_fn = info->rsp_fn; ++ smsession->rsp_ctx = info->rsp_ctx; ++ smsession->max_queues = info->queues; ++ smsession->queue_size = SPDK_SSAM_DEFAULT_VQ_SIZE; ++ if (info->name == NULL) { ++ smsession->name = spdk_sprintf_alloc("%s_%s_%d", smdev->name, info->type_name, info->gfunc_id); ++ } else { ++ smsession->name = strdup(info->name); ++ } ++ if (smsession->name == NULL) { ++ SPDK_ERRLOG("smsession name alloc failed\n"); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_add_session(struct spdk_ssam_session_reg_info *info, ++ struct spdk_ssam_dev *smdev, struct spdk_ssam_session **smsession) ++{ ++ struct spdk_ssam_session *l_stsession = NULL; ++ size_t with_ctx_len = sizeof(*l_stsession) + info->session_ctx_size; ++ int rc; ++ ++ if (smdev->active_session_num == SSAM_MAX_SESSION_PER_DEV) { ++ SPDK_ERRLOG("%s reached upper limit %u\n", smdev->name, SSAM_MAX_SESSION_PER_DEV); ++ return -EAGAIN; ++ } ++ ++ if (g_ssam_info.mp == NULL) { ++ SPDK_ERRLOG("No memory pool\n"); ++ return -ENOMEM; ++ } ++ ++ rc = posix_memalign((void **)&l_stsession, SPDK_CACHE_LINE_SIZE, with_ctx_len); ++ if (rc != 0) { ++ SPDK_ERRLOG("smsession alloc failed\n"); ++ return -ENOMEM; ++ } ++ memset(l_stsession, 0, with_ctx_len); ++ ++ rc = ssam_init_session_fields(info, smdev, l_stsession); ++ if (rc != 0) { ++ free(l_stsession); ++ l_stsession = NULL; ++ return rc; ++ } ++ ++ rc = ssam_sessions_insert(smdev->smsessions, l_stsession); ++ if (rc != 0) { ++ return rc; ++ } ++ *smsession = l_stsession; ++ if (smdev->type == VIRTIO_TYPE_UNKNOWN) { ++ smdev->type = info->backend->type; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_dev_register(struct spdk_ssam_dev **dev, uint16_t tid) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ int rc; ++ ++ smdev = calloc(1, sizeof(*smdev)); ++ if (smdev == NULL) { ++ SPDK_ERRLOG("Couldn't alloc device for tid %u.\n", tid); ++ return -1; ++ } ++ ++ rc = ssam_dev_create_register(smdev, tid); ++ if (rc != 0) { ++ free(smdev); ++ smdev = NULL; ++ return -1; ++ } ++ ++ *dev = smdev; ++ ++ return 0; ++} ++ ++int ++ssam_session_register(struct spdk_ssam_session_reg_info *info, ++ struct spdk_ssam_session **smsession) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ int rc; ++ ++ if (ssam_session_find(info->gfunc_id) && (strcmp(info->type_name, SPDK_SESSION_TYPE_BLK) != 0)) { ++ SPDK_ERRLOG("Session with function id %d already exists.\n", info->gfunc_id); ++ return -EEXIST; ++ } ++ ++ smdev = ssam_dev_find(info->tid); ++ if (smdev == NULL) { ++ /* The smdev has been started during process initialization. Do not need to start the poller here. 
*/ ++ SPDK_ERRLOG("No device with function id %d tid %u.\n", info->gfunc_id, info->tid); ++ return -ENODEV; ++ } ++ ++ rc = ssam_add_session(info, smdev, smsession); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_session_unregister(struct spdk_ssam_session *smsession) ++{ ++ int rc; ++ ++ if (smsession == NULL) { ++ SPDK_ERRLOG("smsession null.\n"); ++ return -EINVAL; ++ } ++ ++ if (smsession->pending_async_op_num != 0) { ++ SPDK_ERRLOG("[OFFLOAD_SNIC] %s has internal events(%d) and cannot be deleted.\n", ++ smsession->name, smsession->pending_async_op_num); ++ return -EBUSY; ++ } ++ ++ rc = ssam_remove_session(smsession); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_io_queue_handle(struct spdk_ssam_dev *smdev) ++{ ++ uint64_t count = 0; ++ uint64_t io_wait_cnt = smdev->io_wait_cnt; ++ while (count < io_wait_cnt) { ++ struct spdk_ssam_session_io_wait *io_wait = TAILQ_FIRST(&smdev->io_wait_queue); ++ ssam_session_remove_io_wait(smdev, io_wait); ++ if (io_wait->cb_fn != NULL) { ++ io_wait->cb_fn(io_wait->cb_arg); ++ } ++ count++; ++ } ++} ++ ++struct forward_ctx { ++ struct spdk_ssam_session *smsession; ++ struct ssam_request *io_req; ++}; ++ ++static void ++ssam_handle_forward_req(void *_ctx) ++{ ++ struct forward_ctx *ctx = (struct forward_ctx *)_ctx; ++ ctx->smsession->backend->request_worker(ctx->smsession, ctx->io_req); ++ free(ctx); ++} ++/* The resent request that is polled at the beginning of the hot restart is not the smsession of this smdev ++ * and needs to be forwarded to the corresponding smdev. ++ * If the forwarding is successful, true is returned. Otherwise, false is returned. ++ */ ++static bool ++ssam_dev_forward_req(struct ssam_request *io_req) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct forward_ctx *ctx = NULL; ++ int rc; ++ ssam_lock(); ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ if (smdev->smsessions[io_req->gfunc_id] != NULL && ++ smdev->smsessions[io_req->gfunc_id]->started == true) { ++ ctx = calloc(1, sizeof(struct forward_ctx)); ++ if (!ctx) { ++ SPDK_ERRLOG("%s: calloc failed.\n", smdev->name); ++ goto out; ++ } ++ ctx->smsession = smdev->smsessions[io_req->gfunc_id]; ++ ctx->io_req = io_req; ++ rc = spdk_thread_send_msg(smdev->smsessions[io_req->gfunc_id]->thread, ssam_handle_forward_req, ++ ctx); ++ if (rc) { ++ SPDK_ERRLOG("%s: send msg error %d.\n", smdev->name, rc); ++ free(ctx); ++ goto out; ++ } ++ ssam_unlock(); ++ return true; ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++out: ++ ssam_unlock(); ++ return false; ++} ++ ++struct ssam_dev_io_complete_arg { ++ struct spdk_ssam_dev *smdev; ++ struct ssam_io_response io_resp; ++}; ++ ++static void ++ssam_dev_io_complete_cb(void *arg) ++{ ++ struct ssam_dev_io_complete_arg *cb_arg = (struct ssam_dev_io_complete_arg *)arg; ++ int rc = ssam_io_complete(cb_arg->smdev->tid, &cb_arg->io_resp); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_dev_io_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(cb_arg->smdev, io_wait_r); ++ return; ++ } ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ++ssam_dev_io_complete(struct spdk_ssam_dev *smdev, struct ssam_request *io_req, bool success) ++{ ++ struct ssam_io_response io_resp; ++ struct ssam_virtio_res *virtio_res = (struct ssam_virtio_res 
*)&io_resp.data; ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ struct iovec io_vec; ++ struct virtio_scsi_cmd_resp resp = {0}; ++ enum ssam_device_type type; ++ uint8_t res_status; ++ int rc; ++ type = ssam_get_virtio_type(io_req->gfunc_id); ++ ++ if (success) { ++ switch (type) { ++ case SSAM_DEVICE_VIRTIO_BLK: ++ res_status = VIRTIO_BLK_S_OK; ++ break; ++ case SSAM_DEVICE_VIRTIO_SCSI: ++ res_status = VIRTIO_SCSI_S_OK; ++ break; ++ default: ++ res_status = 0; /* unknown type, maybe 0 means ok */ ++ } ++ } else { ++ SPDK_INFOLOG(ssam, "%s: io complete return error gfunc_id %u type %d.\n", ++ smdev->name, io_req->gfunc_id, type); ++ switch (type) { ++ case SSAM_DEVICE_VIRTIO_BLK: ++ res_status = VIRTIO_BLK_S_IOERR; ++ break; ++ case SSAM_DEVICE_VIRTIO_SCSI: ++ res_status = VIRTIO_SCSI_S_FAILURE; ++ break; ++ default: ++ res_status = 1; /* unknown type, maybe 1 means error */ ++ } ++ } ++ ++ memset(&io_resp, 0, sizeof(io_resp)); ++ io_resp.gfunc_id = io_req->gfunc_id; ++ io_resp.iocb_id = io_req->iocb_id; ++ io_resp.status = io_req->status; ++ io_resp.flr_seq = io_req->flr_seq; ++ io_resp.req = io_req; ++ ++ virtio_res->iovs = &io_vec; ++ if (type == SSAM_DEVICE_VIRTIO_SCSI && io_cmd->writable) { ++ virtio_res->iovs->iov_base = io_cmd->iovs[1].iov_base; ++ virtio_res->iovs->iov_len = io_cmd->iovs[1].iov_len; ++ } else { ++ virtio_res->iovs->iov_base = io_cmd->iovs[io_cmd->iovcnt - 1].iov_base; ++ virtio_res->iovs->iov_len = io_cmd->iovs[io_cmd->iovcnt - 1].iov_len; ++ } ++ virtio_res->iovcnt = 1; ++ if (type == SSAM_DEVICE_VIRTIO_SCSI && io_req->type != VMIO_TYPE_VIRTIO_SCSI_CTRL) { ++ resp.response = res_status; ++ virtio_res->rsp = &resp; ++ virtio_res->rsp_len = sizeof(struct virtio_scsi_cmd_resp); ++ } else { ++ virtio_res->rsp = &res_status; ++ virtio_res->rsp_len = sizeof(res_status); ++ } ++ ++ rc = ssam_io_complete(smdev->tid, &io_resp); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_dev_io_complete_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_dev_io_complete_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smdev; ++ cb_arg->io_resp = io_resp; ++ io_wait_r->cb_fn = ssam_dev_io_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smdev, io_wait_r); ++ } ++} ++ ++static void ++ssam_dev_io_request(struct spdk_ssam_dev *smdev, struct ssam_request *io_req) ++{ ++ struct spdk_ssam_session *smsession = NULL; ++ ++ SPDK_INFOLOG(ssam_blk_data, "handling io tid=%u gfunc_id=%u type=%d rw=%u vqid=%u reqid=%u.\n", ++ smdev->tid, io_req->gfunc_id, io_req->type, io_req->req.cmd.writable, ++ io_req->req.cmd.virtio.vq_idx, io_req->req.cmd.virtio.req_idx); ++ ++ smsession = smdev->smsessions[io_req->gfunc_id]; ++ if (smsession == NULL || smsession->started == false) { ++ if (!ssam_dev_forward_req(io_req)) { ++ SPDK_INFOLOG(ssam, "%s: not have gfunc_id %u yet in io request.\n", ++ smdev->name, io_req->gfunc_id); ++ ssam_dev_io_complete(smdev, io_req, false); ++ } ++ return; ++ } ++ ++ smsession->backend->request_worker(smsession, io_req); ++ return; ++} ++ ++static void ++ssam_io_wait_r_queue_handle(struct spdk_ssam_dev *smdev) ++{ ++ uint64_t count = 0; ++ uint64_t io_wait_r_cnt = smdev->io_wait_r_cnt > SSAM_MAX_REQ_POLL_SIZE ? 
SSAM_MAX_REQ_POLL_SIZE : ++ smdev->io_wait_r_cnt; ++ while (count < io_wait_r_cnt) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = TAILQ_FIRST(&smdev->io_wait_queue_r); ++ ssam_session_remove_io_wait_r(smdev, io_wait_r); ++ if (io_wait_r->cb_fn != NULL) { ++ io_wait_r->cb_fn(io_wait_r->cb_arg); ++ } ++ count++; ++ free(io_wait_r); ++ io_wait_r = NULL; ++ } ++} ++ ++static int ++ssam_dev_request_worker(void *arg) ++{ ++ int io_num; ++ struct ssam_request *io_req[SSAM_MAX_REQ_POLL_SIZE] = {0}; ++ struct spdk_ssam_dev *smdev = arg; ++ bool poll_busy_flag = false; ++ ++ if (spdk_unlikely(smdev->io_wait_r_cnt > 0)) { ++ ssam_io_wait_r_queue_handle(smdev); ++ poll_busy_flag = true; ++ } ++ ++ /* The I/O waiting due to insufficient memory needs to be processed first. */ ++ if (spdk_unlikely(smdev->io_wait_cnt > 0)) { ++ ssam_io_queue_handle(smdev); ++ return SPDK_POLLER_BUSY; ++ } ++ ++ io_num = ssam_request_poll(smdev->tid, SSAM_MAX_REQ_POLL_SIZE, io_req); ++ if ((io_num <= 0) || (io_num > SSAM_MAX_REQ_POLL_SIZE)) { ++ /* ++ * The rpc delete callback is registered when the bdev deleting. spdk_put_io_channel ++ * executed the RPC delete callback.The stdev_io_no_data_request function continuously ++ * determines whether to perform the spdk_put_io_channel operation to ensure that the ++ * deletion of the bdev does not time out. ++ */ ++ if (spdk_unlikely(smdev->io_wait_r_cnt > 0)) { ++ ssam_io_wait_r_queue_handle(smdev); ++ poll_busy_flag = true; ++ } ++ return poll_busy_flag == true ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE; ++ } ++ ++ if (spdk_unlikely(smdev->io_wait_r_cnt > 0)) { ++ ssam_io_wait_r_queue_handle(smdev); ++ } ++ ++ for (int i = 0; i < io_num; i++) { ++ ssam_dev_io_request(smdev, io_req[i]); ++ } ++ ++ return SPDK_POLLER_BUSY; ++} ++ ++static void ++ssam_dev_io_response(struct spdk_ssam_dev *smdev, const struct ssam_dma_rsp *dma_rsp) ++{ ++ struct spdk_ssam_session *smsession = NULL; ++ const struct spdk_ssam_dma_cb *dma_cb = (const struct spdk_ssam_dma_cb *)&dma_rsp->cb; ++ ++ SPDK_INFOLOG(ssam_blk_data, ++ "handle dma resp tid=%u gfunc_id=%u rw=%u vqid=%u task_idx=%u statuc=%u.\n", ++ smdev->tid, dma_cb->gfunc_id, dma_cb->req_dir, ++ dma_cb->vq_idx, dma_cb->task_idx, dma_cb->status); ++ ++ smsession = smdev->smsessions[dma_cb->gfunc_id]; ++ if (smsession == NULL) { ++ smdev->discard_io_num++; ++ SPDK_ERRLOG("smsessions not have gfunc_id %u yet in io response.\n", dma_cb->gfunc_id); ++ return; ++ } ++ ++ smsession->backend->response_worker(smsession, (void *)dma_rsp); ++ ++ return; ++} ++ ++static void ++ssam_dev_print_stuck_io(struct spdk_ssam_dev *smdev) ++{ ++ struct spdk_ssam_session *smsession = NULL; ++ int i; ++ ++ for (i = 0; i < SSAM_MAX_SESSION_PER_DEV; i++) { ++ smsession = smdev->smsessions[i]; ++ if (smsession == NULL) { ++ continue; ++ } ++ if (smsession->task_cnt > 0) { ++ SPDK_ERRLOG("%s: %d IO stuck for %ds\n", smsession->name, ++ smsession->task_cnt, IO_STUCK_TIMEOUT); ++ if (smsession->backend->print_stuck_io_info != NULL) { ++ smsession->backend->print_stuck_io_info(smsession); ++ } ++ } ++ } ++} ++ ++static void ++ssam_dev_io_stuck_check(struct spdk_ssam_dev *smdev) ++{ ++ uint64_t diff_tsc = spdk_get_ticks() - smdev->io_stuck_tsc; ++ ++ if (smdev->io_num == 0) { ++ smdev->io_stuck_tsc = spdk_get_ticks(); ++ return; ++ } ++ ++ if ((diff_tsc / IO_STUCK_TIMEOUT) > spdk_get_ticks_hz()) { ++ ssam_dev_print_stuck_io(smdev); ++ smdev->io_stuck_tsc = spdk_get_ticks(); ++ } ++} ++ ++void ++ssam_dev_io_dec(struct spdk_ssam_dev *smdev) ++{ ++ smdev->io_num--; ++} ++ 
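++/* Poller callback that drains completed DMA responses for this device.
++ * Responses are polled in batches of up to SSAM_MAX_RESP_POLL_SIZE, and the
++ * loop repeats while full batches keep coming back, so a busy device is not
++ * limited to a single batch per poller iteration. Completions that exceed the
++ * number of outstanding I/Os (smdev->io_num) are treated as discarded.
++ */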
++static int ++ssam_dev_response_worker(void *arg) ++{ ++ int io_num; ++ struct spdk_ssam_dev *smdev = arg; ++ struct ssam_dma_rsp dma_rsp[SSAM_MAX_RESP_POLL_SIZE] = {0}; ++ bool poll_busy_flag = false; ++ ++ uint64_t ticks = spdk_get_ticks(); ++ if (smdev->stat.poll_cur_tsc == 0) { ++ smdev->stat.poll_cur_tsc = ticks; ++ } else { ++ smdev->stat.poll_tsc += ticks - smdev->stat.poll_cur_tsc; ++ smdev->stat.poll_count++; ++ smdev->stat.poll_cur_tsc = ticks; ++ } ++ ++ do { ++ io_num = ssam_dma_rsp_poll(smdev->tid, SSAM_MAX_RESP_POLL_SIZE, dma_rsp); ++ if (io_num <= 0 || io_num > SSAM_MAX_RESP_POLL_SIZE) { ++ ssam_dev_io_stuck_check(smdev); ++ return poll_busy_flag == true ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE; ++ } ++ ++ if (smdev->io_num < ((uint64_t)(uint32_t)io_num)) { ++ SPDK_ERRLOG("%s: DMA response IO num too much, should be %lu but %d\n", ++ smdev->name, smdev->io_num, io_num); ++ smdev->discard_io_num += io_num; ++ return poll_busy_flag == true ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE; ++ } ++ smdev->io_stuck_tsc = spdk_get_ticks(); ++ ++ for (int i = 0; i < io_num; i++) { ++ ssam_dev_io_response(smdev, dma_rsp + i); ++ } ++ poll_busy_flag = true; ++ } while (io_num == SSAM_MAX_RESP_POLL_SIZE); ++ ++ return SPDK_POLLER_BUSY; ++} ++ ++int ++ssam_dev_register_worker_poller(struct spdk_ssam_dev *smdev) ++{ ++ SPDK_NOTICELOG("%s: worker starting.\n", smdev->name); ++ if (smdev->requestq_poller == NULL) { ++ smdev->requestq_poller = SPDK_POLLER_REGISTER(ssam_dev_request_worker, smdev, 0); ++ if (smdev->requestq_poller == NULL) { ++ SPDK_WARNLOG("%s: stdev_request_worker start failed.\n", smdev->name); ++ return -1; ++ } ++ ++ SPDK_INFOLOG(ssam, "%s: started stdev_request_worker poller on lcore %d\n", ++ smdev->name, spdk_env_get_current_core()); ++ } ++ ++ if (smdev->responseq_poller == NULL) { ++ smdev->responseq_poller = SPDK_POLLER_REGISTER(ssam_dev_response_worker, smdev, 0); ++ if (smdev->responseq_poller == NULL) { ++ SPDK_WARNLOG("%s: stdev_response_worker start failed.\n", smdev->name); ++ return -1; ++ } ++ ++ SPDK_INFOLOG(ssam, "%s: started stdev_response_worker poller on lcore %d\n", ++ smdev->name, spdk_env_get_current_core()); ++ } ++ return 0; ++} ++ ++void ++ssam_dev_unregister_worker_poller(struct spdk_ssam_dev *smdev) ++{ ++ if (!ssam_sessions_empty(smdev->smsessions)) { ++ return; ++ } ++ ++ if (smdev->requestq_poller != NULL) { ++ spdk_poller_unregister(&smdev->requestq_poller); ++ smdev->requestq_poller = NULL; ++ } ++ ++ if (smdev->responseq_poller != NULL) { ++ spdk_poller_unregister(&smdev->responseq_poller); ++ smdev->responseq_poller = NULL; ++ } ++} ++/* When stopping the worker, need to stop the two pollers first ++ * and wait until all sessions are deleted, and then free smdev. ++ */ ++static int ++ssam_dev_stop_poller(void *arg) ++{ ++ struct spdk_ssam_dev *smdev = arg; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ /* special processing is required for virtio-scsi, ++ * because In scsi scenarios, smsessions are not actively or passively removed. 
++	 */
++	if (smdev->type == VIRTIO_TYPE_SCSI && smdev->active_session_num > 0) {
++		for (int i = 0; i < SSAM_MAX_SESSION_PER_DEV; i++) {
++			if (smdev->smsessions[i] != NULL) {
++				smsession = smdev->smsessions[i];
++				smsession->backend->remove_self(smsession); /* remove session */
++			}
++		}
++	}
++
++	/* Wait until all sessions are removed */
++	if (smdev->active_session_num != 0) {
++		return SPDK_POLLER_BUSY;
++	}
++
++	/* Free the smdev resources */
++	ssam_dev_unregister(&smdev);
++
++	return SPDK_POLLER_BUSY;
++}
++
++static void
++ssam_dev_stop_worker_poller(void *args)
++{
++	struct spdk_ssam_dev *smdev = (struct spdk_ssam_dev *)args;
++
++	if (smdev->requestq_poller != NULL) {
++		spdk_poller_unregister(&smdev->requestq_poller);
++		smdev->requestq_poller = NULL;
++	}
++
++	if (smdev->responseq_poller != NULL) {
++		spdk_poller_unregister(&smdev->responseq_poller);
++		smdev->responseq_poller = NULL;
++	}
++
++	SPDK_NOTICELOG("%s: poller stopped.\n", smdev->name);
++	smdev->stop_poller = SPDK_POLLER_REGISTER(ssam_dev_stop_poller, smdev, 0);
++	if (smdev->stop_poller == NULL) {
++		SPDK_WARNLOG("%s: ssam_dev stop failed.\n", smdev->name);
++	}
++}
++/* When starting the worker, the two pollers need to be started first */
++static void
++ssam_dev_start_worker_poller(void *args)
++{
++	struct spdk_ssam_dev *smdev = (struct spdk_ssam_dev *)args;
++	ssam_dev_register_worker_poller(smdev);
++}
++
++static void
++ssam_send_event_response(struct ssam_session_fn_ctx *ev_ctx)
++{
++	if (ev_ctx->user_ctx.session_freed == true) {
++		goto out;
++	}
++
++	if (*ev_ctx->rsp_fn != NULL) {
++		(*ev_ctx->rsp_fn)(ev_ctx->rsp_ctx, ev_ctx->rsp);
++		*ev_ctx->rsp_fn = NULL;
++	}
++
++out:
++	/* ev_ctx was allocated by another thread */
++	free(ev_ctx);
++	ev_ctx = NULL;
++}
++
++static void
++ssam_check_send_event_timeout(struct ssam_session_fn_ctx *ev_ctx, spdk_msg_fn fn)
++{
++	uint64_t diff_tsc = spdk_get_ticks() - ev_ctx->start_tsc;
++	struct spdk_ssam_session *smsession = ev_ctx->smsession;
++
++	if ((diff_tsc / SEND_EVENT_WAIT_TIME) > spdk_get_ticks_hz()) {
++		/* If it times out, finish sending the message and end the process */
++		SPDK_ERRLOG("Send event to session %s time out.\n", smsession->name);
++		ev_ctx->rsp = -ETIMEDOUT;
++		ssam_send_event_response(ev_ctx);
++		return;
++	}
++
++	spdk_thread_send_msg(spdk_get_thread(), fn, (void *)ev_ctx);
++
++	return;
++}
++
++static void
++ssam_send_event_finish(void *ctx)
++{
++	struct ssam_session_fn_ctx *ev_ctx = ctx;
++	struct spdk_ssam_session *smsession = ev_ctx->smsession;
++
++	if ((ev_ctx->rsp == 0) && (ev_ctx->need_async) && (ev_ctx->user_ctx.async_done == false)) {
++		ssam_check_send_event_timeout(ev_ctx, ssam_send_event_finish);
++		return;
++	}
++
++	if (ssam_trylock() != 0) {
++		ssam_check_send_event_timeout(ev_ctx, ssam_send_event_finish);
++		return;
++	}
++
++	if (smsession->pending_async_op_num > 0) {
++		smsession->pending_async_op_num--;
++	} else {
++		SPDK_ERRLOG("[OFFLOAD_SNIC] smsession %s: internal error.\n", smsession->name);
++	}
++
++	/* If ev_ctx->cb_fn processing failed, ev_ctx->cpl_fn will not execute */
++	if ((ev_ctx->rsp == 0) && (ev_ctx->cpl_fn != NULL)) {
++		ev_ctx->cpl_fn(smsession, &ev_ctx->user_ctx.ctx);
++	}
++
++	ssam_unlock();
++
++	ssam_send_event_response(ev_ctx);
++}
++
++static void
++ssam_send_event(void *ctx)
++{
++	struct ssam_session_fn_ctx *ev_ctx = ctx;
++	struct spdk_ssam_session *smsession = ev_ctx->smsession;
++
++	if (ssam_trylock() != 0) {
++		ssam_check_send_event_timeout(ev_ctx, ssam_send_event);
++		return;
++	}
++
++	if (smsession->initialized && (ev_ctx->cb_fn != NULL)) 
{ ++ ev_ctx->user_ctx.async_done = false; ++ ev_ctx->rsp = ev_ctx->cb_fn(smsession, &ev_ctx->user_ctx.ctx); ++ } else { ++ ev_ctx->rsp = 0; ++ ev_ctx->user_ctx.async_done = true; ++ } ++ ++ ssam_unlock(); ++ /* The judgment logic is used to adapt to the hot-restart. ++ * Because the session has been released during the hot restart, ++ * the following ssam_send_event_finish is not required. ++ */ ++ if (ev_ctx->user_ctx.session_freed) { ++ free(ev_ctx); ++ return; ++ } else { ++ ev_ctx->start_tsc = spdk_get_ticks(); ++ spdk_thread_send_msg(g_ssam_init_thread, ssam_send_event_finish, ctx); ++ } ++} ++ ++static spdk_ssam_session_rsp_fn g_rsp_fn = NULL; ++ ++int ++ssam_send_event_to_session(struct spdk_ssam_session *smsession, spdk_ssam_session_fn fn, ++ spdk_ssam_session_cpl_fn cpl_fn, struct spdk_ssam_send_event_flag send_event_flag, void *ctx) ++{ ++ struct ssam_session_fn_ctx *ev_ctx; ++ int rc; ++ ++ ev_ctx = calloc(1, sizeof(*ev_ctx)); ++ if (ev_ctx == NULL) { ++ SPDK_ERRLOG("Failed to alloc ssam event.\n"); ++ return -ENOMEM; ++ } ++ ++ ev_ctx->smsession = smsession; ++ ev_ctx->cb_fn = fn; ++ ev_ctx->cpl_fn = cpl_fn; ++ ev_ctx->need_async = send_event_flag.need_async; ++ if (send_event_flag.need_rsp == true) { ++ ev_ctx->rsp_fn = &smsession->rsp_fn; ++ ev_ctx->rsp_ctx = smsession->rsp_ctx; ++ } else { ++ ev_ctx->rsp_fn = &g_rsp_fn; ++ ev_ctx->rsp_ctx = NULL; ++ } ++ ++ ev_ctx->user_ctx.ctx = ctx; ++ ev_ctx->user_ctx.session_freed = false; ++ ++ if (smsession->pending_async_op_num < UINT32_MAX) { ++ smsession->pending_async_op_num++; ++ } else { ++ SPDK_ERRLOG("[OFFLOAD_SNIC] smsession %s: internel error, events stuck too much\n", ++ smsession->name); ++ } ++ ++ ev_ctx->start_tsc = spdk_get_ticks(); ++ rc = spdk_thread_send_msg(smsession->thread, ssam_send_event, ev_ctx); ++ if (rc != 0) { ++ SPDK_ERRLOG("send thread msg failed\n"); ++ free(ev_ctx); ++ return rc; ++ } ++ return 0; ++} ++ ++void ++spdk_ssam_config_json(struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ spdk_json_write_array_begin(w); ++ ++ ssam_lock(); ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ while (smsession != NULL) { ++ smsession->backend->write_config_json(smsession, w); ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ ++ smdev = ssam_dev_next(smdev); ++ } ++ ++ ssam_unlock(); ++ ++ spdk_json_write_array_end(w); ++} ++ ++int ++ssam_get_config(struct spdk_ssam_session *smsession, uint8_t *config, ++ uint32_t len, uint16_t queues) ++{ ++ const struct spdk_ssam_session_backend *backend = smsession->backend; ++ ++ if (backend->ssam_get_config == NULL) { ++ return -1; ++ } ++ ++ return backend->ssam_get_config(smsession, config, len, queues); ++} ++ ++struct dev_destroy_ctx { ++ struct spdk_ssam_session *smsession; ++ void *args; ++}; ++ ++static void ++ssam_dev_destroy(void *arg) ++{ ++ struct dev_destroy_ctx *ctx = (struct dev_destroy_ctx *)arg; ++ ctx->smsession->backend->destroy_bdev_device(ctx->smsession, ctx->args); ++ free(ctx); ++} ++ ++void ++ssam_send_dev_destroy_msg(struct spdk_ssam_session *smsession, void *args) ++{ ++ struct dev_destroy_ctx *ctx = calloc(1, sizeof(struct dev_destroy_ctx)); ++ if (ctx == NULL) { ++ SPDK_ERRLOG("%s: out of memory, destroy dev failed\n", smsession->name); ++ return; ++ } ++ ctx->smsession = smsession; ++ ctx->args = args; ++ spdk_thread_send_msg(g_ssam_init_thread, ssam_dev_destroy, ctx); ++} ++ 
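++/*
++ * Walks every registered smdev under the global ssam lock and sends a
++ * message to the smdev's own thread so that its request/response worker
++ * pollers get registered there (see ssam_dev_start_worker_poller()).
++ */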
++void
++spdk_ssam_poller_start(void)
++{
++	struct spdk_ssam_dev *smdev = NULL;
++	struct spdk_ssam_dev *tmp = NULL;
++	ssam_lock();
++	smdev = ssam_dev_next(NULL);
++	while (smdev != NULL) {
++		tmp = ssam_dev_next(smdev);
++		/* Send the message to each smdev to start the worker on the smdev. */
++		spdk_thread_send_msg(smdev->thread, ssam_dev_start_worker_poller, smdev);
++		smdev = tmp;
++	}
++	ssam_unlock();
++}
++
++static void
++ssam_fini(void *arg)
++{
++	struct spdk_ssam_dev *smdev = NULL;
++	struct spdk_ssam_dev *tmp = NULL;
++	SPDK_WARNLOG("ssam is finishing\n");
++	ssam_lock();
++	smdev = ssam_dev_next(NULL);
++	while (smdev != NULL) {
++		tmp = ssam_dev_next(smdev);
++		/* Send the message to each smdev to stop the worker on the smdev. */
++		spdk_thread_send_msg(smdev->thread, ssam_dev_stop_worker_poller, smdev);
++		smdev = tmp;
++	}
++	ssam_unlock();
++
++	spdk_cpuset_zero(&g_ssam_core_mask);
++
++	g_ssam_fini_cpl_cb();
++}
++
++static void *
++ssam_session_shutdown(void *arg)
++{
++	SPDK_INFOLOG(ssam, "ssam session exiting\n");
++	spdk_thread_send_msg(g_ssam_init_thread, ssam_fini, NULL);
++
++	return NULL;
++}
++
++void
++spdk_ssam_subsystem_fini(spdk_ssam_fini_cb fini_cb)
++{
++	if (spdk_get_thread() != g_ssam_init_thread) {
++		SPDK_ERRLOG("ssam finish thread is not the init thread, internal error\n");
++	}
++
++	g_ssam_fini_cpl_cb = fini_cb;
++
++	ssam_session_shutdown(NULL);
++}
++
++void
++spdk_ssam_subsystem_init(spdk_ssam_init_cb init_cb)
++{
++	uint32_t i;
++	int ret;
++	int shm_id;
++
++	g_ssam_init_thread = spdk_get_thread();
++	if (g_ssam_init_thread == NULL) {
++		ret = -EBUSY;
++		SPDK_ERRLOG("get thread error\n");
++		goto exit;
++	}
++
++	/* init ssam core mask */
++	spdk_cpuset_zero(&g_ssam_core_mask);
++	SPDK_ENV_FOREACH_CORE(i) {
++		spdk_cpuset_set_cpu(&g_ssam_core_mask, i, true);
++	}
++
++	ret = ssam_set_core_num(spdk_cpuset_count(&g_ssam_core_mask));
++	if (ret != 0) {
++		goto exit;
++	}
++
++	ret = ssam_init();
++	if (ret != 0) {
++		goto exit;
++	}
++
++	if (!spdk_ssam_get_shm_created()) {
++		shm_id = shm_open(SSAM_SHM, O_CREAT | O_EXCL | O_RDWR, SSAM_SHM_PERMIT);
++		if (shm_id < 0) {
++			SPDK_ERRLOG("failed to create shared memory %s\n", SSAM_SHM);
++			ret = -1;
++			goto exit;
++		}
++		spdk_ssam_set_shm_created(true);
++	}
++
++exit:
++	init_cb(ret);
++	return;
++}
++
++/* Initialize all smdev modules during submodule initialization. 
*/ ++static int ++ssam_smdev_init(void) ++{ ++ int rc = 0; ++ struct spdk_ssam_dev *smdev; ++ struct spdk_ssam_dev *tmp = NULL; ++ uint16_t core_num = ssam_get_core_num(); ++ for (uint16_t i = 0; i < core_num; ++i) { ++ rc = ssam_dev_register(&smdev, i); ++ if (rc != 0) { ++ goto out; ++ } ++ } ++ ++ rc = ssam_get_hot_upgrade_state(); ++ if (rc != 0) { ++ SPDK_ERRLOG(": virtio upgrade state failed.\n"); ++ return rc; ++ } ++ ++ return 0; ++out: ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ tmp = ssam_dev_next(smdev); ++ ssam_dev_unregister(&smdev); ++ smdev = tmp; ++ } ++ return rc; ++} ++ ++static int ++ssam_server_init(void) ++{ ++ uint32_t core_num = ssam_get_core_num(); ++ uint32_t mempool_size = (ssam_get_mempool_size() / core_num) & (~0U - 1); ++ uint32_t i; ++ ++ /* Disable dummy I/O for hot restart */ ++ ++ for (i = 0; i < core_num; i++) { ++ g_ssam_info.mp[i] = ssam_mempool_create(mempool_size * SSAM_MB, SSAM_DEFAULT_MEMPOOL_EXTRA_SIZE); ++ if (g_ssam_info.mp[i] == NULL) { ++ SPDK_ERRLOG("ssam create mempool[%d] failed, mempool_size = %uMB.\n", i, mempool_size); ++ return -ENOMEM; ++ } ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_server_exit(void) ++{ ++ uint32_t core_num = ssam_get_core_num(); ++ uint32_t i; ++ ++ for (i = 0; i < core_num; i++) { ++ if (g_ssam_info.mp[i] != NULL) { ++ ssam_mempool_destroy(g_ssam_info.mp[i]); ++ g_ssam_info.mp[i] = NULL; ++ } ++ } ++ ++ memset(&g_ssam_info, 0x0, sizeof(struct spdk_ssam_info)); ++} ++ ++ ++static int ++ssam_check_device_status(void) ++{ ++ uint8_t ready = 0; ++ int times = 0; ++ int rc; ++ ++ do { ++ rc = ssam_check_device_ready(0, 0, &ready); ++ if (rc != 0) { ++ SPDK_ERRLOG("device check failed.\n"); ++ return rc; ++ } ++ ++ if (ready != 0) { ++ break; ++ } ++ ++ usleep(DEVICE_READY_WAIT_TIME); ++ times++; ++ } while (times < DEVICE_READY_TIMEOUT); ++ ++ if (ready == 0) { ++ SPDK_ERRLOG("device has not been ready after 1.5s.\n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++ ++static int ++ssam_init(void) ++{ ++ int rc; ++ ++ rc = ssam_check_device_status(); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ rc = ssam_config_init(); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ rc = ssam_server_init(); ++ if (rc != 0) { ++ ssam_config_exit(); ++ return rc; ++ } ++ ++ rc = ssam_smdev_init(); ++ if (rc != 0) { ++ ssam_server_exit(); ++ ssam_config_exit(); ++ } ++ ++ return rc; ++} ++ ++void ++spdk_ssam_exit(void) ++{ ++ ssam_deinit_device_pcie_list(); ++ ssam_config_exit(); ++ ssam_server_exit(); ++} ++ ++SPDK_LOG_REGISTER_COMPONENT(ssam) +diff --git a/lib/ssam/ssam_blk.c b/lib/ssam/ssam_blk.c +new file mode 100644 +index 0000000..98fd56e +--- /dev/null ++++ b/lib/ssam/ssam_blk.c +@@ -0,0 +1,2346 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. 
++ */ ++ ++#include ++#include ++ ++#include "spdk/env.h" ++#include "spdk/bdev.h" ++#include "spdk/bdev_module.h" ++#include "spdk/thread.h" ++#include "spdk/likely.h" ++#include "spdk/string.h" ++#include "spdk/util.h" ++ ++#include "ssam_internal.h" ++ ++#define SESSION_STOP_POLLER_PERIOD 1000 ++#define ENQUEUE_TIMES_PER_IO 1000 ++ ++#define IOV_HEADER_TAIL_NUM 2 ++ ++#define SECTOR_SIZE 512 ++#define ALIGNMENT_2M (2048 * 1024) ++#define SERIAL_STRING_LEN 128 ++#define SMSESSION_STOP_TIMEOUT 2 /* s */ ++#define PERF_STAT ++ ++/* Related to (SPDK_SSAM_IOVS_MAX * SPDK_SSAM_MAX_SEG_SIZE) */ ++#define PAYLOAD_SIZE_MAX (2048U * 2048) ++ ++#define RETRY_TIMEOUT 120 ++ ++/* Minimal set of features supported by every virtio-blk device */ ++#define SPDK_SSAM_BLK_FEATURES_BASE (SPDK_SSAM_FEATURES | \ ++ (1ULL << VIRTIO_BLK_F_SIZE_MAX) | (1ULL << VIRTIO_BLK_F_SEG_MAX) | \ ++ (1ULL << VIRTIO_BLK_F_GEOMETRY) | (1ULL << VIRTIO_BLK_F_BLK_SIZE) | \ ++ (1ULL << VIRTIO_BLK_F_TOPOLOGY) | (1ULL << VIRTIO_BLK_F_BARRIER) | \ ++ (1ULL << VIRTIO_BLK_F_SCSI) | (1ULL << VIRTIO_BLK_F_CONFIG_WCE) | \ ++ (1ULL << VIRTIO_BLK_F_MQ)) ++ ++extern bool g_ssam_subsystem_exit; ++ ++static int gfunc_session_number = 0; ++extern int delete_flag; ++extern int delete_dev_times[2000]; ++static uint8_t g_blk_set_times[2000] = {0}; ++ ++struct ssam_task_stat { ++ uint64_t start_tsc; ++ uint64_t dma_start_tsc; ++ uint64_t dma_end_tsc; ++ uint64_t bdev_start_tsc; ++ uint64_t bdev_func_tsc; ++ uint64_t bdev_end_tsc; ++ uint64_t complete_start_tsc; ++ uint64_t complete_end_tsc; ++}; ++ ++struct spdk_ssam_blk_task { ++ /* Returned status of I/O processing, it can be VIRTIO_BLK_S_OK, ++ * VIRTIO_BLK_S_IOERR or VIRTIO_BLK_S_UNSUPP ++ */ ++ volatile uint8_t *status; ++ ++ /* Number of bytes processed successfully */ ++ uint32_t used_len; ++ ++ /* Records the amount of valid data in the struct iovec iovs array. */ ++ uint32_t iovcnt; ++ struct ssam_iovec iovs; ++ ++ /* If set, the task is currently used for I/O processing. 
*/ ++ bool used; ++ ++ /* For bdev io wait */ ++ struct spdk_bdev_io_wait_entry bdev_io_wait; ++ struct spdk_ssam_session_io_wait session_io_wait; ++ struct spdk_ssam_blk_session *bsmsession; ++ ++ /* Size of whole payload in bytes */ ++ uint32_t payload_size; ++ ++ /* ssam request data */ ++ struct ssam_request *io_req; ++ ++ uint16_t vq_idx; ++ uint16_t req_idx; ++ uint16_t task_idx; ++ struct ssam_task_stat task_stat; ++}; ++ ++struct ssam_blk_stat { ++ uint64_t count; ++ uint64_t start_count; ++ uint64_t total_tsc; /* pre_dma <- -> post_return */ ++ uint64_t dma_tsc; /* pre_dma <- -> post_dma */ ++ uint64_t dma_count; ++ uint64_t dma_complete_count; ++ uint64_t bdev_tsc; /* pre_bdev <- -> post_bdev */ ++ uint64_t bdev_submit_tsc; /* <- spdk_bdev_xxx -> */ ++ uint64_t bdev_count; ++ uint64_t bdev_complete_count; ++ uint64_t complete_tsc; /* pre_return <- -> post_return */ ++ uint64_t internel_tsc; /* total_tsc - dma_tsc - bdev_tsc - complete_tsc */ ++ ++ uint64_t complete_read_ios; /* Number of successfully completed read requests */ ++ uint64_t err_read_ios; /* Number of failed completed read requests */ ++ uint64_t complete_write_ios; /* Number of successfully completed write requests */ ++ uint64_t err_write_ios; /* Number of failed completed write requests */ ++ uint64_t flush_ios; /* Total number of flush requests */ ++ uint64_t complete_flush_ios; /* Number of successfully completed flush requests */ ++ uint64_t err_flush_ios; /* Number of failed completed flush requests */ ++ uint64_t other_ios; ++ uint64_t complete_other_ios; ++ uint64_t err_other_ios; ++ uint64_t fatal_ios; /* Number of discarded requests */ ++ uint64_t io_retry; ++}; ++ ++struct spdk_ssam_blk_session { ++ /* The parent session must be the very first field in this struct */ ++ struct spdk_ssam_session smsession; ++ struct spdk_poller *stop_poller; ++ struct spdk_bdev *bdev; ++ struct spdk_bdev_desc *bdev_desc; ++ struct spdk_io_channel *io_channel; ++ ++ /* volume id */ ++ char *serial; ++ ++ /* accumulated I/O statistics */ ++ struct spdk_bdev_io_stat stat; ++ ++ /* Current count of bdev operations for hot-restart. */ ++ int32_t bdev_count; ++ ++ /* poller for waiting bdev finish when hot-restart */ ++ struct spdk_poller *stop_bdev_poller; ++ struct spdk_poller *stop_session_poller; ++ ++ /* controller statistics. */ ++ struct ssam_blk_stat blk_stat; ++ ++ /* accumulated I/O statistics */ ++ struct spdk_bdev_io_stat vq_stat[SPDK_SSAM_MAX_VQUEUES]; ++ ++ /* controller vq statistics. 
*/ ++ struct ssam_blk_stat vq_blk_stat[SPDK_SSAM_MAX_VQUEUES]; ++ ++ /* if set, all writes to the device will fail with ++ * VIRTIO_BLK_S_IOERR error code ++ */ ++ bool readonly; ++ ++ /* if set, indicate the session not have a bdev, all writes to the device ++ * will fail with VIRTIO_BLK_S_IOERR error code ++ */ ++ bool no_bdev; ++ ++ bool need_write_config; ++}; ++ ++struct ssam_blk_session_ctx { ++ struct spdk_ssam_blk_session *bsmsession; ++ void **user_ctx; ++}; ++ ++static const struct spdk_ssam_session_backend g_ssam_blk_session_backend; ++static int ssam_blk_remove_session(struct spdk_ssam_session *smsession); ++static void ssam_blk_request_worker(struct spdk_ssam_session *smsession, void *arg); ++static void ssam_blk_destroy_bdev_device(struct spdk_ssam_session *smsession, void *args); ++static void ssam_blk_response_worker(struct spdk_ssam_session *smsession, void *arg); ++static void ssam_blk_no_data_request_worker(struct spdk_ssam_session *smsession); ++static inline void ssam_request_queue_io(struct spdk_ssam_blk_task *task); ++static void ssam_task_complete(struct spdk_ssam_blk_task *task, uint8_t status); ++static void ssam_data_request_para(struct ssam_dma_request *dma_req, ++ struct spdk_ssam_blk_task *task, uint32_t type, uint8_t status); ++static void ssam_blk_print_stuck_io_info(struct spdk_ssam_session *smsession); ++static int ssam_process_blk_request(struct spdk_ssam_blk_task *task); ++static void ssam_free_task_pool(struct spdk_ssam_blk_session *bsmsession); ++static int ssam_blk_io_complete(struct spdk_ssam_dev *smdev, struct ssam_request *io_req, ++ uint8_t status); ++static void ssam_session_io_resubmit(void *arg); ++ ++static inline struct spdk_ssam_blk_session * ++ssam_to_blk_session(struct spdk_ssam_session *smsession) ++{ ++ return (struct spdk_ssam_blk_session *)smsession; ++} ++ ++static void ++ssam_blk_dump_info_json(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "name", ssam_session_get_name(smsession)); ++ spdk_json_write_named_uint32(w, "function_id", (uint32_t)smsession->gfunc_id); ++ spdk_json_write_named_uint32(w, "queues", (uint32_t)smsession->max_queues); ++ ++ spdk_json_write_named_object_begin(w, "block"); ++ spdk_json_write_named_bool(w, "readonly", bsmsession->readonly); ++ spdk_json_write_name(w, "bdev"); ++ if (bsmsession->bdev != NULL) { ++ spdk_json_write_string(w, spdk_bdev_get_name(bsmsession->bdev)); ++ } else { ++ spdk_json_write_null(w); ++ } ++ spdk_json_write_object_end(w); ++ ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_dev_bdev_remove_cpl_cb(struct spdk_ssam_session *smsession, void **unnused) ++{ ++ /* All sessions have been notified, time to close the bdev */ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ ++ if (bsmsession == NULL) { ++ return; ++ } ++ ++ if (bsmsession->bdev_desc != NULL) { ++ spdk_bdev_close(bsmsession->bdev_desc); ++ bsmsession->bdev_desc = NULL; ++ } ++ ++ /* bdev not create by ssam blk, no need be freed here */ ++ bsmsession->bdev = NULL; ++} ++ ++static void ++ssam_blk_stop_cpl_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ spdk_ssam_session_rsp_fn rsp_fn = smsession->rsp_fn; ++ void *rsp_ctx = smsession->rsp_ctx; ++ int rc; ++ delete_dev_times[smsession->gfunc_id]++; ++ ++ 
ssam_dev_bdev_remove_cpl_cb(smsession, NULL); ++ ++ if (delete_dev_times[smsession->gfunc_id] == ssam_get_core_num()) { ++ rc = ssam_virtio_blk_resize(smsession->gfunc_id, 0); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: virtio blk resize failed when remove session.\n", smsession->name); ++ } ++ ++ if (ssam_get_hash_mode() == SSAM_VQ_HASH_MODE) { ++ rc = ssam_virtio_vq_unbind_core(smsession->gfunc_id); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: virtio blk vq unbind core failed.\n", smsession->name); ++ } ++ } ++ ++ if (smsession->gfunc_id > SSAM_PF_MAX_NUM) { ++ rc = ssam_virtio_blk_release_resource(smsession->gfunc_id); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: virtio blk release vq failed.\n", smsession->name); ++ } ++ } ++ ++ SPDK_NOTICELOG("BLK controller %s deleted\n", smsession->name); ++ } ++ ++ if (smsession->name != NULL) { ++ free(smsession->name); ++ smsession->name = NULL; ++ } ++ ++ ssam_set_session_be_freed(ctx); ++ memset(bsmsession, 0, sizeof(*bsmsession)); ++ free(bsmsession); ++ ++ if (rsp_fn != NULL) { ++ rsp_fn(rsp_ctx, 0); ++ rsp_fn = NULL; ++ } ++} ++ ++static void ++ssam_task_stat_tick(uint64_t *tsc) ++{ ++#ifdef PERF_STAT ++ *tsc = spdk_get_ticks(); ++#endif ++ return; ++} ++ ++static void ++ssam_blk_stat_statistics(struct spdk_ssam_blk_task *task, struct spdk_bdev_io_stat *stat, ++ struct ssam_blk_stat *blk_stat, uint8_t status) ++{ ++#ifdef PERF_STAT ++ uint64_t dma_tsc = task->task_stat.dma_end_tsc - task->task_stat.dma_start_tsc; ++ uint64_t bdev_tsc = task->task_stat.bdev_end_tsc - task->task_stat.bdev_start_tsc; ++ uint64_t bdev_submit_tsc = task->task_stat.bdev_func_tsc - task->task_stat.bdev_start_tsc; ++ uint64_t complete_tsc = task->task_stat.complete_end_tsc - task->task_stat.complete_start_tsc; ++ uint64_t total_tsc = task->task_stat.complete_end_tsc - task->task_stat.start_tsc; ++ struct virtio_blk_outhdr *req = (struct virtio_blk_outhdr *)task->io_req->req.cmd.header; ++ ++ if (req->type == VIRTIO_BLK_T_IN) { /* read */ ++ stat->read_latency_ticks += total_tsc; ++ stat->bytes_read += task->payload_size; ++ stat->num_read_ops++; ++ if (status == VIRTIO_BLK_S_OK) { ++ blk_stat->complete_read_ios++; ++ } else { ++ blk_stat->err_read_ios++; ++ } ++ } else if (req->type == VIRTIO_BLK_T_OUT) { /* write */ ++ stat->write_latency_ticks += total_tsc; ++ stat->bytes_written += task->payload_size; ++ stat->num_write_ops++; ++ if (status == VIRTIO_BLK_S_OK) { ++ blk_stat->complete_write_ios++; ++ } else { ++ blk_stat->err_write_ios++; ++ } ++ } else if (req->type == VIRTIO_BLK_T_FLUSH) { /* flush */ ++ blk_stat->flush_ios++; ++ if (status == VIRTIO_BLK_S_OK) { ++ blk_stat->complete_flush_ios++; ++ } else { ++ blk_stat->err_flush_ios++; ++ } ++ } else { ++ blk_stat->other_ios++; ++ if (status == VIRTIO_BLK_S_OK) { ++ blk_stat->complete_other_ios++; ++ } else { ++ blk_stat->err_other_ios++; ++ } ++ } ++ ++ blk_stat->dma_tsc += dma_tsc; ++ blk_stat->bdev_tsc += bdev_tsc; ++ blk_stat->bdev_submit_tsc += bdev_submit_tsc; ++ blk_stat->complete_tsc += complete_tsc; ++ blk_stat->total_tsc += total_tsc; ++ blk_stat->internel_tsc += total_tsc - complete_tsc - bdev_tsc - dma_tsc; ++ blk_stat->count += 1; ++#endif ++} ++ ++static void ++ssam_blk_configs(uint8_t *config, struct virtio_blk_config *blkcfg, ++ uint32_t len, struct spdk_bdev *bdev) ++{ ++ uint32_t cfg_len; ++ ++ /* minimum I/O size in blocks */ ++ blkcfg->min_io_size = 1; ++ ++ if (bdev && spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_UNMAP)) { ++ /* 32768 sectors is 16MiB, expressed in 512 Bytes */ ++ 
blkcfg->max_discard_sectors = 32768; ++ blkcfg->max_discard_seg = 1; ++ /* expressed in 512 Bytes sectors */ ++ blkcfg->discard_sector_alignment = blkcfg->blk_size / SECTOR_SIZE; ++ } ++ if (bdev && spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES)) { ++ /* 32768 sectors is 16MiB, expressed in 512 Bytes */ ++ blkcfg->max_write_zeroes_sectors = 32768; ++ blkcfg->max_write_zeroes_seg = 1; ++ } ++ ++ cfg_len = sizeof(struct virtio_blk_config); ++ memcpy(config, blkcfg, (unsigned long)spdk_min(len, cfg_len)); ++ if (len < cfg_len) { ++ SPDK_NOTICELOG("Out config len %u < total config len %u\n", len, cfg_len); ++ } ++ ++ return; ++} ++ ++static int ++ssam_blk_get_config(struct spdk_ssam_session *smsession, uint8_t *config, ++ uint32_t len, uint16_t queues) ++{ ++ struct virtio_blk_config blkcfg; ++ struct spdk_ssam_blk_session *bsmsession = NULL; ++ struct spdk_bdev *bdev = NULL; ++ uint32_t blk_size; ++ uint64_t blkcnt; ++ ++ memset(&blkcfg, 0, sizeof(blkcfg)); ++ bsmsession = ssam_to_blk_session(smsession); ++ if (bsmsession == NULL) { ++ SPDK_ERRLOG("session is null.\n"); ++ return -1; ++ } ++ bdev = bsmsession->bdev; ++ if (bdev == NULL) { ++ return -1; ++ } ++ blk_size = spdk_bdev_get_block_size(bdev); ++ blkcnt = spdk_bdev_get_num_blocks(bdev); ++ /* ssam will use this configuration, this is the max capability of ++ * the ssam, configurations will be obtained through negotiation ++ * in the future. ++ */ ++ blkcfg.size_max = SPDK_SSAM_MAX_SEG_SIZE; ++ blkcfg.seg_max = SPDK_SSAM_IOVS_MAX; ++ ++ if (blk_size == 0) { ++ SPDK_ERRLOG("bdev's blk_size %u error.\n", blk_size); ++ return -1; ++ } ++ if (blkcnt > (UINT64_MAX / blk_size)) { ++ SPDK_ERRLOG("bdev's blkcnt %lu or blk_size %u out of range.\n", ++ blkcnt, blk_size); ++ return -1; ++ } ++ blkcfg.blk_size = blk_size; ++ /* expressed in 512 Bytes sectors */ ++ blkcfg.capacity = (blkcnt * blk_size) / 512; ++ blkcfg.num_queues = 1; ++ ssam_blk_configs(config, &blkcfg, len, bdev); ++ ++ return 0; ++} ++ ++static void ++ssam_blk_write_config_json(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ ++ if (bsmsession == NULL || bsmsession->bdev == NULL || bsmsession->need_write_config != true) { ++ return; ++ } ++ ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "method", "create_blk_controller"); ++ ++ spdk_json_write_named_object_begin(w, "params"); ++ spdk_json_write_named_string(w, "dev_name", spdk_bdev_get_name(bsmsession->bdev)); ++ char *gfunc_id = spdk_sprintf_alloc("%u", bsmsession->smsession.gfunc_id); ++ if (gfunc_id == NULL) { ++ SPDK_ERRLOG("alloc for gfunc_id failed\n"); ++ } else { ++ spdk_json_write_named_string(w, "index", gfunc_id); ++ free(gfunc_id); ++ } ++ spdk_json_write_named_bool(w, "readonly", bsmsession->readonly); ++ if (bsmsession->serial != NULL) { ++ spdk_json_write_named_string(w, "serial", bsmsession->serial); ++ } ++ if (bsmsession->smsession.gfunc_id > SSAM_PF_MAX_NUM) { ++ spdk_json_write_named_int32(w, "vqueue", (int32_t)bsmsession->smsession.max_queues); ++ } ++ ++ spdk_json_write_object_end(w); ++ ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_blk_print_iostat_json(struct spdk_ssam_session *smsession, uint16_t vq_idx, ++ struct spdk_bdev_io_stat *stat, ++ struct ssam_blk_stat *blk_stat, struct spdk_json_write_ctx *w) ++{ ++ struct spdk_bdev *bdev = ssam_get_session_bdev(smsession); ++ uint64_t ticks_hz = spdk_get_ticks_hz(); ++ uint64_t poll_count = 
smsession->smdev->stat.poll_count; ++ ++ spdk_json_write_object_begin(w); ++ ++ spdk_json_write_named_uint32(w, "function_id", smsession->gfunc_id); ++ if (vq_idx != SPDK_INVALID_ID) { ++ spdk_json_write_named_uint32(w, "vq_idx", vq_idx); ++ spdk_json_write_named_uint32(w, "tid", smsession->smdev->tid); ++ } ++ if (smsession->smdev->stat.poll_count == 0) { ++ poll_count = 1; ++ } ++ spdk_json_write_named_string_fmt(w, "poll_lat", "%.9f", ++ (float)smsession->smdev->stat.poll_tsc / poll_count / ticks_hz); ++ spdk_json_write_named_string(w, "bdev_name", (bdev == NULL) ? "" : spdk_bdev_get_name(bdev)); ++ spdk_json_write_named_uint64(w, "bytes_read", stat->bytes_read); ++ spdk_json_write_named_uint64(w, "num_read_ops", stat->num_read_ops); ++ spdk_json_write_named_uint64(w, "bytes_written", stat->bytes_written); ++ spdk_json_write_named_uint64(w, "num_write_ops", stat->num_write_ops); ++ spdk_json_write_named_uint64(w, "read_latency_ticks", stat->read_latency_ticks); ++ spdk_json_write_named_uint64(w, "write_latency_ticks", stat->write_latency_ticks); ++ spdk_json_write_named_uint64(w, "complete_read_ios", blk_stat->complete_read_ios); ++ spdk_json_write_named_uint64(w, "err_read_ios", blk_stat->err_read_ios); ++ spdk_json_write_named_uint64(w, "complete_write_ios", blk_stat->complete_write_ios); ++ spdk_json_write_named_uint64(w, "err_write_ios", blk_stat->err_write_ios); ++ spdk_json_write_named_uint64(w, "flush_ios", blk_stat->flush_ios); ++ spdk_json_write_named_uint64(w, "complete_flush_ios", blk_stat->complete_flush_ios); ++ spdk_json_write_named_uint64(w, "err_flush_ios", blk_stat->err_flush_ios); ++ spdk_json_write_named_uint64(w, "other_ios", blk_stat->other_ios); ++ spdk_json_write_named_uint64(w, "complete_other_ios", blk_stat->complete_other_ios); ++ spdk_json_write_named_uint64(w, "err_other_ios", blk_stat->err_other_ios); ++ ++ spdk_json_write_named_uint64(w, "fatal_ios", blk_stat->fatal_ios); ++ spdk_json_write_named_uint64(w, "io_retry", blk_stat->io_retry); ++ spdk_json_write_named_object_begin(w, "counters"); ++ spdk_json_write_named_uint64(w, "start_count", blk_stat->start_count); ++ spdk_json_write_named_uint64(w, "dma_count", blk_stat->dma_count); ++ spdk_json_write_named_uint64(w, "dma_complete_count", blk_stat->dma_complete_count); ++ spdk_json_write_named_uint64(w, "bdev_count", blk_stat->bdev_count); ++ spdk_json_write_named_uint64(w, "bdev_complete_count", blk_stat->bdev_complete_count); ++ spdk_json_write_object_end(w); ++ spdk_json_write_named_object_begin(w, "details"); ++ spdk_json_write_named_uint64(w, "count", blk_stat->count); ++ if (blk_stat->count == 0) { ++ blk_stat->count = 1; ++ } ++ spdk_json_write_named_string_fmt(w, "total_lat", "%.9f", ++ (float)blk_stat->total_tsc / blk_stat->count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "dma_lat", "%.9f", ++ (float)blk_stat->dma_tsc / blk_stat->count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "bdev_lat", "%.9f", ++ (float)blk_stat->bdev_tsc / blk_stat->count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "bdev_submit_lat", "%.9f", ++ (float)blk_stat->bdev_submit_tsc / blk_stat->count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "complete_lat", "%.9f", ++ (float)blk_stat->complete_tsc / blk_stat->count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "internal_lat", "%.9f", ++ (float)blk_stat->internel_tsc / blk_stat->count / ticks_hz); ++ spdk_json_write_object_end(w); ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_blk_function_iostat_sum(uint16_t gfunc_id, struct 
spdk_bdev_io_stat *stat, ++ struct ssam_blk_stat *blk_stat) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_blk_session *bsmsession = NULL; ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ if (smdev->smsessions[gfunc_id] != NULL) { ++ bsmsession = ssam_to_blk_session(smdev->smsessions[gfunc_id]); ++ stat->bytes_read += bsmsession->stat.bytes_read; ++ stat->num_read_ops += bsmsession->stat.num_read_ops; ++ stat->bytes_written += bsmsession->stat.bytes_written; ++ stat->num_write_ops += bsmsession->stat.num_write_ops; ++ stat->bytes_unmapped += bsmsession->stat.bytes_unmapped; ++ stat->num_unmap_ops += bsmsession->stat.num_unmap_ops; ++ stat->bytes_copied += bsmsession->stat.bytes_copied; ++ stat->num_copy_ops += bsmsession->stat.num_copy_ops; ++ stat->read_latency_ticks += bsmsession->stat.read_latency_ticks; ++ stat->max_read_latency_ticks += bsmsession->stat.max_read_latency_ticks; ++ stat->min_read_latency_ticks += bsmsession->stat.min_read_latency_ticks; ++ stat->write_latency_ticks += bsmsession->stat.write_latency_ticks; ++ stat->max_write_latency_ticks += bsmsession->stat.max_write_latency_ticks; ++ stat->min_write_latency_ticks += bsmsession->stat.min_write_latency_ticks; ++ stat->unmap_latency_ticks += bsmsession->stat.unmap_latency_ticks; ++ stat->max_unmap_latency_ticks += bsmsession->stat.max_unmap_latency_ticks; ++ stat->min_unmap_latency_ticks += bsmsession->stat.min_unmap_latency_ticks; ++ stat->copy_latency_ticks += bsmsession->stat.copy_latency_ticks; ++ stat->max_copy_latency_ticks += bsmsession->stat.max_copy_latency_ticks; ++ stat->min_copy_latency_ticks += bsmsession->stat.min_copy_latency_ticks; ++ stat->ticks_rate += bsmsession->stat.ticks_rate; ++ ++ blk_stat->count += bsmsession->blk_stat.count; ++ blk_stat->start_count += bsmsession->blk_stat.start_count; ++ blk_stat->total_tsc += bsmsession->blk_stat.total_tsc; ++ blk_stat->dma_tsc += bsmsession->blk_stat.dma_tsc; ++ blk_stat->dma_count += bsmsession->blk_stat.dma_count; ++ blk_stat->dma_complete_count += bsmsession->blk_stat.dma_complete_count; ++ blk_stat->bdev_tsc += bsmsession->blk_stat.bdev_tsc; ++ blk_stat->bdev_submit_tsc += bsmsession->blk_stat.bdev_submit_tsc; ++ blk_stat->bdev_count += bsmsession->blk_stat.bdev_count; ++ blk_stat->bdev_complete_count += bsmsession->blk_stat.bdev_complete_count; ++ blk_stat->complete_tsc += bsmsession->blk_stat.complete_tsc; ++ blk_stat->internel_tsc += bsmsession->blk_stat.internel_tsc; ++ blk_stat->complete_read_ios += bsmsession->blk_stat.complete_read_ios; ++ blk_stat->err_read_ios += bsmsession->blk_stat.err_read_ios; ++ blk_stat->complete_write_ios += bsmsession->blk_stat.complete_write_ios; ++ blk_stat->err_write_ios += bsmsession->blk_stat.err_write_ios; ++ blk_stat->flush_ios += bsmsession->blk_stat.flush_ios; ++ blk_stat->complete_flush_ios += bsmsession->blk_stat.complete_flush_ios; ++ blk_stat->err_flush_ios += bsmsession->blk_stat.err_flush_ios; ++ blk_stat->other_ios += bsmsession->blk_stat.other_ios; ++ blk_stat->complete_other_ios += bsmsession->blk_stat.complete_other_ios; ++ blk_stat->err_other_ios += bsmsession->blk_stat.err_other_ios; ++ blk_stat->fatal_ios += bsmsession->blk_stat.fatal_ios; ++ blk_stat->io_retry += bsmsession->blk_stat.io_retry; ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ return; ++} ++ ++static void ++ssam_blk_show_iostat_json(struct spdk_ssam_session *smsession, ++ struct spdk_ssam_show_iostat_args *args, ++ struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_blk_session *bsmsession = 
ssam_to_blk_session(smsession); ++ struct spdk_bdev_io_stat stat = {0}; ++ struct ssam_blk_stat blk_stat = {0}; ++ ++ switch (args->mode) { ++ case SSAM_IOSTAT_NORMAL: ++ if (args->id == SPDK_INVALID_ID) { ++ memcpy(&stat, &bsmsession->stat, sizeof(struct spdk_bdev_io_stat)); ++ memcpy(&blk_stat, &bsmsession->blk_stat, sizeof(struct ssam_blk_stat)); ++ } else { ++ memcpy(&stat, &bsmsession->vq_stat[args->id], sizeof(struct spdk_bdev_io_stat)); ++ memcpy(&blk_stat, &bsmsession->vq_blk_stat[args->id], sizeof(struct ssam_blk_stat)); ++ } ++ ssam_blk_print_iostat_json(smsession, args->id, &stat, &blk_stat, w); ++ break; ++ case SSAM_IOSTAT_SUM: ++ if (bsmsession->need_write_config == false) { ++ return; ++ } ++ ssam_blk_function_iostat_sum(smsession->gfunc_id, &stat, &blk_stat); ++ ssam_blk_print_iostat_json(smsession, args->id, &stat, &blk_stat, w); ++ break; ++ case SSAM_IOSTAT_DUMP_VQ: ++ for (int i = 0; i < smsession->max_queues; i++) { ++ if (bsmsession->vq_blk_stat[i].start_count == 0) { ++ continue; ++ } ++ ssam_blk_print_iostat_json(smsession, i, &bsmsession->vq_stat[i], &bsmsession->vq_blk_stat[i], w); ++ } ++ break; ++ case SSAM_IOSTAT_SPARSE: ++ if (args->id == SPDK_INVALID_ID) { ++ memcpy(&stat, &bsmsession->stat, sizeof(struct spdk_bdev_io_stat)); ++ memcpy(&blk_stat, &bsmsession->blk_stat, sizeof(struct ssam_blk_stat)); ++ } else { ++ memcpy(&stat, &bsmsession->vq_stat[args->id], sizeof(struct spdk_bdev_io_stat)); ++ memcpy(&blk_stat, &bsmsession->vq_blk_stat[args->id], sizeof(struct ssam_blk_stat)); ++ } ++ if (blk_stat.start_count == 0) { ++ return; ++ } ++ ssam_blk_print_iostat_json(smsession, args->id, &stat, &blk_stat, w); ++ break; ++ default: ++ break; ++ } ++ return; ++} ++ ++static void ++ssam_blk_clear_iostat_json(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ memset(&bsmsession->stat, 0, sizeof(struct spdk_bdev_io_stat) - sizeof( ++ uint64_t)); /* exclude ticks_rate */ ++ memset(&bsmsession->blk_stat, 0, sizeof(struct ssam_blk_stat)); ++ for (int i = 0; i < smsession->max_queues; i++) { ++ memset(&bsmsession->vq_stat[i], 0, ++ sizeof(struct spdk_bdev_io_stat) - sizeof(uint64_t)); /* exclude ticks_rate */ ++ memset(&bsmsession->vq_blk_stat[i], 0, sizeof(struct ssam_blk_stat)); ++ } ++} ++ ++static struct spdk_bdev * ++ssam_blk_get_bdev(struct spdk_ssam_session *smsession, uint32_t id) ++{ ++ struct spdk_bdev *bdev = ssam_get_session_bdev(smsession); ++ ++ return bdev; ++} ++ ++static const struct spdk_ssam_session_backend g_ssam_blk_session_backend = { ++ .type = VIRTIO_TYPE_BLK, ++ .remove_session = ssam_blk_remove_session, ++ .request_worker = ssam_blk_request_worker, ++ .destroy_bdev_device = ssam_blk_destroy_bdev_device, ++ .response_worker = ssam_blk_response_worker, ++ .no_data_req_worker = ssam_blk_no_data_request_worker, ++ .ssam_get_config = ssam_blk_get_config, ++ .print_stuck_io_info = ssam_blk_print_stuck_io_info, ++ .dump_info_json = ssam_blk_dump_info_json, ++ .write_config_json = ssam_blk_write_config_json, ++ .show_iostat_json = ssam_blk_show_iostat_json, ++ .clear_iostat_json = ssam_blk_clear_iostat_json, ++ .get_bdev = ssam_blk_get_bdev, ++ .remove_self = NULL, ++}; ++ ++/* Clean Smsession */ ++static int ++ssam_destroy_poller_cb(void *arg) ++{ ++ struct spdk_ssam_blk_session *bsmsession = (struct spdk_ssam_blk_session *)arg; ++ struct spdk_ssam_session *smsession = &bsmsession->smsession; ++ struct spdk_ssam_dev *smdev = smsession->smdev; ++ ++ SPDK_NOTICELOG("%s: remaining %u 
tasks\n", smsession->name, smsession->task_cnt);
++
++	/* stop poller */
++	spdk_poller_unregister(&bsmsession->stop_bdev_poller);
++
++	/* remove session */
++	ssam_sessions_remove(smdev->smsessions, smsession);
++	if (smdev->active_session_num > 0) {
++		smdev->active_session_num--;
++	}
++	smsession->smdev = NULL;
++
++	/* put io channel */
++	if (bsmsession->io_channel != NULL) {
++		spdk_put_io_channel(bsmsession->io_channel);
++		bsmsession->io_channel = NULL;
++	}
++
++	/* close bdev device, last step, async */
++	ssam_send_dev_destroy_msg(smsession, NULL);
++
++	/* free smsession not here, but after close bdev device;
++	 * see ssam_blk_destroy_bdev_device()
++	 */
++
++	return SPDK_POLLER_BUSY;
++}
++
++static int
++ssam_session_bdev_remove_cb(struct spdk_ssam_session *smsession, void **ctx)
++{
++	struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession);
++	int rc = 0;
++
++	/* smsession already removed */
++	if (!smsession->started) {
++		return 0;
++	} else {
++		smsession->started = false;
++	}
++
++	bsmsession->stop_bdev_poller = SPDK_POLLER_REGISTER(ssam_destroy_poller_cb,
++		bsmsession, 0);
++
++	rc = ssam_virtio_blk_resize(smsession->gfunc_id, 0);
++	if (rc != 0) {
++		SPDK_WARNLOG("%s: virtio blk resize failed when remove session.\n", smsession->name);
++	}
++
++	if (ssam_get_hash_mode() == SSAM_VQ_HASH_MODE) {
++		rc = ssam_virtio_vq_unbind_core(smsession->gfunc_id);
++		if (rc != 0) {
++			SPDK_WARNLOG("%s: virtio blk vq unbind core failed.\n", smsession->name);
++		}
++	}
++
++	if (smsession->gfunc_id > SSAM_PF_MAX_NUM) {
++		rc = ssam_virtio_blk_release_resource(smsession->gfunc_id);
++		if (rc != 0) {
++			SPDK_WARNLOG("%s: virtio blk release vq failed.\n", smsession->name);
++		}
++	}
++
++	ssam_set_session_be_freed(ctx);
++	ssam_send_event_async_done(ctx);
++
++	return 0;
++}
++
++static void
++ssam_bdev_remove_cb(void *remove_ctx)
++{
++	struct spdk_ssam_session *smsession = remove_ctx;
++	struct spdk_ssam_send_event_flag send_event_flag = {
++		.need_async = false,
++		.need_rsp = true,
++	};
++
++	SPDK_WARNLOG("%s: hot-removing bdev - all further requests will be stuck.\n",
++		smsession->name);
++
++	ssam_send_event_to_session(smsession, ssam_session_bdev_remove_cb,
++		NULL, send_event_flag, NULL);
++}
++
++static void
++ssam_session_bdev_resize_cb(struct spdk_ssam_session *smsession, void **ctx)
++{
++	struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession);
++	int rc;
++
++	rc = ssam_virtio_blk_resize(smsession->gfunc_id, bsmsession->bdev->blockcnt);
++	if (rc != 0) {
++		SPDK_WARNLOG("%s: virtio blk resize failed.\n", smsession->name);
++	}
++}
++
++static void
++ssam_blk_resize_cb(void *resize_ctx)
++{
++	struct spdk_ssam_session *smsession = resize_ctx;
++	struct spdk_ssam_send_event_flag send_event_flag = {
++		.need_async = false,
++		.need_rsp = true,
++	};
++
++	ssam_send_event_to_session(smsession, NULL, ssam_session_bdev_resize_cb, send_event_flag, NULL);
++}
++
++static void
++ssam_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
++	void *event_ctx)
++{
++	SPDK_DEBUGLOG(ssam_blk, "Bdev event: type %d, name %s\n",
++		type, bdev->name);
++
++	switch (type) {
++	case SPDK_BDEV_EVENT_REMOVE:
++		SPDK_NOTICELOG("bdev name (%s) received event(SPDK_BDEV_EVENT_REMOVE)\n",
++			bdev->name);
++		ssam_bdev_remove_cb(event_ctx);
++		break;
++	case SPDK_BDEV_EVENT_RESIZE:
++		SPDK_NOTICELOG("bdev name (%s) received event(SPDK_BDEV_EVENT_RESIZE)\n",
++			bdev->name);
++		ssam_blk_resize_cb(event_ctx);
++		break;
++	default:
++		
SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
++		break;
++	}
++}
++
++static void
++ssam_free_task_pool(struct spdk_ssam_blk_session *bsmsession)
++{
++	struct spdk_ssam_session *smsession = &bsmsession->smsession;
++	struct spdk_ssam_virtqueue *vq = NULL;
++	uint16_t max_queues = smsession->max_queues;
++	uint16_t i;
++
++	if (max_queues > SPDK_SSAM_MAX_VQUEUES) {
++		return;
++	}
++
++	for (i = 0; i < max_queues; i++) {
++		vq = &smsession->virtqueue[i];
++		if (vq->tasks != NULL) {
++			spdk_free(vq->tasks);
++			vq->tasks = NULL;
++		}
++
++		if (vq->index != NULL) {
++			spdk_free(vq->index);
++			vq->index = NULL;
++		}
++	}
++}
++
++static int
++ssam_alloc_task_pool(struct spdk_ssam_blk_session *bsmsession)
++{
++	struct spdk_ssam_session *smsession = &bsmsession->smsession;
++	struct spdk_ssam_virtqueue *vq = NULL;
++	struct spdk_ssam_blk_task *task = NULL;
++	uint16_t max_queues = smsession->max_queues;
++	uint32_t task_cnt = smsession->queue_size;
++	uint16_t i;
++	uint32_t j;
++
++	if ((max_queues > SPDK_SSAM_MAX_VQUEUES) || (max_queues == 0)) {
++		SPDK_ERRLOG("%s: max_queues %u invalid\n", smsession->name, max_queues);
++		return -EINVAL;
++	}
++
++	if ((task_cnt == 0) || (task_cnt > SPDK_SSAM_MAX_VQ_SIZE)) {
++		SPDK_ERRLOG("%s: virtqueue size %u invalid\n", smsession->name, task_cnt);
++		return -EINVAL;
++	}
++
++	for (i = 0; i < max_queues; i++) {
++		vq = &smsession->virtqueue[i];
++		vq->smsession = smsession;
++		vq->num = task_cnt;
++		vq->use_num = 0;
++		vq->index_l = 0;
++		vq->index_r = 0;
++		vq->tasks = spdk_zmalloc(sizeof(struct spdk_ssam_blk_task) * task_cnt,
++			SPDK_CACHE_LINE_SIZE, NULL,
++			SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
++		vq->index = spdk_zmalloc(sizeof(uint32_t) * task_cnt,
++			SPDK_CACHE_LINE_SIZE, NULL,
++			SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
++		if (vq->tasks == NULL || vq->index == NULL) {
++			SPDK_ERRLOG("%s: failed to allocate %"PRIu32" tasks for virtqueue %"PRIu16"\n",
++				smsession->name, task_cnt, i);
++			ssam_free_task_pool(bsmsession);
++			return -ENOMEM;
++		}
++		for (j = 0; j < task_cnt; j++) {
++			task = &((struct spdk_ssam_blk_task *)vq->tasks)[j];
++			task->bsmsession = bsmsession;
++			task->task_idx = j;
++			vq->index[j] = j;
++		}
++	}
++
++	return 0;
++}
++
++static void
++ssam_blk_print_stuck_io_info(struct spdk_ssam_session *smsession)
++{
++	struct spdk_ssam_blk_task *tasks;
++	struct spdk_ssam_blk_task *task;
++	int i, j;
++
++	for (i = 0; i < smsession->max_queues; i++) {
++		for (j = 0; j < smsession->queue_size; j++) {
++			tasks = (struct spdk_ssam_blk_task *)smsession->virtqueue[i].tasks;
++			task = &tasks[j];
++			if (task == NULL) {
++				continue;
++			}
++			if (task->used) {
++				SPDK_INFOLOG(ssam_blk, "%s: stuck io payload_size %u, vq_idx %u, req_idx %u\n",
++					smsession->name, task->payload_size, task->vq_idx, task->req_idx);
++			}
++		}
++	}
++}
++
++static uint16_t
++get_req_idx(struct spdk_ssam_blk_task *task)
++{
++	return task->io_req->req.cmd.virtio.req_idx;
++}
++
++static void
++ssam_blk_task_init(struct spdk_ssam_blk_task *task)
++{
++	task->used = true;
++	task->iovcnt = 0;
++	task->io_req = NULL;
++	task->payload_size = 0;
++	memset(&task->task_stat, 0, sizeof(task->task_stat));
++	ssam_task_stat_tick(&task->task_stat.start_tsc);
++}
++
++static void
++ssam_blk_task_finish(struct spdk_ssam_blk_task *task)
++{
++	struct spdk_ssam_session *smsession = &task->bsmsession->smsession;
++	struct spdk_ssam_virtqueue *vq = &smsession->virtqueue[task->vq_idx];
++
++	if (smsession->task_cnt == 0) {
++		SPDK_ERRLOG("smsession %s: task internal 
error\n", smsession->name); ++ return; ++ } ++ ++ task->io_req = NULL; ++ task->payload_size = 0; ++ ++ if (task->iovs.virt.sges[0].iov_base != NULL) { ++ ssam_mempool_free(smsession->mp, task->iovs.virt.sges[0].iov_base); ++ task->iovs.virt.sges[0].iov_base = NULL; ++ } ++ ++ memset(&task->iovs, 0, sizeof(task->iovs)); ++ ++ task->iovcnt = 0; ++ smsession->task_cnt--; ++ task->used = false; ++ vq->index[vq->index_l] = task->task_idx; ++ vq->index_l = (vq->index_l + 1) & 0xFF; ++ vq->use_num--; ++} ++ ++static int ++ssam_blk_io_complete(struct spdk_ssam_dev *smdev, struct ssam_request *io_req, uint8_t status) ++{ ++ struct ssam_io_response io_resp; ++ struct ssam_virtio_res *virtio_res = (struct ssam_virtio_res *)&io_resp.data; ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ struct iovec io_vec; ++ uint8_t res_status = status; ++ int rc; ++ ++ if (status != VIRTIO_BLK_S_OK) { ++ SPDK_ERRLOG("ssam io complete return error tid=%u gfunc_id:%u.\n", smdev->tid, io_req->gfunc_id); ++ } ++ ++ memset(&io_resp, 0, sizeof(io_resp)); ++ io_resp.gfunc_id = io_req->gfunc_id; ++ io_resp.iocb_id = io_req->iocb_id; ++ io_resp.status = io_req->status; ++ io_resp.req = io_req; ++ io_resp.flr_seq = io_req->flr_seq; ++ ++ virtio_res->iovs = &io_vec; ++ virtio_res->iovs->iov_base = io_cmd->iovs[io_cmd->iovcnt - 1].iov_base; ++ virtio_res->iovs->iov_len = io_cmd->iovs[io_cmd->iovcnt - 1].iov_len; ++ virtio_res->iovcnt = 1; ++ virtio_res->rsp = &res_status; ++ virtio_res->rsp_len = sizeof(res_status); ++ ++ rc = ssam_io_complete(smdev->tid, &io_resp); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ ssam_dev_io_dec(smdev); ++ return 0; ++} ++ ++struct ssam_task_complete_arg { ++ struct spdk_ssam_blk_task *task; ++ uint8_t status; ++}; ++ ++static void ++ssam_task_complete_cb(void *arg) ++{ ++ struct ssam_task_complete_arg *cb_arg = (struct ssam_task_complete_arg *)arg; ++ struct spdk_ssam_session *smsession = &cb_arg->task->bsmsession->smsession; ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ struct spdk_ssam_blk_task *task = cb_arg->task; ++ int rc = ssam_blk_io_complete(smsession->smdev, task->io_req, cb_arg->status); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_task_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ ssam_task_stat_tick(&task->task_stat.complete_end_tsc); ++ ssam_blk_stat_statistics(task, &bsmsession->stat, &bsmsession->blk_stat, cb_arg->status); ++ ssam_blk_stat_statistics(task, &bsmsession->vq_stat[task->vq_idx], ++ &bsmsession->vq_blk_stat[task->vq_idx], ++ cb_arg->status); ++ ssam_blk_task_finish(task); ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ++ssam_task_complete(struct spdk_ssam_blk_task *task, uint8_t status) ++{ ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ struct spdk_ssam_session *smsession = &task->bsmsession->smsession; ++ if (status != VIRTIO_BLK_S_OK) { ++ SPDK_ERRLOG("ssam task return error tid=%u gfunc_id:%u.\n", ++ smsession->smdev->tid, task->io_req->gfunc_id); ++ } ++ SPDK_INFOLOG(ssam_blk_data, "handled io tid=%u gfunc_id=%u rw=%u vqid=%u reqid=%u status=%u.\n", ++ smsession->smdev->tid, smsession->gfunc_id, task->io_req->req.cmd.writable, ++ task->io_req->req.cmd.virtio.vq_idx, 
task->io_req->req.cmd.virtio.req_idx, status); ++ ssam_task_stat_tick(&task->task_stat.complete_start_tsc); ++ int rc = ssam_blk_io_complete(smsession->smdev, task->io_req, status); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_task_complete_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_task_complete_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->status = status; ++ cb_arg->task = task; ++ io_wait_r->cb_fn = ssam_task_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ ssam_task_stat_tick(&task->task_stat.complete_end_tsc); ++ ssam_blk_stat_statistics(task, &bsmsession->stat, &bsmsession->blk_stat, status); ++ ssam_blk_stat_statistics(task, &bsmsession->vq_stat[task->vq_idx], ++ &bsmsession->vq_blk_stat[task->vq_idx], ++ status); ++ ssam_blk_task_finish(task); ++} ++ ++struct ssam_blk_dma_data_request_arg { ++ struct spdk_ssam_dev *smdev; ++ struct spdk_ssam_blk_task *task; ++ struct ssam_dma_request dma_req; ++}; ++ ++static void ++ssam_blk_dma_data_request_cb(void *arg) ++{ ++ struct ssam_blk_dma_data_request_arg *cb_arg = (struct ssam_blk_dma_data_request_arg *)arg; ++ int ret = ssam_dma_data_request(cb_arg->smdev->tid, &cb_arg->dma_req); ++ if (ret == -ENOMEM || ret == -EIO) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_blk_dma_data_request_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(cb_arg->smdev, io_wait_r); ++ return; ++ } ++ if (ret < 0) { ++ SPDK_ERRLOG("%s: ssam dma data request failed:%s\n", ++ cb_arg->task->bsmsession->smsession.name, spdk_strerror(-ret)); ++ ssam_task_complete(cb_arg->task, VIRTIO_BLK_S_IOERR); ++ } ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ++ssam_res_dma_process(struct spdk_ssam_session *smsession, ++ struct spdk_ssam_blk_task *task, uint32_t type, uint8_t status) ++{ ++ struct ssam_dma_request dma_req = {0}; ++ uint16_t tid = smsession->smdev->tid; ++ int ret; ++ ++ ssam_data_request_para(&dma_req, task, type, status); ++ ssam_task_stat_tick(&task->task_stat.dma_start_tsc); ++ task->bsmsession->blk_stat.dma_count++; ++ task->bsmsession->vq_blk_stat[task->vq_idx].dma_count++; ++ ret = ssam_dma_data_request(tid, &dma_req); ++ if (ret == -ENOMEM || ret == -EIO) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_blk_dma_data_request_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_blk_dma_data_request_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smsession->smdev; ++ cb_arg->dma_req = dma_req; ++ cb_arg->task = task; ++ io_wait_r->cb_fn = ssam_blk_dma_data_request_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ ++ if (ret < 0) { ++ SPDK_ERRLOG("%s: ssam dma data request failed:%s\n", smsession->name, spdk_strerror(-ret)); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ } ++} ++ ++static void ++ssam_blk_request_finish(bool success, struct spdk_ssam_blk_task *task) ++{ ++ uint8_t res_status = success ? 
VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR; ++ const struct virtio_blk_outhdr *req = NULL; ++ struct spdk_ssam_session *smsession = &task->bsmsession->smsession; ++ if (res_status != VIRTIO_BLK_S_OK) { ++ SPDK_ERRLOG("request finish return error gfunc_id=%u.\n", smsession->gfunc_id); ++ } ++ ++ req = (struct virtio_blk_outhdr *)task->io_req->req.cmd.header; ++ switch (req->type) { ++ case VIRTIO_BLK_T_IN: ++ case VIRTIO_BLK_T_GET_ID: ++ ssam_res_dma_process(smsession, task, SSAM_REQUEST_DATA_STORE, res_status); ++ break; ++ ++ case VIRTIO_BLK_T_OUT: ++ case VIRTIO_BLK_T_DISCARD: ++ case VIRTIO_BLK_T_WRITE_ZEROES: ++ case VIRTIO_BLK_T_FLUSH: ++ ssam_task_complete(task, res_status); ++ break; ++ ++ default: ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ SPDK_ERRLOG("Not supported request type '%"PRIu32"'.\n", req->type); ++ break; ++ } ++} ++ ++static void ++ssam_blk_req_complete_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) ++{ ++ struct spdk_ssam_blk_task *task = cb_arg; ++ ++ if (spdk_unlikely(spdk_get_shutdown_sig_received())) { ++ /* ++ * In the hot restart process, when this callback is triggered, ++ * the task and bdev_io memory may have been released. ++ * Therefore, task and bdev_io are not released in this scenario. ++ */ ++ return; ++ } ++ ++ /* Second part start of read and write */ ++ SPDK_INFOLOG(ssam_blk_data, ++ "backend io finish tid=%u gfunc_id=%u rw=%u vqid=%u reqid=%u success=%d.\n", ++ task->bsmsession->smsession.smdev->tid, task->bsmsession->smsession.gfunc_id, ++ task->io_req->req.cmd.writable, task->io_req->req.cmd.virtio.vq_idx, ++ task->io_req->req.cmd.virtio.req_idx, ++ success); ++ task->bsmsession->bdev_count--; ++ task->bsmsession->blk_stat.bdev_complete_count++; ++ task->bsmsession->vq_blk_stat[task->vq_idx].bdev_complete_count++; ++ ssam_task_stat_tick(&task->task_stat.bdev_end_tsc); ++ ++ spdk_bdev_free_io(bdev_io); ++ ssam_blk_request_finish(success, task); ++} ++ ++static int ++ssam_request_rc_process(int rc, struct spdk_ssam_blk_task *task) ++{ ++ if (rc == 0) { ++ return rc; ++ } ++ ++ if (rc == -ENOMEM) { ++ SPDK_WARNLOG("No memory, start to queue io.\n"); ++ ssam_request_queue_io(task); ++ } else { ++ SPDK_ERRLOG("IO error, gfunc_id=%u.\n", task->bsmsession->smsession.gfunc_id); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ return rc; ++} ++ ++static bool ++ssam_is_req_sector_err(uint64_t sector) ++{ ++ if (sector > (UINT64_MAX / SECTOR_SIZE)) { ++ SPDK_ERRLOG("req sector out of range, need less or equal than %lu, actually %lu\n", ++ (UINT64_MAX / SECTOR_SIZE), sector); ++ return true; ++ } ++ ++ return false; ++} ++ ++static int ++ssam_virtio_read_write_process(struct spdk_ssam_blk_task *task, ++ const struct virtio_blk_outhdr *req) ++{ ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ struct ssam_io_message *io_cmd = NULL; ++ uint32_t payload_size = task->payload_size; ++ int rc; ++ ++ io_cmd = &task->io_req->req.cmd; ++ ++ if (ssam_is_req_sector_err(req->sector)) { ++ SPDK_ERRLOG("rw check sector error, gfunc_id=%u.\n", bsmsession->smsession.gfunc_id); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ if (spdk_unlikely(payload_size == 0 || (payload_size & (SECTOR_SIZE - 1)) != 0)) { ++ SPDK_ERRLOG("%s - passed IO buffer is not multiple of 512 Bytes (req_idx = %"PRIu16"), " ++ "payload_size = %u, iovcnt = %u.\n", req->type ? 
"WRITE" : "READ", ++ get_req_idx(task), payload_size, io_cmd->iovcnt); ++ ssam_task_complete(task, VIRTIO_BLK_S_UNSUPP); ++ return -1; ++ } ++ ++ if (req->type == VIRTIO_BLK_T_IN) { ++ bsmsession->bdev_count++; ++ ssam_task_stat_tick(&task->task_stat.bdev_start_tsc); ++ rc = spdk_bdev_readv(bsmsession->bdev_desc, bsmsession->io_channel, ++ task->iovs.virt.sges, task->iovcnt, req->sector * SECTOR_SIZE, ++ payload_size, ssam_blk_req_complete_cb, task); ++ ssam_task_stat_tick(&task->task_stat.bdev_func_tsc); ++ } else if (!bsmsession->readonly) { ++ bsmsession->bdev_count++; ++ ssam_task_stat_tick(&task->task_stat.bdev_start_tsc); ++ rc = spdk_bdev_writev(bsmsession->bdev_desc, bsmsession->io_channel, ++ task->iovs.virt.sges, task->iovcnt, req->sector * SECTOR_SIZE, ++ payload_size, ssam_blk_req_complete_cb, task); ++ ssam_task_stat_tick(&task->task_stat.bdev_func_tsc); ++ } else { ++ SPDK_DEBUGLOG(ssam_blk, "Device is in read-only mode!\n"); ++ rc = -1; ++ } ++ ++ return ssam_request_rc_process(rc, task); ++} ++ ++static int ++ssam_virtio_discard_process(struct spdk_ssam_blk_task *task) ++{ ++ int rc; ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ struct virtio_blk_discard_write_zeroes *desc = task->iovs.virt.sges[0].iov_base; ++ ++ if (ssam_is_req_sector_err(desc->sector)) { ++ SPDK_ERRLOG("discard check sector error, gfunc_id=%u.\n", bsmsession->smsession.gfunc_id); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ if (task->payload_size != sizeof(*desc)) { ++ SPDK_ERRLOG("Invalid discard payload size: %u\n", task->payload_size); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ if (desc->flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) { ++ SPDK_ERRLOG("UNMAP flag is only used for WRITE ZEROES command\n"); ++ ssam_task_complete(task, VIRTIO_BLK_S_UNSUPP); ++ return -1; ++ } ++ bsmsession->bdev_count++; ++ rc = spdk_bdev_unmap(bsmsession->bdev_desc, bsmsession->io_channel, ++ desc->sector * SECTOR_SIZE, desc->num_sectors * SECTOR_SIZE, ++ ssam_blk_req_complete_cb, task); ++ ++ return ssam_request_rc_process(rc, task); ++} ++ ++static int ++ssam_virtio_write_zeroes_process(struct spdk_ssam_blk_task *task) ++{ ++ int rc; ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ struct virtio_blk_discard_write_zeroes *desc = task->iovs.virt.sges[0].iov_base; ++ ++ if (ssam_is_req_sector_err(desc->sector)) { ++ SPDK_ERRLOG("write zeros check sector error, gfunc_id=%u.\n", bsmsession->smsession.gfunc_id); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ if (task->payload_size != sizeof(*desc)) { ++ SPDK_NOTICELOG("Invalid write zeroes payload size: %u\n", task->payload_size); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ if (desc->flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) { ++ SPDK_WARNLOG("Ignore the unmap flag for WRITE ZEROES from %"PRIx64", len %"PRIx64"\n", ++ (uint64_t)desc->sector * SECTOR_SIZE, (uint64_t)desc->num_sectors * SECTOR_SIZE); ++ } ++ bsmsession->bdev_count++; ++ rc = spdk_bdev_write_zeroes(bsmsession->bdev_desc, bsmsession->io_channel, ++ desc->sector * SECTOR_SIZE, desc->num_sectors * SECTOR_SIZE, ssam_blk_req_complete_cb, task); ++ ++ return ssam_request_rc_process(rc, task); ++} ++ ++static int ++ssam_virtio_flush_process(struct spdk_ssam_blk_task *task, ++ const struct virtio_blk_outhdr *req) ++{ ++ int rc; ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ uint64_t blockcnt = spdk_bdev_get_num_blocks(bsmsession->bdev); ++ 
uint32_t blocklen = spdk_bdev_get_block_size(bsmsession->bdev); ++ uint64_t flush_bytes; ++ ++ if (blocklen == 0) { ++ SPDK_ERRLOG("bdev's blocklen %u error.\n", blocklen); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ if (req->sector != 0) { ++ SPDK_ERRLOG("sector must be zero for flush command\n"); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ ++ if (blockcnt > (UINT64_MAX / blocklen)) { ++ SPDK_ERRLOG("bdev's blockcnt %lu or blocklen %u out of range.\n", ++ blockcnt, blocklen); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ return -1; ++ } ++ flush_bytes = blockcnt * blocklen; ++ bsmsession->bdev_count++; ++ rc = spdk_bdev_flush(bsmsession->bdev_desc, bsmsession->io_channel, ++ 0, flush_bytes, ssam_blk_req_complete_cb, task); ++ ++ return ssam_request_rc_process(rc, task); ++} ++ ++static int ++ssam_virtio_get_id_process(struct spdk_ssam_blk_task *task) ++{ ++ uint32_t used_length; ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ ++ if (task->iovcnt == 0 || task->payload_size == 0) { ++ SPDK_ERRLOG("check task param error, gfunc_id=%u.\n", bsmsession->smsession.gfunc_id); ++ ssam_task_complete(task, VIRTIO_BLK_S_UNSUPP); ++ return -1; ++ } ++ ++ used_length = spdk_min((size_t)VIRTIO_BLK_ID_BYTES, task->iovs.virt.sges[0].iov_len); ++ if (bsmsession->serial == NULL) { ++ spdk_strcpy_pad(task->iovs.virt.sges[0].iov_base, spdk_bdev_get_product_name(bsmsession->bdev), ++ used_length, ' '); ++ } else { ++ spdk_strcpy_pad(task->iovs.virt.sges[0].iov_base, bsmsession->serial, ++ used_length, ' '); ++ } ++ bsmsession->blk_stat.bdev_complete_count++; ++ bsmsession->vq_blk_stat[task->vq_idx].bdev_complete_count++; ++ ssam_blk_request_finish(true, task); ++ ++ return 0; ++} ++ ++static int ++ssam_io_process(struct spdk_ssam_blk_task *task, const struct virtio_blk_outhdr *req) ++{ ++ int rc; ++ SPDK_INFOLOG(ssam_blk_data, ++ "backend io start tid=%u gfunc_id=%u reqtype=%d rw=%u vqid=%u reqid=%u offset=%llu length=%u.\n", ++ task->bsmsession->smsession.smdev->tid, task->bsmsession->smsession.gfunc_id, req->type, ++ task->io_req->req.cmd.writable, task->io_req->req.cmd.virtio.vq_idx, ++ task->io_req->req.cmd.virtio.req_idx, ++ req->sector * SECTOR_SIZE, task->payload_size); ++ task->bsmsession->blk_stat.bdev_count++; ++ task->bsmsession->vq_blk_stat[task->vq_idx].bdev_count++; ++ switch (req->type) { ++ case VIRTIO_BLK_T_IN: ++ case VIRTIO_BLK_T_OUT: ++ rc = ssam_virtio_read_write_process(task, req); ++ break; ++ case VIRTIO_BLK_T_DISCARD: ++ rc = ssam_virtio_discard_process(task); ++ break; ++ case VIRTIO_BLK_T_WRITE_ZEROES: ++ rc = ssam_virtio_write_zeroes_process(task); ++ break; ++ case VIRTIO_BLK_T_FLUSH: ++ rc = ssam_virtio_flush_process(task, req); ++ break; ++ case VIRTIO_BLK_T_GET_ID: ++ rc = ssam_virtio_get_id_process(task); ++ break; ++ default: ++ SPDK_ERRLOG("Not supported request type '%"PRIu32"'.\n", req->type); ++ ssam_task_complete(task, VIRTIO_BLK_S_UNSUPP); ++ return -1; ++ } ++ ++ return rc; ++} ++ ++static int ++ssam_process_blk_request(struct spdk_ssam_blk_task *task) ++{ ++ int ret; ++ struct iovec *iov = NULL; ++ const struct virtio_blk_outhdr *req = NULL; ++ struct ssam_io_message *io_cmd = NULL; ++ ++ io_cmd = &task->io_req->req.cmd; ++ /* get req header */ ++ if (spdk_unlikely(io_cmd->iovs[0].iov_len != sizeof(*req))) { ++ SPDK_ERRLOG("First descriptor size is %zu but expected %zu (req_idx = %"PRIu16").\n", ++ io_cmd->iovs[0].iov_len, sizeof(*req), get_req_idx(task)); ++ ssam_task_complete(task, 
VIRTIO_BLK_S_UNSUPP); ++ return -1; ++ } ++ ++ req = (struct virtio_blk_outhdr *)io_cmd->header; ++ /* get req tail */ ++ iov = &io_cmd->iovs[io_cmd->iovcnt - 1]; ++ if (spdk_unlikely(iov->iov_len != 1)) { ++ SPDK_ERRLOG("Last descriptor size is %zu but expected %d (req_idx = %"PRIu16").\n", ++ iov->iov_len, 1, get_req_idx(task)); ++ ssam_task_complete(task, VIRTIO_BLK_S_UNSUPP); ++ return -1; ++ } ++ ++ ret = ssam_io_process(task, req); ++ if (ret < 0) { ++ SPDK_ERRLOG("ssam io process failed(%d)\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_get_payload_size(struct ssam_request *io_req, uint32_t *payload_size) ++{ ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ uint32_t payload = 0; ++ uint32_t i; ++ ++ for (i = 1; i < io_cmd->iovcnt - 1; i++) { ++ if (spdk_unlikely((UINT32_MAX - io_cmd->iovs[i].iov_len) < payload)) { ++ SPDK_ERRLOG("payload size overflow\n"); ++ return -1; ++ } ++ payload += io_cmd->iovs[i].iov_len; ++ } ++ ++ if (spdk_unlikely(payload > PAYLOAD_SIZE_MAX)) { ++ SPDK_ERRLOG("payload size larger than %u, payload_size = %u\n", ++ PAYLOAD_SIZE_MAX, payload); ++ return -1; ++ } ++ ++ *payload_size = payload; ++ ++ return 0; ++} ++ ++static int ++ssam_task_iovs_memory_get(struct spdk_ssam_blk_task *task) ++{ ++ struct ssam_mempool *mp = task->bsmsession->smsession.mp; ++ void *buffer = NULL; ++ uint64_t phys_addr = 0; ++ ++ if (task->payload_size == 0) { ++ /* request type of VIRTIO_BLK_T_FLUSH does not have payload */ ++ task->iovs.virt.sges[0].iov_base = NULL; ++ return 0; ++ } ++ ++ task->iovs.virt.sges[0].iov_base = NULL; ++ task->iovs.phys.sges[0].iov_base = NULL; ++ task->iovs.virt.sges[0].iov_len = task->payload_size; ++ task->iovs.phys.sges[0].iov_len = task->payload_size; ++ task->iovcnt = 1; ++ ++ buffer = ssam_mempool_alloc(mp, task->payload_size, &phys_addr); ++ if (spdk_unlikely(buffer == NULL)) { ++ return -ENOMEM; ++ } ++ ++ /* ssam request max IO size is PAYLOAD_SIZE_MAX, only use one iov to save data */ ++ task->iovs.virt.sges[0].iov_base = buffer; ++ task->iovs.phys.sges[0].iov_base = (void *)phys_addr; ++ ++ return 0; ++} ++ ++static void ++ssam_data_request_para(struct ssam_dma_request *dma_req, struct spdk_ssam_blk_task *task, ++ uint32_t type, uint8_t status) ++{ ++ struct ssam_io_message *io_cmd = NULL; ++ struct spdk_ssam_dma_cb dma_cb = { ++ .status = status, ++ .req_dir = type, ++ .gfunc_id = task->io_req->gfunc_id, ++ .vq_idx = task->vq_idx, ++ .task_idx = task->task_idx ++ }; ++ ++ io_cmd = &task->io_req->req.cmd; ++ dma_req->cb = (void *) * (uint64_t *)&dma_cb; ++ dma_req->gfunc_id = task->io_req->gfunc_id; ++ dma_req->flr_seq = task->io_req->flr_seq; ++ dma_req->direction = type; ++ dma_req->data_len = task->payload_size; ++ if (type == SSAM_REQUEST_DATA_STORE) { ++ dma_req->src = task->iovs.phys.sges; ++ dma_req->src_num = task->iovcnt; ++ dma_req->dst = &io_cmd->iovs[1]; ++ /* dma data iovs does not contain header and tail */ ++ dma_req->dst_num = io_cmd->iovcnt - IOV_HEADER_TAIL_NUM; ++ } else if (type == SSAM_REQUEST_DATA_LOAD) { ++ dma_req->src = &io_cmd->iovs[1]; ++ /* dma data iovs does not contain header and tail */ ++ dma_req->src_num = io_cmd->iovcnt - IOV_HEADER_TAIL_NUM; ++ dma_req->dst = task->iovs.phys.sges; ++ dma_req->dst_num = task->iovcnt; ++ } ++} ++ ++static void ++ssam_request_dma_process(struct spdk_ssam_session *smsession, struct spdk_ssam_blk_task *task) ++{ ++ struct virtio_blk_outhdr *req = NULL; ++ int ret; ++ ++ req = (struct virtio_blk_outhdr *)task->io_req->req.cmd.header; ++ 
SPDK_INFOLOG(ssam_blk_data, ++ "request dma request io tid=%u gfunc_id=%u reqtype=%d rw=%u vqid=%u reqid=%u.\n", ++ smsession->smdev->tid, smsession->gfunc_id, req->type, task->io_req->req.cmd.writable, ++ task->io_req->req.cmd.virtio.vq_idx, task->io_req->req.cmd.virtio.req_idx); ++ ++ switch (req->type) { ++ case VIRTIO_BLK_T_IN: ++ case VIRTIO_BLK_T_GET_ID: ++ case VIRTIO_BLK_T_FLUSH: ++ ret = ssam_process_blk_request(task); ++ if (ret < 0) { ++ SPDK_ERRLOG("====== Task: req_idx %u failed ======\n", task->req_idx); ++ } ++ break; ++ ++ case VIRTIO_BLK_T_OUT: ++ case VIRTIO_BLK_T_DISCARD: ++ case VIRTIO_BLK_T_WRITE_ZEROES: ++ /* dma request: Host -> ipu */ ++ ssam_res_dma_process(smsession, task, SSAM_REQUEST_DATA_LOAD, 0); ++ break; ++ ++ default: ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ SPDK_ERRLOG("Not supported request type '%"PRIu32"'.\n", req->type); ++ } ++} ++ ++struct ssam_blk_io_complete_arg { ++ struct spdk_ssam_dev *smdev; ++ struct ssam_request *io_req; ++}; ++ ++static void ++ssam_blk_io_complete_cb(void *arg) ++{ ++ struct ssam_blk_io_complete_arg *cb_arg = (struct ssam_blk_io_complete_arg *)arg; ++ int rc = ssam_blk_io_complete(cb_arg->smdev, cb_arg->io_req, VIRTIO_BLK_S_IOERR); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_blk_io_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(cb_arg->smdev, io_wait_r); ++ return; ++ } ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ++ssam_process_blk_task(struct spdk_ssam_session *smsession, struct ssam_request *io_req, ++ uint16_t vq_idx, uint16_t req_idx, uint32_t payload_size) ++{ ++ int rc; ++ struct spdk_ssam_blk_task *task = NULL; ++ struct spdk_ssam_virtqueue *vq = &smsession->virtqueue[vq_idx]; ++ ++ if (spdk_unlikely(vq->use_num >= vq->num)) { ++ SPDK_ERRLOG("Session:%s vq(%hu) task_cnt(%u) limit(%u).\n", smsession->name, vq_idx, vq->use_num, ++ vq->num); ++ rc = ssam_blk_io_complete(smsession->smdev, io_req, VIRTIO_BLK_S_IOERR); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_blk_io_complete_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_blk_io_complete_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smsession->smdev; ++ cb_arg->io_req = io_req; ++ io_wait_r->cb_fn = ssam_blk_io_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ } ++ return; ++ } ++ ++ uint32_t index = vq->index[vq->index_r]; ++ task = &((struct spdk_ssam_blk_task *)vq->tasks)[index]; ++ if (spdk_unlikely(task->used)) { ++ SPDK_ERRLOG("%s: vq(%u) task with idx %u is already pending.\n", smsession->name, vq_idx, index); ++ rc = ssam_blk_io_complete(smsession->smdev, io_req, VIRTIO_BLK_S_IOERR); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_blk_io_complete_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_blk_io_complete_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smsession->smdev; ++ cb_arg->io_req = io_req; ++ io_wait_r->cb_fn = 
ssam_blk_io_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ } ++ return; ++ } ++ ++ smsession->task_cnt++; ++ vq->index_r = (vq->index_r + 1) & 0xFF; ++ vq->use_num++; ++ ++ ssam_blk_task_init(task); ++ task->io_req = io_req; ++ task->vq_idx = vq_idx; ++ task->req_idx = req_idx; ++ task->payload_size = payload_size; ++ task->session_io_wait.cb_fn = ssam_session_io_resubmit; ++ task->session_io_wait.cb_arg = task; ++ ++ rc = ssam_task_iovs_memory_get(task); ++ if (rc != 0) { ++ ssam_session_insert_io_wait(smsession, &task->session_io_wait); ++ return; ++ } ++ ++ ssam_request_dma_process(smsession, task); ++ return; ++} ++ ++static void ++ssam_process_vq(struct spdk_ssam_session *smsession, struct ssam_request *io_req) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ uint16_t vq_idx = io_cmd->virtio.vq_idx; ++ uint16_t req_idx = io_cmd->virtio.req_idx; ++ uint32_t payload_size = 0; ++ int rc; ++ ++ if (vq_idx >= smsession->max_queues) { ++ SPDK_ERRLOG("vq_idx out of range, need less than %u, actually %u\n", ++ smsession->max_queues, vq_idx); ++ goto err; ++ } ++ ++ bsmsession->vq_blk_stat[vq_idx].start_count++; ++ ++ if (io_req->status != SSAM_IO_STATUS_OK) { ++ SPDK_WARNLOG("%s: ssam request status invalid, but still process, status=%d\n", ++ smsession->name, io_req->status); ++ goto err; ++ } ++ ++ rc = ssam_get_payload_size(io_req, &payload_size); ++ if (rc != 0) { ++ goto err; ++ } ++ ++ ssam_process_blk_task(smsession, io_req, vq_idx, req_idx, payload_size); ++ return; ++ ++err: ++ rc = ssam_blk_io_complete(smsession->smdev, io_req, VIRTIO_BLK_S_IOERR); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_blk_io_complete_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_blk_io_complete_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smsession->smdev; ++ cb_arg->io_req = io_req; ++ io_wait_r->cb_fn = ssam_blk_io_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ } ++ return; ++} ++ ++static void ++ssam_no_bdev_put_io_channel(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ ++ if (smsession->task_cnt == 0 && (bsmsession->io_channel != NULL)) { ++ spdk_put_io_channel(bsmsession->io_channel); ++ bsmsession->io_channel = NULL; ++ } ++} ++ ++struct ssam_no_bdev_process_vq_arg { ++ struct spdk_ssam_session *smsession; ++ struct ssam_request *io_req; ++}; ++ ++static void ++ssam_no_bdev_process_vq_cb(void *arg) ++{ ++ struct ssam_no_bdev_process_vq_arg *cb_arg = (struct ssam_no_bdev_process_vq_arg *)arg; ++ int rc = ssam_blk_io_complete(cb_arg->smsession->smdev, cb_arg->io_req, VIRTIO_BLK_S_IOERR); ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_no_bdev_process_vq_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(cb_arg->smsession->smdev, io_wait_r); ++ return; ++ } ++ ssam_no_bdev_put_io_channel(cb_arg->smsession); ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void 
++ssam_no_bdev_process_vq(struct spdk_ssam_session *smsession, struct ssam_request *io_req)
++{
++ SPDK_ERRLOG("gfunc_id %u has no bdev, aborting request and returning EIO\n", io_req->gfunc_id);
++ int rc = ssam_blk_io_complete(smsession->smdev, io_req, VIRTIO_BLK_S_IOERR);
++ if (rc != 0) {
++ struct spdk_ssam_session_io_wait_r *io_wait_r =
++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r));
++ struct ssam_no_bdev_process_vq_arg *cb_arg =
++ calloc(1, sizeof(struct ssam_no_bdev_process_vq_arg));
++ if (io_wait_r == NULL || cb_arg == NULL) {
++ SPDK_ERRLOG("calloc for io_wait_r failed\n");
++ sleep(1);
++ raise(SIGTERM);
++ }
++ cb_arg->smsession = smsession;
++ cb_arg->io_req = io_req;
++ io_wait_r->cb_fn = ssam_no_bdev_process_vq_cb;
++ io_wait_r->cb_arg = cb_arg;
++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r);
++ return;
++ }
++ SPDK_WARNLOG("Aborting request because this controller does not exist\n");
++
++ ssam_no_bdev_put_io_channel(smsession);
++}
++
++static void
++ssam_blk_response_worker(struct spdk_ssam_session *smsession, void *arg)
++{
++ struct ssam_dma_rsp *dma_rsp = (struct ssam_dma_rsp *)arg;
++ struct spdk_ssam_dma_cb *dma_cb = (struct spdk_ssam_dma_cb *)&dma_rsp->cb;
++ struct spdk_ssam_blk_task *task = NULL;
++ uint16_t vq_idx = dma_cb->vq_idx;
++ uint16_t task_idx = dma_cb->task_idx;
++ uint8_t req_dir = dma_cb->req_dir;
++
++ if (vq_idx >= smsession->max_queues) {
++ smsession->smdev->discard_io_num++;
++ SPDK_ERRLOG("vq_idx out of range, need less than %u, actually %u\n",
++ smsession->max_queues, vq_idx);
++ return;
++ }
++
++ task = &((struct spdk_ssam_blk_task *)smsession->virtqueue[vq_idx].tasks)[task_idx];
++ if (dma_rsp->status != 0) {
++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR);
++ SPDK_ERRLOG("dma data process failed!\n");
++ return;
++ }
++ if (dma_rsp->last_flag == 0) {
++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR);
++ SPDK_ERRLOG("last_flag should not equal 0!\n");
++ return;
++ }
++ ssam_task_stat_tick(&task->task_stat.dma_end_tsc);
++ task->bsmsession->blk_stat.dma_complete_count++;
++ task->bsmsession->vq_blk_stat[task->vq_idx].dma_complete_count++;
++ if (req_dir == SSAM_REQUEST_DATA_LOAD) {
++ /* Write data is ready, start a request to the backend */
++ ssam_process_blk_request(task);
++ } else if (req_dir == SSAM_REQUEST_DATA_STORE) {
++ /* Data has been read by the user, complete the task */
++ ssam_task_complete(task, dma_cb->status);
++ }
++}
++
++static int
++ssam_blk_check_io_req(struct spdk_ssam_dev *smdev, struct ssam_request *io_req)
++{
++ struct ssam_io_message *io_cmd = NULL;
++ uint16_t vq_idx;
++ uint16_t req_idx;
++ const struct virtio_blk_outhdr *req = NULL;
++
++ if (io_req == NULL) {
++ SPDK_ERRLOG("%s: received a NULL IO message\n", smdev->name);
++ return -1;
++ }
++
++ io_cmd = &io_req->req.cmd;
++ vq_idx = io_cmd->virtio.vq_idx;
++ req_idx = io_cmd->virtio.req_idx;
++ req = (struct virtio_blk_outhdr *)io_cmd->header;
++
++ if (io_cmd->iovs == NULL) {
++ SPDK_ERRLOG("%s: received an empty IO, vq_idx:%u, req_idx:%u\n",
++ smdev->name, vq_idx, req_idx);
++ return -1;
++ }
++
++ if (io_cmd->iovcnt < IOV_HEADER_TAIL_NUM) {
++ SPDK_ERRLOG("%s: iovcnt %u is less than %d, expected at least %d\n",
++ smdev->name, io_cmd->iovcnt, IOV_HEADER_TAIL_NUM, IOV_HEADER_TAIL_NUM);
++ return -1;
++ }
++
++ if ((io_cmd->iovcnt == IOV_HEADER_TAIL_NUM) && (req->type != VIRTIO_BLK_T_FLUSH)) {
++ SPDK_ERRLOG("%s: received an IO that does not contain valid data, iovcnt:%u, vq_idx:%u, "
++ "req_idx:%u, req_type:%u, req_ioprio:%u, req_sector:%llu\n",
++ smdev->name,
io_cmd->iovcnt, vq_idx, req_idx, req->type, req->ioprio, req->sector); ++ return -1; ++ } ++ ++ if (io_cmd->iovcnt > (SPDK_SSAM_IOVS_MAX + IOV_HEADER_TAIL_NUM)) { ++ SPDK_ERRLOG("%s: received too much IO, iovcnt:%u, vq_idx:%u, req_idx:%u\n", ++ smdev->name, io_cmd->iovcnt, vq_idx, req_idx); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_blk_request_worker(struct spdk_ssam_session *smsession, void *arg) ++{ ++ struct spdk_ssam_dev *smdev = smsession->smdev; ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ struct ssam_request *io_req = (struct ssam_request *)arg; ++ int ret; ++ ++ smdev->io_num++; ++ bsmsession->blk_stat.start_count++; ++ ++ ret = ssam_blk_check_io_req(smdev, io_req); ++ if (ret < 0) { ++ smdev->discard_io_num++; ++ return; ++ } ++ ++ if (bsmsession->io_channel == NULL) { ++ bsmsession->io_channel = spdk_bdev_get_io_channel(bsmsession->bdev_desc); ++ if (bsmsession->io_channel == NULL) { ++ ssam_no_bdev_process_vq(smsession, io_req); ++ SPDK_ERRLOG("%s: I/O channel allocation failed\n", smsession->name); ++ return; ++ } ++ } ++ ++ if (bsmsession->no_bdev) { ++ ssam_no_bdev_process_vq(smsession, io_req); ++ } else { ++ ssam_process_vq(smsession, io_req); ++ } ++} ++ ++static void ++ssam_blk_no_data_request_worker(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_blk_session *bsmsession = NULL; ++ ++ bsmsession = ssam_to_blk_session(smsession); ++ if (bsmsession->no_bdev) { ++ ssam_no_bdev_put_io_channel(smsession); ++ } ++} ++ ++static void ++ssam_blk_destroy_bdev_device(struct spdk_ssam_session *smsession, void *args) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ spdk_bdev_close(bsmsession->bdev_desc); ++ ++ /* free taskpool */ ++ ssam_free_task_pool(bsmsession); ++ ++ /* free */ ++ free(bsmsession); ++} ++ ++static void ++ssam_request_resubmit(void *arg) ++{ ++ struct spdk_ssam_blk_task *task = (struct spdk_ssam_blk_task *)arg; ++ int rc; ++ ++ rc = ssam_process_blk_request(task); ++ if (rc == 0) { ++ SPDK_DEBUGLOG(ssam_blk_data, "====== Task: req_idx = %"PRIu16" resubmitted ======\n", ++ get_req_idx(task)); ++ } else { ++ SPDK_WARNLOG("====== Task: req_idx = %"PRIu16" failed ======\n", get_req_idx(task)); ++ } ++} ++ ++static inline void ++ssam_request_queue_io(struct spdk_ssam_blk_task *task) ++{ ++ int rc; ++ struct spdk_ssam_blk_session *bsmsession = task->bsmsession; ++ ++ task->bdev_io_wait.bdev = bsmsession->bdev; ++ task->bdev_io_wait.cb_fn = ssam_request_resubmit; ++ task->bdev_io_wait.cb_arg = task; ++ ++ rc = spdk_bdev_queue_io_wait(bsmsession->bdev, bsmsession->io_channel, &task->bdev_io_wait); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: failed to queue I/O, rc=%d\n", bsmsession->smsession.name, rc); ++ ssam_task_complete(task, VIRTIO_BLK_S_IOERR); ++ } ++} ++ ++static void ++ssam_session_io_resubmit(void *arg) ++{ ++ struct spdk_ssam_blk_task *task = (struct spdk_ssam_blk_task *)arg; ++ struct spdk_ssam_session *smsession = &task->bsmsession->smsession; ++ int rc; ++ ++ rc = ssam_task_iovs_memory_get(task); ++ if (rc != 0) { ++ ssam_session_insert_io_wait(smsession, &task->session_io_wait); ++ return; ++ } ++ ssam_request_dma_process(smsession, task); ++} ++ ++static void ++ssam_blk_start_post_cb(struct spdk_ssam_session *smsession, void **arg) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ int rc; ++ ++ smsession->started = true; ++ g_blk_set_times[smsession->gfunc_id]++; ++ if (g_blk_set_times[smsession->gfunc_id] == 
ssam_get_core_num()) {
++ rc = ssam_virtio_blk_resize(smsession->gfunc_id, bsmsession->bdev->blockcnt);
++ if (rc != 0) {
++ SPDK_WARNLOG("%s: virtio blk resize failed.\n", smsession->name);
++ }
++
++ rc = ssam_mount_normal(smsession, 0);
++ if (rc != SSAM_MOUNT_OK) {
++ SPDK_WARNLOG("%s: mount ssam volume failed\n", smsession->name);
++ }
++
++ /* Smdev poller is not created here, but is created in the initialization process. */
++ SPDK_NOTICELOG("BLK controller %s created with bdev %s, queues %u\n",
++ smsession->name, spdk_bdev_get_name(bsmsession->bdev), smsession->max_queues);
++ return;
++ }
++}
++
++static int
++ssam_blk_start_cb(struct spdk_ssam_session *smsession, void **ctx)
++{
++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession);
++
++ if (bsmsession->bdev == NULL) {
++ SPDK_ERRLOG("%s: session does not have a bdev.\n", smsession->name);
++ return -ENODEV;
++ }
++
++ ssam_send_event_async_done(ctx);
++
++ return 0;
++}
++
++static int
++ssam_blk_start(struct spdk_ssam_session *smsession)
++{
++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession);
++ gfunc_session_number++;
++
++ struct spdk_ssam_send_event_flag send_event_flag = {
++ .need_async = true,
++ .need_rsp = (gfunc_session_number == ssam_get_core_num()),
++ };
++ if (send_event_flag.need_rsp == false) {
++ smsession->rsp_fn = NULL;
++ smsession->rsp_ctx = NULL;
++ }
++
++ int rc = ssam_alloc_task_pool(bsmsession);
++ if (rc != 0) {
++ SPDK_ERRLOG("%s: failed to alloc task pool.\n", smsession->name);
++ return rc;
++ }
++ return ssam_send_event_to_session(smsession, ssam_blk_start_cb, ssam_blk_start_post_cb,
++ send_event_flag, NULL);
++}
++
++static void
++ssam_blk_destroy_session(struct ssam_blk_session_ctx *ctx)
++{
++ struct spdk_ssam_blk_session *bsmsession = ctx->bsmsession;
++ struct spdk_ssam_session *smsession = &bsmsession->smsession;
++
++ if (smsession->task_cnt > 0) {
++ return;
++ }
++
++ /* During the ssam subsystem finish process, the session registered flag is
++ * set to false first and the bdev is removed in the ssam_bdev_remove_cb()
++ * callback, so wait for that callback process to finish first.
++ */ ++ if ((smsession->registered == false) && (bsmsession->bdev != NULL)) { ++ return; ++ } ++ ++ SPDK_NOTICELOG("%s: removing on lcore %d\n", ++ smsession->name, spdk_env_get_current_core()); ++ ++ ssam_session_destroy(smsession); ++ ++ if (bsmsession->io_channel != NULL) { ++ spdk_put_io_channel(bsmsession->io_channel); ++ bsmsession->io_channel = NULL; ++ } ++ ssam_free_task_pool(bsmsession); ++ ++ if (bsmsession->serial != NULL) { ++ free(bsmsession->serial); ++ } ++ spdk_poller_unregister(&bsmsession->stop_poller); ++ ++ ssam_session_stop_done(smsession, 0, ctx->user_ctx); ++ free(ctx); ++ ++ return; ++} ++ ++static int ++ssam_destroy_session_poller_cb(void *arg) ++{ ++ struct ssam_blk_session_ctx *ctx = arg; ++ ++ if (ssam_trylock() != 0) { ++ return SPDK_POLLER_BUSY; ++ } ++ ++ ssam_blk_destroy_session(ctx); ++ ++ ssam_unlock(); ++ ++ return SPDK_POLLER_BUSY; ++} ++ ++static int ++ssam_blk_stop_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ smsession->started = false; ++ ++ struct ssam_blk_session_ctx *_ctx = ++ (struct ssam_blk_session_ctx *)calloc(1, sizeof(struct ssam_blk_session_ctx)); ++ ++ if (_ctx == NULL) { ++ SPDK_ERRLOG("%s: calloc blk session ctx error.\n", smsession->name); ++ return -ENOMEM; ++ } ++ ++ _ctx->bsmsession = bsmsession; ++ _ctx->user_ctx = ctx; ++ ++ bsmsession->stop_poller = SPDK_POLLER_REGISTER(ssam_destroy_session_poller_cb, ++ _ctx, SESSION_STOP_POLLER_PERIOD); ++ if (bsmsession->stop_poller == NULL) { ++ SPDK_WARNLOG("%s: ssam_destroy_session_poller_cb start failed.\n", smsession->name); ++ ssam_session_stop_done(smsession, -EBUSY, ctx); ++ free(_ctx); ++ return -EBUSY; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_blk_stop(struct spdk_ssam_session *smsession) ++{ ++ delete_flag++; ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = true, ++ .need_rsp = (delete_flag == ssam_get_core_num()), ++ }; ++ if (send_event_flag.need_rsp == false) { ++ smsession->rsp_fn = NULL; ++ smsession->rsp_ctx = NULL; ++ } ++ ++ return ssam_send_event_to_session(smsession, ssam_blk_stop_cb, ssam_blk_stop_cpl_cb, ++ send_event_flag, NULL); ++} ++ ++static int ++ssam_blk_remove_session(struct spdk_ssam_session *smsession) ++{ ++ SPDK_NOTICELOG("session gfunc_id=%u removing\n", smsession->gfunc_id); ++ int ret = ssam_blk_stop(smsession); ++ if ((ret != 0) && (smsession->registered == true)) { ++ (void)ssam_remount_normal(smsession, 0); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++const char * ++ssam_get_bdev_name_by_gfunc_id(uint16_t gfunc_id) ++{ ++ struct spdk_ssam_session *smsession; ++ struct spdk_ssam_blk_session *bsmsession = NULL; ++ ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ return NULL; ++ } ++ bsmsession = ssam_to_blk_session(smsession); ++ ++ return spdk_bdev_get_name(bsmsession->bdev); ++} ++ ++struct spdk_bdev * ++ssam_get_session_bdev(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_blk_session *bsmsession = ssam_to_blk_session(smsession); ++ ++ return bsmsession->bdev; ++} ++ ++static int ++ssam_destroy_session_cb(void *arg) ++{ ++ struct spdk_ssam_blk_session *bsmsession = (struct spdk_ssam_blk_session *)arg; ++ struct spdk_ssam_session *smsession = &bsmsession->smsession; ++ int rc = 0; ++ ++ if (smsession->task_cnt > 0) { ++ return SPDK_POLLER_BUSY; ++ } ++ ++ if (smsession->pending_async_op_num != 0) { ++ return SPDK_POLLER_BUSY; ++ } ++ ++ /* stop poller */ ++ 
spdk_poller_unregister(&bsmsession->stop_session_poller); ++ ++ ssam_session_unreg_response_cb(smsession); ++ rc = ssam_session_unregister(smsession); ++ if (rc != 0) { ++ SPDK_ERRLOG("function id %d: blk construct failed and session remove failed\n", ++ smsession->gfunc_id); ++ } ++ ++ return SPDK_POLLER_BUSY; ++} ++ ++int ++ssam_blk_construct(struct spdk_ssam_session_reg_info *info, const char *dev_name, ++ bool readonly, char *serial) ++{ ++ struct spdk_ssam_session *smsession = NULL; ++ struct spdk_ssam_blk_session *bsmsession = NULL; ++ struct spdk_bdev *bdev = NULL; ++ uint32_t session_ctx_size = sizeof(struct spdk_ssam_blk_session) - ++ sizeof(struct spdk_ssam_session); ++ struct spdk_ssam_dev *smdev = NULL; ++ uint16_t tid; ++ int ret = 0; ++ ++ ssam_lock(); ++ gfunc_session_number = 0; ++ g_blk_set_times[info->gfunc_id] = 0; ++ ++ for (int i = 0; i < ssam_get_core_num(); i++) { ++ tid = i; ++ if (tid == SPDK_INVALID_TID) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ info->tid = tid; ++ info->backend = &g_ssam_blk_session_backend; ++ info->session_ctx_size = session_ctx_size; ++ snprintf(info->type_name, SPDK_SESSION_TYPE_MAX_LEN, "%s", SPDK_SESSION_TYPE_BLK); ++ ret = ssam_session_register(info, &smsession); ++ if (ret != 0) { ++ goto out; ++ } ++ ++ ssam_session_start_done(smsession, 0); ++ ++ bsmsession = ssam_to_blk_session(smsession); ++ ++ ret = spdk_bdev_open_ext(dev_name, true, ssam_bdev_event_cb, smsession, ++ &bsmsession->bdev_desc); ++ if (ret != 0) { ++ SPDK_ERRLOG("function id %d: could not open bdev, error:%s\n", info->gfunc_id, spdk_strerror(-ret)); ++ goto out; ++ } ++ bdev = spdk_bdev_desc_get_bdev(bsmsession->bdev_desc); ++ bsmsession->bdev = bdev; ++ bsmsession->readonly = readonly; ++ bsmsession->need_write_config = ((tid == ssam_get_core_num() - 1) ? true : false); ++ ++ if (serial == NULL) { ++ SPDK_INFOLOG(ssam_blk, "function id %d: not set volume id.\n", info->gfunc_id); ++ } else { ++ bsmsession->serial = calloc(SERIAL_STRING_LEN, sizeof(char)); ++ if (!bsmsession->serial) { ++ SPDK_ERRLOG("no memory for alloc.\n"); ++ goto out; ++ } ++ (void)snprintf(bsmsession->serial, SERIAL_STRING_LEN, "%s", serial); ++ } ++ ++ ret = ssam_blk_start(smsession); ++ if (ret != 0) { ++ SPDK_ERRLOG("%s: start failed\n", smsession->name); ++ goto out; ++ } ++ } ++ ++ SPDK_INFOLOG(ssam_blk, "function id %d: using bdev '%s'\n", info->gfunc_id, dev_name); ++out: ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ smsession = smdev->smsessions[info->gfunc_id]; ++ if ((ret != 0) && (smsession != NULL) && (smsession->smdev != NULL)) { ++ ssam_to_blk_session(smsession)->stop_session_poller = SPDK_POLLER_REGISTER(ssam_destroy_session_cb, ++ smsession, 0); ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ ssam_unlock(); ++ return ret; ++} ++ ++SPDK_LOG_REGISTER_COMPONENT(ssam_blk) ++SPDK_LOG_REGISTER_COMPONENT(ssam_blk_data) +diff --git a/lib/ssam/ssam_config.c b/lib/ssam/ssam_config.c +new file mode 100644 +index 0000000..81e0688 +--- /dev/null ++++ b/lib/ssam/ssam_config.c +@@ -0,0 +1,582 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. 
++ */ ++ ++#include ++#include ++#include ++ ++#include "spdk/string.h" ++#include "spdk/file.h" ++#include "ssam_internal.h" ++ ++#define SSAM_JSON_DEFAULT_MEMPOOL_SIZE 1024 ++#define SSAM_JSON_MAX_MEMPOOL_SIZE 10240 ++ ++enum ssam_dma_queue_num { ++ SSAM_DMA_QUEUE_NUM_DISABLE = 0, ++ SSAM_DMA_QUEUE_NUM_SMALL_IO = 1, ++ SSAM_DMA_QUEUE_NUM_DEFAULT = 2, ++ SSAM_DMA_QUEUE_NUM_LARGE_IO = 4, ++}; ++ ++struct ssam_user_config { ++ char *cfg_file_name; ++ uint32_t mempool_size; ++ uint32_t queues; ++ uint32_t dma_queue_num; ++ char *mode; ++ uint8_t hash_mode; ++}; ++ ++struct ssam_config { ++ struct ssam_user_config user_config; ++ struct ssam_hostep_info ep_info; ++ uint32_t core_num; ++ bool shm_created; ++}; ++ ++static struct ssam_config g_ssam_config; ++ ++static const struct spdk_json_object_decoder g_ssam_user_config_decoders[] = { ++ {"mempool_size_mb", offsetof(struct ssam_user_config, mempool_size), spdk_json_decode_uint32}, ++ {"queues", offsetof(struct ssam_user_config, queues), spdk_json_decode_uint32}, ++ {"mode", offsetof(struct ssam_user_config, mode), spdk_json_decode_string}, ++}; ++ ++static int ++ssam_heap_malloc(const char *type, size_t size, int socket_arg, ++ unsigned int flags, size_t align, size_t bound, bool contig, struct ssam_melem *mem) ++{ ++ void *addr = NULL; ++ unsigned long long pg_size; ++ int socket_id; ++ int rc; ++ uint64_t iova; ++ ++ addr = rte_malloc_socket(type, size, align, socket_arg); ++ if (addr == NULL) { ++ return -ENOMEM; ++ } ++ ++ rc = ssam_malloc_elem_from_addr(addr, &pg_size, &socket_id); ++ if (rc != 0) { ++ ssam_free_ex(addr); ++ return -ENOMEM; ++ } ++ ++ iova = rte_malloc_virt2iova(addr); ++ if (iova == RTE_BAD_IOVA) { ++ ssam_free_ex(addr); ++ return -ENOMEM; ++ } ++ ++ mem->addr = addr; ++ mem->iova = iova; ++ mem->page_sz = pg_size; ++ mem->socket_id = socket_id; ++ return 0; ++} ++ ++static int ++ssam_heap_free(void *addr) ++{ ++ return ssam_free_ex(addr); ++} ++ ++static uint8_t ++ssam_get_dma_queue_num_by_mode(void) ++{ ++ if (g_ssam_config.user_config.mode == NULL) { ++ return SSAM_DMA_QUEUE_NUM_DISABLE; ++ } ++ ++ if (!strcasecmp(g_ssam_config.user_config.mode, "default")) { ++ return SSAM_DMA_QUEUE_NUM_DEFAULT; ++ } else if (!strcasecmp(g_ssam_config.user_config.mode, "small-IO")) { ++ return SSAM_DMA_QUEUE_NUM_SMALL_IO; ++ } else if (!strcasecmp(g_ssam_config.user_config.mode, "large-IO")) { ++ return SSAM_DMA_QUEUE_NUM_LARGE_IO; ++ } ++ return SSAM_DMA_QUEUE_NUM_DISABLE; ++} ++ ++static void ++ssam_get_ssam_lib_init_config(struct ssam_lib_args *cfg) ++{ ++ uint32_t core_num = g_ssam_config.core_num; ++ ++ cfg->role = 1; ++ cfg->dma_queue_num = g_ssam_config.user_config.dma_queue_num; ++ cfg->ssam_heap_malloc = ssam_heap_malloc; ++ cfg->ssam_heap_free = ssam_heap_free; ++ cfg->hash_mode = g_ssam_config.user_config.hash_mode; ++ ++ /* The number of tid is 1 greater than the number of cores. 
*/ ++ cfg->core_num = core_num; ++} ++ ++void ++spdk_ssam_set_shm_created(bool shm_created) ++{ ++ g_ssam_config.shm_created = shm_created; ++} ++ ++bool ++spdk_ssam_get_shm_created(void) ++{ ++ return g_ssam_config.shm_created; ++} ++ ++int ++ssam_set_core_num(uint32_t core_num) ++{ ++ if (core_num > SSAM_MAX_CORE_NUM) { ++ SPDK_ERRLOG("Invalid coremask, total cores need less or equal than %d, " ++ "actually %u, please check startup item.\n", ++ SSAM_MAX_CORE_NUM, core_num); ++ return -EINVAL; ++ } ++ if (g_ssam_config.user_config.dma_queue_num == SSAM_DMA_QUEUE_NUM_LARGE_IO ++ && core_num > SSAM_MAX_CORE_NUM_WITH_LARGE_IO) { ++ SPDK_ERRLOG("Invalid coremask, total cores need less or equal than %d, " ++ "actually %u, please check startup item.\n", ++ SSAM_MAX_CORE_NUM_WITH_LARGE_IO, core_num); ++ return -EINVAL; ++ } ++ g_ssam_config.core_num = core_num; ++ return 0; ++} ++ ++uint16_t ++ssam_get_core_num(void) ++{ ++ return (uint16_t)g_ssam_config.core_num; ++} ++ ++uint32_t ++ssam_get_mempool_size(void) ++{ ++ return g_ssam_config.user_config.mempool_size; ++} ++ ++uint16_t ++ssam_get_queues(void) ++{ ++ uint16_t cfg_queues = (uint16_t)g_ssam_config.user_config.queues; ++ ++ if (cfg_queues == 0) { ++ SPDK_INFOLOG(ssam_config, "Use default queues number: %u.\n", SPDK_SSAM_DEFAULT_VQUEUES); ++ return SPDK_SSAM_DEFAULT_VQUEUES; ++ } ++ return cfg_queues; ++} ++ ++uint8_t ++ssam_get_hash_mode(void) ++{ ++ return g_ssam_config.user_config.hash_mode; ++} ++ ++enum ssam_device_type ++ssam_get_virtio_type(uint16_t gfunc_id) { ++ uint16_t vf_start, vf_end; ++ struct ssam_pf_list *pf = g_ssam_config.ep_info.host_pf_list; ++ ++ for (uint32_t i = 0; i < SSAM_HOSTEP_NUM_MAX; i++) ++ { ++ if (pf[i].pf_funcid == UINT16_MAX) { ++ continue; ++ } ++ if (gfunc_id == pf[i].pf_funcid) { ++ return pf[i].pf_type; ++ } ++ ++ vf_start = pf[i].vf_funcid_start; ++ if (((uint32_t)vf_start + (uint32_t)pf[i].vf_num) > UINT16_MAX) { ++ SPDK_ERRLOG("vf_start %u + vf_num %u out of range, need less or equal than %u.\n", ++ vf_start, pf[i].vf_num, UINT16_MAX); ++ continue; ++ } ++ vf_end = vf_start + pf[i].vf_num; ++ if ((gfunc_id >= vf_start) && (gfunc_id < vf_end)) { ++ return pf[i].pf_type; ++ } ++ } ++ ++ return SSAM_DEVICE_VIRTIO_MAX; ++} ++ ++static void ++ssam_get_virtio_blk_config(struct ssam_virtio_config *cfg) ++{ ++ struct virtio_blk_config *dev_cfg = (struct virtio_blk_config *)cfg->device_config; ++ ++ cfg->device_feature = SPDK_SSAM_VIRTIO_BLK_DEFAULT_FEATURE; ++ cfg->queue_num = g_ssam_config.user_config.queues; ++ cfg->config_len = sizeof(struct virtio_blk_config); ++ ++ memset(dev_cfg, 0, cfg->config_len); ++ dev_cfg->blk_size = 0x200; ++ dev_cfg->min_io_size = 0; ++ dev_cfg->capacity = 0; ++ dev_cfg->num_queues = cfg->queue_num; ++ dev_cfg->seg_max = 0x7d; ++ dev_cfg->size_max = 0x200000; ++ cfg->queue_size = VIRITO_DEFAULT_QUEUE_SIZE; ++ ++ return; ++} ++ ++static void ++ssam_get_virtio_scsi_config(struct ssam_virtio_config *cfg) ++{ ++ struct virtio_scsi_config *dev_cfg = (struct virtio_scsi_config *)cfg->device_config; ++ ++ cfg->device_feature = SPDK_SSAM_VIRTIO_SCSI_DEFAULT_FEATURE; ++ cfg->queue_num = g_ssam_config.user_config.queues; ++ cfg->config_len = sizeof(struct virtio_scsi_config); ++ ++ memset(dev_cfg, 0, sizeof(struct virtio_scsi_config)); ++ dev_cfg->num_queues = 0x04; ++ dev_cfg->seg_max = 0x6f; ++ dev_cfg->max_sectors = 0x1ff; ++ dev_cfg->cmd_per_lun = 0x80; ++ dev_cfg->event_info_size = 0; ++ dev_cfg->sense_size = 0x60; ++ dev_cfg->cdb_size = 0x20; ++ dev_cfg->max_channel = 0; ++ 
dev_cfg->max_target = SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; ++ dev_cfg->max_lun = 0xff; ++ cfg->queue_size = VIRITO_DEFAULT_QUEUE_SIZE; ++ ++ return; ++} ++ ++static int ++ssam_virtio_config_get(struct ssam_pf_list *pf, struct ssam_function_config *cfg) ++{ ++ int ret = 0; ++ ++ cfg->gfunc_id = pf->pf_funcid; ++ cfg->type = pf->pf_type; ++ switch (cfg->type) { ++ case SSAM_DEVICE_VIRTIO_BLK: ++ ssam_get_virtio_blk_config(&cfg->virtio_config); ++ break; ++ case SSAM_DEVICE_VIRTIO_SCSI: ++ ssam_get_virtio_scsi_config(&cfg->virtio_config); ++ break; ++ default: { ++ SPDK_ERRLOG("function config init fail (%d|%d)\n", cfg->gfunc_id, cfg->type); ++ ret = -EINVAL; ++ break; ++ } ++ } ++ ++ return ret; ++} ++ ++static int ++ssam_setup_pf(struct ssam_pf_list *pf, struct ssam_function_config *cfg) ++{ ++ int rc; ++ ++ rc = ssam_setup_function(pf->pf_funcid, pf->vf_num, pf->pf_type); ++ if (rc != 0) { ++ SPDK_ERRLOG("ssam init function(%u) failed:%s\n", pf->pf_funcid, spdk_strerror(-rc)); ++ return rc; ++ } ++ rc = ssam_write_function_config(cfg); ++ if (rc != 0) { ++ SPDK_ERRLOG("ssam write function(%d) config failed:%s\n", cfg->gfunc_id, spdk_strerror(-rc)); ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_virtio_config_init(struct ssam_hostep_info *ep_info) ++{ ++ int rc = 0; ++ uint32_t i; ++ struct ssam_function_config cfg = {0}; ++ struct ssam_pf_list *pf = ep_info->host_pf_list; ++ ++ if (spdk_ssam_get_shm_created()) { ++ /* If server is crashed from last time, no need setup config this time */ ++ return 0; ++ } ++ ++ /** ++ * During chip initialization, the vq and msix resources are initialized. ++ * However, the ssam configuration may be different from the initialization configuration. ++ * In the scene of virtio-blk, resources will be alloced at the function `ssam_blk_controller_set_vqueue`. ++ * Therefore, the original resources need to be released before negotiation with the host end. ++ */ ++ for (i = 0; i < SSAM_HOSTEP_NUM_MAX; i++) { ++ if (pf[i].pf_funcid == UINT16_MAX || pf[i].pf_type != SSAM_DEVICE_VIRTIO_BLK) { ++ continue; ++ } ++ rc = ssam_virtio_blk_release_resource(i); ++ if (rc != 0) { ++ SPDK_WARNLOG("virtio blk release vq failed.\n"); ++ } ++ } ++ ++ for (i = 0; i < SSAM_HOSTEP_NUM_MAX; i++) { ++ if (pf[i].pf_funcid == UINT16_MAX) { ++ continue; ++ } ++ rc = ssam_virtio_config_get(&pf[i], &cfg); ++ if (rc != 0) { ++ return rc; ++ } ++ rc = ssam_setup_pf(&pf[i], &cfg); ++ if (rc != 0) { ++ return rc; ++ } ++ } ++ ++ return rc; ++} ++ ++static int ++ssam_virtio_init(void) ++{ ++ struct ssam_lib_args ssam_args = { 0 }; ++ struct ssam_hostep_info *ep_info = &g_ssam_config.ep_info; ++ int rc; ++ ++ ssam_get_ssam_lib_init_config(&ssam_args); ++ ++ rc = ssam_lib_init(&ssam_args, ep_info); ++ if (rc != 0) { ++ SPDK_ERRLOG("Failed to init ssam:%s\n", spdk_strerror(-rc)); ++ return rc; ++ } ++ ++ rc = ssam_virtio_config_init(ep_info); ++ if (rc != 0) { ++ SPDK_ERRLOG("ssam virtio device init failed:%s\n", spdk_strerror(-rc)); ++ if (ssam_lib_exit() != 0) { ++ SPDK_WARNLOG("ssam lib exit failed\n"); ++ } ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_user_config_default(void) ++{ ++ struct ssam_user_config *user_config = &g_ssam_config.user_config; ++ ++ user_config->mempool_size = SSAM_JSON_DEFAULT_MEMPOOL_SIZE; ++ /** ++ * If file param json file is not exist, queue number will be ++ * set default value SPDK_SSAM_DEFAULT_VQUEUES when user create controller. 
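++ *
++ * For reference, the parameter JSON consumed by ssam_user_config_parse() is
++ * expected to provide the keys listed in g_ssam_user_config_decoders. A minimal
++ * illustrative sketch (the values below are examples only, not mandated defaults):
++ *
++ * {
++ * "mempool_size_mb": 1024,
++ * "queues": 4,
++ * "mode": "default"
++ * }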
++ */ ++ user_config->queues = SPDK_SSAM_DEFAULT_VQUEUES; ++ user_config->dma_queue_num = SSAM_DMA_QUEUE_NUM_DEFAULT; ++ user_config->mode = NULL; ++ user_config->hash_mode = SSAM_VQ_HASH_MODE; ++ ++ return -ENOENT; ++} ++ ++static int ++ssam_user_config_file_read(const char *config_file, size_t *file_len, ++ void **json, ssize_t *value_size) ++{ ++ FILE *read_json = fopen(config_file, "r"); ++ ssize_t ret; ++ void *end = NULL; ++ ++ if (read_json == NULL) { ++ if (errno != ENOENT) { ++ SPDK_ERRLOG("Read JSON configuration file \"%s\" failed\n", config_file); ++ return -1; ++ } ++ SPDK_WARNLOG("JSON config file:%s does not exist! Use default configuration.\n", ++ config_file); ++ return ssam_user_config_default(); ++ } ++ ++ void *load = spdk_posix_file_load(read_json, file_len); ++ fclose(read_json); ++ if (load == NULL) { ++ return -1; ++ } ++ ++ ret = spdk_json_parse(load, *file_len, NULL, 0, &end, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS); ++ if (ret < 0) { ++ SPDK_ERRLOG("Parsing JSON configuration file \"%s\" failed (%zd)\n", config_file, ret); ++ free(load); ++ load = NULL; ++ if (ret == -ENOENT) { /* json file exists, but content is null */ ++ SPDK_ERRLOG("json file exists, but content is null\n"); ++ ret = -1; ++ } ++ return ret; ++ } ++ *json = load; ++ *value_size = ret; ++ ++ return 0; ++} ++ ++static void ++ssam_user_config_free(struct ssam_user_config *user_config) ++{ ++ if (user_config->mode != NULL) { ++ free(user_config->mode); ++ user_config->mode = NULL; ++ } ++} ++ ++static int ++ssam_user_config_parse(size_t file_len, void *json, ssize_t value_size) ++{ ++ struct spdk_json_val *value; ++ struct ssam_user_config *user_config = &g_ssam_config.user_config; ++ ssize_t ret; ++ void *end = NULL; ++ int rc; ++ ++ value = calloc(value_size, sizeof(struct spdk_json_val)); ++ if (value == NULL) { ++ SPDK_ERRLOG("Out of memory\n"); ++ free(json); ++ return -ENOMEM; ++ } ++ ++ ret = spdk_json_parse(json, file_len, value, value_size, &end, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS); ++ if (ret != value_size) { ++ SPDK_ERRLOG("Parsing JSON configuration file failed\n"); ++ free(json); ++ free(value); ++ return -1; ++ } ++ ++ /* resolve json values to struct spdk_ssam_json_config */ ++ ++ rc = spdk_json_decode_object(value, g_ssam_user_config_decoders, ++ SPDK_COUNTOF(g_ssam_user_config_decoders), user_config); ++ free(json); ++ free(value); ++ if (rc != 0) { ++ SPDK_ERRLOG("decode object failed:%s\n", spdk_strerror(-rc)); ++ ssam_user_config_free(user_config); ++ return -1; ++ } ++ user_config->hash_mode = SSAM_VQ_HASH_MODE; ++ ++ return 0; ++} ++ ++static int ++ssam_user_config_check(void) ++{ ++ struct ssam_user_config *user_config = &g_ssam_config.user_config; ++ ++ if (user_config->mempool_size < SSAM_JSON_DEFAULT_MEMPOOL_SIZE) { ++ SPDK_ERRLOG("mempool_size_mb value in file %s out of range, need larger or equal than %u MB, actually %u MB.\n", ++ user_config->cfg_file_name, SSAM_JSON_DEFAULT_MEMPOOL_SIZE, user_config->mempool_size); ++ return -EINVAL; ++ } ++ ++ if (user_config->mempool_size > SSAM_JSON_MAX_MEMPOOL_SIZE) { ++ SPDK_ERRLOG("mempool_size_mb value in file %s out of range, need less or equal than %u MB, actually %u MB.\n", ++ user_config->cfg_file_name, SSAM_JSON_MAX_MEMPOOL_SIZE, user_config->mempool_size); ++ return -EINVAL; ++ } ++ ++ if (user_config->queues > SPDK_SSAM_MAX_VQUEUES) { ++ SPDK_ERRLOG("queues value in file %s out of range, need less or equal than %u, actually %u\n", ++ user_config->cfg_file_name, SPDK_SSAM_MAX_VQUEUES, user_config->queues); ++ return -EINVAL; 
++ }
++
++ if (user_config->queues == 0) {
++ SPDK_ERRLOG("queues value in file %s is invalid, it must not be 0\n",
++ user_config->cfg_file_name);
++ return -EINVAL;
++ }
++
++ user_config->dma_queue_num = ssam_get_dma_queue_num_by_mode();
++ if (user_config->dma_queue_num == SSAM_DMA_QUEUE_NUM_DISABLE) {
++ SPDK_ERRLOG("Invalid mode in file %s, must be one of default, small-IO or large-IO, "
++ "actually %s\n",
++ user_config->cfg_file_name, user_config->mode);
++ return -EINVAL;
++ }
++ return 0;
++}
++
++int
++spdk_ssam_user_config_init(void)
++{
++ size_t file_len = 0;
++ void *json = NULL;
++ ssize_t value_size = 0;
++ int rc;
++ struct ssam_user_config *user_config = &g_ssam_config.user_config;
++
++ user_config->cfg_file_name = ssam_rc_get_param_json_file_path();
++ rc = ssam_user_config_file_read(user_config->cfg_file_name, &file_len, &json, &value_size);
++ if (rc != 0) {
++ if (rc == -ENOENT) {
++ return 0;
++ }
++ return rc;
++ }
++
++ rc = ssam_user_config_parse(file_len, json, value_size);
++ if (rc != 0) {
++ return rc;
++ }
++
++ rc = ssam_user_config_check();
++ if (rc != 0) {
++ ssam_user_config_free(&g_ssam_config.user_config);
++ return rc;
++ }
++
++ return 0;
++}
++
++static void
++ssam_virtio_exit(void)
++{
++ int rc;
++
++ rc = ssam_lib_exit();
++ if (rc != 0) {
++ SPDK_WARNLOG("ssam lib exit failed\n");
++ }
++}
++
++int
++ssam_config_init(void)
++{
++ int rc;
++
++ rc = ssam_virtio_init();
++ if (rc != 0) {
++ return rc;
++ }
++
++ return 0;
++}
++
++void
++ssam_config_exit(void)
++{
++ ssam_virtio_exit();
++}
++
++SPDK_LOG_REGISTER_COMPONENT(ssam_config)
+diff --git a/lib/ssam/ssam_config.h b/lib/ssam/ssam_config.h
+new file mode 100644
+index 0000000..86b9f3d
+--- /dev/null
++++ b/lib/ssam/ssam_config.h
+@@ -0,0 +1,25 @@
++/* SPDX-License-Identifier: BSD-3-Clause
++ * Copyright (C) 2021-2025 Huawei Technologies Co.
++ * All rights reserved.
++ */
++
++#ifndef SSAM_CONFIG_H
++#define SSAM_CONFIG_H
++
++int ssam_set_core_num(uint32_t core_num);
++
++uint16_t ssam_get_core_num(void);
++
++uint32_t ssam_get_mempool_size(void);
++
++uint16_t ssam_get_queues(void);
++
++uint8_t ssam_get_hash_mode(void);
++
++enum ssam_device_type ssam_get_virtio_type(uint16_t gfunc_id);
++
++int ssam_config_init(void);
++
++void ssam_config_exit(void);
++
++#endif /* SSAM_CONFIG_H */
+diff --git a/lib/ssam/ssam_device_pcie.c b/lib/ssam/ssam_device_pcie.c
+new file mode 100644
+index 0000000..3b34934
+--- /dev/null
++++ b/lib/ssam/ssam_device_pcie.c
+@@ -0,0 +1,223 @@
++/* SPDX-License-Identifier: BSD-3-Clause
++ * Copyright (C) 2021-2025 Huawei Technologies Co.
++ * All rights reserved.
++ */ ++ ++#include "spdk/string.h" ++#include "spdk/file.h" ++#include "ssam_internal.h" ++ ++#define SSAM_KEY_MAX_LEN 16 ++#define SSAM_TYPE_MAX_LEN 12 ++#define SSAM_DBDF_MAX_LEN 16 ++ ++struct ssam_device_pcie_info { ++ uint32_t func_id; ++ char type[SSAM_TYPE_MAX_LEN]; ++ char dbdf[SSAM_DBDF_MAX_LEN]; ++}; ++ ++struct ssam_device_pcie_list { ++ uint32_t size; ++ struct ssam_device_pcie_info *device_pcie_list; ++}; ++ ++static struct ssam_device_pcie_list g_ssam_device_pcie_list = { ++ .size = 0, ++ .device_pcie_list = NULL, ++}; ++ ++void ++ssam_deinit_device_pcie_list(void) ++{ ++ if (g_ssam_device_pcie_list.device_pcie_list != NULL) { ++ free(g_ssam_device_pcie_list.device_pcie_list); ++ g_ssam_device_pcie_list.device_pcie_list = NULL; ++ } ++} ++ ++static int ++ssam_alloc_device_pcie_list(struct spdk_json_val *values, size_t num_values) ++{ ++ size_t i; ++ uint32_t size = 0; ++ ++ for (i = 0; i < num_values; i++) { ++ if (values[i].type == SPDK_JSON_VAL_OBJECT_END) { ++ size++; ++ } ++ } ++ ++ if (g_ssam_device_pcie_list.device_pcie_list == NULL) { ++ g_ssam_device_pcie_list.size = size; ++ g_ssam_device_pcie_list.device_pcie_list = calloc(size, sizeof(struct ssam_device_pcie_info)); ++ if (g_ssam_device_pcie_list.device_pcie_list == NULL) { ++ SPDK_ERRLOG("Unable to allocate enough memory for device_pcie_list\n"); ++ return -ENOMEM; ++ } ++ } ++ return 0; ++} ++ ++static void ++ssam_set_device_pcie_index(struct spdk_json_val *value, uint32_t cur_index) ++{ ++ char val[16]; ++ uint32_t gfunc_id; ++ if (value->type != SPDK_JSON_VAL_NUMBER || value->len > 5) { ++ SPDK_ERRLOG("device pcie gfunc id is invalid, type: %u, len: %u\n", value->type, value->len); ++ return; ++ } ++ ++ memset(val, 0, 16); ++ memcpy(val, value->start, value->len); ++ gfunc_id = spdk_strtol(val, 10); ++ if (gfunc_id >= SPDK_INVALID_GFUNC_ID) { ++ SPDK_ERRLOG("device pcie gfunc id(%u) is more than %u\n", gfunc_id, SPDK_INVALID_GFUNC_ID); ++ return; ++ } ++ g_ssam_device_pcie_list.device_pcie_list[cur_index].func_id = gfunc_id; ++} ++ ++static void ++ssam_set_device_pcie_dbdf(struct spdk_json_val *value, uint32_t cur_index) ++{ ++ if (value->type != SPDK_JSON_VAL_STRING || value->len >= SSAM_DBDF_MAX_LEN) { ++ SPDK_ERRLOG("device pcie dbdf is invalid, type: %u, len: %u\n", value->type, value->len); ++ return; ++ } ++ ++ memset(g_ssam_device_pcie_list.device_pcie_list[cur_index].dbdf, 0, SSAM_DBDF_MAX_LEN); ++ memcpy(g_ssam_device_pcie_list.device_pcie_list[cur_index].dbdf, value->start, value->len); ++} ++ ++static void ++ssam_set_device_pcie_type(struct spdk_json_val *value, uint32_t cur_index) ++{ ++ if (value->type != SPDK_JSON_VAL_STRING || value->len >= SSAM_TYPE_MAX_LEN) { ++ SPDK_ERRLOG("device pcie type is invalid, type: %u, len: %u\n", value->type, value->len); ++ return; ++ } ++ ++ memset(g_ssam_device_pcie_list.device_pcie_list[cur_index].type, 0, SSAM_TYPE_MAX_LEN); ++ memcpy(g_ssam_device_pcie_list.device_pcie_list[cur_index].type, value->start, value->len); ++} ++ ++static void ++ssam_init_device_pcie_list_by_values(struct spdk_json_val *values, size_t num_values) ++{ ++ char key[SSAM_KEY_MAX_LEN]; ++ uint32_t cur_index = 0; ++ size_t i; ++ ++ for (i = 0; i < num_values; i++) { ++ if (values[i].type == SPDK_JSON_VAL_OBJECT_END) { ++ cur_index++; ++ } ++ if (values[i].type != SPDK_JSON_VAL_NAME || values[i].len >= SSAM_KEY_MAX_LEN) { ++ continue; ++ } ++ ++ memset(key, 0, SSAM_KEY_MAX_LEN); ++ memcpy(key, values[i].start, values[i].len); ++ ++ /* point to val */ ++ i++; ++ ++ if (strcmp(key, "index") 
== 0) { ++ ssam_set_device_pcie_index(&values[i], cur_index); ++ } else if (strcmp(key, "dbdf") == 0) { ++ ssam_set_device_pcie_dbdf(&values[i], cur_index); ++ } else if (strcmp(key, "type") == 0) { ++ ssam_set_device_pcie_type(&values[i], cur_index); ++ } ++ } ++} ++ ++int ++ssam_init_device_pcie_list(void) ++{ ++ FILE *fp = NULL; ++ void *buf = NULL; ++ ssize_t rc = 0; ++ size_t size; ++ size_t num_values; ++ struct spdk_json_val *values = NULL; ++ ++ fp = popen("dpak-smi info -t device_pcie_list -f storage", "r"); ++ if (fp == NULL) { ++ SPDK_ERRLOG("execute dpak-smi failed\n"); ++ return -EINVAL; ++ } ++ ++ buf = spdk_posix_file_load(fp, &size); ++ if (buf == NULL) { ++ SPDK_ERRLOG("get size of json failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_parse(buf, size, NULL, 0, NULL, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS); ++ if (rc < 0) { ++ SPDK_ERRLOG("dpak-smi error: %s\n", (char *)buf); ++ goto invalid; ++ } ++ num_values = (size_t)rc; ++ values = calloc(num_values, sizeof(*values)); ++ if (values == NULL) { ++ SPDK_ERRLOG("Unable to allocate enough memory for values\n"); ++ rc = -ENOMEM; ++ goto invalid; ++ } ++ ++ rc = spdk_json_parse(buf, size, values, num_values, NULL, ++ SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS | SPDK_JSON_PARSE_FLAG_DECODE_IN_PLACE); ++ if (rc <= 0) { ++ SPDK_ERRLOG("parse json to values failed\n"); ++ goto invalid; ++ } ++ ++ rc = ssam_alloc_device_pcie_list(values, num_values); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_init_device_pcie_list_by_values(values, num_values); ++ rc = 0; ++ ++invalid: ++ if (values != NULL) { ++ free(values); ++ values = NULL; ++ } ++ if (buf != NULL) { ++ free(buf); ++ buf = NULL; ++ } ++ if (fp != NULL) { ++ pclose(fp); ++ fp = NULL; ++ } ++ return rc; ++} ++ ++void ++ssam_dump_device_pcie_list(struct spdk_json_write_ctx *w) ++{ ++ uint32_t i; ++ spdk_json_write_named_array_begin(w, "device_pcie_list"); ++ for (i = 0; i < g_ssam_device_pcie_list.size; i++) { ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_uint32(w, "index", g_ssam_device_pcie_list.device_pcie_list[i].func_id); ++ spdk_json_write_named_string(w, "dbdf", g_ssam_device_pcie_list.device_pcie_list[i].dbdf); ++ spdk_json_write_named_string(w, "type", g_ssam_device_pcie_list.device_pcie_list[i].type); ++ spdk_json_write_object_end(w); ++ } ++ spdk_json_write_array_end(w); ++} ++ ++uint32_t ++ssam_get_device_pcie_list_size(void) ++{ ++ return g_ssam_device_pcie_list.size; ++} +diff --git a/lib/ssam/ssam_driver/dpak_ssam.h b/lib/ssam/ssam_driver/dpak_ssam.h +new file mode 100644 +index 0000000..3b832bc +--- /dev/null ++++ b/lib/ssam/ssam_driver/dpak_ssam.h +@@ -0,0 +1,559 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. 
++ */ ++ ++#ifndef DPAK_SSAM_H ++#define DPAK_SSAM_H ++ ++#include "spdk/stdinc.h" ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++#define SSAM_HOSTEP_NUM_MAX 32 ++#define SSAM_MAX_REQ_POLL_SIZE 16 ++#define SSAM_MAX_RESP_POLL_SIZE 10 ++#define SSAM_VIRTIO_HEAD_LEN 64 ++#define SSAM_DEV_CFG_MAX_LEN 60 ++#define SSAM_DBDF_STR_MAX_LEN 13 ++#define SSAM_MB (uint64_t)(1 << 20) ++#define SSAM_SERVER_NAME "ssam" ++ ++enum ssam_device_type { ++ SSAM_DEVICE_NVME = 0, /* NVMe device */ ++ SSAM_DEVICE_VIRTIO_BLK = 2, /* virtio-blk device */ ++ SSAM_DEVICE_VIRTIO_SCSI = 3, /* virtio-scsi device */ ++ SSAM_DEVICE_VIRTIO_FS = 5, /* virtio-fs device */ ++ SSAM_DEVICE_VIRTIO_MAX = 6 /* virtio device type upper boundary */ ++}; ++ ++enum ssam_mount_type { ++ SSAM_MOUNT_DUMMY = 0, /* mount virtio to dummy function */ ++ SSAM_MOUNT_NORMAL /* mount virtio to normal function */ ++}; ++ ++enum ssam_function_mount_status { ++ SSAM_MOUNT_OK, /* mount ok */ ++ SSAM_MOUNT_VOLUME_NOT_FOUND, /* mount volume not found */ ++ SSAM_MOUNT_PARAMETERS_ERROR, /* mount parameter error */ ++ SSAM_MOUNT_UNKNOWN_ERROR /* unknow error */ ++}; ++ ++enum ssam_io_type { ++ SSAM_VIRTIO_BLK_IO = 2, /* virtio-blk IO */ ++ SSAM_VIRTIO_SCSI_IO, /* virtio-scsi normal IO */ ++ SSAM_VIRTIO_SCSI_CTRL, /* virtio-scsi control IO */ ++ SSAM_VIRTIO_SCSI_EVT, /* virtio-scsi event IO */ ++ SSAM_VIRTIO_VSOCK_IO, /* virtio-vsock IO */ ++ SSAM_VIRTIO_VSOCK_EVT, /* virtio-vsock event */ ++ SSAM_VIRTIO_FUNC_STATUS, /* virtio function status change */ ++ SSAM_VIRTIO_FS_IO, /* virtio-fs normal IO */ ++ SSAM_VIRTIO_FS_HIPRI, /* virtio-fs high priority IO */ ++ SSAM_VIRTIO_TYPE_RSVD, /* virtio type rsvd */ ++}; ++ ++enum ssam_io_status { ++ SSAM_IO_STATUS_OK, /* ok */ ++ SSAM_IO_STATUS_EMPTY, /* poll return empty */ ++ SSAM_IO_STATUS_ERROR /* error */ ++}; ++ ++enum ssam_function_action { ++ SSAM_FUNCTION_ACTION_START, /* start */ ++ SSAM_FUNCTION_ACTION_STOP, /* stop */ ++ SSAM_FUNCTION_ACTION_RESET, /* reset */ ++ SSAM_FUNCTION_ACTION_CONFIG_CHANGE, /* config change report */ ++ SSAM_FUNCTION_ACTION_SCSI_EVENT, /* SCSI event report */ ++ SSAM_FUNCTION_ACTION_MAX ++}; ++ ++enum ssam_function_status { ++ SSAM_FUNCTION_STATUS_START, /* start */ ++ SSAM_FUNCTION_STATUS_STOP, /* stop */ ++ SSAM_FUNCTION_EVENT_MIGRATE /* migrate */ ++}; ++ ++enum data_request_dma_type { ++ SSAM_REQUEST_DATA_LOAD = 0, /* load data from host->CPU DDR */ ++ SSAM_REQUEST_DATA_STORE = 1, /* store data frome CPU DDR->host */ ++ SSAM_REQUEST_DATA_MAX ++}; ++ ++struct ssam_melem { ++ void *addr; /* virtual address */ ++ uint64_t iova; /* IO address */ ++ uint64_t page_sz; /* page size of underlying memory */ ++ int socket_id; /* NUMA socket ID */ ++ int rsvd; ++}; ++ ++enum ssam_blk_hash_mode { ++ SSAM_PF_HASH_MODE = 0, ++ SSAM_VQ_HASH_MODE, ++ SSAM_IO_HASH_MODE, ++}; ++ ++struct ssam_lib_args { ++ uint8_t role; /* reserved */ ++ uint8_t core_num; /* core num that polled by SPDK thread */ ++ uint8_t dma_queue_num; /* host dma queue num per channel */ ++ uint8_t hash_mode; /* hash mode: BLK:0-1bits SCSI:2-3bits FS:4-5bits NVMe:6-7bits */ ++ uint8_t rsvd[32]; /* for rsvd */ ++ /* register DPDK function rte_malloc_heap_alloc */ ++ int (*ssam_heap_malloc)(const char *type, size_t size, ++ int socket_arg, unsigned int flags, size_t align, ++ size_t bound, bool contig, struct ssam_melem *mem); ++ int (*ssam_heap_free)(void *addr); /* register DPDK function rte_malloc_heap_free */ ++}; ++ ++struct ssam_pf_list { ++ uint16_t pf_funcid; /* pf_funcid = -1 means invalid */ ++ uint16_t 
pf_type; /* refer to enum ssam_device_type */ ++ uint16_t vf_funcid_start; /* the start function id of vf */ ++ uint16_t vf_num; /* the number of vf that have been configured */ ++ uint16_t vf_max; /* the max number of vf that can be configured */ ++}; ++ ++/* the host side all pf/vf end point info */ ++struct ssam_hostep_info { ++ struct ssam_pf_list host_pf_list[SSAM_HOSTEP_NUM_MAX]; ++}; ++ ++struct ssam_virtio_config { ++ uint64_t device_feature; /* the virtio device feature */ ++ uint16_t queue_num; /* the queue number of virtio device */ ++ uint16_t config_len; /* the actual length of device_config */ ++ uint8_t device_config[SSAM_DEV_CFG_MAX_LEN]; /* the virtio device configure */ ++ uint16_t queue_size; ++ uint16_t rx_queue_id; ++}; ++ ++/* ssam function config */ ++struct ssam_function_config { ++ int gfunc_id; /* pf or vf funcion id */ ++ enum ssam_device_type type; /* pf or vf type */ ++ struct ssam_virtio_config virtio_config; /* pf or vf configure */ ++}; ++ ++struct ssam_virt_request { ++ uint16_t vq_idx; ++ uint16_t req_idx; ++}; ++ ++struct ssam_nvme_request { ++ void *data; ++}; ++ ++struct ssam_io_message { ++ uint32_t header_len; /* io header length */ ++ uint8_t header[SSAM_VIRTIO_HEAD_LEN]; /* refer to struct virtio_blk_outhdr */ ++ uint32_t iovcnt; /* io vector count */ ++ struct iovec *iovs; /* io vectors, max 1MB IO */ ++ uint8_t writable; /* 0 : write io, 1 : read io */ ++ uint8_t rsvd[3]; /* for byte alignment */ ++ union { ++ struct ssam_virt_request virtio; ++ struct ssam_nvme_request nvme; ++ }; ++}; ++ ++/** ++ * @brief function event structure ++ */ ++struct ssam_func_event { ++ enum ssam_function_status status; /* function status */ ++ uint32_t data; /* virtio version: 0--v0.95 1--v1.0 2--v1.1 */ ++}; ++ ++struct ssam_request { ++ uint16_t gfunc_id; /* function id vf id number */ ++ uint16_t rsvd; ++ uint32_t iocb_id; /* response need */ ++ enum ssam_io_type type; ++ union { ++ struct ssam_io_message cmd; /* VMIO command structure */ ++ struct ssam_func_event event; /* report function event */ ++ } req; ++ enum ssam_io_status status; /* request status */ ++ uint32_t flr_seq; /* response need */ ++}; ++ ++struct ssam_request_poll_opt { ++ struct iovec ++ *sge1_iov; /**< output for req->req.cmd.iovs[1] (per VMIO req). 
Actual data length set in iov_len */ ++ uint16_t queue_id; /**< (optional) poll a queue id instead of using 'tid' parameter to calculate the queue */ ++ uint8_t rsvd[54]; ++}; ++ ++struct ssam_virtio_res { ++ struct iovec *iovs; /* rsp io vectors */ ++ void *rsp; /* data of rsp */ ++ uint32_t rsp_len; /* length of rsp */ ++ uint32_t iovcnt; /* rsp vector count */ ++}; ++ ++struct ssam_io_response { ++ uint16_t gfunc_id; /* global function id in chip */ ++ uint16_t rsvd; ++ uint32_t iocb_id; /* copy from struct ssam_request */ ++ struct ssam_virtio_res data; ++ struct ssam_request *req; /* corresponding to struct vmio_request */ ++ enum ssam_io_status status; /* IO status, copy from struct ssam_request */ ++ uint32_t flr_seq; /* copy from struct ssam_request */ ++}; ++ ++struct ssam_dma_request { ++ uint16_t gfunc_id; ++ uint16_t direction; ++ uint32_t flr_seq; ++ uint32_t src_num; /* source sge number */ ++ uint32_t dst_num; /* dest sge number */ ++ struct iovec *src; /* source buffer address, gpa mode */ ++ struct iovec *dst; /* dest buffer address, va mode */ ++ uint32_t data_len; ++ void *cb; ++}; ++ ++struct ssam_dma_rsp { ++ void *cb; ++ uint32_t status; /* process status, 0--OK, 1--ERR */ ++ uint32_t last_flag; /* data copy finish until receive this last flag */ ++}; ++ ++struct memory_info_stats { ++ size_t total_size; /* Total bytes of mempool */ ++ size_t free_size; /* Total free bytes of mempool */ ++ size_t greatest_free_size; /* Size in bytes of largest free block */ ++ unsigned free_count; /* Number of free elements of mempool */ ++ unsigned alloc_count; /* Number of allocated elements of mempool */ ++ size_t used_size; /* Total allocated bytes of mempool */ ++}; ++ ++/** ++ * Init ssam lib, set ssam work mode, set core num, set functions, get host pf/vf endpoint info. ++ * ++ * \param args_in input work mode, core num, functions. ++ * \param eps_out output host pf/vf endpoint info. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_lib_init(struct ssam_lib_args *args_in, struct ssam_hostep_info *eps_out); ++ ++/** ++ * Exit ssam lib when not use ssam any more. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_lib_exit(void); ++ ++typedef void ssam_mempool_t; ++ ++/** ++ * Create the memory pool, the memory is allocated by spdk_dma_malloc. ++ * ++ * \param size the memory pool size. ++ * \param extra_size_limit the memory size that can alloc in addition to the memory pool ++ * ++ * \return a pointer to memory pool when succeed or null when failed ++ */ ++ssam_mempool_t *ssam_mempool_create(uint64_t size, uint64_t extra_size_limit); ++ ++/** ++ * Allocate one piece of memory from the memory pool. ++ * ++ * \param mp the memory pool. ++ * \param size the memory size that want to allocate. ++ * \param phys_addr save the physical address of the allocated memory, ++ * if allocate failed, will not change the value. ++ * ++ * \return the allocated memory's start virtual address when succeed or null when failed ++ */ ++void *ssam_mempool_alloc(ssam_mempool_t *mp, uint64_t size, uint64_t *phys_addr); ++ ++/** ++ * Free the memory back to the memory pool. ++ * ++ * \param mp the memory pool. ++ * \param ptr the memory virtual address that return by ssam_mempool_alloc. ++ */ ++void ssam_mempool_free(ssam_mempool_t *mp, void *ptr); ++ ++/** ++ * Destroy the memory pool, when this done, the memory pool cannot be used again. ++ * ++ * \param mp the memory pool. 
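++ *
++ * A minimal lifecycle sketch (illustrative only; the 64 MB pool size and
++ * 4 KB allocation are placeholders):
++ *
++ *   uint64_t phys = 0;
++ *   ssam_mempool_t *mp = ssam_mempool_create(64 * SSAM_MB, 0);
++ *   if (mp != NULL) {
++ *       void *buf = ssam_mempool_alloc(mp, 4096, &phys);
++ *       if (buf != NULL) {
++ *           ssam_mempool_free(mp, buf);
++ *       }
++ *       ssam_mempool_destroy(mp);
++ *   }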
++ */ ++void ssam_mempool_destroy(ssam_mempool_t *mp); ++ ++/** ++ * get the memory pool info status. ++ * ++ * \param mp the memory pool. ++ * \param info the mempool info status. ++ */ ++int ssam_get_mempool_info(ssam_mempool_t *mp, struct memory_info_stats *info); ++ ++/** ++ * ssam recover module preinit. ++ * ++ * \return 0 for succeed, 1 for config file exist, and less then 0 for failed. ++ */ ++int spdk_ssam_rc_preinit(void); ++ ++/** ++ * Get recover json file path. ++ * ++ * \return a file path string ++ */ ++char *ssam_rc_get_recover_json_file_path(void); ++ ++/** ++ * Get parameter json file path. ++ * ++ * \return a file path string ++ */ ++char *ssam_rc_get_param_json_file_path(void); ++ ++/** ++ * Initialize PF (include all VFs belong to this PF) to specific device type. ++ * The interface must be called with increasing pf_id. The function is not ++ * visible to host after init. ++ * ++ * \param pf_id PF function id. ++ * \param num_vf number of VFs of the PF. ++ * \param dev_type PF/VF type. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_setup_function(uint16_t pf_id, uint16_t num_vf, enum ssam_device_type dev_type); ++ ++/** ++ * Change specific device config. ++ * ++ * \param cfg new device configuration data. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_write_function_config(struct ssam_function_config *cfg); ++ ++/** ++ * send action to function. Invoked by SPDK. ++ * ++ * \param gfunc_id the global function index of the chip ++ * \param action the action to take on the function ++ * \param data extra action data if used ++ * \param data_len extra action data len ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_send_action(uint16_t gfunc_id, enum ssam_function_action action, const void *data, ++ uint16_t data_len); ++ ++/** ++ * Mount ssam volume, synchronous interface. ++ * ++ * \param gfunc_id the global function id of chip. ++ * \param lun_id the lun id of this volume. ++ * \param type mount type, refer to enum ssam_mount_type. ++ * \param tid it's used as the request queue id per CPU core. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_function_mount(uint16_t gfunc_id, uint32_t lun_id, enum ssam_mount_type type, ++ uint16_t tid); ++ ++/** ++ * Umount ssam volume, synchronous interface. ++ * ++ * \param gfunc_id the global function id of chip. ++ * \param lun_id the lun id of this volume. ++ * ++ * \return refer to enum ssam_function_mount_status ++ */ ++int ssam_function_umount(uint16_t gfunc_id, uint32_t lun_id); ++ ++/** ++ * Poll request queue for ssam request. ++ * ++ * \param tid it's used as the request queue id per CPU core. ++ * \param poll_num the number of ssam request that want to be polled. ++ * \param io_req output for received request, the buffer is allocated by ssam, ++ * and released when IO complete. ++ * ++ * \return the number of vmio has been polled, less than 0 or bigger than poll_num for failed ++ */ ++int ssam_request_poll(uint16_t tid, uint16_t poll_num, struct ssam_request **io_req); ++ ++/** ++ * Poll request queue for ssam request. ++ * ++ * \param tid it's used as the request queue id per CPU core. ++ * \param poll_num the number of ssam request that want to be polled. ++ * \param io_req output for received request, the buffer is allocated by ssam, ++ * and released when IO complete. ++ * \param poll_opt (optional) extra poll options. 
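++ *
++ * A minimal polling-loop sketch (illustrative only; 'tid' and 'sge1_iovs'
++ * are assumed to be set up by the caller, and handle_request() stands in
++ * for caller-defined processing that ends with ssam_io_complete()):
++ *
++ *   struct ssam_request *reqs[SSAM_MAX_REQ_POLL_SIZE];
++ *   struct ssam_request_poll_opt opt = { .sge1_iov = sge1_iovs };
++ *   int n = ssam_request_poll_ext(tid, SSAM_MAX_REQ_POLL_SIZE, reqs, &opt);
++ *   if (n >= 0 && n <= SSAM_MAX_REQ_POLL_SIZE) {
++ *       for (int i = 0; i < n; i++) {
++ *           handle_request(reqs[i]);
++ *       }
++ *   }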
++ * ++ * \return the number of vmio has been polled, less than 0 or bigger than poll_num for failed ++ */ ++int ssam_request_poll_ext(uint16_t tid, uint16_t poll_num, struct ssam_request **io_req, ++ struct ssam_request_poll_opt *poll_opt); ++ ++/** ++ * Request ssam data. Hardware will load or store data betweent host and CPU. ++ * Asynchronous interface. ++ * ++ * \param tid it's used as the request queue id per CPU core. ++ * \param dma_req request data is here. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_dma_data_request(uint16_t tid, struct ssam_dma_request *dma_req); ++ ++/** ++ * Poll ssam request data. ++ * ++ * \param tid it's used as the request queue id per CPU core. ++ * \param poll_num the number of ssam request that want to be polled. ++ * \param dma_rsp response data is here. ++ * ++ * \return the number of msg rsp has been polled, less than 0 or bigger than poll_num for failed ++ */ ++int ssam_dma_rsp_poll(uint16_t tid, uint16_t poll_num, struct ssam_dma_rsp *dma_rsp); ++ ++/** ++ * Send IO complete info to ssam request queue. ++ * ++ * \param tid it's used as the request queue id per CPU core. ++ * \param resp response info is here. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_io_complete(uint16_t tid, struct ssam_io_response *resp); ++ ++/** ++ * Create vmio rx queue ++ * ++ * \param queue_id_out id of the queue create ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_vmio_rxq_create(uint16_t *queue_id_out); ++ ++/** ++ * Update virtio device used or not. ++ * ++ * \param glb_function_id the global function index of the chip ++ * \param device_used virtio device is used or not ++ * ++ * \return 0: success -1: fail, internal error, others: fail, refer to errno.h ++ */ ++int ssam_update_virtio_device_used(uint16_t glb_function_id, uint64_t device_used); ++ ++/** ++ * release virtio blk vq resource. ++ * ++ * \param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * ++ * \return 0: success -1: fail, internal error, others: fail, refer to errno.h ++ */ ++int ssam_virtio_blk_release_resource(uint16_t glb_function_id); ++ ++/** ++ * alloc virtio blk vq resource. ++ * ++ * \param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * \param queue_num number of vq ++ * ++ * \return 0: success -1: fail, internal error, others: fail, refer to errno.h ++ */ ++int ssam_virtio_blk_alloc_resource(uint16_t glb_function_id, uint16_t queue_num); ++ ++/** ++ * Update virtio blk capacity. ++ * ++ * \param gfunc_id the global function index of the chip. ++ * \param capacity the new capacity. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_virtio_blk_resize(uint16_t gfunc_id, uint64_t capacity); ++ ++/** ++ * Vq bind core. ++ * ++ * \param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * \param queue_num the num of vqueue ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_virtio_vq_bind_core(uint16_t glb_function_id, uint16_t queue_num); ++ ++/** ++ * Vq unbind core. ++ * ++ * \param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_virtio_vq_unbind_core(uint16_t glb_function_id); ++ ++/** ++ * Get global function id by dbdf. ++ * ++ * \param dbdf the combine of domain bus device function. ++ * \param gfunc_id the global function index of the chip. 
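++ *
++ * Illustrative call sequence (the DBDF string below is only a placeholder):
++ *
++ *   char bdf[] = "0000:3b:00.0";
++ *   uint32_t dbdf = 0;
++ *   uint16_t gfunc_id = 0;
++ *   if (ssam_dbdf_str2num(bdf, &dbdf) == 0) {
++ *       (void)ssam_get_funcid_by_dbdf(dbdf, &gfunc_id);
++ *   }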
++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_get_funcid_by_dbdf(uint32_t dbdf, uint16_t *gfunc_id); ++ ++/** ++ * Convert dbdf from string format to number. ++ * ++ * \param str source dbdf string. ++ * \param dbdf store result. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_dbdf_str2num(char *str, uint32_t *dbdf); ++ ++/** ++ * Convert dbdf from number format to string. ++ * ++ * \param dbdf source dbdf number. ++ * \param str store result. ++ * \param len the str buffer length. ++ * ++ * \return 0 for succeed or not 0 for failed ++ */ ++int ssam_dbdf_num2str(uint32_t dbdf, char *str, size_t len); ++ ++/** ++ * @brief check device ready ++ * @param role 0--old process; 1--new process ++ * @param proc_type enum proc_type, supoort PROC_TYPE_VBS and PROC_TYPE_BOOT ++ * @param ready output_para 0--not ready, 1--ready ++ * @return ++ * - 0: success ++ * - <0: fail, refer to errno.h ++ */ ++int ssam_check_device_ready(uint8_t role, uint32_t proc_type, uint8_t *ready); ++ ++/** ++ * @brief get hot upgrade state ++ * @param void ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int ssam_get_hot_upgrade_state(void); ++ ++ ++#ifdef __cplusplus ++} ++#endif ++ ++#endif /* DPAK_SSAM_H */ +diff --git a/lib/ssam/ssam_driver/hivio_api.h b/lib/ssam/ssam_driver/hivio_api.h +new file mode 100644 +index 0000000..106465f +--- /dev/null ++++ b/lib/ssam/ssam_driver/hivio_api.h +@@ -0,0 +1,668 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. ++ */ ++ ++#ifndef HIVIO_API_H ++#define HIVIO_API_H ++ ++#include "spdk/stdinc.h" ++ ++#define MEM_ALLOC_SGE_NUM_MAX 512 ++ ++/** ++ * @brief memory descriptor for hvio_mem_alloc. ++ */ ++typedef struct mem_desc { ++ uint32_t size; /* *< mem array size */ ++ struct { ++ uint64_t virt; /* *< virtual address */ ++ uint64_t phys; /* *< physical address */ ++ uint32_t len; /* *< length */ ++ } mem[MEM_ALLOC_SGE_NUM_MAX]; ++} mem_desc_s; ++ ++/** ++ * @brief memory descriptor for hvio_heap_malloc. ++ */ ++struct hvio_melem { ++ void *addr; /**< virtual address */ ++ uint64_t iova; /**< IO address */ ++ uint64_t page_sz; /**< page size of underlying memory */ ++ int socket_id; /**< NUMA socket ID */ ++ int rsvd; ++}; ++ ++/** ++ * @brief memory-related callbacks. 
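++ *
++ * These callbacks are typically wired to the allocator named in the field
++ * comments before hvio_lib_init() is called; a hedged sketch, where
++ * my_heap_malloc and my_heap_free are hypothetical wrappers around
++ * rte_malloc_heap_alloc and rte_malloc_heap_free:
++ *
++ *   hvio_callback_ops_s cb_ops = {
++ *       .hvio_heap_malloc = my_heap_malloc,
++ *       .hvio_heap_free = my_heap_free,
++ *   };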
++ */ ++typedef struct hvio_callback_ops { ++ int (*hvio_heap_malloc)(const char *type, size_t size, int socket_arg, unsigned int flags, ++ size_t align, size_t bound, bool contig, ++ struct hvio_melem *mem); /* register rte_malloc_heap_alloc */ ++ int (*hvio_heap_free)(void *addr); /* register rte_malloc_heap_free */ ++ int (*hvio_mem_alloc)(uint32_t size, int phy_contig, ++ mem_desc_s *mem_desc); /* register dma_mem_alloc function */ ++ int (*hvio_mem_free)(void *virt); /* register dma_mem_free function */ ++} hvio_callback_ops_s; ++ ++/** ++ * @brief proc type definition ++ */ ++enum proc_type { ++ PROC_TYPE_VBS = 0, ++ PROC_TYPE_BOOT, ++ PROC_TYPE_MIGTORBO, ++ PROC_TYPE_MAX ++}; ++ ++enum hivio_blk_hash_mode { ++ HVIO_PF_HASH_MODE = 0, ++ HVIO_VQ_HASH_MODE, ++ HVIO_IO_HASH_MODE, ++}; ++ ++/** ++ * @brief hivio_lib initialize parameters ++ */ ++typedef struct hvio_lib_args { ++ uint8_t role; /**< 0--old process; 1--new process */ ++ uint8_t core_num; /**< core num that polled by SPDK thread */ ++ hvio_callback_ops_s cb_ops; /**< memory-related callbacks */ ++ uint32_t proc_type; /**< enum proc_type */ ++ uint8_t host_dma_chnl_num; /**< host dma channel number, used for migtorbo multi chan process */ ++ uint8_t host_dma_mp_per_chnl; /**< host dma mempool per channel, 0: disable mp per channel, 1: enable */ ++ uint8_t host_dma_queue_per_chnl; /**< host dma queue num per channel, 0: disabled-defalt 1, max: 4 */ ++ uint8_t hash_mode; /**< HASH MODE: BLK:0-1bits SCSI:2-3bits FS:4-5bits NVMe:6-7bits */ ++ uint8_t rsvd[56]; /**< for rsvd */ ++} hvio_lib_args_s; ++ ++#define HVIO_HOSTEP_NUM_MAX 32 ++ ++/** ++ * @brief host side storage pf/vf end point info ++ */ ++typedef struct hvio_hostep_info { ++ struct { ++ uint16_t pf_funcid; /* *< pf_funcid = 0xffff means invalid */ ++ uint16_t pf_type; /* *< is config or not */ ++ uint16_t vf_funcid_start; ++ uint16_t vf_num; /* *< already config vf num */ ++ uint16_t vf_max; /* *< max num can be config */ ++ } host_pf_list[HVIO_HOSTEP_NUM_MAX]; ++} hvio_hostep_info_s; ++ ++/** ++ * @brief device type definition ++ */ ++enum device_type { ++ DEVICE_NVME, /* *< NVMe device */ ++ DEVICE_VIRTIO_NET, /* *< VirtIO-net device */ ++ DEVICE_VIRTIO_BLK, /* *< VirtIO-blk device */ ++ DEVICE_VIRTIO_SCSI, /* *< VirtIO-scsi device */ ++ DEVICE_VIRTIO_VSOCK, /* *< VirtIO-vsock device */ ++ DEVICE_VIRTIO_FS, /**< VirtIO-FS device */ ++ DEVICE_VIRTIO_MAX /* *< VirtIO-max device */ ++}; ++ ++/** ++ * @brief configration type definition ++ */ ++ ++struct function_config { ++ uint32_t function_id; ++ enum device_type type; ++ union { ++ struct { ++ uint64_t device_feature; ++ uint16_t queue_num; ++ uint16_t config_len; ++ uint8_t device_config[60]; ++ uint16_t queue_size; ++ uint16_t rx_queue_id; ++ } virtio; ++ } config; ++}; ++ ++/** ++ * @brief EP operation definition. ++ */ ++enum function_action { ++ FUNCTION_ACTION_START, /* *< start */ ++ FUNCTION_ACTION_STOP, /* *< stop */ ++ FUNCTION_ACTION_RESET, /* *< reset */ ++ FUNCTION_ACTION_CONFIG_CHANGE, /* *< config change report */ ++ FUNCTION_ACTION_SCSI_EVENT, /* *< SCSI event report */ ++ FUNCTION_ACTION_MAX ++}; ++ ++/** ++ * @brief EP function status definition. ++ */ ++enum function_status { ++ FUNCTION_STATUS_START, /* *< start */ ++ FUNCTION_STATUS_STOP, /* *< stop */ ++ FUNCTION_EVENT_MIGRATE, /* *< migrate */ ++}; ++ ++/** ++ * @brief VMIO type definition, support nvme and virtio. 
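++ * ssam_io_type_to_vmio() in lib/ssam/ssam_driver/ssam_driver.c maps the
++ * public enum ssam_io_type values onto these driver-level types.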
++ */ ++enum vmio_type { ++ VMIO_TYPE_NVME_IO, /* *< NVMe normal IO */ ++ VMIO_TYPE_NVME_ADMIN, /* *< NVMe admin IO */ ++ VMIO_TYPE_VIRTIO_BLK_IO, /* *< VirtIO blk IO */ ++ VMIO_TYPE_VIRTIO_SCSI_IO, /* *< VirtIO scsi normal IO */ ++ VMIO_TYPE_VIRTIO_SCSI_CTRL, /* *< VirtIO scsi IO */ ++ VMIO_TYPE_VIRTIO_SCSI_EVT, /* *< VirtIO scsi event */ ++ VMIO_TYPE_VIRTIO_VSOCK_IO, /* *< VirtIO vsock IO */ ++ VMIO_TYPE_VIRTIO_VSOCK_EVT, /* *< VirtIO vsock event */ ++ VMIO_TYPE_VIRTIO_FUNC_STATUS, /* *< VirtIO function status change */ ++ VMIO_TYPE_VIRTIO_FS_IO, /* *< VirtIO fs normal IO */ ++ VMIO_TYPE_VIRTIO_FS_HIPRI, /* *< VirtIO fs high priority IO */ ++ VMIO_TYPE_RSVD, /* *< VMIO type rsvd */ ++}; ++ ++struct virtio_req { ++ uint16_t vq_idx; /* *< vq idx */ ++ uint16_t req_idx; /* *< head desc idx of io */ ++}; ++ ++struct nvme_req { ++ void *data; /* *< nvme admin input data */ ++}; ++ ++/** ++ * @brief VMIO cmd structure. ++ */ ++struct vmio_cmd { ++ uint32_t cmd_len; /* *< length of VMIO command, fixed to 64B */ ++ uint8_t cmd[64]; /* *< the specific format according to vmio_type */ ++ ++ uint32_t iovcnt; /* *< io vector count */ ++ struct iovec *iovs; /* *< io vectors, max 1MB IO */ ++ uint8_t writable; /* *< 2nd desc->write_flag */ ++ uint8_t rsvd[3]; /* *< rsvd */ ++ union { ++ struct virtio_req virtio; ++ struct nvme_req nvme; ++ }; ++}; ++ ++/** ++ * @brief function event structure. ++ */ ++struct func_event { ++ enum function_status status; /* *< function status */ ++ uint32_t data; /* *< VirtIO version: 0--v0.95; 1--v1.0; 2--v1.1 */ ++}; ++ ++/** ++ * @brief VMIO status definition. ++ */ ++enum vmio_status { ++ VMIO_STATUS_OK, /* *< ok */ ++ VMIO_STATUS_VQ_EMPTY, /* *< VQ empty */ ++ VMIO_STATUS_ERROR, /* *< error */ ++ VMIO_STATUS_DRIVER_NOT_OK, /* *< frontend driver not ready */ ++ VMIO_STATUS_VQ_ENGN_NOT_EN, /* *< backend vq not ready */ ++ VMIO_STATUS_DMA_IO_ERROR, /* *< frontend dma access error */ ++ VMIO_STATUS_VQ_SOURCE_ERROR, /* *< VQ cache source error */ ++ VMIO_STATUS_VQ_ERROR /* *< frontend vq status error */ ++}; ++ ++/** ++ * @brief VMIO request structure. 
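++ * Which union member is valid is selected by 'type': function status
++ * changes (VMIO_TYPE_VIRTIO_FUNC_STATUS) are expected to carry req.event,
++ * while request payloads are carried in req.cmd.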
++ */ ++struct vmio_request { ++ uint16_t glb_function_id; /* *< global function id in chip */ ++ uint16_t nvme_sq_id; /* *< sq_id in iocb for NVMe vmio */ ++ uint32_t iocb_id; /* *< io control block id for ucode */ ++ enum vmio_type type; /* *< VMIO type to parse the req format */ ++ union { ++ struct vmio_cmd cmd; /* *< VMIO command structure */ ++ struct func_event event; /* *< report function event */ ++ } req; ++ enum vmio_status status; /* *< when flr occurs, set status to error */ ++ uint32_t flr_seq; /* *< check whether VMIO is from VF which FLR occurs */ ++}; ++ ++typedef struct tag_nvme_cqe { ++ uint32_t cmd_spec; ++ uint32_t rsvd; ++ ++ uint32_t sq_hd : 16; ++ uint32_t sq_id : 16; ++ ++ uint32_t cmd_id : 16; ++ uint32_t p : 1; ++ uint32_t status : 15; ++} nvme_cqe_s; ++ ++/** ++ * @brief NVMe response structure ++ */ ++struct nvme_response { ++ nvme_cqe_s nvme_cqe; ++ ++ uint32_t rsp_len; /* *< rsp length */ ++ uint32_t iovcnt; /* *< rsp vector count */ ++ struct iovec *iovs; /* *< rsp io vectors */ ++ void *rsp; /* *< rsp data */ ++}; ++ ++/** ++ * @brief VirtIO response structure ++ */ ++struct virtio_response { ++ uint32_t used_len; /* *< length of data has been upload to VM */ ++ uint32_t rsp_len; /* *< length of rsp */ ++ uint32_t iovcnt; /* *< rsp vector count */ ++ struct iovec *iovs; /* *< rsp io vectors */ ++ void *rsp; /* *< data of rsp */ ++}; ++ ++/** ++ * @brief VMIO response structure ++ */ ++struct vmio_response { ++ uint16_t glb_function_id; /* *< global function id in chip */ ++ uint16_t rsvd0; /* *< make sure nvme and virtio offset is 16B aligned */ ++ uint32_t iocb_id; /* *< io control block id used by ucode */ ++ enum vmio_type type; /* *< VMIO type */ ++ uint32_t rsvd1; /* make sure nvme and virtio offset is 16B aligned */ ++ ++ union { ++ struct nvme_response nvme; /* *< nvme rsp structure */ ++ struct virtio_response virtio; /* *< virtio rsp structure */ ++ }; ++ ++ struct vmio_request *req; /* *< corresponding vmio_request */ ++ enum vmio_status status; /* *< VMIO status, copy from vmio_request */ ++ uint32_t flr_seq; /* *< copy from vmio_request */ ++}; ++ ++/** ++ * @brief data structrue for send action request. ++ */ ++typedef struct hvio_send_action_req { ++ uint16_t glb_function_id; /**< global function id in chip */ ++ uint16_t data_len; /**< length of request's payload */ ++ void *data; /**< request's payload */ ++ enum function_action action; /**< action type */ ++} hvio_send_action_req_s; ++ ++/** ++ * @brief data structrue for VMIO send request(destination is virtio RQ). ++ */ ++typedef struct hvio_vmio_send_req { ++ uint64_t cb; /**< callback info */ ++ uint16_t glb_function_id; /**< global function id in chip */ ++ uint16_t vqn; /**< function inner vq idx */ ++ uint32_t sge_num; /**< data sge number */ ++ struct iovec *data; /**< data buffer address, gpa mode, including virtio_hdr and payload */ ++ uint32_t data_len; /**< data len, including virtio_hdr len and payload len. */ ++ enum vmio_type type; /**< vmio type */ ++} hvio_vmio_send_req_s; ++ ++/** ++ * @brief data structrue for ACK of VMIO send request(destination is virtio RQ). ++ */ ++typedef struct hvio_vmio_send_rsp { ++ uint64_t cb; /**< callback info */ ++ uint32_t status; /**< refer to enum vmio_status */ ++} hvio_vmio_send_rsp_s; ++ ++/** ++ * @brief data structrue for rsp of vsock recovery. 
++ */ ++typedef struct hvio_vsock_recovery_rsp { ++ uint16_t tx_used_idx; /* *< virtio vsock txq used idx */ ++ uint16_t rx_used_idx; /* *< virtio vsock rxq used idx */ ++} hvio_vsock_recovery_rsp_s; ++ ++/** ++ * @brief host_dma direction. ++ */ ++enum hvio_host_dma_mode { ++ READ_HOST_MODE = 0, /**< read host data and write to SPU */ ++ WRITE_HOST_MODE = 1, /**< write data to host */ ++ HOST_DMA_MODE_MAX ++}; ++ ++/** ++ * @brief data structrue for host dma request. ++ */ ++typedef struct hvio_host_dma_req { ++ uint16_t glb_function_id; /**< VM global function id */ ++ uint16_t direction; /**< host dma direction, format is enum hvio_host_dma_mode */ ++ uint32_t flr_seq; /**< check whether the vmio copy request is a leaked request when flr occurs */ ++ uint32_t ssge_num; /**< source sge number */ ++ uint32_t dsge_num; /**< dest sge number */ ++ struct iovec *src; /**< source buffer address, gpa. host buf for read, ddr for write. */ ++ struct iovec *dst; /**< dest buffer address, gpa. ddr for read, host buf for write */ ++ uint32_t data_len; /**< length for load or store */ ++ void *cb; /**< callback info */ ++} hvio_host_dma_req_s; ++ ++/** ++ * @brief data structrue for ACK of host dma request. ++ */ ++typedef struct hvio_host_dma_rsp { ++ void *cb; /**< SPDK callback info */ ++ uint32_t status; /**< 0 OK, 1 ERROR */ ++ uint32_t last_flag; ++} hvio_host_dma_rsp_s; ++ ++/** ++ * @brief data structrue for hivio stats. ++ */ ++ ++typedef struct hvio_info_stats { ++ uint64_t vmio_req; ++ uint64_t vmio_rsp; ++ ++ uint64_t vsock_tx_req; ++ uint64_t vscok_tx_rsp; ++ uint64_t vsock_rx_req; ++ uint64_t vsock_rx_rsp; ++ ++ uint64_t host_dma_req; ++ uint64_t host_dma_sub_req; ++ uint64_t host_dma_rsp; ++ ++ uint64_t update_blk_cap; ++ uint64_t send_action; ++ ++ uint64_t rsvd[16]; ++} hvio_info_stats_s; ++ ++typedef struct hvio_warn_stats { ++ uint64_t invalid_vmio; ++ uint64_t vsock_rx_rsp_status_abnormal; ++ uint64_t host_dma_rsp_status_abnormal; ++ ++ uint64_t rsvd[16]; ++} hvio_warn_stats_s; ++ ++typedef struct hvio_error_stats { ++ uint64_t update_blk_cap_fail; ++ uint64_t send_action_fail; ++ uint64_t vmio_rsp_fail; ++ uint64_t vsock_tx_fail; ++ uint64_t vsock_rx_fail; ++ uint64_t host_dma_req_fail; ++ ++ uint64_t rsvd[16]; ++} hvio_error_stats_s; ++ ++typedef struct hivio_func_ctx_read_rsp { ++ uint8_t device_type; ++ uint8_t device_status; ++ uint16_t num_queues; ++ uint8_t flr_status; ++ uint8_t rsvd0[3]; ++ uint32_t device_feature_l; ++ uint32_t device_feature_h; ++ uint32_t driver_feature_l; ++ uint32_t driver_feature_h; ++ uint32_t rsvd1[26]; ++} hivio_func_ctx_read_rsp_s; ++ ++struct hvio_mount_para { ++ uint32_t algo_type; /* *< VBS:algorithm 0 or 1; IPU:0--dummy; 1--normal */ ++ uint32_t key[3]; /* *< 0 for rsvd. VBS:key[0] tree_id, key[1] pt_num, key[2] blk_size */ ++}; ++ ++/** ++ * @brief hivio initialization function ++ * @param args_in initialization parameters input ++ * @param eps_out host side ep info ouput ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_lib_init(hvio_lib_args_s *args_in, hvio_hostep_info_s *eps_out); ++ ++/** ++ * @brief hivbs de-initialize function. ++ * @param void ++ * @return ++ * - 0: success ++ */ ++int hvio_lib_deinit(void); ++ ++/** ++ * @brief update virtio blk capacity. 
++ * @param glb_function_id the global function index of the chip ++ * @param capacity new capacity ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_update_virtio_blk_capacity(uint16_t glb_function_id, uint64_t capacity); ++ ++/** ++ * @brief poll RQ for VMIO request. ++ * @param tid It's used as the L2NIC RQ id per SPU core. ++ * @param poll_num the number of msg rsp want to be polled ++ * @param req output for received request. The buffer is allocated by hivbs, and used by SPDK. Release when IO complete. ++ * @return ++ * - >=0: the number of vmio_request has been polled ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_vmio_req_poll_batch(uint16_t tid, uint16_t poll_num, struct vmio_request **req); ++ ++/** ++ * @brief hvio_vmio_req_poll_batch_ext extra poll options ++ */ ++typedef struct hvio_vmio_req_poll_opt { ++ struct iovec ++ *sge1_iov; /**< output for req->req.cmd.iovs[1] (per VMIO req). Actual data length set in iov_len */ ++ uint16_t queue_id; /**< (optional) poll a queue id instead of using 'tid' parameter to calculate the queue */ ++ uint8_t rsvd[54]; ++} hvio_vmio_req_poll_opt_s; ++ ++/** ++ * @brief poll RQ for VMIO request, together with the contents of req->req.cmd.iovs[1]. ++ * @param tid It's used as the L2NIC RQ id per SPU core. ++ * @param poll_num the number of msg rsp want to be polled, if the poll_num > 16, the actual poll num is 16. ++ * @param req output for received request. The buffer is allocated by hivbs, and used by SPDK. Release when IO complete. ++ * @param poll_opt (optional) extra poll options. ++ * @return ++ * - >=0: the number of vmio_request has been polled ++ * - <0: fail, refer to errno.h ++ * @note req->req.cmd.writable will be used to specify the first writable index in req->req.cmd.iovs. ++ */ ++int hvio_vmio_req_poll_batch_ext(uint16_t tid, uint16_t poll_num, struct vmio_request **req, ++ hvio_vmio_req_poll_opt_s *poll_opt); ++ ++/** ++ * @brief send VMIO complete to SQ. ++ * @param tid It's used as the L2NIC SQ id per SPU core. ++ * @param resp VMIO response ++ * @return ++ * - 0: success ++ * - others: fail, refer to errno.h ++ */ ++int hvio_vmio_complete(uint16_t tid, struct vmio_response *resp); ++ ++/** ++ * @brief create vmio rx queue ++ * @param queue_id_out id of the queue create ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_vmio_rxq_create(uint16_t *queue_id_out); ++ ++/** ++ * @brief initialize PF (include all VFs belong to this PF) to specific device type. For virtio device of the PF and VF ++ * can be set to different virtio device_type. The interface must be called with increasing pf_id. The function is not ++ * visible to host after init. ++ * @param pf_id PF id ++ * @param num_vf number of VFs of the PF, they use the same type ++ * @param pf_type pf device type ++ * @param vf_type vf device type ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_setup_function(uint16_t pf_id, uint16_t num_vf, enum device_type pf_type, ++ enum device_type vf_type); ++ ++/** ++ * @brief change specific device config. ++ * @param cfg new device configuration data ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_write_function_config(struct function_config *cfg); ++ ++/** ++ * @brief get global function index by pcie device dbdf info. 
++ * @param dbdf pcie device dbdf info(input para) ++ * @param glb_function_id the global function index of the chip(output para) ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_get_glb_function_id_by_dbdf(uint32_t dbdf, uint16_t *glb_function_id); ++ ++/** ++ * @brief send action to function, synchronous interface. ++ * @param req send action request ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_send_action(uint16_t glb_function_id, enum function_action action, const void *data, ++ uint16_t data_len); ++ ++/** ++ * @brief DMA request. hw will load or store data between X86 host and spu ddr, asynchronous interface. ++ * @param chnl_id is associated with L2NIC SQ ID. ++ * @param req host dma request ++ * @return ++ * - 0: success ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_host_dma_request(uint16_t chnl_id, hvio_host_dma_req_s *req); ++ ++/** ++ * @brief poll RQ for dma response status. device provides DMA response in the same order with DMA request. ++ * @param chnl_id is associated with L2NIC RQ ID. ++ * @param poll_num the number of rsp want to be polled. ++ * @param[out] rsp output for received response. ++ * @return ++ * - >=0: the number of host dma rsp has been polled ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_host_dma_rsp_poll(uint16_t chnl_id, uint16_t poll_num, hvio_host_dma_rsp_s *rsp); ++ ++union hvio_nvme_config_cmd_info { ++ uint32_t cmd[5]; ++}; ++ ++/** ++ * @brief get hot upgrade state ++ * @param void ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_get_hot_upgrade_state(void); ++ ++/** ++ * @brief check device ready ++ * @param role 0--old process; 1--new process ++ * @param proc_type enum proc_type, supoort PROC_TYPE_VBS and PROC_TYPE_BOOT ++ * @param ready output_para 0--not ready, 1--ready ++ * @return ++ * - 0: success ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_check_device_ready(uint8_t role, uint32_t proc_type, uint8_t *ready); ++ ++/** ++ * @brief mount VIO volume, synchronous interface. Invoked by VIO. ++ * @param glb_function_id the global function id of chip ++ * @param lun_id the lun id of this volume ++ * @param hash_paras hash item paras ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_volume_mount(uint16_t glb_function_id, uint32_t lun_id, ++ struct hvio_mount_para *hash_paras); ++ ++/** ++ * @brief umount VIO volume, synchronous interface. Invoked by VIO. ++ * @param glb_function_id the global function id of chip ++ * @param lun_id the lun id of this volume ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_volume_umount(uint16_t glb_function_id, uint32_t lun_id); ++ ++/** ++ * @brief update virtio device used or not. ++ * @param glb_function_id the global function index of the chip ++ * @param device_used virtio device is used or not ++ * @return ++ * - 0: success ++ * - -1: fail, internal error ++ * - others: fail, refer to errno.h ++ */ ++int hvio_update_virtio_device_used(uint16_t glb_function_id, uint64_t device_used); ++ ++/** ++ * @brief release virtio blk vq resource. 
++ * @param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * @return ++ * - 0: success ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_virtio_blk_release_resource(uint16_t glb_function_id); ++ ++/** ++ * @brief alloc virtio blk vq resource. ++ * @param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * @return ++ * - 0: success ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_virtio_blk_alloc_resource(uint16_t glb_function_id, uint16_t queue_num); ++ ++/** ++ * @brief vq bind core. ++ * @param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * @param queue_num the num of vqueue ++ * @return ++ * - 0: success ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_virtio_vq_bind_core(uint16_t glb_function_id, uint16_t queue_num); ++ ++/** ++ * @brief vq unbind core. ++ * @param glb_function_id the global function index of the chip, the related function is virtio_blk ++ * @return ++ * - 0: success ++ * - <0: fail, refer to errno.h ++ */ ++int hvio_virtio_vq_unbind_core(uint16_t glb_function_id); ++ ++#endif /* HIVIO_API_H */ +diff --git a/lib/ssam/ssam_driver/ssam_dbdf.c b/lib/ssam/ssam_driver/ssam_dbdf.c +new file mode 100644 +index 0000000..63a4d09 +--- /dev/null ++++ b/lib/ssam/ssam_driver/ssam_dbdf.c +@@ -0,0 +1,315 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. ++ */ ++ ++#include "spdk/stdinc.h" ++ ++#include "spdk/log.h" ++#include "dpak_ssam.h" ++ ++#define SSAM_DBDF_DOMAIN_OFFSET 16 ++#define SSAM_DBDF_BUS_OFFSET 8 ++#define SSAM_DBDF_DEVICE_OFFSET 3 ++#define SSAM_DBDF_FUNC_OFFSET 0x7 ++#define SSAM_DBDF_DOMAIN_MAX 0xffff ++#define SSAM_DBDF_BUS_MAX 0xff ++#define SSAM_DBDF_DEVICE_MAX 0x1f ++#define SSAM_DBDF_FUNCTION_MAX 0x7 ++#define SSAM_DBDF_DOMAIN_MAX_LEN 4 ++#define SSAM_DBDF_BD_MAX_LEN 2 ++#define SSAM_DBDF_FUNCTION_MAX_LEN 1 ++#define SSAM_DBDF_MAX_STR_LEN 20 ++#define SSAM_STR_CONVERT_HEX 16 ++ ++ ++struct ssam_dbdf { ++ uint32_t domain; ++ uint32_t bus; ++ uint32_t device; ++ uint32_t function; ++}; ++ ++static int ++ssam_dbdf_cvt_str2num(char *input, uint16_t val_limit, uint32_t len_limit, ++ uint32_t *num_resolved) ++{ ++ char *end_ptr = NULL; ++ long int val = strtol(input, &end_ptr, SSAM_STR_CONVERT_HEX); ++ ++ if (strlen(input) > len_limit) { ++ return -EINVAL; ++ } ++ ++ if (end_ptr == NULL || end_ptr == input || *end_ptr != '\0') { ++ return -EINVAL; ++ } ++ if (val < 0 || val > val_limit) { ++ return -EINVAL; ++ } ++ ++ *num_resolved = (uint32_t)val; ++ return 0; ++} ++ ++/* resolve dbdf's domain */ ++static int ++ssam_dbdf_cvt_dom(char *str, struct ssam_dbdf *dbdf, ++ char **bus) ++{ ++ char *colon2 = NULL; ++ int rc; ++ ++ /* find second ":" from dbdf string */ ++ colon2 = strchr(str, ':'); ++ if (colon2 != NULL) { ++ *colon2++ = 0; ++ *bus = colon2; ++ if (str[0] != 0) { ++ /* convert domain number */ ++ rc = ssam_dbdf_cvt_str2num(str, SSAM_DBDF_DOMAIN_MAX, ++ SSAM_DBDF_DOMAIN_MAX_LEN, &dbdf->domain); ++ if (rc != 0) { ++ SPDK_ERRLOG("Invalid characters of domain number!\n"); ++ return rc; ++ } ++ } else { ++ SPDK_ERRLOG("domain number is blank!\n"); ++ return -EINVAL; ++ } ++ } else { ++ /* dbdf string does not contain domain number */ ++ *bus = str; ++ } ++ ++ return 0; ++} ++ ++/* resolve dbdf's bus */ ++static int ++ssam_dbdf_cvt_b(struct ssam_dbdf *dbdf, char *bus) ++{ ++ int rc; ++ ++ if (bus[0] != 0) { ++ /* convert bus number */ ++ rc = 
ssam_dbdf_cvt_str2num(bus, SSAM_DBDF_BUS_MAX, ++ SSAM_DBDF_BD_MAX_LEN, &dbdf->bus); ++ if (rc != 0) { ++ SPDK_ERRLOG("Invalid characters of bus number!\n"); ++ return rc; ++ } ++ } else { ++ SPDK_ERRLOG("bus number is blank!\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/* resolve dbdf's domain and bus part */ ++static int ++ssam_dbdf_cvt_domb(char *str, struct ssam_dbdf *dbdf, ++ char **colon_input, char **mid_input) ++{ ++ char *bus = NULL; ++ char *colon = *colon_input; ++ int rc; ++ ++ *colon++ = 0; ++ *mid_input = colon; ++ rc = ssam_dbdf_cvt_dom(str, dbdf, &bus); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ return ssam_dbdf_cvt_b(dbdf, bus); ++} ++ ++/* resolve dbdf's device */ ++static int ++ssam_dbdf_cvt_dev(struct ssam_dbdf *dbdf, char *mid) ++{ ++ int rc; ++ ++ if (mid[0] != 0) { ++ /* convert device number */ ++ rc = ssam_dbdf_cvt_str2num(mid, SSAM_DBDF_DEVICE_MAX, ++ SSAM_DBDF_BD_MAX_LEN, &dbdf->device); ++ if (rc != 0) { ++ SPDK_ERRLOG("Invalid characters of device number!\n"); ++ return rc; ++ } ++ } else { ++ SPDK_ERRLOG("device number is blank!\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_dbdf_cvt_f(struct ssam_dbdf *dbdf, char *dot) ++{ ++ int rc; ++ ++ if (dot != NULL && dot[0] != 0) { ++ /* convert function number */ ++ rc = ssam_dbdf_cvt_str2num(dot, SSAM_DBDF_FUNCTION_MAX, ++ SSAM_DBDF_FUNCTION_MAX_LEN, &dbdf->function); ++ if (rc != 0) { ++ SPDK_ERRLOG("Invalid characters of function number!\n"); ++ return rc; ++ } ++ } else { ++ SPDK_ERRLOG("function number is blank!\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/* resolve dbdf's device and function part */ ++static int ++ssam_dbdf_cvt_devf(struct ssam_dbdf *dbdf, char **dot_input, char **mid_input) ++{ ++ char *dot = *dot_input; ++ int rc; ++ ++ if (dot != NULL) { ++ *dot++ = 0; ++ } else { ++ /* Input dbdf string does not contain "." */ ++ SPDK_ERRLOG("Invalid DBDF format\n"); ++ return -1; ++ } ++ ++ rc = ssam_dbdf_cvt_dev(dbdf, *mid_input); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ return ssam_dbdf_cvt_f(dbdf, dot); ++} ++ ++static uint32_t ++ssam_dbdf_assemble(const struct ssam_dbdf *dbdf) ++{ ++ return ((dbdf->domain << SSAM_DBDF_DOMAIN_OFFSET) | ++ (dbdf->bus << SSAM_DBDF_BUS_OFFSET) | ++ (dbdf->device << SSAM_DBDF_DEVICE_OFFSET) | ++ (dbdf->function & SSAM_DBDF_FUNC_OFFSET)); ++} ++ ++static int ++ssam_dbdf_cvt_dbdf(char *str, size_t len, uint32_t *dbdf) ++{ ++ if (dbdf == NULL) { ++ SPDK_ERRLOG("dbdf is null\n"); ++ return -1; ++ } ++ /* find ":" from dbdf string */ ++ char *colon = strrchr(str, ':'); ++ /* find "." from dbdf string */ ++ char *dot = NULL; ++ char *mid = str; ++ int rc; ++ struct ssam_dbdf st_dbdf = {0}; ++ ++ if (colon != NULL) { ++ rc = ssam_dbdf_cvt_domb(str, &st_dbdf, &colon, &mid); ++ if (rc != 0) { ++ return rc; ++ } ++ } else { ++ /* Input dbdf string does not contain ":" */ ++ SPDK_ERRLOG("Invalid DBDF format\n"); ++ return -EINVAL; ++ } ++ ++ dot = strchr((colon ? 
(colon + 1) : str), '.'); ++ rc = ssam_dbdf_cvt_devf(&st_dbdf, &dot, &mid); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ *dbdf = ssam_dbdf_assemble(&st_dbdf); ++ ++ return 0; ++} ++ ++/* convert dbdf from string to number */ ++int ++ssam_dbdf_str2num(char *str, uint32_t *dbdf) ++{ ++ int len; ++ char *dbdf_str = NULL; ++ int ret; ++ ++ if (str == NULL) { ++ SPDK_ERRLOG("dbdf str2num input str null!\n"); ++ return -EINVAL; ++ } ++ ++ if (dbdf == NULL) { ++ SPDK_ERRLOG("dbdf str2num output dbdf null!\n"); ++ return -EINVAL; ++ } ++ ++ len = strlen(str); ++ if (len == 0 || len > SSAM_DBDF_MAX_STR_LEN) { ++ SPDK_ERRLOG("dbdf str2num len %u error!\n", len); ++ return -ERANGE; ++ } ++ ++ dbdf_str = (char *)malloc(len + 1); ++ if (dbdf_str == NULL) { ++ return -ENOMEM; ++ } ++ ++ ret = snprintf(dbdf_str, len + 1, "%s", str); ++ if ((ret > len) || (ret <= 0)) { ++ SPDK_ERRLOG("dbdf str2num snprintf_s error\n"); ++ free(dbdf_str); ++ return -EINVAL; ++ } ++ ++ ret = ssam_dbdf_cvt_dbdf(dbdf_str, len, dbdf); ++ free(dbdf_str); ++ dbdf_str = NULL; ++ ++ return ret; ++} ++ ++static void ++ssam_dbdf_num2struct(uint32_t dbdf, struct ssam_dbdf *st_dbdf) ++{ ++ st_dbdf->domain = (dbdf >> SSAM_DBDF_DOMAIN_OFFSET) & SSAM_DBDF_DOMAIN_MAX; ++ st_dbdf->bus = (dbdf >> SSAM_DBDF_BUS_OFFSET) & SSAM_DBDF_BUS_MAX; ++ st_dbdf->device = (dbdf >> SSAM_DBDF_DEVICE_OFFSET) & SSAM_DBDF_DEVICE_MAX; ++ st_dbdf->function = dbdf & SSAM_DBDF_FUNCTION_MAX; ++ return; ++} ++ ++int ++ssam_dbdf_num2str(uint32_t dbdf, char *str, size_t len) ++{ ++ int ret; ++ struct ssam_dbdf st_dbdf = {0}; ++ ++ if (str == NULL) { ++ SPDK_ERRLOG("dbdf num2str output str null!\n"); ++ return -EINVAL; ++ } ++ ++ ssam_dbdf_num2struct(dbdf, &st_dbdf); ++ ++ ret = snprintf(str, len - 1, "%04x:%02x:%02x.%x", ++ st_dbdf.domain, st_dbdf.bus, st_dbdf.device, st_dbdf.function); ++ if ((ret >= (int)(len - 1)) || (ret <= 0)) { ++ SPDK_ERRLOG("dbdf num2str error\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} +diff --git a/lib/ssam/ssam_driver/ssam_driver.c b/lib/ssam/ssam_driver/ssam_driver.c +new file mode 100644 +index 0000000..028a083 +--- /dev/null ++++ b/lib/ssam/ssam_driver/ssam_driver.c +@@ -0,0 +1,444 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. 
++ */ ++ ++#include "spdk/stdinc.h" ++#include "spdk/log.h" ++#include "ssam_driver_adapter.h" ++#include "dpak_ssam.h" ++ ++#define SSAM_DRV_PRIORITY_LAST 65535 ++#define VIRTIO_F_NOTIFICATION_DATA (1UL << 38) ++#define SSAM_DPAK_DIR "/etc/dpak/" ++#define SSAM_CFG_DIR SSAM_DPAK_DIR SSAM_SERVER_NAME "/" ++#define SSAM_RECOVER_CFG_JSON SSAM_CFG_DIR "recover.json" ++#define SSAM_PARAM_CFG_JSON SSAM_CFG_DIR "parameter.json" ++#define SSAM_CONFIG_DIR_PERMIT 0750 ++ ++__attribute__((constructor(SSAM_DRV_PRIORITY_LAST))) int ssam_construct(void); ++ ++__attribute__((destructor(SSAM_DRV_PRIORITY_LAST))) void ssam_destruct(void); ++ ++int ++ssam_lib_init(struct ssam_lib_args *args_in, struct ssam_hostep_info *eps_out) ++{ ++ hvio_lib_args_s hvio_args_in; ++ hvio_hostep_info_s *hostep_info = NULL; ++ ++ if (args_in == NULL || eps_out == NULL) { ++ SPDK_ERRLOG("input paramter error, null pointer.\n"); ++ return -EINVAL; ++ } ++ ++ memset(&hvio_args_in, 0, sizeof(hvio_lib_args_s)); ++ hvio_args_in.role = args_in->role; ++ hvio_args_in.core_num = args_in->core_num; ++ hvio_args_in.cb_ops.hvio_heap_malloc = (__typeof__(hvio_args_in.cb_ops.hvio_heap_malloc)) ++ args_in->ssam_heap_malloc; ++ hvio_args_in.cb_ops.hvio_heap_free = args_in->ssam_heap_free; ++ hvio_args_in.host_dma_queue_per_chnl = args_in->dma_queue_num; ++ hvio_args_in.hash_mode = args_in->hash_mode; ++ ++ hostep_info = (hvio_hostep_info_s *)(void *)eps_out; ++ ++ return ssam_drv_lib_init(&hvio_args_in, hostep_info); ++} ++ ++int ++ssam_lib_exit(void) ++{ ++ return ssam_drv_lib_deinit(); ++} ++ ++int ++ssam_setup_function(uint16_t pf_id, uint16_t num_vf, enum ssam_device_type dev_type) ++{ ++ enum device_type type; ++ switch (dev_type) { ++ case SSAM_DEVICE_VIRTIO_BLK: ++ type = DEVICE_VIRTIO_BLK; ++ break; ++ case SSAM_DEVICE_VIRTIO_SCSI: ++ type = DEVICE_VIRTIO_SCSI; ++ break; ++ case SSAM_DEVICE_VIRTIO_FS: ++ type = DEVICE_VIRTIO_FS; ++ break; ++ default: ++ type = DEVICE_VIRTIO_MAX; ++ break; ++ } ++ ++ return ssam_drv_setup_function(pf_id, num_vf, type, type); ++} ++ ++int ++ssam_write_function_config(struct ssam_function_config *cfg) ++{ ++ struct function_config hvio_function_cfg; ++ ++ if (cfg == NULL) { ++ SPDK_ERRLOG("libssam input paramter error, null pointer.\n"); ++ return -EINVAL; ++ } ++ ++ if ((cfg->virtio_config.device_feature & VIRTIO_F_NOTIFICATION_DATA) != 0) { ++ SPDK_ERRLOG("Virtio feature is error.\n"); ++ return -EINVAL; ++ } ++ ++ memset(&hvio_function_cfg, 0x0, sizeof(struct function_config)); ++ ++ hvio_function_cfg.function_id = (uint32_t)cfg->gfunc_id; ++ switch (cfg->type) { ++ case SSAM_DEVICE_VIRTIO_BLK: ++ hvio_function_cfg.type = DEVICE_VIRTIO_BLK; ++ break; ++ case SSAM_DEVICE_VIRTIO_SCSI: ++ hvio_function_cfg.type = DEVICE_VIRTIO_SCSI; ++ break; ++ case SSAM_DEVICE_VIRTIO_FS: ++ hvio_function_cfg.type = DEVICE_VIRTIO_FS; ++ break; ++ default: ++ hvio_function_cfg.type = DEVICE_VIRTIO_MAX; ++ break; ++ } ++ ++ memcpy(&hvio_function_cfg.config.virtio, &cfg->virtio_config, sizeof(struct ssam_virtio_config)); ++ return ssam_drv_write_function_config(&hvio_function_cfg); ++} ++ ++int ++ssam_send_action(uint16_t gfunc_id, enum ssam_function_action action, const void *data, ++ uint16_t data_len) ++{ ++ enum function_action func_act; ++ ++ if (data == NULL || data_len == 0) { ++ SPDK_ERRLOG("libssam input paramter error.\n"); ++ return -EINVAL; ++ } ++ ++ switch (action) { ++ case SSAM_FUNCTION_ACTION_START: ++ func_act = FUNCTION_ACTION_START; ++ break; ++ ++ case SSAM_FUNCTION_ACTION_STOP: ++ func_act = 
FUNCTION_ACTION_STOP; ++ break; ++ ++ case SSAM_FUNCTION_ACTION_RESET: ++ func_act = FUNCTION_ACTION_RESET; ++ break; ++ ++ case SSAM_FUNCTION_ACTION_CONFIG_CHANGE: ++ func_act = FUNCTION_ACTION_CONFIG_CHANGE; ++ break; ++ ++ case SSAM_FUNCTION_ACTION_SCSI_EVENT: ++ func_act = FUNCTION_ACTION_SCSI_EVENT; ++ break; ++ ++ default: ++ func_act = FUNCTION_ACTION_MAX; ++ break; ++ } ++ ++ return ssam_drv_send_action(gfunc_id, func_act, data, data_len); ++} ++ ++int ++ssam_function_mount(uint16_t gfunc_id, uint32_t lun_id, enum ssam_mount_type type, uint16_t tid) ++{ ++ struct hvio_mount_para hash_paras; ++ ++ memset(&hash_paras, 0x0, sizeof(struct hvio_mount_para)); ++ ++ hash_paras.algo_type = type; ++ hash_paras.key[0] = tid; ++ ++ return ssam_drv_volume_mount(gfunc_id, lun_id, &hash_paras); ++} ++ ++int ++ssam_function_umount(uint16_t gfunc_id, uint32_t lun_id) ++{ ++ return ssam_drv_volume_umount(gfunc_id, lun_id); ++} ++ ++int ++ssam_request_poll(uint16_t tid, uint16_t poll_num, struct ssam_request **io_req) ++{ ++ if (io_req == NULL || poll_num > SSAM_MAX_REQ_POLL_SIZE) { ++ SPDK_ERRLOG("ssam request poll input paramter error.\n"); ++ return -EINVAL; ++ } ++ ++ return ssam_drv_vmio_req_poll_batch(tid, poll_num, (struct vmio_request **)io_req); ++} ++ ++int ++ssam_request_poll_ext(uint16_t tid, uint16_t poll_num, struct ssam_request **io_req, ++ struct ssam_request_poll_opt *poll_opt) ++{ ++ if (io_req == NULL || poll_num > SSAM_MAX_REQ_POLL_SIZE || poll_opt == NULL) { ++ SPDK_ERRLOG("ssam request poll ext input paramter error.\n"); ++ return -EINVAL; ++ } ++ ++ hvio_vmio_req_poll_opt_s hvio_poll_opt = { ++ .sge1_iov = poll_opt->sge1_iov, ++ .queue_id = poll_opt->queue_id, ++ }; ++ ++ return ssam_drv_vmio_req_poll_batch_ext(tid, poll_num, (struct vmio_request **)io_req, ++ &hvio_poll_opt); ++} ++ ++int ++ssam_dma_data_request(uint16_t tid, struct ssam_dma_request *dma_req) ++{ ++ if (dma_req == NULL || dma_req->direction >= SSAM_REQUEST_DATA_MAX) { ++ SPDK_ERRLOG("ssam dma request input paramter error.\n"); ++ return -EINVAL; ++ } ++ ++ hvio_host_dma_req_s *mode_para = (hvio_host_dma_req_s *)dma_req; ++ ++ return ssam_drv_host_dma_request(tid, mode_para); ++} ++ ++int ++ssam_dma_rsp_poll(uint16_t tid, uint16_t poll_num, struct ssam_dma_rsp *dma_rsp) ++{ ++ if (dma_rsp == NULL || poll_num > SSAM_MAX_RESP_POLL_SIZE) { ++ SPDK_ERRLOG("resp poll input paramter error.\n"); ++ return -EINVAL; ++ } ++ ++ return ssam_drv_host_dma_rsp_poll(tid, poll_num, (hvio_host_dma_rsp_s *)dma_rsp); ++} ++ ++static enum vmio_type ++ssam_io_type_to_vmio(enum ssam_io_type io_type) { ++ enum vmio_type vmio_type; ++ ++ switch (io_type) ++ { ++ case SSAM_VIRTIO_BLK_IO: ++ vmio_type = VMIO_TYPE_VIRTIO_BLK_IO; ++ break; ++ ++ case SSAM_VIRTIO_SCSI_IO: ++ vmio_type = VMIO_TYPE_VIRTIO_SCSI_IO; ++ break; ++ ++ case SSAM_VIRTIO_SCSI_CTRL: ++ vmio_type = VMIO_TYPE_VIRTIO_SCSI_CTRL; ++ break; ++ ++ case SSAM_VIRTIO_SCSI_EVT: ++ vmio_type = VMIO_TYPE_VIRTIO_SCSI_EVT; ++ break; ++ ++ case SSAM_VIRTIO_FUNC_STATUS: ++ vmio_type = VMIO_TYPE_VIRTIO_FUNC_STATUS; ++ break; ++ ++ case SSAM_VIRTIO_FS_IO: ++ vmio_type = VMIO_TYPE_VIRTIO_FS_IO; ++ break; ++ ++ case SSAM_VIRTIO_FS_HIPRI: ++ vmio_type = VMIO_TYPE_VIRTIO_FS_HIPRI; ++ break; ++ ++ default: ++ vmio_type = VMIO_TYPE_RSVD; ++ } ++ ++ return vmio_type; ++} ++ ++int ++ssam_io_complete(uint16_t tid, struct ssam_io_response *resp) ++{ ++ struct vmio_response vmio_res; ++ struct virtio_response *virtio_res = NULL; ++ ++ if (resp == NULL) { ++ SPDK_ERRLOG("ssam io complete input 
paramter error, null pointer.\n"); ++ return -EINVAL; ++ } ++ ++ memset(&vmio_res, 0x0, sizeof(vmio_res)); ++ vmio_res.glb_function_id = resp->gfunc_id; ++ vmio_res.iocb_id = resp->iocb_id; ++ vmio_res.type = ssam_io_type_to_vmio(resp->req->type); ++ ++ switch (resp->status) { ++ case SSAM_IO_STATUS_OK: ++ vmio_res.status = VMIO_STATUS_OK; ++ break; ++ case SSAM_IO_STATUS_EMPTY: ++ vmio_res.status = VMIO_STATUS_VQ_EMPTY; ++ break; ++ default: ++ vmio_res.status = VMIO_STATUS_ERROR; ++ break; ++ } ++ ++ vmio_res.req = (struct vmio_request *)(void *)resp->req; ++ vmio_res.flr_seq = resp->flr_seq; ++ ++ virtio_res = (struct virtio_response *)&vmio_res.virtio; ++ virtio_res->used_len = 0; /* virtio-blk insensitive of this value, set 0 */ ++ virtio_res->rsp_len = resp->data.rsp_len; ++ virtio_res->iovcnt = resp->data.iovcnt; ++ virtio_res->iovs = resp->data.iovs; ++ virtio_res->rsp = resp->data.rsp; ++ ++ return ssam_drv_vmio_complete(tid, &vmio_res); ++} ++ ++int ++ssam_vmio_rxq_create(uint16_t *queue_id_out) ++{ ++ if (queue_id_out == NULL) { ++ return -EINVAL; ++ } ++ return ssam_drv_vmio_rxq_create(queue_id_out); ++} ++ ++int ++ssam_update_virtio_device_used(uint16_t glb_function_id, uint64_t device_used) ++{ ++ return ssam_drv_update_virtio_device_used(glb_function_id, device_used); ++} ++ ++int ++ssam_virtio_blk_resize(uint16_t gfunc_id, uint64_t capacity) ++{ ++ return ssam_drv_update_virtio_blk_capacity(gfunc_id, capacity); ++} ++ ++int ++ssam_get_funcid_by_dbdf(uint32_t dbdf, uint16_t *gfunc_id) ++{ ++ if (gfunc_id == NULL) { ++ SPDK_ERRLOG("libssam input paramter error, null pointer.\n"); ++ return -EINVAL; ++ } ++ ++ return ssam_drv_get_glb_function_id_by_dbdf(dbdf, gfunc_id); ++} ++ ++int ++ssam_check_device_ready(uint8_t role, uint32_t proc_type, uint8_t *ready) ++{ ++ if (ready == NULL) { ++ SPDK_ERRLOG("libssam input paramter error, null pointer.\n"); ++ return -EINVAL; ++ } ++ ++ return ssam_drv_check_device_ready(role, proc_type, ready); ++} ++ ++int ++ssam_get_hot_upgrade_state(void) ++{ ++ return ssam_drv_get_hot_upgrade_state(); ++} ++ ++int ++ssam_virtio_blk_release_resource(uint16_t glb_function_id) ++{ ++ return ssam_drv_virtio_blk_release_resource(glb_function_id); ++} ++ ++int ++ssam_virtio_blk_alloc_resource(uint16_t glb_function_id, uint16_t queue_num) ++{ ++ return ssam_drv_virtio_blk_alloc_resource(glb_function_id, queue_num); ++} ++ ++int ++ssam_virtio_vq_bind_core(uint16_t glb_function_id, uint16_t queue_num) ++{ ++ return ssam_drv_virtio_vq_bind_core(glb_function_id, queue_num); ++} ++ ++int ++ssam_virtio_vq_unbind_core(uint16_t glb_function_id) ++{ ++ return ssam_drv_virtio_vq_unbind_core(glb_function_id); ++} ++ ++static int ++ssam_try_mkdir(const char *dir, mode_t mode) ++{ ++ int rc; ++ ++ rc = mkdir(dir, mode); ++ if (rc < 0 && errno != EEXIST) { ++ SPDK_ERRLOG("ssam try mkdir error, dir: '%s': %s\n", dir, strerror(errno)); ++ return -errno; ++ } ++ return 0; ++} ++ ++int ++spdk_ssam_rc_preinit(void) ++{ ++ int rc; ++ ++ rc = ssam_try_mkdir(SSAM_DPAK_DIR, SSAM_CONFIG_DIR_PERMIT); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ rc = ssam_try_mkdir(SSAM_CFG_DIR, SSAM_CONFIG_DIR_PERMIT); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ if (access(SSAM_RECOVER_CFG_JSON, F_OK) != 0) { ++ return 0; ++ } ++ ++ return 1; ++} ++ ++char * ++ssam_rc_get_recover_json_file_path(void) ++{ ++ return (char *)SSAM_RECOVER_CFG_JSON; ++} ++ ++char * ++ssam_rc_get_param_json_file_path(void) ++{ ++ return (char *)SSAM_PARAM_CFG_JSON; ++} ++ 
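++/*
++ * The constructor below runs at load time with priority SSAM_DRV_PRIORITY_LAST
++ * and resolves the hivio driver entry points from libhivio.so through
++ * ssam_drv_ops_init(); the matching destructor releases the dlopen handle via
++ * the uninit callback registered during that init.
++ */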
++__attribute__((constructor(SSAM_DRV_PRIORITY_LAST))) int ++ssam_construct(void) ++{ ++ int ret = ssam_drv_ops_init(); ++ if (ret != 0) { ++ SPDK_ERRLOG("ssam drv ops init failed"); ++ return -1; ++ } ++ ++ SPDK_NOTICELOG("ssam construct finish"); ++ return 0; ++} ++ ++__attribute__((destructor(SSAM_DRV_PRIORITY_LAST))) void ++ssam_destruct(void) ++{ ++ ssam_drv_ops_uninit(); ++} +diff --git a/lib/ssam/ssam_driver/ssam_driver_adapter.c b/lib/ssam/ssam_driver/ssam_driver_adapter.c +new file mode 100644 +index 0000000..713bd80 +--- /dev/null ++++ b/lib/ssam/ssam_driver/ssam_driver_adapter.c +@@ -0,0 +1,518 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. ++ */ ++ ++#include ++#include "spdk/stdinc.h" ++#include "spdk/log.h" ++#include "ssam_driver_adapter.h" ++ ++#define SSAM_DRV_SHARD_LIBRARY "/usr/lib64/libhivio.so" ++#define SSAM_DRV_FUNC_NO_PTR (-1) ++#define SSAM_DRV_ADD_FUNC(class, name) {#name, (void**)&(class).name} ++#define SSAM_FUNC_PTR_OR_ERR_RET(func, retval) do { \ ++ if ((func) == NULL) \ ++ return retval; \ ++} while (0) ++ ++struct ssam_drv_ops_map { ++ char *name; ++ void **func; ++}; ++ ++static void *g_ssam_drv_handler = NULL; ++static struct ssam_drv_ops g_ssam_drv_ops = { 0 }; ++typedef void (*lib_dlsym_uninit_cb_t)(void); ++static lib_dlsym_uninit_cb_t g_lib_dlsym_uninit_cb = NULL; ++ ++static struct ssam_drv_ops_map g_ssam_drv_ops_map[] = { ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_host_dma_request), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_vmio_req_poll_batch), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_vmio_req_poll_batch_ext), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_lib_deinit), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_volume_umount), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_lib_init), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_volume_mount), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_host_dma_rsp_poll), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_get_glb_function_id_by_dbdf), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_send_action), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_update_virtio_blk_capacity), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_setup_function), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_check_device_ready), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_write_function_config), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_get_hot_upgrade_state), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_vmio_complete), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_vmio_rxq_create), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_update_virtio_device_used), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_virtio_blk_alloc_resource), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_virtio_blk_release_resource), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_virtio_vq_bind_core), ++ SSAM_DRV_ADD_FUNC(g_ssam_drv_ops, hvio_virtio_vq_unbind_core), ++}; ++ ++void ssam_lib_dlsym_uninit_cb_register(lib_dlsym_uninit_cb_t cb); ++ ++ ++struct ssam_drv_ops * ++ssam_get_drv_ops(void) ++{ ++ return &g_ssam_drv_ops; ++} ++ ++static void ++ssam_drv_ops_cb_uninit(void) ++{ ++ if (g_ssam_drv_handler != NULL) { ++ memset(&g_ssam_drv_ops, 0, sizeof(struct ssam_drv_ops)); ++ dlclose(g_ssam_drv_handler); ++ } ++} ++ ++static int ++ssam_drv_ops_init_sub(void *handler, struct ssam_drv_ops_map driver_map[], int size) ++{ ++ for (int index = 0; index < size; index++) { ++ if (*driver_map[index].func != NULL) { ++ continue; ++ } ++ ++ *driver_map[index].func = dlsym(handler, driver_map[index].name); ++ if (*driver_map[index].func == NULL) { ++ 
SPDK_ERRLOG("%s load func %s fail: %s", SSAM_DRV_SHARD_LIBRARY, driver_map[index].name, dlerror()); ++ return -1; ++ } ++ } ++ return 0; ++} ++ ++void ++ssam_lib_dlsym_uninit_cb_register(lib_dlsym_uninit_cb_t cb) ++{ ++ g_lib_dlsym_uninit_cb = cb; ++} ++ ++int ++ssam_drv_ops_init(void) ++{ ++ int ret = 0; ++ void *handler = dlopen(SSAM_DRV_SHARD_LIBRARY, RTLD_NOW); ++ if (handler == NULL) { ++ SPDK_ERRLOG("%s load err %s\n", SSAM_DRV_SHARD_LIBRARY, dlerror()); ++ return -1; ++ } ++ ++ ret = ssam_drv_ops_init_sub(handler, g_ssam_drv_ops_map, ++ sizeof(g_ssam_drv_ops_map) / sizeof(g_ssam_drv_ops_map[0])); ++ if (ret != 0) { ++ SPDK_ERRLOG("hwoff drv ops init: common api load failed"); ++ dlclose(handler); ++ return -1; ++ } ++ ++ g_ssam_drv_handler = handler; ++ ssam_lib_dlsym_uninit_cb_register(ssam_drv_ops_cb_uninit); ++ ++ return 0; ++} ++ ++void ++ssam_drv_ops_uninit(void) ++{ ++ if (g_lib_dlsym_uninit_cb != NULL) { ++ g_lib_dlsym_uninit_cb(); ++ g_lib_dlsym_uninit_cb = NULL; ++ } ++} ++ ++int ++ssam_drv_host_dma_request(uint16_t chnl_id, hvio_host_dma_req_s *req) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_host_dma_request, SSAM_DRV_FUNC_NO_PTR); ++ ret = ops->hvio_host_dma_request(chnl_id, req); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_vmio_req_poll_batch(uint16_t tid, uint16_t poll_num, struct vmio_request **req) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_vmio_req_poll_batch, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_vmio_req_poll_batch(tid, poll_num, req); ++ if (ret < 0) { ++ SPDK_ERRLOG("hvio_vmio_req_poll_batch exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return ret; ++} ++ ++int ++ssam_drv_vmio_req_poll_batch_ext(uint16_t tid, uint16_t poll_num, struct vmio_request **req, ++ hvio_vmio_req_poll_opt_s *poll_opt) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_vmio_req_poll_batch_ext, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_vmio_req_poll_batch_ext(tid, poll_num, req, poll_opt); ++ if (ret < 0) { ++ SPDK_ERRLOG("hvio_vmio_req_poll_batch_ext exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return ret; ++} ++ ++int ++ssam_drv_lib_deinit(void) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_lib_deinit, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_lib_deinit(); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_lib_deinit exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_volume_umount(uint16_t glb_function_id, uint32_t lun_id) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_volume_umount, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_volume_umount(glb_function_id, lun_id); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_volume_umount exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_lib_init(hvio_lib_args_s *args_in, hvio_hostep_info_s *eps_out) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_lib_init, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_lib_init(args_in, eps_out); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_lib_init exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_volume_mount(uint16_t 
glb_function_id, uint32_t lun_id, struct hvio_mount_para *hash_paras) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_volume_mount, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_volume_mount(glb_function_id, lun_id, hash_paras); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_volume_mount exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_host_dma_rsp_poll(uint16_t chnl_id, uint16_t poll_num, hvio_host_dma_rsp_s *rsp) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_host_dma_rsp_poll, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_host_dma_rsp_poll(chnl_id, poll_num, rsp); ++ if (ret < 0) { ++ SPDK_ERRLOG("hvio_host_dma_rsp_poll exec fail, ret=%d\n", ret); ++ return -1; ++ } ++ ++ return ret; ++} ++ ++int ++ssam_drv_get_glb_function_id_by_dbdf(uint32_t dbdf, uint16_t *glb_function_id) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_get_glb_function_id_by_dbdf, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_get_glb_function_id_by_dbdf(dbdf, glb_function_id); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_get_glb_function_id_by_dbdf exec fail, ret=%d", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_send_action(uint16_t glb_function_id, enum function_action action, const void *data, ++ uint16_t data_len) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_send_action, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_send_action(glb_function_id, action, data, data_len); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_send_action exec fail, ret=%d", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_update_virtio_blk_capacity(uint16_t glb_function_id, uint64_t capacity) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_update_virtio_blk_capacity, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_update_virtio_blk_capacity(glb_function_id, capacity); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_update_virtio_blk_capacity exec fail, ret=%d", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_setup_function(uint16_t pf_id, uint16_t num_vf, enum device_type pf_type, ++ enum device_type vf_type) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_setup_function, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_setup_function(pf_id, num_vf, pf_type, vf_type); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_setup_function exec fail, ret=%d", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_check_device_ready(uint8_t role, uint32_t proc_type, uint8_t *ready) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_check_device_ready, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_check_device_ready(role, proc_type, ready); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_check_device_ready exec fail, ret=%d", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_write_function_config(struct function_config *cfg) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_write_function_config, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_write_function_config(cfg); ++ if (ret != 0) { ++ 
SPDK_ERRLOG("hvio_write_function_config exec fail, ret=%d", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_get_hot_upgrade_state(void) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_get_hot_upgrade_state, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_get_hot_upgrade_state(); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_get_hot_upgrade_state exec fail, ret=%d", ret); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_vmio_complete(uint16_t tid, struct vmio_response *resp) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_vmio_complete, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_vmio_complete(tid, resp); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_vmio_rxq_create(uint16_t *queue_id_out) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_vmio_rxq_create, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_vmio_rxq_create(queue_id_out); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_update_virtio_device_used(uint16_t glb_function_id, uint64_t device_used) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_update_virtio_device_used, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_update_virtio_device_used(glb_function_id, device_used); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_virtio_blk_release_resource(uint16_t glb_function_id) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_virtio_blk_release_resource, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_virtio_blk_release_resource(glb_function_id); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_virtio_blk_alloc_resource(uint16_t glb_function_id, uint16_t queue_num) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_virtio_blk_alloc_resource, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_virtio_blk_alloc_resource(glb_function_id, queue_num); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_virtio_vq_bind_core(uint16_t glb_function_id, uint16_t queue_num) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_virtio_vq_bind_core, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_virtio_vq_bind_core(glb_function_id, queue_num); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_virtio_vq_bind_core exec fail, ret=%d", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++ssam_drv_virtio_vq_unbind_core(uint16_t glb_function_id) ++{ ++ int ret; ++ struct ssam_drv_ops *ops = NULL; ++ ++ ops = ssam_get_drv_ops(); ++ SSAM_FUNC_PTR_OR_ERR_RET(ops->hvio_virtio_vq_unbind_core, SSAM_DRV_FUNC_NO_PTR); ++ ++ ret = ops->hvio_virtio_vq_unbind_core(glb_function_id); ++ if (ret != 0) { ++ SPDK_ERRLOG("hvio_virtio_vq_unbind_core exec fail, ret=%d", ret); ++ return ret; ++ } ++ ++ return 0; ++} +diff --git a/lib/ssam/ssam_driver/ssam_driver_adapter.h b/lib/ssam/ssam_driver/ssam_driver_adapter.h +new file mode 100644 +index 0000000..52fa6e5 +--- /dev/null ++++ b/lib/ssam/ssam_driver/ssam_driver_adapter.h +@@ -0,0 +1,69 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co.
++ * All rights reserved. ++ */ ++ ++#ifndef SSAM_DRIVER_ADAPTER_H ++#define SSAM_DRIVER_ADAPTER_H ++ ++#include "hivio_api.h" ++ ++struct ssam_drv_ops { ++ int (*hvio_host_dma_request)(uint16_t chnl_id, hvio_host_dma_req_s *req); ++ int (*hvio_vmio_req_poll_batch)(uint16_t tid, uint16_t poll_num, struct vmio_request **req); ++ int (*hvio_vmio_req_poll_batch_ext)(uint16_t tid, uint16_t poll_num, struct vmio_request **req, ++ hvio_vmio_req_poll_opt_s *poll_opt); ++ int (*hvio_lib_deinit)(void); ++ int (*hvio_volume_umount)(uint16_t glb_function_id, uint32_t lun_id); ++ int (*hvio_lib_init)(hvio_lib_args_s *args_in, hvio_hostep_info_s *eps_out); ++ int (*hvio_volume_mount)(uint16_t glb_function_id, uint32_t lun_id, ++ struct hvio_mount_para *hash_paras); ++ int (*hvio_host_dma_rsp_poll)(uint16_t chnl_id, uint16_t poll_num, hvio_host_dma_rsp_s *rsp); ++ int (*hvio_get_glb_function_id_by_dbdf)(uint32_t dbdf, uint16_t *glb_function_id); ++ int (*hvio_send_action)(uint16_t glb_function_id, enum function_action action, const void *data, ++ uint16_t data_len); ++ int (*hvio_update_virtio_blk_capacity)(uint16_t glb_function_id, uint64_t capacity); ++ int (*hvio_setup_function)(uint16_t pf_id, uint16_t num_vf, enum device_type pf_type, ++ enum device_type vf_type); ++ int (*hvio_check_device_ready)(uint8_t role, uint32_t proc_type, uint8_t *ready); ++ int (*hvio_write_function_config)(struct function_config *cfg); ++ int (*hvio_get_hot_upgrade_state)(void); ++ int (*hvio_vmio_complete)(uint16_t tid, struct vmio_response *resp); ++ int (*hvio_vmio_rxq_create)(uint16_t *queue_id_out); ++ int (*hvio_update_virtio_device_used)(uint16_t glb_function_id, uint64_t device_used); ++ int (*hvio_virtio_blk_release_resource)(uint16_t glb_function_id); ++ int (*hvio_virtio_blk_alloc_resource)(uint16_t glb_function_id, uint16_t queue_num); ++ int (*hvio_virtio_vq_bind_core)(uint16_t glb_function_id, uint16_t queue_num); ++ int (*hvio_virtio_vq_unbind_core)(uint16_t glb_function_id); ++}; ++ ++int ssam_drv_ops_init(void); ++void ssam_drv_ops_uninit(void); ++struct ssam_drv_ops *ssam_get_drv_ops(void); ++int ssam_drv_host_dma_request(uint16_t chnl_id, hvio_host_dma_req_s *req); ++int ssam_drv_vmio_req_poll_batch(uint16_t tid, uint16_t poll_num, struct vmio_request **req); ++int ssam_drv_vmio_req_poll_batch_ext(uint16_t tid, uint16_t poll_num, struct vmio_request **req, ++ hvio_vmio_req_poll_opt_s *poll_opt); ++int ssam_drv_lib_deinit(void); ++int ssam_drv_volume_umount(uint16_t glb_function_id, uint32_t lun_id); ++int ssam_drv_lib_init(hvio_lib_args_s *args_in, hvio_hostep_info_s *eps_out); ++int ssam_drv_volume_mount(uint16_t glb_function_id, uint32_t lun_id, ++ struct hvio_mount_para *hash_paras); ++int ssam_drv_host_dma_rsp_poll(uint16_t chnl_id, uint16_t poll_num, hvio_host_dma_rsp_s *rsp); ++int ssam_drv_get_glb_function_id_by_dbdf(uint32_t dbdf, uint16_t *glb_function_id); ++int ssam_drv_send_action(uint16_t glb_function_id, enum function_action action, const void *data, ++ uint16_t data_len); ++int ssam_drv_update_virtio_blk_capacity(uint16_t glb_function_id, uint64_t capacity); ++int ssam_drv_setup_function(uint16_t pf_id, uint16_t num_vf, enum device_type pf_type, ++ enum device_type vf_type); ++int ssam_drv_check_device_ready(uint8_t role, uint32_t proc_type, uint8_t *ready); ++int ssam_drv_write_function_config(struct function_config *cfg); ++int ssam_drv_get_hot_upgrade_state(void); ++int ssam_drv_vmio_complete(uint16_t tid, struct vmio_response *resp); ++int ssam_drv_vmio_rxq_create(uint16_t 
*queue_id_out); ++int ssam_drv_update_virtio_device_used(uint16_t glb_function_id, uint64_t device_used); ++int ssam_drv_virtio_blk_release_resource(uint16_t glb_function_id); ++int ssam_drv_virtio_blk_alloc_resource(uint16_t glb_function_id, uint16_t queue_num); ++int ssam_drv_virtio_vq_bind_core(uint16_t glb_function_id, uint16_t queue_num); ++int ssam_drv_virtio_vq_unbind_core(uint16_t glb_function_id); ++#endif +diff --git a/lib/ssam/ssam_driver/ssam_mempool.c b/lib/ssam/ssam_driver/ssam_mempool.c +new file mode 100644 +index 0000000..c57f6dd +--- /dev/null ++++ b/lib/ssam/ssam_driver/ssam_mempool.c +@@ -0,0 +1,774 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. ++ */ ++ ++#include "spdk/stdinc.h" ++ ++#include "spdk/log.h" ++#include "spdk/env.h" ++#include "dpak_ssam.h" ++ ++#define MP_CK_HEADER_LEN sizeof(struct ssam_mp_chunk) ++#define MP_CK_END_LEN sizeof(struct ssam_mp_chunk*) ++#define MP_CK_CB_LEN (MP_CK_HEADER_LEN + MP_CK_END_LEN) ++ ++#define SHIFT_2MB 21 /* (1 << 21) == 2MB */ ++#define VALUE_2MB (1ULL << SHIFT_2MB) ++#define SHIFT_1GB 30 /* (1 << 30) == 1G */ ++#define VALUE_1GB (1ULL << SHIFT_1GB) ++#define SSAM_SPDK_VTOPHYS_ERROR (0xFFFFFFFFFFFFFFFFULL) ++#define SSAM_DMA_MEM_MAGIC (0xBABEFACEBABEFACE) ++ ++struct ssam_mp_dma_mem { ++ uint64_t magic; ++ uint64_t size; ++ char mem[0]; ++}; ++ ++struct ssam_mp_chunk { ++ struct ssam_mp_chunk *prev; ++ struct ssam_mp_chunk *next; ++ ++ /* Total size of the memory pool chunk, the chunk is in the memory block */ ++ uint64_t size; ++ ++ /* The chunk is free when true or in use when false */ ++ bool is_free; ++}; ++ ++struct ssam_mp_block { ++ struct ssam_mp_chunk *free_list; ++ struct ssam_mp_chunk *alloc_list; ++ struct ssam_mp_block *next; ++ ++ /* The memory pool block's start virtual address */ ++ char *virt_start; ++ ++ /* The memory pool block's start physical address */ ++ char *phys_start; ++ ++ /* Total size of the memory pool block */ ++ uint64_t size; ++ ++ /* Total size of the memory pool block that be allocated */ ++ uint64_t alloc_size; ++ ++ /* Total size of the memory pool block be allocated that program can be use */ ++ uint64_t alloc_prog_size; ++}; ++ ++struct ssam_mempool { ++ /* Total size of the memory pool */ ++ uint64_t size; ++ uint64_t extra_size; ++ uint64_t extra_size_limit; ++ struct ssam_mp_block *blk_list; ++ ++ /* The memory pool's start virtual address */ ++ char *virt; ++ pthread_mutex_t lock; ++}; ++ ++ ++static uint64_t ++ssam_mp_align_up(uint64_t size) ++{ ++ /* Aligin to sizeof long */ ++ return (size + sizeof(long) - 1) & (~(sizeof(long) - 1)); ++} ++ ++static inline void ++ssam_mp_lock(struct ssam_mempool *mp) ++{ ++ pthread_mutex_lock(&mp->lock); ++} ++ ++static inline void ++ssam_mp_unlock(struct ssam_mempool *mp) ++{ ++ pthread_mutex_unlock(&mp->lock); ++} ++ ++static void ++ssam_mp_init_block(struct ssam_mp_block *blk, uint64_t size) ++{ ++ blk->size = size; ++ blk->alloc_size = 0; ++ blk->alloc_prog_size = 0; ++ blk->free_list = (struct ssam_mp_chunk *)blk->virt_start; ++ blk->free_list->is_free = true; ++ blk->free_list->size = size; ++ blk->free_list->prev = NULL; ++ blk->free_list->next = NULL; ++ blk->alloc_list = NULL; ++} ++ ++static inline void ++ssam_mp_list_insert(struct ssam_mp_chunk **head, struct ssam_mp_chunk *ck) ++{ ++ struct ssam_mp_chunk *hd = *head; ++ ++ ck->prev = NULL; ++ ck->next = hd; ++ if (hd != NULL) { ++ hd->prev = ck; ++ } ++ *head = ck; ++} ++ ++static void 
++ssam_mp_list_delete(struct ssam_mp_chunk **head, struct ssam_mp_chunk *ck) ++{ ++ if (ck->prev == NULL) { ++ *head = ck->next; ++ if (ck->next != NULL) { ++ ck->next->prev = NULL; ++ } ++ } else { ++ ck->prev->next = ck->next; ++ if (ck->next != NULL) { ++ ck->next->prev = ck->prev; ++ } ++ } ++} ++ ++static struct ++ ssam_mp_block * ++ssam_mp_find_block(struct ssam_mempool *mp, void *p) ++{ ++ struct ssam_mp_block *blk = mp->blk_list; ++ ++ while (blk != NULL) { ++ if ((blk->virt_start <= (char *)p) && ++ ((blk->virt_start + blk->size) > (char *)p)) { ++ break; ++ } ++ blk = blk->next; ++ } ++ ++ return blk; ++} ++ ++static void ++ssam_mp_merge_chunk(struct ssam_mp_block *blk, struct ssam_mp_chunk *ck) ++{ ++ struct ssam_mp_chunk *free_mem = ck; ++ struct ssam_mp_chunk *next = ck; ++ ++ /* Traversal free memory backward */ ++ while (next->is_free) { ++ free_mem = next; ++ if (((char *)next - MP_CK_CB_LEN) <= blk->virt_start) { ++ break; ++ } ++ next = *(struct ssam_mp_chunk **)((char *)next - MP_CK_END_LEN); ++ } ++ ++ /* Traverse free memory forward */ ++ next = (struct ssam_mp_chunk *)((char *)free_mem + free_mem->size); ++ while (((char *)next <= blk->virt_start + blk->size - MP_CK_HEADER_LEN) && next->is_free) { ++ ssam_mp_list_delete(&blk->free_list, next); ++ free_mem->size += next->size; ++ next = (struct ssam_mp_chunk *)((char *)next + next->size); ++ } ++ ++ /* Merge free memory */ ++ *(struct ssam_mp_chunk **)((char *)free_mem + free_mem->size - MP_CK_END_LEN) = free_mem; ++ ++ return; ++} ++ ++static int ++ssam_mp_get_mem_block(uint64_t start_virt_addr, uint64_t len, uint64_t *phys_addr, ++ uint64_t *blk_size) ++{ ++ uint64_t virt0, virt1, phys0, phys1; ++ uint64_t phys_len; ++ ++ if ((len % VALUE_2MB) != 0) { ++ SPDK_ERRLOG("Memory len %lu not align to %llu\n", len, VALUE_2MB); ++ return -EINVAL; ++ } ++ ++ virt0 = start_virt_addr; ++ virt1 = start_virt_addr; ++ phys0 = spdk_vtophys((void *)virt0, NULL); ++ if (phys0 == SSAM_SPDK_VTOPHYS_ERROR) { ++ SPDK_ERRLOG("Error translating virt0 address %lu\n", virt0); ++ return -EINVAL; ++ } ++ ++ /* ++ * Find a piece of memory with consecutive physical address, ++ * the memory got by spdk_dma_malloc is aligned by VALUE_2MB, ++ * this ensures that the physical addresses are consecutive ++ * within the VALUE_2MB length range. 
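++ * ++ * For example (illustrative numbers only): if virt0 maps to physical address 0x80000000 and virt0 + 2MB maps to 0x80200000, the deltas match and the contiguous block grows by another 2MB; if virt0 + 4MB then maps to 0x90000000 rather than the expected 0x80400000, the scan stops and the block covers only the first 4MB.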
++ */ ++ for (phys_len = VALUE_2MB; phys_len < len; phys_len += VALUE_2MB) { ++ virt1 += VALUE_2MB; ++ phys1 = spdk_vtophys((void *)virt1, NULL); ++ if (phys1 == SSAM_SPDK_VTOPHYS_ERROR) { ++ SPDK_ERRLOG("Error translating virt1 address %lu\n", virt1); ++ break; ++ } ++ if ((long)(phys1 - phys0) != (long)(virt1 - virt0)) { ++ SPDK_DEBUGLOG(ssam_mempool, "End of consecutive physical addresses\n"); ++ break; ++ } ++ } ++ ++ *phys_addr = spdk_vtophys((void *)virt0, NULL); ++ *blk_size = phys_len; ++ ++ return 0; ++} ++ ++static void ++ssam_mp_free_blk_heads(struct ssam_mp_block *blk) ++{ ++ struct ssam_mp_block *blk_head = blk; ++ struct ssam_mp_block *l_mp = NULL; ++ ++ while (blk_head != NULL) { ++ l_mp = blk_head; ++ blk_head = blk_head->next; ++ free(l_mp); ++ l_mp = NULL; ++ } ++} ++ ++static int ++ssam_mp_insert_blocks(struct ssam_mempool *mp, uint64_t size) ++{ ++ struct ssam_mp_block *blk_head = NULL; ++ uint64_t blk_size = 0; ++ uint64_t remain_size = size; ++ uint64_t phys = 0; ++ char *virt_addr = mp->virt; ++ int rc; ++ ++ /* Find memory blocks and insert them to memory pool list */ ++ while (remain_size > 0) { ++ rc = ssam_mp_get_mem_block((uint64_t)virt_addr, remain_size, &phys, &blk_size); ++ if (rc != 0) { ++ ssam_mp_free_blk_heads(mp->blk_list); ++ return -ENOMEM; ++ } ++ blk_head = (struct ssam_mp_block *)malloc(sizeof(struct ssam_mp_block)); ++ if (blk_head == NULL) { ++ SPDK_ERRLOG("mempool block head malloc failed, mempool create failed\n"); ++ ssam_mp_free_blk_heads(mp->blk_list); ++ return -ENOMEM; ++ } ++ blk_head->virt_start = virt_addr; ++ blk_head->phys_start = (char *)phys; ++ ssam_mp_init_block(blk_head, blk_size); ++ blk_head->next = mp->blk_list; ++ mp->blk_list = blk_head; ++ mp->size += blk_size; ++ virt_addr += blk_size; ++ remain_size -= blk_size; ++ } ++ ++ if (mp->size != size) { ++ SPDK_ERRLOG("mempool size lost, mempool create failed\n"); ++ ssam_mp_free_blk_heads(mp->blk_list); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_check_mempool_size(uint64_t size, uint64_t extra_size_limit) ++{ ++ if (size == 0) { ++ SPDK_ERRLOG("Memory pool size can not be %lu, mempool create failed\n", size); ++ return -EINVAL; ++ } ++ ++ if (size < VALUE_2MB) { ++ SPDK_ERRLOG("Memory pool size can not less than %llu, actually %lu, mempool create failed\n", ++ VALUE_2MB, size); ++ return -EINVAL; ++ } ++ ++ if (extra_size_limit > VALUE_1GB) { ++ SPDK_ERRLOG("Memory pool extra size can not greater than %llu, actually %lu, mempool create failed\n", ++ VALUE_1GB, extra_size_limit); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++ssam_mempool_t * ++ssam_mempool_create(uint64_t size, uint64_t extra_size_limit) ++{ ++ struct ssam_mempool *mp = NULL; ++ uint64_t mp_size = size; ++ uint64_t mp_extra_size_limit = extra_size_limit; ++ void *virt = NULL; ++ int rc; ++ ++ rc = ssam_check_mempool_size(mp_size, mp_extra_size_limit); ++ if (rc != 0) { ++ return NULL; ++ } ++ ++ if ((mp_size % VALUE_2MB) != 0) { ++ SPDK_NOTICELOG("Memory pool size %lu not align to %llu, Align down memory pool size to %llu\n", ++ mp_size, ++ VALUE_2MB, mp_size & ~(VALUE_2MB - 1)); ++ mp_size = mp_size & ~(VALUE_2MB - 1); ++ } ++ ++ if ((mp_extra_size_limit % VALUE_2MB) != 0) { ++ SPDK_NOTICELOG("Memory pool extra size %lu not align to %llu, Align down memory pool size to %llu\n", ++ mp_extra_size_limit, VALUE_2MB, mp_extra_size_limit & ~(VALUE_2MB - 1)); ++ mp_extra_size_limit = mp_extra_size_limit & ~(VALUE_2MB - 1); ++ } ++ ++ mp = (struct ssam_mempool *)calloc(1, sizeof(struct 
ssam_mempool)); ++ if (mp == NULL) { ++ SPDK_ERRLOG("mempool head malloc failed, mempool create failed\n"); ++ return NULL; ++ } ++ ++ virt = spdk_dma_malloc(mp_size, VALUE_2MB, NULL); ++ if (virt == NULL) { ++ SPDK_ERRLOG("spdk_dma_malloc failed, mempool create failed\n"); ++ free(mp); ++ mp = NULL; ++ return NULL; ++ } ++ mp->virt = (char *)virt; ++ ++ rc = ssam_mp_insert_blocks(mp, mp_size); ++ if (rc != 0) { ++ free(mp); ++ mp = NULL; ++ spdk_dma_free(virt); ++ return NULL; ++ } ++ ++ mp->extra_size = 0; ++ mp->extra_size_limit = mp_extra_size_limit; ++ pthread_mutex_init(&mp->lock, NULL); ++ ++ return (ssam_mempool_t *)mp; ++} ++ ++static void ++ssam_mp_split_block(struct ssam_mp_block *blk, struct ssam_mp_chunk *free_mem, ++ struct ssam_mp_chunk *allocated, uint64_t size) ++{ ++ *free_mem = *allocated; ++ free_mem->size -= size; ++ *(struct ssam_mp_chunk **)((char *)free_mem + free_mem->size - MP_CK_END_LEN) = free_mem; ++ ++ if (free_mem->prev == NULL) { ++ blk->free_list = free_mem; ++ } else { ++ free_mem->prev->next = free_mem; ++ } ++ ++ if (free_mem->next != NULL) { ++ free_mem->next->prev = free_mem; ++ } ++ ++ allocated->is_free = false; ++ allocated->size = size; ++ ++ *(struct ssam_mp_chunk **)((char *)allocated + size - MP_CK_END_LEN) = allocated; ++} ++ ++static void * ++ssam_mp_alloc_mem_from_block(struct ssam_mp_block *blk, uint64_t size, ++ uint64_t *phys_addr) ++{ ++ struct ssam_mp_chunk *free_mem = NULL; ++ struct ssam_mp_chunk *allocated = NULL; ++ char *alloc = NULL; ++ ++ free_mem = blk->free_list; ++ while (free_mem != NULL) { ++ if (free_mem->size < size) { ++ free_mem = free_mem->next; ++ continue; ++ } ++ ++ allocated = free_mem; ++ if ((free_mem->size - size) > MP_CK_CB_LEN) { ++ /* If enough mem in free chunk, split it */ ++ free_mem = (struct ssam_mp_chunk *)((char *)allocated + size); ++ ssam_mp_split_block(blk, free_mem, allocated, size); ++ } else { ++ /* If no enough mem in free chunk, all will be allocated */ ++ ssam_mp_list_delete(&blk->free_list, allocated); ++ allocated->is_free = false; ++ } ++ ssam_mp_list_insert(&blk->alloc_list, allocated); ++ ++ blk->alloc_size += allocated->size; ++ blk->alloc_prog_size += allocated->size - (uint64_t)MP_CK_CB_LEN; ++ alloc = (char *)allocated + MP_CK_HEADER_LEN; ++ if (phys_addr != NULL) { ++ *phys_addr = (uint64_t)blk->phys_start + (uint64_t)(alloc - blk->virt_start); ++ } ++ ++ return (void *)alloc; ++ } ++ ++ return NULL; ++} ++ ++static bool ++ssam_mp_check_consecutive_mem(void *start_addr, uint64_t len) ++{ ++ uint64_t phys_start; ++ uint64_t phys_end; ++ ++ phys_start = spdk_vtophys(start_addr, NULL); ++ phys_end = spdk_vtophys((void *)((uint64_t)start_addr + len - 1), NULL); ++ if ((phys_end - phys_start) == (len - 1)) { ++ return true; ++ } ++ ++ return false; ++} ++ ++/* alloc dma memory from hugepage directly */ ++static void * ++ssam_mp_dma_alloc(struct ssam_mempool *mp, uint64_t size, uint64_t *phys) ++{ ++ struct ssam_mp_dma_mem *alloc; ++ size_t len = size + sizeof(struct ssam_mp_dma_mem); ++ uint64_t phys_addr = 0; ++ ++ if (mp->extra_size + len > mp->extra_size_limit) { ++ SPDK_INFOLOG(ssam_mempool, "spdk_dma_malloc alloc failed, extra_size(%lu) size(%zu) limit(%lu).\n", ++ mp->extra_size, len, mp->extra_size_limit); ++ return NULL; ++ } ++ ++ alloc = (struct ssam_mp_dma_mem *)spdk_dma_malloc(len, 0, NULL); ++ if (alloc == NULL) { ++ SPDK_INFOLOG(ssam_mempool, "spdk_dma_malloc alloc failed, len %zu.\n", len); ++ return NULL; ++ } ++ if (!ssam_mp_check_consecutive_mem((void *)alloc->mem, size)) 
{ ++ SPDK_ERRLOG("spdk_dma_malloc alloc failed, no consecutive mem, len %lu.\n", size); ++ spdk_dma_free(alloc); ++ return NULL; ++ } ++ phys_addr = spdk_vtophys((const void *)alloc->mem, NULL); ++ if (phys_addr == SSAM_SPDK_VTOPHYS_ERROR) { ++ SPDK_ERRLOG("Error translating spdk_dma_malloc address %lu\n", phys_addr); ++ spdk_dma_free(alloc); ++ return NULL; ++ } ++ *phys = phys_addr; ++ alloc->magic = SSAM_DMA_MEM_MAGIC; ++ alloc->size = len; ++ mp->extra_size += len; ++ ++ return (void *)alloc->mem; ++} ++ ++static void ++ssam_mp_dma_free(struct ssam_mempool *mp, const void *ptr) ++{ ++ struct ssam_mp_dma_mem *free_mem; ++ uint64_t addr = (uint64_t)ptr; ++ ++ if (addr <= sizeof(struct ssam_mp_dma_mem)) { ++ SPDK_ERRLOG("ssam_mp_dma_free mem address err\n"); ++ return; ++ } ++ ++ free_mem = (struct ssam_mp_dma_mem *)(addr - sizeof(struct ssam_mp_dma_mem)); ++ if (free_mem->magic == SSAM_DMA_MEM_MAGIC) { ++ mp->extra_size -= free_mem->size; ++ spdk_dma_free(free_mem); ++ } else { ++ SPDK_ERRLOG("ssam_mp_dma_free magic err, magic is %lx\n", free_mem->magic); ++ } ++ return; ++} ++ ++static void * ++ssam_mp_alloc_mem_from_blocks(struct ssam_mempool *mp, uint64_t size, ++ uint64_t *phys_addr) ++{ ++ struct ssam_mp_block *blk = mp->blk_list; ++ void *alloc = NULL; ++ ++ while (blk != NULL) { ++ if (size > (blk->size - blk->alloc_size)) { ++ blk = blk->next; ++ continue; ++ } ++ ++ alloc = ssam_mp_alloc_mem_from_block(blk, size, phys_addr); ++ if (alloc != NULL) { ++ return alloc; ++ } ++ ++ blk = blk->next; ++ } ++ SPDK_INFOLOG(ssam_mempool, "ssam mempool no enough memory, alloc size %lu\n", size); ++ alloc = ssam_mp_dma_alloc(mp, size, phys_addr); ++ ++ return alloc; ++} ++ ++void * ++ssam_mempool_alloc(ssam_mempool_t *mp, uint64_t size, uint64_t *phys_addr) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ void *alloc = NULL; ++ uint64_t need_size; ++ ++ if (phys_addr == NULL) { ++ SPDK_ERRLOG("alloc phys_addr pointer is NULL\n"); ++ return NULL; ++ } ++ ++ if (l_mp == NULL) { ++ SPDK_ERRLOG("alloc mp pointer is NULL\n"); ++ return NULL; ++ } ++ ++ if (size == 0) { ++ SPDK_ERRLOG("Memory pool size can not be %lu, mempool alloc failed\n", size); ++ return NULL; ++ } ++ ++ need_size = ssam_mp_align_up(size + MP_CK_CB_LEN); ++ ++ ssam_mp_lock(l_mp); ++ if (need_size > l_mp->size) { ++ SPDK_INFOLOG(ssam_mempool, "No enough memory in mempool, need %lu, actually %lu\n", ++ need_size, l_mp->size); ++ alloc = ssam_mp_dma_alloc(l_mp, size, phys_addr); ++ ssam_mp_unlock(l_mp); ++ return alloc; ++ } ++ ++ alloc = ssam_mp_alloc_mem_from_blocks(l_mp, need_size, phys_addr); ++ ++ ssam_mp_unlock(l_mp); ++ ++ return alloc; ++} ++ ++void ++ssam_mempool_free(ssam_mempool_t *mp, void *ptr) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ struct ssam_mp_block *blk = NULL; ++ struct ssam_mp_chunk *ck = NULL; ++ ++ if (l_mp == NULL) { ++ SPDK_ERRLOG("free mp pointer is NULL\n"); ++ return; ++ } ++ ++ if (ptr == NULL) { ++ SPDK_ERRLOG("free ptr pointer is NULL\n"); ++ return; ++ } ++ ++ ssam_mp_lock(l_mp); ++ ++ blk = ssam_mp_find_block(l_mp, ptr); ++ if (blk == NULL) { ++ ssam_mp_dma_free(l_mp, ptr); ++ ssam_mp_unlock(l_mp); ++ return; ++ } ++ ++ ck = (struct ssam_mp_chunk *)((char *)ptr - MP_CK_HEADER_LEN); ++ ++ ssam_mp_list_delete(&blk->alloc_list, ck); ++ ssam_mp_list_insert(&blk->free_list, ck); ++ ck->is_free = true; ++ ++ blk->alloc_size -= ck->size; ++ blk->alloc_prog_size -= ck->size - (uint64_t)MP_CK_CB_LEN; ++ ++ ssam_mp_merge_chunk(blk, ck); ++ ++ ssam_mp_unlock(l_mp); 
++ ++ return; ++} ++ ++void ++ssam_mempool_destroy(ssam_mempool_t *mp) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ ++ if (l_mp == NULL) { ++ SPDK_ERRLOG("destroy mp pointer is NULL\n"); ++ return; ++ } ++ ++ if (l_mp->virt == NULL) { ++ SPDK_ERRLOG("destroy mp->virt pointer is NULL\n"); ++ return; ++ } ++ ++ ssam_mp_lock(l_mp); ++ ssam_mp_free_blk_heads(l_mp->blk_list); ++ spdk_dma_free(l_mp->virt); ++ ssam_mp_unlock(l_mp); ++ pthread_mutex_destroy(&l_mp->lock); ++ free(l_mp); ++ l_mp = NULL; ++ ++ return; ++} ++ ++static uint64_t ++ssam_mp_total_memory(ssam_mempool_t *mp) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ uint64_t size; ++ ++ ssam_mp_lock(l_mp); ++ size = l_mp->size; ++ ssam_mp_unlock(l_mp); ++ ++ return size; ++} ++ ++static uint64_t ++ssam_mp_total_used_memory(ssam_mempool_t *mp) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ struct ssam_mp_block *blk = NULL; ++ uint64_t total = 0; ++ ++ ssam_mp_lock(l_mp); ++ ++ blk = l_mp->blk_list; ++ while (blk != NULL) { ++ total += blk->alloc_size; ++ blk = blk->next; ++ } ++ ++ ssam_mp_unlock(l_mp); ++ ++ return total; ++} ++ ++static uint32_t ++ssam_mp_alloc_num(ssam_mempool_t *mp) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ struct ssam_mp_block *blk = NULL; ++ struct ssam_mp_chunk *alloc = NULL; ++ uint32_t total = 0; ++ ++ ssam_mp_lock(l_mp); ++ ++ blk = l_mp->blk_list; ++ while (blk != NULL) { ++ alloc = blk->alloc_list; ++ while (alloc != NULL) { ++ if (total == UINT32_MAX) { ++ SPDK_ERRLOG("mp alloc num out of bound\n"); ++ ssam_mp_unlock(l_mp); ++ return total; ++ } ++ total++; ++ alloc = alloc->next; ++ } ++ blk = blk->next; ++ } ++ ++ ssam_mp_unlock(l_mp); ++ ++ return total; ++} ++ ++static uint32_t ++ssam_mp_free_num(ssam_mempool_t *mp) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ struct ssam_mp_block *blk = NULL; ++ struct ssam_mp_chunk *free_mem = NULL; ++ uint32_t total = 0; ++ ++ ssam_mp_lock(l_mp); ++ ++ blk = l_mp->blk_list; ++ while (blk != NULL) { ++ free_mem = blk->free_list; ++ while (free_mem != NULL) { ++ if (total == UINT32_MAX) { ++ SPDK_ERRLOG("mp free num out of bound\n"); ++ ssam_mp_unlock(l_mp); ++ return total; ++ } ++ total++; ++ free_mem = free_mem->next; ++ } ++ blk = blk->next; ++ } ++ ++ ssam_mp_unlock(l_mp); ++ ++ return total; ++} ++ ++static uint64_t ++ssam_mp_get_greatest_free_size(ssam_mempool_t *mp) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ struct ssam_mp_block *blk = NULL; ++ struct ssam_mp_chunk *free_mem = NULL; ++ uint64_t max_size = 0; ++ ++ ssam_mp_lock(l_mp); ++ ++ blk = l_mp->blk_list; ++ while (blk != NULL) { ++ free_mem = blk->free_list; ++ while (free_mem != NULL) { ++ if (max_size < free_mem->size) { ++ max_size = free_mem->size; ++ } ++ free_mem = free_mem->next; ++ } ++ blk = blk->next; ++ } ++ ++ ssam_mp_unlock(l_mp); ++ ++ return max_size; ++} ++ ++int ++ssam_get_mempool_info(ssam_mempool_t *mp, struct memory_info_stats *info) ++{ ++ struct ssam_mempool *l_mp = (struct ssam_mempool *)mp; ++ ++ if (l_mp == NULL || info == NULL) { ++ SPDK_ERRLOG("ssam get mempool info mp or info pointer is NULL\n"); ++ return -EINVAL; ++ } ++ ++ info->total_size = ssam_mp_total_memory(l_mp); ++ info->used_size = ssam_mp_total_used_memory(l_mp); ++ info->free_size = info->total_size - info->used_size; ++ info->greatest_free_size = ssam_mp_get_greatest_free_size(l_mp); ++ info->alloc_count = ssam_mp_alloc_num(l_mp); ++ info->free_count = ssam_mp_free_num(l_mp); ++ ++ return 0; 
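++ ++ /* ++ * A minimal usage sketch of this pool API (illustrative only: it assumes the ++ * SPDK environment is already initialized, the sizes are arbitrary and error ++ * handling is omitted): ++ * ++ * ssam_mempool_t *mp = ssam_mempool_create(64 * VALUE_2MB, 4 * VALUE_2MB); ++ * uint64_t phys = 0; ++ * void *buf = ssam_mempool_alloc(mp, 4096, &phys); ++ * ... use buf (virtual address) and phys (physical address) for DMA ... ++ * ssam_mempool_free(mp, buf); ++ * ssam_mempool_destroy(mp); ++ */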
++} ++SPDK_LOG_REGISTER_COMPONENT(ssam_mempool) +diff --git a/lib/ssam/ssam_internal.h b/lib/ssam/ssam_internal.h +new file mode 100644 +index 0000000..ea78fbb +--- /dev/null ++++ b/lib/ssam/ssam_internal.h +@@ -0,0 +1,519 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. ++ */ ++ ++#ifndef SSAM_INTERNAL_H ++#define SSAM_INTERNAL_H ++ ++#include "stdint.h" ++ ++#include ++#include "ssam_driver/dpak_ssam.h" ++ ++#include "spdk_internal/thread.h" ++#include "spdk/log.h" ++#include "spdk/util.h" ++#include "spdk/rpc.h" ++#include "spdk/bdev.h" ++#include "spdk/ssam.h" ++#include "ssam_config.h" ++ ++#define SPDK_SSAM_FEATURES ((1ULL << VHOST_F_LOG_ALL) | \ ++ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \ ++ (1ULL << VIRTIO_F_VERSION_1) | \ ++ (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \ ++ (1ULL << VIRTIO_RING_F_EVENT_IDX) | \ ++ (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \ ++ (1ULL << VIRTIO_F_RING_PACKED)) ++ ++#define VIRITO_DEFAULT_QUEUE_SIZE 256 ++ ++#define SPDK_SSAM_VQ_MAX_SUBMISSIONS 16 ++#define SPDK_SSAM_MAX_VQUEUES 64 ++#define SPDK_SSAM_MAX_VQ_SIZE 256 ++#define SPDK_SSAM_VF_DEFAULTE_VQUEUES 1 ++#define SSAM_JSON_DEFAULT_QUEUES_NUM 16 ++ ++/* ssam not support config vq size so far */ ++#define SPDK_SSAM_DEFAULT_VQ_SIZE SPDK_SSAM_MAX_VQ_SIZE ++#define SPDK_SSAM_DEFAULT_VQUEUES 16 ++#define SPDK_SSAM_IOVS_MAX 32 ++#define SPDK_SSAM_MAX_SEG_SIZE (32 * 1024) ++ ++#define SPDK_INVALID_GFUNC_ID UINT16_MAX ++#define SPDK_INVALID_CORE_ID UINT16_MAX ++#define SPDK_INVALID_VQUEUE_NUM UINT16_MAX ++#define SPDK_INVALID_ID UINT16_MAX ++ ++#define SSAM_PF_MAX_NUM 32 ++#define SPDK_SSAM_SCSI_CTRLR_MAX_DEVS 255 ++#define SSAM_VIRTIO_SCSI_LUN_ID 0x400001 ++#define SPDK_SSAM_SCSI_DEFAULT_VQUEUES 128 ++#define SSAM_MAX_SESSION_PER_DEV UINT16_MAX ++#define SSAM_DEFAULT_MEMPOOL_EXTRA_SIZE 0 ++#define SSAM_MAX_CORE_NUM 16 ++#define SSAM_MAX_CORE_NUM_WITH_LARGE_IO 10 ++ ++#define SPDK_LIMIT_LOG_MAX_INTERNEL_IN_MS 3000 ++#define SPDK_CONVERT_MS_TO_US 1000 ++ ++#define SPDK_SSAM_VIRTIO_BLK_DEFAULT_FEATURE 0x3f11001046 ++#define SPDK_SSAM_VIRTIO_SCSI_DEFAULT_FEATURE 0x3f11000007 ++ ++enum spdk_ssam_iostat_mode { ++ SSAM_IOSTAT_NORMAL, ++ SSAM_IOSTAT_SUM, ++ SSAM_IOSTAT_DUMP_VQ, ++ SSAM_IOSTAT_SPARSE, ++}; ++ ++typedef void (*spdk_ssam_session_io_wait_cb)(void *cb_arg); ++ ++struct spdk_ssam_session_io_wait { ++ spdk_ssam_session_io_wait_cb cb_fn; ++ void *cb_arg; ++ TAILQ_ENTRY(spdk_ssam_session_io_wait) link; ++}; ++ ++typedef void (*spdk_ssam_session_io_wait_r_cb)(void *cb_arg); ++ ++struct spdk_ssam_session_io_wait_r { ++ spdk_ssam_session_io_wait_r_cb cb_fn; ++ void *cb_arg; ++ TAILQ_ENTRY(spdk_ssam_session_io_wait_r) link; ++}; ++ ++struct spdk_ssam_virtqueue { ++ void *tasks; ++ struct spdk_ssam_session *smsession; ++ uint32_t *index; ++ int num; ++ int use_num; ++ int index_l; ++ int index_r; ++}; ++ ++struct spdk_ssam_show_iostat_args { ++ /* vq_idx for blk; tgt_id for scsi */ ++ uint32_t id; ++ enum spdk_ssam_iostat_mode mode; ++}; ++ ++struct spdk_ssam_session_backend { ++ enum spdk_virtio_type type; ++ int (*remove_session)(struct spdk_ssam_session *smsession); ++ void (*remove_self)(struct spdk_ssam_session *smsession); ++ void (*request_worker)(struct spdk_ssam_session *smsession, void *arg); ++ void (*destroy_bdev_device)(struct spdk_ssam_session *smsession, void *args); ++ void (*response_worker)(struct spdk_ssam_session *smsession, void *arg); ++ void (*no_data_req_worker)(struct spdk_ssam_session *smsession); ++ ++ int 
(*ssam_get_config)(struct spdk_ssam_session *smsession, ++ uint8_t *config, uint32_t len, uint16_t queues); ++ int (*ssam_set_config)(struct spdk_ssam_session *smsession, ++ uint8_t *config, uint32_t offset, uint32_t size, uint32_t flags); ++ ++ void (*print_stuck_io_info)(struct spdk_ssam_session *smsession); ++ ++ void (*dump_info_json)(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w); ++ void (*write_config_json)(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w); ++ void (*show_iostat_json)(struct spdk_ssam_session *smsession, ++ struct spdk_ssam_show_iostat_args *args, ++ struct spdk_json_write_ctx *w); ++ void (*clear_iostat_json)(struct spdk_ssam_session *smsession); ++ struct spdk_bdev *(*get_bdev)(struct spdk_ssam_session *smsession, uint32_t id); ++}; ++ ++struct spdk_ssam_session { ++ /* Unique session name, format as ssam.tid.gfunc_id. */ ++ char *name; ++ ++ struct spdk_ssam_dev *smdev; ++ ++ /* Session poller thread, same as ssam dev poller thread */ ++ struct spdk_thread *thread; ++ struct ssam_mempool *mp; ++ const struct spdk_ssam_session_backend *backend; ++ spdk_ssam_session_rsp_fn rsp_fn; ++ void *rsp_ctx; ++ struct spdk_ssam_virtqueue virtqueue[SPDK_SSAM_MAX_VQUEUES]; ++ ++ /* Number of processing tasks, can not remove session when task_cnt > 0 */ ++ int task_cnt; ++ ++ /* Number of pending asynchronous operations */ ++ uint32_t pending_async_op_num; ++ ++ /* ssam global virtual function id */ ++ uint16_t gfunc_id; ++ ++ /* Depth of virtio-blk virtqueue */ ++ uint16_t queue_size; ++ ++ /* Number of virtio-blk virtqueue */ ++ uint16_t max_queues; ++ bool started; ++ bool initialized; ++ ++ /* spdk_ssam_session_fn process finish flag */ ++ bool async_done; ++ ++ bool registered; ++ ++ TAILQ_ENTRY(spdk_ssam_session) tailq; ++}; ++ ++struct ssam_iovs { ++ struct iovec sges[SPDK_SSAM_IOVS_MAX]; ++}; ++ ++struct ssam_iovec { ++ struct ssam_iovs virt; /* virt's iov_base is virtual address */ ++ struct ssam_iovs phys; /* phys's iov_base is physical address */ ++}; ++ ++struct ssam_stat { ++ uint64_t poll_cur_tsc; ++ uint64_t poll_tsc; ++ uint64_t poll_count; ++}; ++ ++struct spdk_ssam_dev { ++ /* ssam device name, format as ssam.tid */ ++ char *name; ++ /* virtio type */ ++ enum spdk_virtio_type type; ++ ++ /* ssam device poller thread, same as session poller thread */ ++ struct spdk_thread *thread; ++ struct spdk_poller *requestq_poller; ++ struct spdk_poller *responseq_poller; ++ struct spdk_poller *stop_poller; ++ ++ /* Store sessions of this dev, max number is SSAM_MAX_SESSION_PER_DEV */ ++ struct spdk_ssam_session **smsessions; ++ ++ TAILQ_ENTRY(spdk_ssam_dev) tailq; ++ ++ /* IO num that is on flight */ ++ uint64_t io_num; ++ ++ uint64_t discard_io_num; ++ ++ /* IO stuck ticks in dma process */ ++ uint64_t io_stuck_tsc; ++ struct ssam_stat stat; ++ ++ uint64_t io_wait_cnt; ++ uint64_t io_wait_r_cnt; ++ ++ /* Number of started and actively polled sessions */ ++ uint32_t active_session_num; ++ ++ /* Information of tid, indicate from which ssam queue to receive or send data */ ++ uint16_t tid; ++ TAILQ_HEAD(, spdk_ssam_session_io_wait) io_wait_queue; ++ TAILQ_HEAD(, spdk_ssam_session_io_wait_r) io_wait_queue_r; ++}; ++ ++struct spdk_ssam_dma_cb { ++ uint8_t status; ++ uint8_t req_dir; ++ uint16_t vq_idx; ++ uint16_t task_idx; ++ uint16_t gfunc_id; ++}; ++ ++struct spdk_ssam_send_event_flag { ++ bool need_async; ++ bool need_rsp; ++}; ++ ++/** ++ * Remove a session from sessions array. ++ * ++ * \param smsessions sessions array. 
++ * \param smsession the session to be removed. ++ */ ++void ssam_sessions_remove(struct spdk_ssam_session **smsessions, ++ struct spdk_ssam_session *smsession); ++ ++/** ++ * Check out whether sessions is empty or not. ++ * ++ * \param smsessions sessions array. ++ * \return true indicate sessions is empty or false not empty. ++ */ ++bool ssam_sessions_empty(struct spdk_ssam_session **smsessions); ++ ++/** ++ * Get next session in sessions array, begin with current session. ++ * ++ * \param smsessions sessions array. ++ * \param smsession the begin session. ++ * \return the next session found or null not found. ++ */ ++struct spdk_ssam_session *ssam_sessions_next(struct spdk_ssam_session **smsessions, ++ struct spdk_ssam_session *smsession); ++ ++/** ++ * Insert io wait task to session. ++ * ++ * \param smsession the session that io wait insert to. ++ * \param io_wait the io wait to be insert. ++ */ ++void ssam_session_insert_io_wait(struct spdk_ssam_session *smsession, ++ struct spdk_ssam_session_io_wait *io_wait); ++ ++/** ++ * Insert io wait compilete or dma task to smdev. ++ * ++ * \param smdev the smdev that io wait insert to. ++ * \param io_wait_r the io wait to be insert. ++ */ ++void ssam_session_insert_io_wait_r(struct spdk_ssam_dev *smdev, ++ struct spdk_ssam_session_io_wait_r *io_wait_r); ++ ++/** ++ * Remove session from sessions and then stop session dev poller. ++ * ++ * \param smsession the session that to be removed. ++ */ ++void ssam_session_destroy(struct spdk_ssam_session *smsession); ++ ++/** ++ * Show a ssam device info in json format. ++ * ++ * \param smdev ssam device. ++ * \param gfunc_id ssam global vf id. ++ * \param arg user-provided parameter. ++ */ ++void ssam_dump_info_json(struct spdk_ssam_dev *smdev, uint16_t gfunc_id, ++ struct spdk_json_write_ctx *w); ++ ++/** ++ * Get a ssam device name. ++ * ++ * \param smdev ssam device. ++ * \return ssam device name or NULL ++ */ ++const char *ssam_dev_get_name(const struct spdk_ssam_dev *smdev); ++ ++/** ++ * Get a ssam session name. ++ * ++ * \param smdev smsession session. ++ * \return ssam session name or NULL ++ */ ++const char *ssam_session_get_name(const struct spdk_ssam_session *smsession); ++ ++/** ++ * Call a function of the provided ssam session. ++ * The function will be called on this session's thread. ++ * ++ * \param smsession ssam session. ++ * \param fn function to call on each session's thread ++ * \param cpl_fn function to be called at the end of the ssam management thread. ++ * Optional, can be NULL. ++ * \param send_event_flag whether an asynchronous operation or response is required ++ * \param ctx additional argument to the both callbacks ++ * \return error code ++ */ ++int ssam_send_event_to_session(struct spdk_ssam_session *smsession, spdk_ssam_session_fn fn, ++ spdk_ssam_session_cpl_fn cpl_fn, struct spdk_ssam_send_event_flag send_event_flag, void *ctx); ++ ++/** ++ * Finish a blocking ssam_send_event_to_session() call and finally ++ * start the session. This must be called on the target lcore, which ++ * will now receive all session-related messages (e.g. from ++ * ssam_send_event_to_session()). ++ * ++ * Must be called under the global ssam lock. ++ * ++ * \param smsession ssam session ++ * \param response return code ++ */ ++void ssam_session_start_done(struct spdk_ssam_session *smsession, int response); ++ ++/** ++ * Finish a blocking ssam_send_event_to_session() call and finally ++ * stop the session. 
This must be called on the session's lcore which ++ * used to receive all session-related messages (e.g. from ++ * ssam_send_event_to_session()). After this call, the session- ++ * related messages will be once again processed by any arbitrary thread. ++ * ++ * Must be called under the global ssam lock. ++ * ++ * \param smsession ssam session ++ * \param rsp return code ++ * \param ctx user context ++ */ ++void ssam_session_stop_done(struct spdk_ssam_session *smsession, int rsp, void **ctx); ++ ++/** ++ * Set session be freed, so that not access session any more. ++ * ++ * \param ctx user context ++ */ ++void ssam_set_session_be_freed(void **ctx); ++ ++/** ++ * Find a ssam device in the global g_ssam_devices list by gfunc_id, ++ * if find the ssam device, register a session to the existent ssam device ++ * sessions list, if not find, first create a ssam device to the global ++ * g_ssam_devices list, and then register a session to the new ssam device ++ * sessions list. ++ * ++ * Must be called under the global ssam lock. ++ * ++ * \param info ssam session register info. ++ * \param smsession ssam session created. ++ * \return 0 for success or negative for failed. ++ */ ++int ssam_session_register(struct spdk_ssam_session_reg_info *info, ++ struct spdk_ssam_session **smsession); ++ ++/** ++ * unregister smsession response call back function. ++ * ++ * \param smsession ssam session ++\ */ ++void ssam_session_unreg_response_cb(struct spdk_ssam_session *smsession); ++ ++void ssam_dev_unregister(struct spdk_ssam_dev **dev); ++ ++void ssam_send_event_async_done(void **ctx); ++ ++void ssam_send_dev_destroy_msg(struct spdk_ssam_session *smsession, void *args); ++ ++/** ++ * Get ssam config. ++ * ++ * \param smsession ssam session ++ * \param config a memory region to store config. ++ * \param len the input config param memory region length. ++ * \return 0 success or -1 failed. ++ */ ++int ssam_get_config(struct spdk_ssam_session *smsession, uint8_t *config, ++ uint32_t len, uint16_t queues); ++ ++/** ++ * Mount gfunc_id volume to the ssam normal queue. ++ * ++ * \param smsession ssam session ++ * \param lun_id lun id of gfunc_id. ++ * ++ * \return 0 success or not 0 failed. ++ */ ++int ssam_mount_normal(struct spdk_ssam_session *smsession, uint32_t lun_id); ++ ++/** ++ * Unmount function. ++ * ++ * \param smsession ssam session ++ * \param lun_id lun id of gfunc_id. ++ * ++ * \return 0 success or not 0 failed. ++ */ ++int ssam_umount_normal(struct spdk_ssam_session *smsession, uint32_t lun_id); ++ ++/** ++ * Mount gfunc_id volume to the ssam normal queue again. ++ * ++ * \param smsession ssam session ++ * \param lun_id lun id of gfunc_id. ++ * ++ * \return 0 success or not 0 failed. ++ */ ++int ssam_remount_normal(struct spdk_ssam_session *smsession, uint32_t lun_id); ++ ++/** ++ * Register worker poller to dev. ++ * ++ * \param smdev the dev that to be registered worker poller. ++ * \return 0 success or not 0 failed. ++ */ ++int ssam_dev_register_worker_poller(struct spdk_ssam_dev *smdev); ++ ++/** ++ * Unregister worker poller for dev. ++ * ++ * \param smdev the dev that to be unregistered woker poller. ++ */ ++void ssam_dev_unregister_worker_poller(struct spdk_ssam_dev *smdev); ++ ++/** ++ * Get the differential value of the current tsc. ++ * ++ * \param tsc the current tsc. ++ * \return the differential value. ++ */ ++uint64_t ssam_get_diff_tsc(uint64_t tsc); ++ ++/** ++ * Get the bdev name of the specific gfunc_id. ++ * ++ * \param gfunc_id ssam global vf id. 
++ * ++ * \return the bdev name of gfunc_id ++ */ ++const char *ssam_get_bdev_name_by_gfunc_id(uint16_t gfunc_id); ++ ++/** ++ * Remove a ssam session. Remove a session associate to the unique gfunc_id, ++ * then remove the ssam device if the device not have a session any more. ++ * ++ * Notice that this interface cannot be reentrant, so must call ssam_lock first. ++ * ++ * \param smsession ssam session ++ * ++ * \return 0 on success, negative errno on error. ++ */ ++int ssam_session_unregister(struct spdk_ssam_session *smsession); ++ ++/** ++ * Get ssam iostat. ++ * ++ * \param smsession ssam session ++ * \param stat a memory region to store iostat. ++ */ ++void spdk_ssam_get_iostat(struct spdk_ssam_session *smsession, ++ struct spdk_bdev_io_stat *stat); ++ ++/** ++ * Decrease dev io num. ++ * ++ * \param smdev ssam device. ++ */ ++void ssam_dev_io_dec(struct spdk_ssam_dev *smdev); ++ ++/** ++ * Get ssam session bdev. ++ * ++ * \param smsession ssam session ++ * ++ * \return the session bdev. ++ */ ++struct spdk_bdev *ssam_get_session_bdev(struct spdk_ssam_session *smsession); ++ ++/** ++ * free memory with rte. ++ * ++ * \param smsession ssam session ++ * ++ * \return 0 on success. ++ */ ++int ssam_free_ex(void *addr); ++ ++/** ++ * Get elem info from memory addr. ++ * ++ * \param memory addr ++ * ++ */ ++int ssam_malloc_elem_from_addr(const void *data, unsigned long long *pg_size, int *socket_id); ++ ++#endif /* SSAM_INTERNAL_H */ +diff --git a/lib/ssam/ssam_malloc.c b/lib/ssam/ssam_malloc.c +new file mode 100644 +index 0000000..2ac8160 +--- /dev/null ++++ b/lib/ssam/ssam_malloc.c +@@ -0,0 +1,31 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. ++ */ ++ ++#include ++#include "spdk/env.h" ++ ++#include "ssam_internal.h" ++ ++int ++ssam_free_ex(void *addr) ++{ ++ spdk_free(addr); ++ return 0; ++} ++ ++int ++ssam_malloc_elem_from_addr(const void *data, unsigned long long *pg_size, int *socket_id) ++{ ++ struct rte_memseg_list *msl = NULL; ++ ++ msl = rte_mem_virt2memseg_list(data); ++ if (msl == NULL) { ++ return -1; ++ } ++ ++ *socket_id = msl->socket_id; ++ *pg_size = msl->page_sz; ++ return 0; ++} +diff --git a/lib/ssam/ssam_rpc.c b/lib/ssam/ssam_rpc.c +new file mode 100644 +index 0000000..158fb52 +--- /dev/null ++++ b/lib/ssam/ssam_rpc.c +@@ -0,0 +1,1949 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. 
++ */ ++ ++#include ++#include "spdk/string.h" ++#include "spdk/env.h" ++#include "spdk/bdev_module.h" ++#include "spdk/ssam.h" ++#include "spdk/bdev.h" ++ ++#include "ssam_internal.h" ++#include "ssam_config.h" ++#include "rte_malloc.h" ++ ++static int ssam_rpc_get_gfunc_id_by_dbdf(char *dbdf, uint16_t *gfunc_id); ++ ++int delete_flag = 0; ++int delete_dev_times[2000]; ++int session_delete_times = 0; ++ ++struct rpc_ssam_blk_ctrlr { ++ char *dev_name; ++ char *index; ++ bool readonly; ++ char *serial; ++ uint16_t vqueue; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_construct_ssam_blk_ctrlr[] = { ++ {"dev_name", offsetof(struct rpc_ssam_blk_ctrlr, dev_name), spdk_json_decode_string}, ++ {"index", offsetof(struct rpc_ssam_blk_ctrlr, index), spdk_json_decode_string}, ++ {"readonly", offsetof(struct rpc_ssam_blk_ctrlr, readonly), spdk_json_decode_bool, true}, ++ {"serial", offsetof(struct rpc_ssam_blk_ctrlr, serial), spdk_json_decode_string, true}, ++ {"vqueue", offsetof(struct rpc_ssam_blk_ctrlr, vqueue), spdk_json_decode_uint16, true}, ++}; ++ ++static void ++free_rpc_ssam_blk_ctrlr(struct rpc_ssam_blk_ctrlr *req) ++{ ++ if (req->dev_name != NULL) { ++ free(req->dev_name); ++ req->dev_name = NULL; ++ } ++ ++ if (req->index != NULL) { ++ free(req->index); ++ req->index = NULL; ++ } ++ ++ if (req->serial != NULL) { ++ free(req->serial); ++ req->serial = NULL; ++ } ++} ++ ++static int ++ssam_rpc_para_check(uint16_t gfunc_id) ++{ ++ int rc; ++ ++ rc = ssam_check_gfunc_id(gfunc_id); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_rpc_para_check_type(uint16_t gfunc_id, enum ssam_device_type target_type) ++{ ++ int rc; ++ enum ssam_device_type type; ++ ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ type = ssam_get_virtio_type(gfunc_id); ++ if (type == target_type) { ++ return 0; ++ } ++ SPDK_ERRLOG("Invalid virtio type, need type %d, actually %d\n", target_type, type); ++ ++ return -EINVAL; ++} ++ ++static void ++rpc_ssam_send_response_cb(void *arg, int rsp) ++{ ++ struct spdk_jsonrpc_request *request = arg; ++ ++ if (rsp != 0) { ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rsp)); ++ } else { ++ spdk_jsonrpc_send_bool_response(request, true); ++ } ++ return; ++} ++ ++struct ssam_log_command_info { ++ char *user_name; ++ char *event; ++ char *src_addr; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_construct_log_command_info[] = { ++ {"user_name", offsetof(struct ssam_log_command_info, user_name), spdk_json_decode_string}, ++ {"event", offsetof(struct ssam_log_command_info, event), spdk_json_decode_string}, ++ {"src_addr", offsetof(struct ssam_log_command_info, src_addr), spdk_json_decode_string}, ++}; ++ ++static void ++free_rpc_ssam_log_command_info(struct ssam_log_command_info *req) ++{ ++ if (req->user_name != NULL) { ++ free(req->user_name); ++ req->user_name = NULL; ++ } ++ if (req->event != NULL) { ++ free(req->event); ++ req->event = NULL; ++ } ++ if (req->src_addr != NULL) { ++ free(req->src_addr); ++ req->src_addr = NULL; ++ } ++} ++ ++static void ++rpc_ssam_log_command_info(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct ssam_log_command_info req = {0}; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("log info params error, skip\n"); ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_construct_log_command_info, ++ SPDK_COUNTOF(g_rpc_construct_log_command_info), &req); ++ 
if (rc != 0) { ++ SPDK_ERRLOG("decode cmd info failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ SPDK_NOTICELOG("log event: from %s user %s event %s\n", req.src_addr, req.user_name, req.event); ++ ++invalid: ++ free_rpc_ssam_log_command_info(&req); ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++} ++SPDK_RPC_REGISTER("log_command_info", rpc_ssam_log_command_info, ++ SPDK_RPC_RUNTIME) ++ ++static int ++rpc_ssam_session_reg_response_cb(struct spdk_ssam_session *smsession, ++ struct spdk_jsonrpc_request *request) ++{ ++ if (smsession->rsp_fn != NULL) { ++ return -1; ++ } ++ smsession->rsp_fn = rpc_ssam_send_response_cb; ++ smsession->rsp_ctx = request; ++ return 0; ++} ++ ++static void ++rpc_init_session_reg_info(struct spdk_ssam_session_reg_info *info, ++ uint16_t queues, uint16_t gfunc_id, struct spdk_jsonrpc_request *request) ++{ ++ info->queues = queues; ++ info->gfunc_id = gfunc_id; ++ info->rsp_ctx = (void *)request; ++ info->rsp_fn = rpc_ssam_send_response_cb; ++} ++ ++static void ++free_rpc_ssam_session_reg_info(struct spdk_ssam_session_reg_info *info) ++{ ++ if (info->name != NULL) { ++ free(info->name); ++ info->name = NULL; ++ } ++ if (info->dbdf != NULL) { ++ free(info->dbdf); ++ info->dbdf = NULL; ++ } ++} ++ ++static uint16_t ++rpc_ssam_get_gfunc_id_by_index(char *index) ++{ ++ uint16_t gfunc_id, i; ++ int rc; ++ if (strlen(index) <= 0x5) { ++ for (i = 0; i < strlen(index); i++) { ++ if (!isdigit(index[i])) { ++ return SPDK_INVALID_GFUNC_ID; ++ } ++ } ++ gfunc_id = spdk_strtol(index, 10) > SPDK_INVALID_GFUNC_ID ? SPDK_INVALID_GFUNC_ID : spdk_strtol( ++ index, 10); ++ } else { ++ rc = ssam_rpc_get_gfunc_id_by_dbdf(index, &gfunc_id); ++ if (rc != 0) { ++ return SPDK_INVALID_GFUNC_ID; ++ } ++ } ++ return gfunc_id; ++} ++ ++static void ++ssam_set_virtio_blk_config(struct ssam_virtio_config *cfg, uint16_t queues) ++{ ++ struct virtio_blk_config *dev_cfg = (struct virtio_blk_config *)cfg->device_config; ++ ++ cfg->device_feature = SPDK_SSAM_VIRTIO_BLK_DEFAULT_FEATURE; ++ cfg->queue_num = queues; ++ cfg->config_len = sizeof(struct virtio_blk_config); ++ ++ memset(dev_cfg, 0, cfg->config_len); ++ dev_cfg->blk_size = 0x200; ++ dev_cfg->min_io_size = 0; ++ dev_cfg->capacity = 0; ++ dev_cfg->num_queues = cfg->queue_num; ++ dev_cfg->seg_max = 0x7d; ++ dev_cfg->size_max = 0x200000; ++ cfg->queue_size = VIRITO_DEFAULT_QUEUE_SIZE; ++ ++ return; ++} ++ ++static int ++ssam_get_vqueue(struct rpc_ssam_blk_ctrlr *req, uint16_t gfunc_id, uint16_t *queues) ++{ ++ if (gfunc_id <= SSAM_PF_MAX_NUM) { ++ if (req->vqueue != SPDK_INVALID_VQUEUE_NUM) { ++ SPDK_ERRLOG("The PF does not allow dynamic modification of the vqueue(%d).\n", req->vqueue); ++ return -1; ++ } ++ *queues = ssam_get_queues(); ++ return 0; ++ } ++ ++ if (req->vqueue == SPDK_INVALID_VQUEUE_NUM) { ++ *queues = SPDK_SSAM_VF_DEFAULTE_VQUEUES; ++ return 0; ++ } ++ ++ if (req->vqueue > SPDK_SSAM_MAX_VQUEUES || req->vqueue == 0) { ++ SPDK_ERRLOG("The queue number is out of range. 
Currently (%u) .\n", req->vqueue); ++ return -1; ++ } ++ ++ *queues = req->vqueue; ++ return 0; ++} ++ ++static int ++ssam_blk_controller_set_vqueue(uint16_t gfunc_id, uint16_t queues) ++{ ++ int rc; ++ struct ssam_function_config cfg = { 0 }; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ ssam_lock(); ++ smsession = ssam_session_find(gfunc_id); ++ ssam_unlock(); ++ if (smsession != NULL) { ++ SPDK_ERRLOG("Session with function id %d already exists.\n", gfunc_id); ++ return -EEXIST; ++ } ++ ++ if (gfunc_id <= SSAM_PF_MAX_NUM) { ++ if (ssam_get_hash_mode() == SSAM_VQ_HASH_MODE) { ++ rc = ssam_virtio_vq_bind_core(gfunc_id, queues); ++ if (rc != 0) { ++ SPDK_ERRLOG("virtio blk vq(%u) bind core failed.\n", queues); ++ return rc; ++ } ++ } ++ return 0; ++ } ++ ++ cfg.gfunc_id = gfunc_id; ++ cfg.type = SSAM_DEVICE_VIRTIO_BLK; ++ ssam_set_virtio_blk_config(&cfg.virtio_config, queues); ++ ++ if (spdk_ssam_is_starting() == false) { ++ rc = ssam_write_function_config(&cfg); ++ if (rc != 0) { ++ SPDK_ERRLOG("ssam write function(%d) config failed:%s\n", cfg.gfunc_id, spdk_strerror(-rc)); ++ return rc; ++ } ++ } else { ++ rc = ssam_virtio_blk_alloc_resource(gfunc_id, queues); ++ if (rc != 0) { ++ SPDK_ERRLOG("virtio blk alloc vq(%u) failed.\n", queues); ++ return rc; ++ } ++ } ++ ++ if (ssam_get_hash_mode() == SSAM_VQ_HASH_MODE) { ++ rc = ssam_virtio_vq_bind_core(gfunc_id, queues); ++ if (rc != 0) { ++ SPDK_ERRLOG("virtio blk vq(%u) bind core failed.\n", queues); ++ return rc; ++ } ++ } ++ ++ return 0; ++} ++ ++static void ++rpc_ssam_create_blk_controller(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct spdk_ssam_session_reg_info info = {0}; ++ struct rpc_ssam_blk_ctrlr req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ uint16_t queues = 0; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_create_blk_controller params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ req.vqueue = SPDK_INVALID_VQUEUE_NUM; ++ rc = spdk_json_decode_object(params, g_rpc_construct_ssam_blk_ctrlr, ++ SPDK_COUNTOF(g_rpc_construct_ssam_blk_ctrlr), &req); ++ if (rc != 0) { ++ SPDK_DEBUGLOG(ssam_rpc, "spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = rpc_ssam_get_gfunc_id_by_index(req.index); ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_BLK); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ if (req.dev_name == NULL) { ++ rc = -ENODEV; ++ goto invalid; ++ } ++ ++ rc = ssam_get_vqueue(&req, gfunc_id, &queues); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ rc = ssam_blk_controller_set_vqueue(gfunc_id, queues); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ rpc_init_session_reg_info(&info, queues, gfunc_id, request); ++ ++ rc = ssam_blk_construct(&info, req.dev_name, req.readonly, req.serial); ++ if (rc < 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_blk_ctrlr(&req); ++ free_rpc_ssam_session_reg_info(&info); ++ return; ++ ++invalid: ++ free_rpc_ssam_blk_ctrlr(&req); ++ free_rpc_ssam_session_reg_info(&info); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("create_blk_controller", rpc_ssam_create_blk_controller, ++ SPDK_RPC_RUNTIME) ++ ++struct rpc_delete_ssam_ctrlr { ++ char *index; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_delete_ssam_ctrlr_decoder[] = { ++ {"index", offsetof(struct rpc_delete_ssam_ctrlr, index), spdk_json_decode_string}, ++}; ++ ++static void ++free_rpc_delete_ssam_ctrlr(struct 
rpc_delete_ssam_ctrlr *req) ++{ ++ if (req->index != NULL) { ++ free(req->index); ++ req->index = NULL; ++ } ++} ++ ++static void ++rpc_ssam_delete_controller(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_delete_ssam_ctrlr req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ struct spdk_ssam_session *smsession; ++ struct spdk_ssam_dev *smdev = NULL; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_delete_controller params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_delete_ssam_ctrlr_decoder, ++ SPDK_COUNTOF(g_rpc_delete_ssam_ctrlr_decoder), &req); ++ if (rc != 0) { ++ SPDK_DEBUGLOG(ssam_rpc, "spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = rpc_ssam_get_gfunc_id_by_index(req.index); ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_lock(); ++ session_delete_times = 0; ++ delete_flag = 0; ++ delete_dev_times[gfunc_id] = 0; ++ ++ smdev = ssam_dev_next(NULL); ++ if (smdev->type == VIRTIO_TYPE_BLK) { ++ while (smdev != NULL) { ++ smsession = smdev->smsessions[gfunc_id]; ++ if ((smsession != NULL) && (smsession->task_cnt != 0)) { ++ SPDK_ERRLOG("The controller is busy.\n"); ++ rc = -EBUSY; ++ ssam_unlock(); ++ goto invalid; ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ } ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ smsession = smdev->smsessions[gfunc_id]; ++ if (smsession == NULL && session_delete_times == 0) { ++ SPDK_ERRLOG("Couldn't find session with function id %d.\n", gfunc_id); ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ if (smsession == NULL && session_delete_times == 1) { ++ break; ++ } ++ ++ if (smsession == NULL) { ++ continue; ++ } ++ ++ rc = rpc_ssam_session_reg_response_cb(smsession, request); ++ if (rc != 0) { ++ SPDK_ERRLOG("The controller is being operated.\n"); ++ rc = -EALREADY; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = ssam_session_unregister(smsession); ++ if (rc != 0) { ++ /* ++ * Unregitster response cb to avoid use request in the cb function, ++ * because if error happend, request will be responsed immediately ++ */ ++ ssam_session_unreg_response_cb(smsession); ++ ssam_unlock(); ++ goto invalid; ++ } ++ session_delete_times++; ++ smdev = ssam_dev_next(smdev); ++ } ++ ssam_unlock(); ++ ++ free_rpc_delete_ssam_ctrlr(&req); ++ return; ++ ++invalid: ++ free_rpc_delete_ssam_ctrlr(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("delete_controller", rpc_ssam_delete_controller, SPDK_RPC_RUNTIME) ++ ++struct rpc_delete_ssam_scsi_ctrlr { ++ char *name; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_delete_ssam_scsi_ctrlr_decoder[] = { ++ {"name", offsetof(struct rpc_delete_ssam_scsi_ctrlr, name), spdk_json_decode_string}, ++}; ++ ++static void ++free_rpc_delete_ssam_scsi_ctrlrs(struct rpc_delete_ssam_scsi_ctrlr *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static void ++rpc_ssam_delete_scsi_controller(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_delete_ssam_scsi_ctrlr req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ struct spdk_ssam_session *smsession; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_delete_controller params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = 
spdk_json_decode_object(params, g_rpc_delete_ssam_scsi_ctrlr_decoder, ++ SPDK_COUNTOF(g_rpc_delete_ssam_scsi_ctrlr_decoder), &req); ++ if (rc != 0) { ++ SPDK_DEBUGLOG(ssam_rpc, "spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_lock(); ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ SPDK_ERRLOG("Couldn't find session with function id %d.\n", gfunc_id); ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = rpc_ssam_session_reg_response_cb(smsession, request); ++ if (rc != 0) { ++ SPDK_ERRLOG("The controller is being operated.\n"); ++ rc = -EALREADY; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ if (smsession->task_cnt > 0) { ++ SPDK_ERRLOG("%s is processing I/O(%d) and cannot be deleted.\n", ++ smsession->name, smsession->task_cnt); ++ rc = -EBUSY; ++ ssam_session_unreg_response_cb(smsession); ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = ssam_session_unregister(smsession); ++ if (rc != 0) { ++ /* ++ * Unregitster response cb to avoid use request in the cb function, ++ * because if error happend, request will be responsed immediately ++ */ ++ ssam_session_unreg_response_cb(smsession); ++ ssam_unlock(); ++ goto invalid; ++ } ++ ssam_unlock(); ++ ++ free_rpc_delete_ssam_scsi_ctrlrs(&req); ++ return; ++ ++invalid: ++ free_rpc_delete_ssam_scsi_ctrlrs(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("delete_scsi_controller", rpc_ssam_delete_scsi_controller, SPDK_RPC_RUNTIME) ++ ++struct rpc_get_ssam_ctrlrs { ++ uint32_t function_id; ++ char *dbdf; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_get_ssam_ctrlrs_decoder[] = { ++ {"function_id", offsetof(struct rpc_get_ssam_ctrlrs, function_id), spdk_json_decode_uint32, true}, ++ {"dbdf", offsetof(struct rpc_get_ssam_ctrlrs, dbdf), spdk_json_decode_string, true}, ++}; ++ ++static void ++free_rpc_get_ssam_ctrlrs(struct rpc_get_ssam_ctrlrs *req) ++{ ++ if (req->dbdf != NULL) { ++ free(req->dbdf); ++ req->dbdf = NULL; ++ } ++} ++ ++static void ++_rpc_get_ssam_controller(struct spdk_json_write_ctx *w, ++ struct spdk_ssam_dev *smdev, uint16_t gfunc_id) ++{ ++ spdk_json_write_object_begin(w); ++ ++ spdk_json_write_named_string(w, "ctrlr", ssam_dev_get_name(smdev)); ++ spdk_json_write_named_string_fmt(w, "cpumask", "0x%s", ++ spdk_cpuset_fmt(spdk_thread_get_cpumask(smdev->thread))); ++ spdk_json_write_named_uint32(w, "session_num", (uint32_t)smdev->active_session_num); ++ ++ spdk_json_write_named_object_begin(w, "backend_specific"); ++ ssam_dump_info_json(smdev, gfunc_id, w); ++ spdk_json_write_object_end(w); ++ ++ spdk_json_write_object_end(w); ++} ++ ++static int ++rpc_ssam_show_controllers(struct spdk_jsonrpc_request *request, uint16_t gfunc_id) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_json_write_ctx *w = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ ssam_lock(); ++ if (gfunc_id != SPDK_INVALID_GFUNC_ID) { ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ ssam_unlock(); ++ return -ENODEV; ++ } ++ } ++ w = spdk_jsonrpc_begin_result(request); ++ spdk_json_write_array_begin(w); ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ if (gfunc_id != SPDK_INVALID_GFUNC_ID && smdev->smsessions[gfunc_id] == NULL) { ++ smdev = ssam_dev_next(smdev); ++ continue; ++ } ++ 
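++ /* Dump this device; when a specific function was requested, only devices hosting that session reach this point. */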
_rpc_get_ssam_controller(w, smdev, gfunc_id); ++ smdev = ssam_dev_next(smdev); ++ } ++ ssam_unlock(); ++ spdk_json_write_array_end(w); ++ spdk_jsonrpc_end_result(request, w); ++ ++ return 0; ++} ++ ++static int ++rpc_ssam_show_scsi_controllers(struct spdk_jsonrpc_request *request, uint16_t gfunc_id) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_json_write_ctx *w = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ ssam_lock(); ++ if (gfunc_id != SPDK_INVALID_GFUNC_ID) { ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ ssam_unlock(); ++ return -ENODEV; ++ } else if (smsession->backend->type != VIRTIO_TYPE_SCSI) { ++ ssam_unlock(); ++ return -EINVAL; ++ } ++ ++ smdev = smsession->smdev; ++ ++ w = spdk_jsonrpc_begin_result(request); ++ spdk_json_write_array_begin(w); ++ ++ smsession = smdev->smsessions[gfunc_id]; ++ smsession->backend->dump_info_json(smsession, w); ++ ssam_unlock(); ++ ++ spdk_json_write_array_end(w); ++ spdk_jsonrpc_end_result(request, w); ++ return 0; ++ } ++ ++ w = spdk_jsonrpc_begin_result(request); ++ spdk_json_write_array_begin(w); ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ while (smsession != NULL) { ++ if (smsession->backend->type == VIRTIO_TYPE_SCSI) { ++ smsession->backend->dump_info_json(smsession, w); ++ } ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ ssam_unlock(); ++ spdk_json_write_array_end(w); ++ spdk_jsonrpc_end_result(request, w); ++ ++ return 0; ++} ++ ++static void ++rpc_ssam_get_controllers(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_get_ssam_ctrlrs req = { ++ .function_id = SPDK_INVALID_GFUNC_ID, ++ .dbdf = NULL, ++ }; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ ++ if (params != NULL) { ++ rc = spdk_json_decode_object(params, g_rpc_get_ssam_ctrlrs_decoder, ++ SPDK_COUNTOF(g_rpc_get_ssam_ctrlrs_decoder), &req); ++ if (rc != 0) { ++ SPDK_DEBUGLOG(ssam_rpc, "spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ } ++ ++ if (req.function_id != SPDK_INVALID_GFUNC_ID && req.dbdf != NULL) { ++ SPDK_ERRLOG("get_controllers can have at most one parameter\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ if (req.function_id != SPDK_INVALID_GFUNC_ID) { ++ gfunc_id = req.function_id; ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ } ++ ++ if (req.dbdf != NULL) { ++ rc = ssam_rpc_get_gfunc_id_by_dbdf(req.dbdf, &gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ } ++ ++ rc = rpc_ssam_show_controllers(request, gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_get_ssam_ctrlrs(&req); ++ return; ++ ++invalid: ++ free_rpc_get_ssam_ctrlrs(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("get_controllers", rpc_ssam_get_controllers, SPDK_RPC_RUNTIME) ++ ++struct rpc_get_ssam_scsi_ctrlrs { ++ char *name; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_get_ssam_scsi_ctrlrs_decoder[] = { ++ {"name", offsetof(struct rpc_get_ssam_scsi_ctrlrs, name), spdk_json_decode_string, true}, ++}; ++ ++static void ++free_rpc_ssam_ctrlrs(struct rpc_get_ssam_scsi_ctrlrs *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static void 
++rpc_ssam_get_scsi_controllers(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_get_ssam_scsi_ctrlrs req = { ++ .name = NULL, ++ }; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ ++ if (params != NULL) { ++ rc = spdk_json_decode_object(params, g_rpc_get_ssam_scsi_ctrlrs_decoder, ++ SPDK_COUNTOF(g_rpc_get_ssam_scsi_ctrlrs_decoder), &req); ++ if (rc != 0) { ++ SPDK_DEBUGLOG(ssam_rpc, "spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ } ++ ++ if (req.name != NULL) { ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ } ++ ++ rc = rpc_ssam_show_scsi_controllers(request, gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_ctrlrs(&req); ++ return; ++ ++invalid: ++ free_rpc_ssam_ctrlrs(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("get_scsi_controllers", rpc_ssam_get_scsi_controllers, SPDK_RPC_RUNTIME) ++ ++struct rpc_ssam_controller_get_iostat { ++ uint32_t function_id; ++ char *dbdf; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_ssam_controller_get_iostat_decoder[] = { ++ {"function_id", offsetof(struct rpc_ssam_controller_get_iostat, function_id), spdk_json_decode_uint32, true}, ++ {"dbdf", offsetof(struct rpc_ssam_controller_get_iostat, dbdf), spdk_json_decode_string, true}, ++}; ++ ++static void ++free_rpc_ssam_controller_get_iostat(struct rpc_ssam_controller_get_iostat *req) ++{ ++ if (req->dbdf != NULL) { ++ free(req->dbdf); ++ req->dbdf = NULL; ++ } ++} ++ ++struct rpc_ssam_show_iostat_args { ++ uint16_t gfunc_id; ++ uint16_t tid; ++ /* vq_idx for blk; tgt_id for scsi */ ++ uint16_t id; ++ enum spdk_ssam_iostat_mode mode; ++}; ++ ++static int ++rpc_ssam_show_iostat(struct spdk_jsonrpc_request *request, struct rpc_ssam_show_iostat_args *args) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_json_write_ctx *w = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ struct spdk_ssam_show_iostat_args iostat_args = { ++ .id = args->id, ++ .mode = args->mode, ++ }; ++ ++ ssam_lock(); ++ if (args->gfunc_id != SPDK_INVALID_GFUNC_ID) { ++ smsession = ssam_session_find(args->gfunc_id); ++ if (smsession == NULL) { ++ ssam_unlock(); ++ return -ENODEV; ++ } ++ } ++ ++ w = spdk_jsonrpc_begin_result(request); ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_uint64(w, "tick_rate", spdk_get_ticks_hz()); ++ spdk_json_write_named_array_begin(w, "dbdfs"); ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ if (args->gfunc_id == SPDK_INVALID_GFUNC_ID) { ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "name", smdev->name); ++ spdk_json_write_named_uint64(w, "flight_io", smdev->io_num); ++ spdk_json_write_named_uint64(w, "discard_io_num", smdev->discard_io_num); ++ spdk_json_write_named_uint64(w, "wait_io", smdev->io_wait_cnt); ++ spdk_json_write_named_uint64(w, "wait_io_r", smdev->io_wait_r_cnt); ++ spdk_json_write_object_end(w); ++ } ++ if (smdev->active_session_num == 0 || (args->tid != SPDK_INVALID_CORE_ID && ++ smdev->tid != args->tid)) { ++ smdev = ssam_dev_next(smdev); ++ continue; ++ } ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ while (smsession != NULL) { ++ if (args->gfunc_id != SPDK_INVALID_GFUNC_ID && args->gfunc_id != smsession->gfunc_id) { ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ continue; ++ } ++ 
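++ /* Per-session counters are emitted by the backend-specific callback (virtio-blk or virtio-scsi), when one is provided. */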
if (smsession->backend->show_iostat_json != NULL) { ++ smsession->backend->show_iostat_json(smsession, &iostat_args, w); ++ } ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ ++ ssam_unlock(); ++ spdk_json_write_array_end(w); ++ spdk_json_write_object_end(w); ++ spdk_jsonrpc_end_result(request, w); ++ return 0; ++} ++ ++static void ++rpc_ssam_controller_get_iostat(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_ssam_controller_get_iostat req = { ++ .function_id = SPDK_INVALID_GFUNC_ID, ++ .dbdf = NULL, ++ }; ++ struct rpc_ssam_show_iostat_args iostat_args = { ++ .gfunc_id = SPDK_INVALID_GFUNC_ID, ++ .tid = SPDK_INVALID_CORE_ID, ++ .id = SPDK_INVALID_ID, ++ .mode = SSAM_IOSTAT_NORMAL, ++ }; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ ++ if (params != NULL) { ++ rc = spdk_json_decode_object(params, g_rpc_ssam_controller_get_iostat_decoder, ++ SPDK_COUNTOF(g_rpc_ssam_controller_get_iostat_decoder), &req); ++ if (rc != 0) { ++ SPDK_DEBUGLOG(ssam_rpc, "spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ } ++ ++ if (req.function_id != SPDK_INVALID_GFUNC_ID && req.dbdf != NULL) { ++ SPDK_ERRLOG("controller_get_iostat can have at most one parameter\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ if (req.function_id != SPDK_INVALID_GFUNC_ID) { ++ gfunc_id = req.function_id; ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ } ++ ++ if (req.dbdf != NULL) { ++ rc = ssam_rpc_get_gfunc_id_by_dbdf(req.dbdf, &gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ } ++ ++ iostat_args.gfunc_id = gfunc_id; ++ rc = rpc_ssam_show_iostat(request, &iostat_args); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_controller_get_iostat(&req); ++ return; ++ ++invalid: ++ free_rpc_ssam_controller_get_iostat(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("controller_get_iostat", rpc_ssam_controller_get_iostat, SPDK_RPC_RUNTIME) ++ ++struct rpc_ssam_blk_device_iostat { ++ char *index; ++ uint16_t tid; ++ uint16_t vq_idx; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_ssam_blk_device_iostat[] = { ++ {"index", offsetof(struct rpc_ssam_blk_device_iostat, index), spdk_json_decode_string}, ++ {"tid", offsetof(struct rpc_ssam_blk_device_iostat, tid), spdk_json_decode_uint16, true}, ++ {"vq_idx", offsetof(struct rpc_ssam_blk_device_iostat, vq_idx), spdk_json_decode_uint16, true}, ++}; ++ ++static void ++free_rpc_ssam_blk_device_iostat(struct rpc_ssam_blk_device_iostat *req) ++{ ++ if (req->index != NULL) { ++ free(req->index); ++ req->index = NULL; ++ } ++} ++ ++static int ++ssam_rpc_set_blk_device_iostat_args(struct rpc_ssam_show_iostat_args *iostat_args, ++ struct rpc_ssam_blk_device_iostat *req, uint16_t gfunc_id) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ ++ iostat_args->gfunc_id = gfunc_id; ++ iostat_args->tid = req->tid; ++ iostat_args->id = req->vq_idx; ++ if (iostat_args->tid == SPDK_INVALID_CORE_ID && iostat_args->id == SPDK_INVALID_VQUEUE_NUM) { ++ iostat_args->mode = SSAM_IOSTAT_SUM; ++ return 0; ++ } else if (iostat_args->tid == SPDK_INVALID_CORE_ID) { ++ iostat_args->mode = SSAM_IOSTAT_SPARSE; ++ } else if (iostat_args->id == SPDK_INVALID_VQUEUE_NUM) { ++ iostat_args->mode = SSAM_IOSTAT_DUMP_VQ; ++ } else { ++ iostat_args->mode = 
SSAM_IOSTAT_NORMAL; ++ } ++ ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ if ((iostat_args->tid != SPDK_INVALID_CORE_ID && smdev->tid != iostat_args->tid) || ++ smdev->smsessions[gfunc_id] == NULL) { ++ smdev = ssam_dev_next(smdev); ++ continue; ++ } else if (iostat_args->id != SPDK_INVALID_VQUEUE_NUM && ++ iostat_args->id >= smdev->smsessions[gfunc_id]->max_queues) { ++ SPDK_ERRLOG("vq_index(%u) should less then max_queues(%u)\n", iostat_args->id, ++ smdev->smsessions[gfunc_id]->max_queues); ++ return -ENODEV; ++ } ++ return 0; ++ } ++ SPDK_ERRLOG("cannot find blk device(%u)\n", gfunc_id); ++ return -ENODEV; ++} ++ ++static void ++rpc_ssam_blk_device_iostat(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_ssam_blk_device_iostat req = { ++ .tid = SPDK_INVALID_CORE_ID, ++ .vq_idx = SPDK_INVALID_VQUEUE_NUM, ++ }; ++ struct rpc_ssam_show_iostat_args iostat_args = { 0 }; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_scsi_device_iostat params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_ssam_blk_device_iostat, ++ SPDK_COUNTOF(g_rpc_ssam_blk_device_iostat), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = rpc_ssam_get_gfunc_id_by_index(req.index); ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_BLK); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ rc = ssam_rpc_set_blk_device_iostat_args(&iostat_args, &req, gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ rc = rpc_ssam_show_iostat(request, &iostat_args); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_blk_device_iostat(&req); ++ return; ++ ++invalid: ++ free_rpc_ssam_blk_device_iostat(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("blk_device_iostat", rpc_ssam_blk_device_iostat, SPDK_RPC_RUNTIME) ++ ++static void ++rpc_ssam_clear_iostat(void) ++{ ++ struct spdk_ssam_dev *smdev = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ ssam_lock(); ++ smdev = ssam_dev_next(NULL); ++ while (smdev != NULL) { ++ smsession = ssam_sessions_next(smdev->smsessions, NULL); ++ while (smsession != NULL) { ++ if (smsession->backend->clear_iostat_json != NULL) { ++ smsession->backend->clear_iostat_json(smsession); ++ } ++ smsession = ssam_sessions_next(smdev->smsessions, smsession); ++ } ++ smdev = ssam_dev_next(smdev); ++ } ++ ssam_unlock(); ++} ++ ++static void ++rpc_ssam_controller_clear_iostat(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ rpc_ssam_clear_iostat(); ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++} ++SPDK_RPC_REGISTER("controller_clear_iostat", rpc_ssam_controller_clear_iostat, SPDK_RPC_RUNTIME) ++ ++struct rpc_bdev_resize { ++ uint32_t function_id; ++ uint64_t new_size_in_mb; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_bdev_resize[] = { ++ {"function_id", offsetof(struct rpc_bdev_resize, function_id), spdk_json_decode_uint32}, ++ {"new_size_in_mb", offsetof(struct rpc_bdev_resize, new_size_in_mb), spdk_json_decode_uint64}, ++}; ++ ++static int ++ssam_bdev_resize(struct spdk_bdev *bdev, uint64_t new_size_in_mb) ++{ ++ char *bdev_name = bdev->name; ++ int rc; ++ uint64_t current_size_in_mb; ++ uint64_t new_size_in_byte; ++ ++ if (bdev->blocklen == 0) { ++ SPDK_ERRLOG("The blocklen of 
bdev %s is zero\n", bdev_name); ++ return -EINVAL; ++ } ++ ++ if (UINT64_MAX / bdev->blockcnt < bdev->blocklen) { ++ SPDK_ERRLOG("The old size of bdev is too large, blockcnt: %lu, blocklen: %u\n", ++ bdev->blockcnt, bdev->blocklen); ++ return -EINVAL; ++ } ++ ++ if (new_size_in_mb == 0) { ++ goto end; ++ } ++ ++ current_size_in_mb = bdev->blocklen * bdev->blockcnt / SSAM_MB; ++ if (new_size_in_mb < current_size_in_mb) { ++ SPDK_ERRLOG("The new bdev size must not be smaller than current bdev size\n"); ++ return -EINVAL; ++ } ++ ++ if (UINT64_MAX / new_size_in_mb < SSAM_MB) { ++ SPDK_ERRLOG("The new bdev size is too large\n"); ++ return -EINVAL; ++ } ++ ++end: ++ new_size_in_byte = new_size_in_mb * SSAM_MB; ++ ++ rc = spdk_bdev_notify_blockcnt_change(bdev, new_size_in_byte / bdev->blocklen); ++ if (rc != 0) { ++ SPDK_ERRLOG("failed to notify block cnt change\n"); ++ return -EINVAL; ++ } ++ SPDK_NOTICELOG("bdev %s resize %lu(mb) done.\n", bdev->name, new_size_in_mb); ++ ++ return 0; ++} ++ ++static void ++rpc_ssam_bdev_resize(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_bdev_resize req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ struct spdk_ssam_session *smsession = NULL; ++ struct spdk_bdev *bdev = NULL; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_bdev_resize params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_bdev_resize, ++ SPDK_COUNTOF(g_rpc_bdev_resize), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = req.function_id; ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_BLK); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_lock(); ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ SPDK_ERRLOG("Before resize target, there need to create controller.\n"); ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ if (smsession->backend->get_bdev != NULL) { ++ bdev = smsession->backend->get_bdev(smsession, 0); ++ } ++ if (bdev == NULL) { ++ SPDK_ERRLOG("The controller hasn't correlated to a bdev.\n"); ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ ssam_unlock(); ++ ++ rc = ssam_bdev_resize(bdev, req.new_size_in_mb); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++ ++invalid: ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("bdev_resize", rpc_ssam_bdev_resize, SPDK_RPC_RUNTIME) ++ ++struct rpc_scsi_bdev_resize { ++ char *name; ++ uint32_t tgt_id; ++ uint64_t new_size_in_mb; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_scsi_bdev_resize[] = { ++ {"name", offsetof(struct rpc_scsi_bdev_resize, name), spdk_json_decode_string}, ++ {"tgt_id", offsetof(struct rpc_scsi_bdev_resize, tgt_id), spdk_json_decode_uint32}, ++ {"new_size_in_mb", offsetof(struct rpc_scsi_bdev_resize, new_size_in_mb), spdk_json_decode_uint64}, ++}; ++ ++static void ++free_rpc_scsi_bdev_resize(struct rpc_scsi_bdev_resize *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static void ++rpc_ssam_scsi_bdev_resize(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_scsi_bdev_resize req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ struct spdk_ssam_session *smsession = NULL; ++ struct spdk_bdev *bdev = NULL; ++ int 
rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_scsi_bdev_resize params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_scsi_bdev_resize, ++ SPDK_COUNTOF(g_rpc_scsi_bdev_resize), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_SCSI); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_lock(); ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ SPDK_ERRLOG("A controller must be created before resizing the target.\n"); ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ if (smsession->backend->get_bdev != NULL) { ++ bdev = smsession->backend->get_bdev(smsession, req.tgt_id); ++ } ++ if (bdev == NULL) { ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ ssam_unlock(); ++ ++ rc = ssam_bdev_resize(bdev, req.new_size_in_mb); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_scsi_bdev_resize(&req); ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++ ++invalid: ++ free_rpc_scsi_bdev_resize(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("scsi_bdev_resize", rpc_ssam_scsi_bdev_resize, SPDK_RPC_RUNTIME) ++ ++struct rpc_bdev_aio_resize { ++ char *name; ++ uint64_t new_size_in_mb; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_bdev_aio_resize[] = { ++ {"name", offsetof(struct rpc_bdev_aio_resize, name), spdk_json_decode_string}, ++ {"new_size_in_mb", offsetof(struct rpc_bdev_aio_resize, new_size_in_mb), spdk_json_decode_uint64}, ++}; ++ ++static void ++free_rpc_ssam_bdev_aio_resize(struct rpc_bdev_aio_resize *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static void ++rpc_ssam_bdev_aio_resize(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_bdev_aio_resize req = {0}; ++ struct spdk_bdev *bdev = NULL; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_bdev_aio_resize params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_bdev_aio_resize, ++ SPDK_COUNTOF(g_rpc_bdev_aio_resize), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ if (req.name) { ++ bdev = spdk_bdev_get_by_name(req.name); ++ if (bdev == NULL) { ++ SPDK_ERRLOG("bdev '%s' does not exist\n", req.name); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ } ++ ++ rc = ssam_bdev_resize(bdev, req.new_size_in_mb); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_bdev_aio_resize(&req); ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++ ++invalid: ++ free_rpc_ssam_bdev_aio_resize(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("bdev_aio_resize", rpc_ssam_bdev_aio_resize, SPDK_RPC_RUNTIME) ++ ++static void ++rpc_os_ready(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params) ++{ ++ int rc = 0; ++ int fd; ++ char *enable = "1"; ++ ++ fd = open(SSAM_STORAGE_READY_FILE, O_RDWR); ++ if (fd < 0) { ++ SPDK_ERRLOG("Open storage ready file failed.\n"); ++ rc = -EPERM; ++ goto invalid; ++ } ++ ++ rc = write(fd, enable, strlen(enable)); ++ if (rc < 0) { ++ SPDK_ERRLOG("Write storage ready file failed.\n"); ++ close(fd); ++ goto
invalid; ++ } ++ ++ close(fd); ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++ ++invalid: ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("os_ready", rpc_os_ready, SPDK_RPC_RUNTIME) ++ ++static void ++rpc_set_os_status(struct spdk_jsonrpc_request *request, const struct spdk_json_val *params) ++{ ++ int rc = 0; ++ int fd; ++ char *disable = "0"; ++ ++ fd = open(SSAM_STORAGE_READY_FILE, O_RDWR); ++ if (fd < 0) { ++ SPDK_ERRLOG("Open storage ready file failed.\n"); ++ rc = -EPERM; ++ goto invalid; ++ } ++ ++ rc = write(fd, disable, strlen(disable)); ++ if (rc < 0) { ++ SPDK_ERRLOG("Write storage ready file failed.\n"); ++ close(fd); ++ goto invalid; ++ } ++ ++ close(fd); ++ spdk_jsonrpc_send_bool_response(request, true); ++ return; ++ ++invalid: ++ spdk_jsonrpc_send_error_response(request, rc, spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("os_not_ready", rpc_set_os_status, SPDK_RPC_RUNTIME) ++ ++struct rpc_create_scsi_controller { ++ char *dbdf; ++ char *name; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_create_scsi_controller[] = { ++ {"dbdf", offsetof(struct rpc_create_scsi_controller, dbdf), spdk_json_decode_string}, ++ {"name", offsetof(struct rpc_create_scsi_controller, name), spdk_json_decode_string}, ++}; ++ ++static void ++free_rpc_ssam_create_scsi_controller(struct rpc_create_scsi_controller *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++ if (req->dbdf != NULL) { ++ free(req->dbdf); ++ req->dbdf = NULL; ++ } ++} ++ ++static int ++ssam_rpc_get_gfunc_id_by_dbdf(char *dbdf, uint16_t *gfunc_id) ++{ ++ int rc; ++ uint32_t dbdf_num; ++ ++ rc = ssam_dbdf_str2num(dbdf, &dbdf_num); ++ if (rc != 0) { ++ SPDK_ERRLOG("convert dbdf(%s) to num failed, rc: %d.\n", dbdf, rc); ++ return -EINVAL; ++ } ++ ++ rc = ssam_get_funcid_by_dbdf(dbdf_num, gfunc_id); ++ if (rc != 0) { ++ SPDK_ERRLOG("find gfuncid by dbdf(%u) failed, rc: %d.\n", dbdf_num, rc); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_rpc_para_check_name(char *name) ++{ ++ uint16_t gfunc_id = ssam_get_gfunc_id_by_name(name); ++ if (gfunc_id == SPDK_INVALID_GFUNC_ID) { ++ return 0; ++ } ++ ++ return -EINVAL; ++} ++ ++static void ++rpc_ssam_create_scsi_controller(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct spdk_ssam_session_reg_info info = {0}; ++ struct rpc_create_scsi_controller req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ uint16_t queues; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_create_scsi_controller params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_create_scsi_controller, ++ SPDK_COUNTOF(g_rpc_create_scsi_controller), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = ssam_rpc_para_check_name(req.name); ++ if (rc != 0) { ++ SPDK_ERRLOG("controller name(%s) is existed\n", req.name); ++ goto invalid; ++ } ++ ++ rc = ssam_rpc_get_gfunc_id_by_dbdf(req.dbdf, &gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_SCSI); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ queues = ssam_get_queues(); ++ if (queues > SPDK_SSAM_MAX_VQUEUES) { ++ SPDK_ERRLOG("Queue number out of range, need less or equal than %u, actually %u.\n", ++ SPDK_SSAM_MAX_VQUEUES, queues); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ 
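++ /* info.name and info.dbdf are duplicated below; both the success and the error path release them through free_rpc_ssam_session_reg_info(). */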
rpc_init_session_reg_info(&info, queues, gfunc_id, request); ++ ++ info.name = strdup(req.name); ++ if (info.name == NULL) { ++ SPDK_ERRLOG("Failed to create name(%s) for ssam session reg info.\n", req.name); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ info.dbdf = strdup(req.dbdf); ++ if (info.dbdf == NULL) { ++ SPDK_ERRLOG("Failed to create dbdf(%s) for ssam session reg info.\n", req.dbdf); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = ssam_scsi_construct(&info); ++ if (rc < 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_create_scsi_controller(&req); ++ free_rpc_ssam_session_reg_info(&info); ++ return; ++ ++invalid: ++ free_rpc_ssam_create_scsi_controller(&req); ++ free_rpc_ssam_session_reg_info(&info); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++ return; ++} ++ ++SPDK_RPC_REGISTER("create_scsi_controller", rpc_ssam_create_scsi_controller, SPDK_RPC_RUNTIME) ++ ++struct rpc_scsi_controller_add_target { ++ char *name; ++ int32_t scsi_tgt_num; ++ char *bdev_name; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_scsi_controller_add_target[] = { ++ {"name", offsetof(struct rpc_scsi_controller_add_target, name), spdk_json_decode_string}, ++ {"scsi_tgt_num", offsetof(struct rpc_scsi_controller_add_target, scsi_tgt_num), spdk_json_decode_uint32}, ++ {"bdev_name", offsetof(struct rpc_scsi_controller_add_target, bdev_name), spdk_json_decode_string}, ++}; ++ ++static void ++free_rpc_ssam_scsi_ctrlr_add_target(struct rpc_scsi_controller_add_target *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++ if (req->bdev_name != NULL) { ++ free(req->bdev_name); ++ req->bdev_name = NULL; ++ } ++} ++ ++static void ++rpc_ssam_scsi_controller_add_target(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_scsi_controller_add_target req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ struct spdk_ssam_session *smsession; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_scsi_controller_add_target params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_scsi_controller_add_target, ++ SPDK_COUNTOF(g_rpc_scsi_controller_add_target), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_SCSI); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_lock(); ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ SPDK_ERRLOG("Before adding a SCSI target, there should be a SCSI controller.\n"); ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = rpc_ssam_session_reg_response_cb(smsession, request); ++ if (rc != 0) { ++ SPDK_ERRLOG("The controller is being operated.\n"); ++ rc = -EALREADY; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = ssam_scsi_dev_add_tgt(smsession, req.scsi_tgt_num, req.bdev_name); ++ if (rc != 0) { ++ /* ++ * Unregitster response cb to avoid use request in the cb function, ++ * because if error happend, request will be responsed immediately ++ */ ++ ssam_session_unreg_response_cb(smsession); ++ ssam_unlock(); ++ goto invalid; ++ } ++ ssam_unlock(); ++ ++ free_rpc_ssam_scsi_ctrlr_add_target(&req); ++ return; ++ ++invalid: ++ free_rpc_ssam_scsi_ctrlr_add_target(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} 
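++
++/*
++ * Example request for the method registered below (illustrative only; the
++ * controller name and bdev name are placeholders, not objects created by
++ * this patch):
++ *   {"jsonrpc": "2.0", "id": 1, "method": "scsi_controller_add_target",
++ *    "params": {"name": "ssam_scsi0", "scsi_tgt_num": 0, "bdev_name": "Malloc0"}}
++ */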
++ ++SPDK_RPC_REGISTER("scsi_controller_add_target", rpc_ssam_scsi_controller_add_target, ++ SPDK_RPC_RUNTIME) ++ ++struct rpc_scsi_controller_remove_target { ++ char *name; ++ int32_t scsi_tgt_num; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_scsi_controller_remove_target[] = { ++ {"name", offsetof(struct rpc_scsi_controller_remove_target, name), spdk_json_decode_string}, ++ {"scsi_tgt_num", offsetof(struct rpc_scsi_controller_remove_target, scsi_tgt_num), spdk_json_decode_int32}, ++}; ++ ++static void ++free_rpc_scsi_controller_remove_target(struct rpc_scsi_controller_remove_target *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static void ++rpc_ssam_scsi_controller_remove_target(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_scsi_controller_remove_target req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ struct spdk_ssam_session *smsession = NULL; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_scsi_controller_remove_target params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_scsi_controller_remove_target, ++ SPDK_COUNTOF(g_rpc_scsi_controller_remove_target), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check_type(gfunc_id, SSAM_DEVICE_VIRTIO_SCSI); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ ssam_lock(); ++ ++ smsession = ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ rc = -ENODEV; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = rpc_ssam_session_reg_response_cb(smsession, request); ++ if (rc != 0) { ++ SPDK_ERRLOG("The controller is being operated.\n"); ++ rc = -EALREADY; ++ ssam_unlock(); ++ goto invalid; ++ } ++ ++ rc = ssam_scsi_dev_remove_tgt(smsession, req.scsi_tgt_num, ++ rpc_ssam_send_response_cb, request); ++ if (rc != 0) { ++ /* ++ * Unregitster response cb to avoid use request in the cb function, ++ * because if error happend, request will be responsed immediately ++ */ ++ ssam_session_unreg_response_cb(smsession); ++ ssam_unlock(); ++ goto invalid; ++ } ++ ssam_unlock(); ++ free_rpc_scsi_controller_remove_target(&req); ++ return; ++ ++invalid: ++ free_rpc_scsi_controller_remove_target(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INVALID_PARAMS, ++ spdk_strerror(-rc)); ++} ++ ++SPDK_RPC_REGISTER("scsi_controller_remove_target", rpc_ssam_scsi_controller_remove_target, ++ SPDK_RPC_RUNTIME) ++ ++struct rpc_ssam_scsi_device_iostat { ++ char *name; ++ int32_t scsi_tgt_num; ++}; ++ ++static const struct spdk_json_object_decoder g_rpc_ssam_scsi_device_iostat[] = { ++ {"name", offsetof(struct rpc_ssam_scsi_device_iostat, name), spdk_json_decode_string}, ++ {"scsi_tgt_num", offsetof(struct rpc_ssam_scsi_device_iostat, scsi_tgt_num), spdk_json_decode_int32}, ++}; ++ ++static void ++free_rpc_ssam_scsi_device_iostat(struct rpc_ssam_scsi_device_iostat *req) ++{ ++ if (req->name != NULL) { ++ free(req->name); ++ req->name = NULL; ++ } ++} ++ ++static int ++rpc_ssam_show_scsi_iostat(struct spdk_jsonrpc_request *request, uint16_t gfunc_id, ++ uint16_t scsi_tgt_num) ++{ ++ struct spdk_json_write_ctx *w = NULL; ++ struct spdk_ssam_session *smsession = NULL; ++ struct spdk_ssam_show_iostat_args iostat_args = { ++ .id = scsi_tgt_num, ++ .mode = SSAM_IOSTAT_NORMAL, ++ }; ++ ++ ssam_lock(); ++ smsession = 
ssam_session_find(gfunc_id); ++ if (smsession == NULL) { ++ ssam_unlock(); ++ return -ENODEV; ++ } else if (smsession->backend->type != VIRTIO_TYPE_SCSI) { ++ ssam_unlock(); ++ return -EINVAL; ++ } ++ ++ w = spdk_jsonrpc_begin_result(request); ++ ++ if (smsession->backend->show_iostat_json != NULL) { ++ smsession->backend->show_iostat_json(smsession, &iostat_args, w); ++ } ++ ++ ssam_unlock(); ++ ++ spdk_jsonrpc_end_result(request, w); ++ return 0; ++} ++ ++static void ++rpc_ssam_scsi_device_iostat(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct rpc_ssam_scsi_device_iostat req = {0}; ++ uint16_t gfunc_id = SPDK_INVALID_GFUNC_ID; ++ int rc; ++ ++ if (params == NULL) { ++ SPDK_ERRLOG("rpc_ssam_scsi_device_iostat params null\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ rc = spdk_json_decode_object(params, g_rpc_ssam_scsi_device_iostat, ++ SPDK_COUNTOF(g_rpc_ssam_scsi_device_iostat), &req); ++ if (rc != 0) { ++ SPDK_ERRLOG("spdk_json_decode_object failed\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ if (req.scsi_tgt_num < 0 || req.scsi_tgt_num > SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("scsi_tgt_num is out of range\n"); ++ rc = -EINVAL; ++ goto invalid; ++ } ++ ++ gfunc_id = ssam_get_gfunc_id_by_name(req.name); ++ rc = ssam_rpc_para_check(gfunc_id); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ rc = rpc_ssam_show_scsi_iostat(request, gfunc_id, req.scsi_tgt_num); ++ if (rc != 0) { ++ goto invalid; ++ } ++ ++ free_rpc_ssam_scsi_device_iostat(&req); ++ return; ++ ++invalid: ++ free_rpc_ssam_scsi_device_iostat(&req); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, ++ spdk_strerror(-rc)); ++ return; ++} ++SPDK_RPC_REGISTER("scsi_device_iostat", rpc_ssam_scsi_device_iostat, SPDK_RPC_RUNTIME) ++ ++struct rpc_limit_log_interval { ++ int interval; ++}; ++ ++static void ++rpc_ssam_device_pcie_list(struct spdk_jsonrpc_request *request, ++ const struct spdk_json_val *params) ++{ ++ struct spdk_json_write_ctx *w = NULL; ++ int rc; ++ uint32_t size = ssam_get_device_pcie_list_size(); ++ if (size == 0) { ++ rc = ssam_init_device_pcie_list(); ++ if (rc != 0) { ++ SPDK_ERRLOG("init device_pcie_list failed\n"); ++ spdk_jsonrpc_send_error_response(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR, ++ spdk_strerror(-rc)); ++ return; ++ } ++ } ++ ++ w = spdk_jsonrpc_begin_result(request); ++ spdk_json_write_object_begin(w); ++ ++ ssam_dump_device_pcie_list(w); ++ ++ spdk_json_write_object_end(w); ++ spdk_jsonrpc_end_result(request, w); ++ return; ++} ++ ++SPDK_RPC_REGISTER("device_pcie_list", rpc_ssam_device_pcie_list, SPDK_RPC_RUNTIME) ++ ++SPDK_LOG_REGISTER_COMPONENT(ssam_rpc) +diff --git a/lib/ssam/ssam_scsi.c b/lib/ssam/ssam_scsi.c +new file mode 100644 +index 0000000..2420304 +--- /dev/null ++++ b/lib/ssam/ssam_scsi.c +@@ -0,0 +1,2420 @@ ++/* SPDX-License-Identifier: BSD-3-Clause ++ * Copyright (C) 2021-2025 Huawei Technologies Co. ++ * All rights reserved. 
++ */ ++ ++#include "linux/virtio_scsi.h" ++ ++#include "spdk/stdinc.h" ++ ++#include "spdk/likely.h" ++#include "spdk/scsi_spec.h" ++#include "spdk/env.h" ++#include "spdk/scsi.h" ++#include "spdk/ssam.h" ++#include "spdk/string.h" ++#include "spdk/bdev_module.h" ++ ++#include "ssam_internal.h" ++ ++#define SESSION_STOP_POLLER_PERIOD 1000 ++#define IOV_HEADER_TAIL_NUM 2 ++#define PAYLOAD_SIZE_MAX (2048U * 2048) ++#define VMIO_TYPE_VIRTIO_SCSI_CTRL 4 ++#define SSAM_SPDK_SCSI_DEV_MAX_LUN 1 ++#define SSAM_SENSE_DATE_LEN 32 ++#define PERF_STAT ++ ++/* Features supported by virtio-scsi lib. */ ++#define SPDK_SSAM_SCSI_FEATURES (SPDK_SSAM_FEATURES | \ ++ (1ULL << VIRTIO_SCSI_F_INOUT) | \ ++ (1ULL << VIRTIO_SCSI_F_HOTPLUG) | \ ++ (1ULL << VIRTIO_SCSI_F_CHANGE) | \ ++ (1ULL << VIRTIO_SCSI_F_T10_PI)) ++ ++/* Features that are specified in VIRTIO SCSI but currently not supported: ++ * - Live migration not supported yet ++ * - T10 PI ++ */ ++#define SPDK_SSAM_SCSI_DISABLED_FEATURES (SPDK_SSAM_DISABLED_FEATURES | \ ++ (1ULL << VIRTIO_SCSI_F_T10_PI)) ++ ++/* ssam-user-scsi support protocol features */ ++#define SPDK_SSAM_SCSI_PROTOCOL_FEATURES (1ULL << SSAM_USER_PROTOCOL_F_INFLIGHT_SHMFD) ++ ++enum spdk_scsi_dev_ssam_status { ++ /* Target ID is empty. */ ++ SSAM_SCSI_DEV_EMPTY, ++ ++ /* Target is still being added. */ ++ SSAM_SCSI_DEV_ADDING, ++ ++ /* Target ID occupied. */ ++ SSAM_SCSI_DEV_PRESENT, ++ ++ /* Target ID is occupied but removal is in progress. */ ++ SSAM_SCSI_DEV_REMOVING, ++ ++ /* In session - device (SCSI target) seen but removed. */ ++ SSAM_SCSI_DEV_REMOVED, ++}; ++ ++struct ssam_scsi_stat { ++ uint64_t count; ++ uint64_t total_tsc; /* pre_dma <- -> post_return */ ++ uint64_t dma_tsc; /* pre_dma <- -> post_dma */ ++ uint64_t bdev_tsc; /* pre_bdev <- -> post_bdev */ ++ uint64_t bdev_submit_tsc; /* <- spdk_bdev_xxx -> */ ++ uint64_t complete_tsc; /* pre_return <- -> post_return */ ++ uint64_t internel_tsc; /* total_tsc - dma_tsc - bdev_tsc - complete_tsc */ ++ ++ uint64_t complete_read_ios; /* Number of successfully completed read requests */ ++ uint64_t err_read_ios; /* Number of failed completed read requests */ ++ uint64_t complete_write_ios; /* Number of successfully completed write requests */ ++ uint64_t err_write_ios; /* Number of failed completed write requests */ ++ uint64_t flush_ios; /* Total number of flush requests */ ++ uint64_t complete_flush_ios; /* Number of successfully completed flush requests */ ++ uint64_t err_flush_ios; /* Number of failed completed flush requests */ ++ uint64_t fatal_ios; ++ uint64_t io_retry; ++ ++ uint64_t start_count; ++ uint64_t dma_count; ++ uint64_t dma_complete_count; ++ uint64_t bdev_count; ++ uint64_t bdev_complete_count; ++}; ++ ++struct spdk_scsi_dev_io_state { ++ struct spdk_bdev_io_stat stat; ++ uint64_t submit_tsc; ++ struct ssam_scsi_stat scsi_stat; ++}; ++ ++/** Context for a SCSI target in a ssam device */ ++struct spdk_scsi_dev_ssam_state { ++ struct spdk_scsi_dev_io_state *io_stat[SSAM_SPDK_SCSI_DEV_MAX_LUN]; ++ struct spdk_scsi_dev *dev; ++ ++ enum spdk_scsi_dev_ssam_status status; ++ ++ uint64_t flight_io; ++}; ++ ++struct ssam_scsi_tgt_hotplug_ctx { ++ unsigned scsi_tgt_num; ++}; ++ ++struct spdk_ssam_scsi_session { ++ struct spdk_ssam_session smsession; ++ int ref; ++ bool registered; ++ struct spdk_poller *stop_poller; ++ struct spdk_scsi_dev_ssam_state scsi_dev_state[SPDK_SSAM_SCSI_CTRLR_MAX_DEVS]; ++ char *dbdf; ++}; ++ ++struct ssam_scsi_session_ctx { ++ struct spdk_ssam_scsi_session *ssmsession; ++ void **user_ctx; ++}; 
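++
++/*
++ * The timestamps gathered in struct ssam_scsi_task_stat below feed the
++ * PERF_STAT counters in struct ssam_scsi_stat: ssam_scsi_stat_statistics()
++ * accumulates each stage as (end_tsc - start_tsc), and the iostat RPCs
++ * convert the accumulated totals to seconds using spdk_get_ticks_hz().
++ */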
++ ++struct ssam_scsi_task_stat { ++ uint64_t start_tsc; ++ uint64_t dma_start_tsc; ++ uint64_t dma_end_tsc; ++ uint64_t bdev_start_tsc; ++ uint64_t bdev_func_tsc; ++ uint64_t bdev_end_tsc; ++ uint64_t complete_start_tsc; ++ uint64_t complete_end_tsc; ++}; ++ ++struct spdk_ssam_scsi_task { ++ struct spdk_scsi_task scsi_task; ++ /* Returned status of I/O processing, it can be VIRTIO_BLK_S_OK, ++ * VIRTIO_BLK_S_IOERR or VIRTIO_BLK_S_UNSUPP ++ */ ++ union { ++ struct virtio_scsi_cmd_resp resp; ++ struct virtio_scsi_ctrl_tmf_resp tmf_resp; ++ }; ++ ++ /* Number of bytes processed successfully */ ++ uint32_t used_len; ++ ++ /* Records the amount of valid data in the struct iovec iovs array. */ ++ uint32_t iovcnt; ++ struct ssam_iovec iovs; ++ ++ /* If set, the task is currently used for I/O processing. */ ++ bool used; ++ ++ /* For bdev io wait */ ++ struct spdk_ssam_scsi_session *ssmsession; ++ struct spdk_ssam_session_io_wait session_io_wait; ++ ++ /* ssam request data */ ++ struct ssam_request *io_req; ++ ++ uint16_t vq_idx; ++ uint16_t task_idx; ++ int32_t tgt_id; ++ struct spdk_ssam_session *smsession; ++ struct spdk_scsi_dev *scsi_dev; ++ struct ssam_scsi_task_stat task_stat; ++}; ++ ++struct ssam_add_tgt_ev_ctx { ++ char *bdev_name; ++ int tgt_num; ++}; ++ ++static void ssam_scsi_request_worker(struct spdk_ssam_session *smsession, void *arg); ++static void ssam_scsi_destroy_bdev_device(struct spdk_ssam_session *smsession, void *args); ++static void ssam_scsi_response_worker(struct spdk_ssam_session *smsession, void *arg); ++static int ssam_scsi_remove_session(struct spdk_ssam_session *smsession); ++static void ssam_scsi_remove_self(struct spdk_ssam_session *smsession); ++static void ssam_scsi_dump_info_json(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w); ++static void ssam_scsi_write_config_json(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w); ++static int ssam_scsi_get_config(struct spdk_ssam_session *smsession, uint8_t *config, ++ uint32_t len, uint16_t queues); ++static void ssam_scsi_show_iostat_json(struct spdk_ssam_session *smsession, ++ struct spdk_ssam_show_iostat_args *args, ++ struct spdk_json_write_ctx *w); ++static void ssam_scsi_clear_iostat_json(struct spdk_ssam_session *smsession); ++static void ssam_scsi_print_stuck_io_info(struct spdk_ssam_session *smsession); ++static void ssam_scsi_req_complete(struct spdk_ssam_dev *smdev, struct ssam_request *io_req, ++ uint8_t status); ++static struct spdk_bdev *ssam_scsi_get_bdev(struct spdk_ssam_session *smsession, uint32_t id); ++ ++static void ssam_free_scsi_task_pool(struct spdk_ssam_scsi_session *ssmsession); ++static int ssam_scsi_dev_hot_remove_tgt(struct spdk_ssam_session *smsession, void **_ctx); ++static void ssam_scsi_process_io_task(struct spdk_ssam_session *smsession, ++ struct spdk_ssam_scsi_task *task); ++static int ssam_scsi_task_iovs_memory_get(struct spdk_ssam_scsi_task *task, uint32_t payload_size); ++static void ssam_scsi_submit_io_task(struct spdk_ssam_scsi_task *task); ++static void ssam_scsi_destruct_tgt(struct spdk_ssam_scsi_session *ssmsession, int scsi_tgt_num); ++ ++static const struct spdk_ssam_session_backend g_ssam_scsi_session_backend = { ++ .type = VIRTIO_TYPE_SCSI, ++ .request_worker = ssam_scsi_request_worker, ++ .destroy_bdev_device = ssam_scsi_destroy_bdev_device, ++ .response_worker = ssam_scsi_response_worker, ++ .remove_session = ssam_scsi_remove_session, ++ .remove_self = ssam_scsi_remove_self, ++ .print_stuck_io_info = 
ssam_scsi_print_stuck_io_info, ++ .dump_info_json = ssam_scsi_dump_info_json, ++ .write_config_json = ssam_scsi_write_config_json, ++ .ssam_get_config = ssam_scsi_get_config, ++ .show_iostat_json = ssam_scsi_show_iostat_json, ++ .clear_iostat_json = ssam_scsi_clear_iostat_json, ++ .get_bdev = ssam_scsi_get_bdev, ++}; ++ ++static void ++ssam_scsi_task_stat_tick(uint64_t *tsc) ++{ ++#ifdef PERF_STAT ++ *tsc = spdk_get_ticks(); ++#endif ++ return; ++} ++ ++static void ++ssam_scsi_stat_statistics(struct spdk_ssam_scsi_task *task) ++{ ++#ifdef PERF_STAT ++ if (task->scsi_task.lun == NULL || task->io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL || ++ task->task_stat.bdev_func_tsc == 0 || task->task_stat.bdev_end_tsc == 0) { ++ return; ++ } ++ ++ int32_t lun_id = spdk_scsi_lun_get_id(task->scsi_task.lun); ++ struct ssam_scsi_stat *scsi_stat = ++ &task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[lun_id]->scsi_stat; ++ ++ uint64_t dma_tsc = task->task_stat.dma_end_tsc - task->task_stat.dma_start_tsc; ++ uint64_t bdev_tsc = task->task_stat.bdev_end_tsc - task->task_stat.bdev_start_tsc; ++ uint64_t bdev_submit_tsc = task->task_stat.bdev_func_tsc - task->task_stat.bdev_start_tsc; ++ uint64_t complete_tsc = task->task_stat.complete_end_tsc - task->task_stat.complete_start_tsc; ++ uint64_t total_tsc = task->task_stat.complete_end_tsc - task->task_stat.start_tsc; ++ ++ struct ssam_io_message *io_cmd = &task->io_req->req.cmd; ++ if (io_cmd->writable) { /* read io */ ++ if (task->scsi_task.status == SPDK_SCSI_STATUS_GOOD) { ++ scsi_stat->complete_read_ios++; ++ } else { ++ scsi_stat->err_read_ios++; ++ } ++ } else { ++ if (task->scsi_task.status == SPDK_SCSI_STATUS_GOOD) { ++ scsi_stat->complete_write_ios++; ++ } else { ++ scsi_stat->err_write_ios++; ++ } ++ } ++ ++ scsi_stat->dma_tsc += dma_tsc; ++ scsi_stat->bdev_tsc += bdev_tsc; ++ scsi_stat->bdev_submit_tsc += bdev_submit_tsc; ++ scsi_stat->complete_tsc += complete_tsc; ++ scsi_stat->total_tsc += total_tsc; ++ scsi_stat->internel_tsc += total_tsc - complete_tsc - bdev_tsc - dma_tsc; ++ scsi_stat->count += 1; ++#endif ++} ++ ++static uint32_t ++ssam_scsi_tgtid_to_lunid(uint32_t tgt_id) ++{ ++ return (((tgt_id) << 0x8) | SSAM_VIRTIO_SCSI_LUN_ID); ++} ++ ++static int ++ssam_scsi_get_config(struct spdk_ssam_session *smsession, uint8_t *config, ++ uint32_t len, uint16_t queues) ++{ ++ struct virtio_scsi_config scsi_cfg; ++ scsi_cfg.num_queues = 0x80; ++ scsi_cfg.seg_max = 0x6f; ++ scsi_cfg.max_sectors = 0x1ff; ++ scsi_cfg.cmd_per_lun = 0x80; ++ scsi_cfg.event_info_size = 0; ++ scsi_cfg.sense_size = 0x60; ++ scsi_cfg.cdb_size = 0x20; ++ scsi_cfg.max_channel = 0; ++ scsi_cfg.max_target = SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; ++ scsi_cfg.max_lun = 0xff; ++ ++ memcpy(config, (void *)&scsi_cfg, sizeof(struct virtio_scsi_config)); ++ return 0; ++} ++ ++static int ++ssam_scsi_send_event(struct spdk_ssam_session *smsession, unsigned scsi_dev_num, ++ uint32_t event, uint32_t reason) ++{ ++ struct virtio_scsi_event vscsi_event = {0}; ++ int ret; ++ ++ vscsi_event.event = event; ++ vscsi_event.reason = reason; ++ ++ vscsi_event.lun[0] = 1; ++ vscsi_event.lun[0x1] = (uint8_t)scsi_dev_num; ++ vscsi_event.lun[0x2] = 0; ++ vscsi_event.lun[0x3] = 0; ++ memset(&vscsi_event.lun[0x4], 0, 0x4); ++ ++ ret = ssam_send_action(smsession->gfunc_id, SSAM_FUNCTION_ACTION_SCSI_EVENT, ++ (const void *)&vscsi_event, sizeof(struct virtio_scsi_event)); ++ if (ret < 0) { ++ SPDK_ERRLOG("%s: SCSI target %d send event %u(reason %u) failed: %s.\n", ++ smsession->name, scsi_dev_num, event, reason, 
strerror(-ret)); ++ } ++ return ret; ++} ++ ++static void ++ssam_scsi_stop_cpl_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ spdk_ssam_session_rsp_fn rsp_fn = smsession->rsp_fn; ++ void *rsp_ctx = smsession->rsp_ctx; ++ ++ SPDK_NOTICELOG("SCSI controller %s deleted\n", smsession->name); ++ ++ if (smsession->name != NULL) { ++ free(smsession->name); ++ smsession->name = NULL; ++ } ++ ++ if (ssmsession->dbdf != NULL) { ++ free(ssmsession->dbdf); ++ ssmsession->dbdf = NULL; ++ } ++ ++ ssam_set_session_be_freed(ctx); ++ memset(ssmsession, 0, sizeof(*ssmsession)); ++ free(ssmsession); ++ ++ if (rsp_fn != NULL) { ++ rsp_fn(rsp_ctx, 0); ++ rsp_fn = NULL; ++ } ++} ++ ++static void ++ssam_scsi_destroy_session(struct ssam_scsi_session_ctx *ctx) ++{ ++ struct spdk_ssam_session *smsession = &ctx->ssmsession->smsession; ++ struct spdk_ssam_scsi_session *ssmsession = ctx->ssmsession; ++ ++ if (smsession->task_cnt > 0) { ++ return; ++ } ++ ++ if (ssmsession->ref > 0) { ++ return; ++ } ++ ++ ssam_session_destroy(smsession); ++ ++ ssmsession->registered = false; ++ spdk_poller_unregister(&ssmsession->stop_poller); ++ ssam_free_scsi_task_pool(ssmsession); ++ ssam_session_stop_done(&ssmsession->smsession, 0, ctx->user_ctx); ++ free(ctx); ++ ++ return; ++} ++ ++static int ++ssam_scsi_destroy_session_poller_cb(void *arg) ++{ ++ struct ssam_scsi_session_ctx *ctx = arg; ++ ++ if (ssam_trylock() != 0) { ++ return SPDK_POLLER_BUSY; ++ } ++ ++ ssam_scsi_destroy_session(ctx); ++ ++ ssam_unlock(); ++ ++ return SPDK_POLLER_BUSY; ++} ++ ++static int ++ssam_scsi_stop_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct ssam_scsi_session_ctx *_ctx = ++ (struct ssam_scsi_session_ctx *)calloc(1, sizeof(struct ssam_scsi_session_ctx)); ++ ++ if (_ctx == NULL) { ++ SPDK_ERRLOG("%s: calloc scsi session ctx error.\n", smsession->name); ++ return -ENOMEM; ++ } ++ ++ _ctx->ssmsession = ssmsession; ++ _ctx->user_ctx = ctx; ++ ++ ssmsession->stop_poller = SPDK_POLLER_REGISTER(ssam_scsi_destroy_session_poller_cb, ++ _ctx, SESSION_STOP_POLLER_PERIOD); ++ if (ssmsession->stop_poller == NULL) { ++ SPDK_ERRLOG("%s: ssam_destroy_session_poller_cb start failed.\n", smsession->name); ++ ssam_session_stop_done(smsession, -EBUSY, ctx); ++ free(_ctx); ++ return -EBUSY; ++ } ++ ++ return 0; ++} ++ ++static int ++ssam_scsi_stop(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = true, ++ .need_rsp = true, ++ }; ++ return ssam_send_event_to_session(smsession, ssam_scsi_stop_cb, ssam_scsi_stop_cpl_cb, ++ send_event_flag, NULL); ++} ++ ++/* sync interface for hot-remove */ ++static void ++ssam_scsi_remove_self(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ /* no need error */ ++ if (ssmsession->ref > 0) { ++ return; /* still have targets */ ++ } ++ ++ SPDK_NOTICELOG("%s: is being freed\n", smsession->name); ++ ++ ssmsession->registered = false; ++ ssam_free_scsi_task_pool(ssmsession); ++ ++ ssam_sessions_remove(smsession->smdev->smsessions, smsession); ++ ++ if (smsession->smdev->active_session_num > 0) { ++ smsession->smdev->active_session_num--; ++ } ++ smsession->smdev = NULL; ++ /* free smsession */ ++ free(smsession->name); ++ free(ssmsession->dbdf); ++ free(ssmsession); ++} ++ ++/* async 
interface */ ++static int ++ssam_scsi_remove_session(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ int ret; ++ ++ if (smsession->registered && ssmsession->ref != 0) { ++ SPDK_ERRLOG("%s: SCSI target %d is still present.\n", smsession->name, ssmsession->ref); ++ return -EBUSY; ++ } ++ ++ ret = ssam_scsi_stop(smsession); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static struct spdk_scsi_dev * ++ssam_scsi_dev_get_tgt(struct spdk_ssam_scsi_session *ssmsession, uint8_t num) ++{ ++ if (ssmsession == NULL) { ++ SPDK_ERRLOG("ssmsession is null.\n"); ++ return NULL; ++ } ++ if (num >= SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("%s: tgt num %u over %u.\n", ssmsession->smsession.name, num, ++ SPDK_SSAM_SCSI_CTRLR_MAX_DEVS); ++ return NULL; ++ } ++ if (ssmsession->scsi_dev_state[num].status != SSAM_SCSI_DEV_PRESENT) { ++ return NULL; ++ } ++ ++ if (ssmsession->scsi_dev_state[num].dev == NULL) { ++ SPDK_ERRLOG("%s: no tgt num %u device.\n", ssmsession->smsession.name, num); ++ return NULL; ++ } ++ return ssmsession->scsi_dev_state[num].dev; ++} ++ ++static void ++ssam_scsi_dump_device_info(struct spdk_ssam_session *smsession, struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev *sdev; ++ struct spdk_scsi_lun *lun; ++ int32_t tgt_id; ++ ++ spdk_json_write_named_array_begin(w, "scsi_targets"); ++ for (tgt_id = 0; tgt_id < SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; tgt_id++) { ++ sdev = ssam_scsi_dev_get_tgt(ssmsession, tgt_id); ++ if (!sdev) { ++ continue; ++ } ++ ++ spdk_json_write_object_begin(w); ++ ++ spdk_json_write_named_uint32(w, "scsi_target_num", tgt_id); ++ spdk_json_write_named_uint32(w, "id", spdk_scsi_dev_get_id(sdev)); ++ spdk_json_write_named_string(w, "target_name", spdk_scsi_dev_get_name(sdev)); ++ lun = spdk_scsi_dev_get_lun(sdev, 0); ++ if (!lun) { ++ continue; ++ } ++ spdk_json_write_named_string(w, "bdev_name", spdk_scsi_lun_get_bdev_name(lun)); ++ ++ spdk_json_write_object_end(w); ++ } ++ ++ spdk_json_write_array_end(w); ++} ++ ++static void ++ssam_scsi_dump_info_json(struct spdk_ssam_session *smsession, struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ spdk_json_write_object_begin(w); ++ ++ spdk_json_write_named_string(w, "dbdf", ssmsession->dbdf); ++ spdk_json_write_named_string(w, "name", ssam_session_get_name(smsession)); ++ spdk_json_write_named_uint32(w, "function_id", (uint32_t)smsession->gfunc_id); ++ spdk_json_write_named_uint32(w, "queues", (uint32_t)smsession->max_queues); ++ spdk_json_write_named_string(w, "ctrlr", ssam_dev_get_name(smsession->smdev)); ++ spdk_json_write_named_string_fmt(w, "cpumask", "0x%s", ++ spdk_cpuset_fmt(spdk_thread_get_cpumask(smsession->smdev->thread))); ++ ++ ssam_scsi_dump_device_info(smsession, w); ++ ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_scsi_write_config_json(struct spdk_ssam_session *smsession, ++ struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev *sdev; ++ struct spdk_scsi_lun *lun; ++ int32_t tgt_id; ++ ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "method", "create_scsi_controller"); ++ ++ spdk_json_write_named_object_begin(w, "params"); ++ spdk_json_write_named_string(w, "dbdf", ssmsession->dbdf); ++ 
spdk_json_write_named_string(w, "name", smsession->name); ++ spdk_json_write_object_end(w); ++ ++ spdk_json_write_object_end(w); ++ ++ for (tgt_id = 0; tgt_id < SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; tgt_id++) { ++ sdev = ssam_scsi_dev_get_tgt(ssmsession, tgt_id); ++ if (!sdev) { ++ continue; ++ } ++ ++ lun = spdk_scsi_dev_get_lun(sdev, 0); ++ if (!lun) { ++ SPDK_ERRLOG("%s: no lun, continue.\n", smsession->name); ++ continue; ++ } ++ ++ spdk_json_write_object_begin(w); ++ spdk_json_write_named_string(w, "method", "scsi_controller_add_target"); ++ ++ spdk_json_write_named_object_begin(w, "params"); ++ spdk_json_write_named_string(w, "name", smsession->name); ++ spdk_json_write_named_uint32(w, "scsi_tgt_num", tgt_id); ++ ++ spdk_json_write_named_string(w, "bdev_name", spdk_scsi_lun_get_bdev_name(lun)); ++ spdk_json_write_object_end(w); ++ ++ spdk_json_write_object_end(w); ++ } ++} ++ ++static void ++ssam_scsi_show_tgt_iostat_json(struct spdk_ssam_scsi_session *ssmsession, ++ struct spdk_json_write_ctx *w, int32_t tgt_id, struct spdk_scsi_dev *sdev) ++{ ++ struct spdk_scsi_dev_io_state *io_stat; ++ struct spdk_scsi_lun *lun; ++ struct ssam_scsi_stat scsi_stat; ++ uint64_t ticks_hz = spdk_get_ticks_hz(); ++ uint64_t count; ++ uint64_t poll_count; ++ ++ lun = spdk_scsi_dev_get_lun(sdev, 0); ++ if (lun == NULL) { ++ return; ++ } ++ ++ io_stat = ssmsession->scsi_dev_state[tgt_id].io_stat[0]; ++ if (io_stat == NULL) { ++ SPDK_ERRLOG("No scsi iostat, tgt_id %d\n", tgt_id); ++ return; ++ } ++ ++ spdk_json_write_object_begin(w); ++ ++ spdk_json_write_named_uint32(w, "scsi_dev_num", tgt_id); ++ spdk_json_write_named_uint32(w, "id", spdk_scsi_dev_get_id(sdev)); ++ spdk_json_write_named_string(w, "target_name", spdk_scsi_dev_get_name(sdev)); ++ ++ memcpy(&scsi_stat, &io_stat->scsi_stat, sizeof(struct ssam_scsi_stat)); ++ ++ spdk_json_write_named_int32(w, "id", spdk_scsi_lun_get_id(lun)); ++ spdk_json_write_named_string(w, "bdev_name", spdk_scsi_lun_get_bdev_name(lun)); ++ spdk_json_write_named_uint64(w, "bytes_read", io_stat->stat.bytes_read); ++ spdk_json_write_named_uint64(w, "num_read_ops", io_stat->stat.num_read_ops); ++ spdk_json_write_named_uint64(w, "bytes_written", io_stat->stat.bytes_written); ++ spdk_json_write_named_uint64(w, "num_write_ops", io_stat->stat.num_write_ops); ++ spdk_json_write_named_uint64(w, "read_latency_ticks", io_stat->stat.read_latency_ticks); ++ spdk_json_write_named_uint64(w, "write_latency_ticks", io_stat->stat.write_latency_ticks); ++ ++ spdk_json_write_named_uint64(w, "complete_read_ios", scsi_stat.complete_read_ios); ++ spdk_json_write_named_uint64(w, "err_read_ios", scsi_stat.err_read_ios); ++ spdk_json_write_named_uint64(w, "complete_write_ios", scsi_stat.complete_write_ios); ++ spdk_json_write_named_uint64(w, "err_write_ios", scsi_stat.err_write_ios); ++ spdk_json_write_named_uint64(w, "flush_ios", scsi_stat.flush_ios); ++ spdk_json_write_named_uint64(w, "complete_flush_ios", scsi_stat.complete_flush_ios); ++ spdk_json_write_named_uint64(w, "err_flush_ios", scsi_stat.err_flush_ios); ++ spdk_json_write_named_uint64(w, "fatal_ios", scsi_stat.fatal_ios); ++ spdk_json_write_named_uint64(w, "io_retry", scsi_stat.io_retry); ++ ++ spdk_json_write_named_uint64(w, "start_count", scsi_stat.start_count); ++ spdk_json_write_named_uint64(w, "dma_count", scsi_stat.dma_count); ++ spdk_json_write_named_uint64(w, "dma_complete_count", scsi_stat.dma_complete_count); ++ spdk_json_write_named_uint64(w, "bdev_count", scsi_stat.bdev_count); ++ spdk_json_write_named_uint64(w, 
"bdev_complete_count", scsi_stat.bdev_complete_count); ++ spdk_json_write_named_uint64(w, "flight_io", ssmsession->scsi_dev_state[tgt_id].flight_io); ++ ++ if (scsi_stat.count == 0) { ++ count = 1; ++ } else { ++ count = scsi_stat.count; ++ } ++ ++ if (ssmsession->smsession.smdev->stat.poll_count == 0) { ++ poll_count = 1; ++ } else { ++ poll_count = ssmsession->smsession.smdev->stat.poll_count; ++ } ++ ++ spdk_json_write_named_string_fmt(w, "poll_lat", "%.9f", ++ (float)ssmsession->smsession.smdev->stat.poll_tsc / poll_count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "total_lat", "%.9f", ++ (float)scsi_stat.total_tsc / count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "dma_lat", "%.9f", (float)scsi_stat.dma_tsc / count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "bdev_lat", "%.9f", ++ (float)scsi_stat.bdev_tsc / count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "bdev_submit_lat", "%.9f", ++ (float)scsi_stat.bdev_submit_tsc / count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "complete_lat", "%.9f", ++ (float)scsi_stat.complete_tsc / count / ticks_hz); ++ spdk_json_write_named_string_fmt(w, "internal_lat", "%.9f", ++ (float)scsi_stat.internel_tsc / count / ticks_hz); ++ ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_scsi_show_iostat_json(struct spdk_ssam_session *smsession, ++ struct spdk_ssam_show_iostat_args *args, ++ struct spdk_json_write_ctx *w) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev *sdev; ++ int32_t tgt_id; ++ ++ if (args->id != SPDK_INVALID_ID) { ++ sdev = ssam_scsi_dev_get_tgt(ssmsession, args->id); ++ if (sdev != NULL) { ++ ssam_scsi_show_tgt_iostat_json(ssmsession, w, args->id, sdev); ++ } else { ++ spdk_json_write_object_begin(w); ++ spdk_json_write_object_end(w); ++ } ++ return; ++ } ++ ++ spdk_json_write_object_begin(w); ++ ++ spdk_json_write_named_uint32(w, "function_id", smsession->gfunc_id); ++ ++ spdk_json_write_named_array_begin(w, "scsi_target"); ++ ++ for (tgt_id = 0; tgt_id < SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; tgt_id++) { ++ sdev = ssam_scsi_dev_get_tgt(ssmsession, tgt_id); ++ if (!sdev) { ++ continue; ++ } ++ ssam_scsi_show_tgt_iostat_json(ssmsession, w, tgt_id, sdev); ++ } ++ ++ spdk_json_write_array_end(w); ++ spdk_json_write_object_end(w); ++} ++ ++static void ++ssam_scsi_clear_iostat_json(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev_io_state *io_stat; ++ int32_t tgt_id; ++ int32_t lun_id; ++ for (tgt_id = 0; tgt_id < SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; tgt_id++) { ++ for (lun_id = 0; lun_id < SSAM_SPDK_SCSI_DEV_MAX_LUN; lun_id++) { ++ io_stat = ssmsession->scsi_dev_state[tgt_id].io_stat[lun_id]; ++ if (io_stat == NULL) { ++ continue; ++ } ++ memset(io_stat, 0, sizeof(struct spdk_scsi_dev_io_state)); ++ } ++ } ++ return; ++} ++ ++static struct spdk_bdev * ++ssam_scsi_get_bdev(struct spdk_ssam_session *smsession, uint32_t tgt_id) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev *scsi_dev; ++ struct spdk_scsi_lun *scsi_lun = NULL; ++ const char *bdev_name = NULL; ++ if (tgt_id >= SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("%s: tgt %d invalid\n", smsession->name, tgt_id); ++ return NULL; ++ } ++ if (ssmsession->scsi_dev_state[tgt_id].dev == NULL) { ++ SPDK_ERRLOG("%s: tgt %d not be created\n", smsession->name, tgt_id); ++ return NULL; ++ } ++ ++ scsi_dev = 
ssmsession->scsi_dev_state[tgt_id].dev; ++ /* lun id use 0 */ ++ scsi_lun = spdk_scsi_dev_get_lun(scsi_dev, 0); ++ if (scsi_lun == NULL) { ++ return NULL; ++ } ++ bdev_name = spdk_scsi_lun_get_bdev_name(scsi_lun); ++ if (bdev_name == NULL) { ++ return NULL; ++ } ++ return spdk_bdev_get_by_name(bdev_name); ++} ++ ++static int ++ssam_scsi_iostat_construct(struct spdk_ssam_scsi_session *ssmsession, int32_t tgt_id, ++ int *lun_id_list, int num_luns) ++{ ++ struct spdk_scsi_dev_io_state *io_stat; ++ int32_t lun_id; ++ int i; ++ ++ for (i = 0; i < num_luns; i++) { ++ lun_id = lun_id_list[i]; ++ io_stat = ssmsession->scsi_dev_state[tgt_id].io_stat[lun_id]; ++ if (io_stat != NULL) { ++ SPDK_ERRLOG("io_stat with tgt %d lun %d already exist\n", tgt_id, lun_id); ++ return -EEXIST; ++ } ++ ++ io_stat = calloc(1, sizeof(*io_stat)); ++ if (io_stat == NULL) { ++ SPDK_ERRLOG("Could not allocate io_stat for tgt %d lun %d\n", tgt_id, lun_id); ++ return -ENOMEM; ++ } ++ ssmsession->scsi_dev_state[tgt_id].io_stat[lun_id] = io_stat; ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_scsi_iostat_destruct(struct spdk_scsi_dev_ssam_state *state) ++{ ++ int32_t lun_id; ++ ++ for (lun_id = 0; lun_id < SSAM_SPDK_SCSI_DEV_MAX_LUN; lun_id++) { ++ if (state->io_stat[lun_id] != NULL) { ++ free(state->io_stat[lun_id]); ++ state->io_stat[lun_id] = NULL; ++ } ++ } ++ ++ return; ++} ++ ++static void ++ssam_remove_scsi_tgt(struct spdk_ssam_scsi_session *ssmsession, unsigned scsi_tgt_num) ++{ ++ struct spdk_scsi_dev_ssam_state *state = &ssmsession->scsi_dev_state[scsi_tgt_num]; ++ struct spdk_ssam_session *smsession = &ssmsession->smsession; ++ spdk_ssam_session_rsp_fn rsp_fn = smsession->rsp_fn; ++ void *rsp_ctx = smsession->rsp_ctx; ++ ++ smsession->rsp_fn = NULL; ++ smsession->rsp_ctx = NULL; ++ ++ /* delete scsi port */ ++ spdk_scsi_dev_delete_port(state->dev, 0); ++ ++ /* destruct scsi dev */ ++ spdk_scsi_dev_destruct(state->dev, NULL, NULL); ++ state->dev = NULL; ++ ++ /* free iostat */ ++ ssam_scsi_iostat_destruct(state); ++ state->status = SSAM_SCSI_DEV_EMPTY; ++ ++ /* ref-- */ ++ if (ssmsession->ref > 0) { ++ ssmsession->ref--; ++ } else { ++ SPDK_ERRLOG("%s: ref internel error\n", smsession->name); ++ } ++ if (rsp_fn != NULL) { ++ rsp_fn(rsp_ctx, 0); ++ rsp_fn = NULL; ++ } ++ SPDK_NOTICELOG("%s: target %u is removed\n", smsession->name, scsi_tgt_num); ++} ++ ++static int ++ssam_scsi_get_payload_size(struct ssam_request *io_req, uint32_t *payload_size) ++{ ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ uint32_t payload = 0; ++ uint32_t first_vec; ++ uint32_t end_vec; ++ uint32_t loop; ++ ++ if (io_cmd->writable) { /* read io */ ++ /* FROM_DEV: [req][resp][write_buf]...[write_buf ]*, write_buf start at index 2 */ ++ first_vec = 2; ++ end_vec = io_cmd->iovcnt - 1; ++ } else { /* write io */ ++ first_vec = 1; ++ /* TO_DEV: [req][read_buf]...[read_buf][resp], read_buf last index is iovnt-2 */ ++ end_vec = io_cmd->iovcnt - 2; ++ } ++ ++ for (loop = first_vec; loop <= end_vec; loop++) { ++ if (spdk_unlikely((UINT32_MAX - io_cmd->iovs[loop].iov_len) < payload)) { ++ SPDK_ERRLOG("payload size overflow\n"); ++ return -1; ++ } ++ payload += io_cmd->iovs[loop].iov_len; ++ } ++ ++ if (spdk_unlikely(payload > PAYLOAD_SIZE_MAX)) { ++ SPDK_ERRLOG("payload size larger than %u, payload_size = %u\n", ++ PAYLOAD_SIZE_MAX, payload); ++ return -1; ++ } ++ ++ *payload_size = payload; ++ ++ return 0; ++} ++ ++static void ++ssam_session_io_resubmit(void *arg) ++{ ++ struct spdk_ssam_scsi_task *task = (struct spdk_ssam_scsi_task 
*)arg; ++ struct spdk_ssam_session *smsession = &task->ssmsession->smsession; ++ uint32_t payload_size = task->scsi_task.transfer_len; ++ int rc; ++ ++ rc = ssam_scsi_task_iovs_memory_get(task, payload_size); ++ if (rc != 0) { ++ ssam_session_insert_io_wait(smsession, &task->session_io_wait); ++ return; ++ } ++ ssam_scsi_process_io_task(smsession, task); ++} ++ ++static void ++ssam_scsi_task_init(struct spdk_ssam_scsi_task *task) ++{ ++ memset(&task->scsi_task, 0, sizeof(struct spdk_scsi_task)); ++ ++ task->used = true; ++ task->iovcnt = 0; ++ task->io_req = NULL; ++ task->session_io_wait.cb_fn = ssam_session_io_resubmit; ++ task->session_io_wait.cb_arg = task; ++} ++ ++static void ++ssam_scsi_task_dma_request_para(struct ssam_dma_request *data_request, ++ struct spdk_ssam_scsi_task *task, ++ uint32_t type, uint8_t status) ++{ ++ struct spdk_scsi_task *scsi_task = &task->scsi_task; ++ struct ssam_io_message *io_cmd = NULL; ++ struct spdk_ssam_dma_cb dma_cb = { ++ .status = status, ++ .req_dir = type, ++ .gfunc_id = task->io_req->gfunc_id, ++ .vq_idx = task->vq_idx, ++ .task_idx = task->task_idx ++ }; ++ ++ io_cmd = &task->io_req->req.cmd; ++ data_request->cb = (void *) * (uint64_t *)&dma_cb; ++ data_request->gfunc_id = task->io_req->gfunc_id; ++ data_request->flr_seq = task->io_req->flr_seq; ++ data_request->direction = type; ++ data_request->data_len = scsi_task->transfer_len; ++ if (type == SSAM_REQUEST_DATA_STORE) { ++ data_request->src = task->iovs.phys.sges; ++ data_request->src_num = task->iovcnt; ++ /* FROM_DEV: [req][resp][write_buf]...[write_buf ]*, write_buf start at index 2 */ ++ data_request->dst = &io_cmd->iovs[2]; ++ /* dma data iovs does not contain header and tail */ ++ data_request->dst_num = io_cmd->iovcnt - IOV_HEADER_TAIL_NUM; ++ } else if (type == SSAM_REQUEST_DATA_LOAD) { ++ data_request->src = &io_cmd->iovs[1]; ++ /* dma data iovs does not contain header and tail */ ++ data_request->src_num = io_cmd->iovcnt - IOV_HEADER_TAIL_NUM; ++ data_request->dst = task->iovs.phys.sges; ++ data_request->dst_num = task->iovcnt; ++ } ++} ++ ++static void ++ssam_scsi_task_finish(struct spdk_ssam_scsi_task *task) ++{ ++ struct spdk_ssam_session *smsession = task->smsession; ++ struct spdk_ssam_virtqueue *vq = &smsession->virtqueue[task->vq_idx]; ++ ++ if (smsession->task_cnt == 0) { ++ SPDK_ERRLOG("%s: task count internel error\n", smsession->name); ++ return; ++ } ++ ++ task->io_req = NULL; ++ ++ if (task->iovs.virt.sges[0].iov_base != NULL) { ++ ssam_mempool_free(smsession->mp, task->iovs.virt.sges[0].iov_base); ++ task->iovs.virt.sges[0].iov_base = NULL; ++ } ++ ++ memset(&task->iovs, 0, sizeof(task->iovs)); ++ ++ task->iovcnt = 0; ++ smsession->task_cnt--; ++ task->used = false; ++ vq->index[vq->index_l] = task->task_idx; ++ vq->index_l = (vq->index_l + 1) & 0xFF; ++ vq->use_num--; ++} ++ ++static int ++ssam_scsi_io_complete(struct spdk_ssam_dev *smdev, struct ssam_request *io_req, void *rsp_buf, ++ uint32_t rsp_len) ++{ ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ struct ssam_virtio_res *virtio_res = NULL; ++ struct ssam_io_response io_resp; ++ struct iovec io_vec; ++ int rc; ++ ++ memset(&io_resp, 0, sizeof(io_resp)); ++ io_resp.gfunc_id = io_req->gfunc_id; ++ io_resp.iocb_id = io_req->iocb_id; ++ io_resp.status = io_req->status; ++ io_resp.req = io_req; ++ io_resp.flr_seq = io_req->flr_seq; ++ ++ virtio_res = (struct ssam_virtio_res *)&io_resp.data; ++ virtio_res->iovs = &io_vec; ++ if (io_cmd->writable) { /* FROM_DEV: [req][resp][write_buf]...[write_buf ] */ ++ 
virtio_res->iovs->iov_base = io_cmd->iovs[1].iov_base; ++ virtio_res->iovs->iov_len = io_cmd->iovs[1].iov_len; ++ } else { /* TO_DEV: [req][read_buf]...[read_buf][resp] */ ++ virtio_res->iovs->iov_base = io_cmd->iovs[io_cmd->iovcnt - 1].iov_base; ++ virtio_res->iovs->iov_len = io_cmd->iovs[io_cmd->iovcnt - 1].iov_len; ++ } ++ virtio_res->iovcnt = 1; ++ virtio_res->rsp = rsp_buf; ++ virtio_res->rsp_len = rsp_len; ++ ++ rc = ssam_io_complete(smdev->tid, &io_resp); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ ssam_dev_io_dec(smdev); ++ return 0; ++} ++ ++struct ssam_scsi_req_complete_arg { ++ struct spdk_ssam_dev *smdev; ++ struct ssam_request *io_req; ++ uint8_t status; ++}; ++ ++static void ++ssam_scsi_req_complete_cb(void *arg) ++{ ++ struct ssam_scsi_req_complete_arg *cb_arg = (struct ssam_scsi_req_complete_arg *)arg; ++ struct virtio_scsi_cmd_resp resp = {0}; ++ struct virtio_scsi_ctrl_tmf_resp tmf_resp = {0}; ++ int rc; ++ ++ if (spdk_unlikely(cb_arg->io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL)) { ++ tmf_resp.response = cb_arg->status; ++ rc = ssam_scsi_io_complete(cb_arg->smdev, cb_arg->io_req, &tmf_resp, ++ sizeof(struct virtio_scsi_ctrl_tmf_resp)); ++ } else { ++ resp.response = cb_arg->status; ++ rc = ssam_scsi_io_complete(cb_arg->smdev, cb_arg->io_req, &resp, ++ sizeof(struct virtio_scsi_cmd_resp)); ++ } ++ ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_scsi_req_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(cb_arg->smdev, io_wait_r); ++ return; ++ } ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ++ssam_scsi_req_complete(struct spdk_ssam_dev *smdev, struct ssam_request *io_req, uint8_t status) ++{ ++ struct virtio_scsi_cmd_resp resp = {0}; ++ struct virtio_scsi_ctrl_tmf_resp tmf_resp = {0}; ++ int rc; ++ ++ if (spdk_unlikely(io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL)) { ++ tmf_resp.response = status; ++ rc = ssam_scsi_io_complete(smdev, io_req, &tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp)); ++ } else { ++ resp.response = status; ++ rc = ssam_scsi_io_complete(smdev, io_req, &resp, sizeof(struct virtio_scsi_cmd_resp)); ++ } ++ ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_scsi_req_complete_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_scsi_req_complete_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smdev; ++ cb_arg->io_req = io_req; ++ cb_arg->status = status; ++ io_wait_r->cb_fn = ssam_scsi_req_complete_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smdev, io_wait_r); ++ } ++} ++ ++static void ++ssam_scsi_task_put(struct spdk_ssam_scsi_task *task) ++{ ++ memset(&task->resp, 0, sizeof(task->resp)); ++ if (task->io_req->type != VMIO_TYPE_VIRTIO_SCSI_CTRL) { ++ task->ssmsession->scsi_dev_state[task->tgt_id].flight_io--; ++ } ++ spdk_scsi_task_put(&task->scsi_task); ++} ++ ++static void ++ssam_scsi_submit_completion_cb(void *arg) ++{ ++ struct spdk_ssam_scsi_task *task = (struct spdk_ssam_scsi_task *)arg; ++ struct spdk_ssam_session *smsession = task->smsession; ++ int rc; ++ ++ if (spdk_unlikely(task->io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL)) { ++ rc = ssam_scsi_io_complete(smsession->smdev, 
task->io_req, &task->tmf_resp, ++ sizeof(struct virtio_scsi_ctrl_tmf_resp)); ++ } else { ++ rc = ssam_scsi_io_complete(smsession->smdev, task->io_req, &task->resp, ++ sizeof(struct virtio_scsi_cmd_resp)); ++ } ++ ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_scsi_submit_completion_cb; ++ io_wait_r->cb_arg = task; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ ssam_scsi_task_stat_tick(&task->task_stat.complete_end_tsc); ++ ssam_scsi_stat_statistics(task); ++ ++ /* after spdk_task_construct called, put task */ ++ ssam_scsi_task_put(task); ++} ++ ++static void ++ssam_scsi_submit_completion(struct spdk_ssam_scsi_task *task) ++{ ++ struct spdk_ssam_session *smsession = task->smsession; ++ struct ssam_request *io_req = task->io_req; ++ int rc; ++ ++ ssam_scsi_task_stat_tick(&task->task_stat.complete_start_tsc); ++ if (spdk_unlikely(io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL)) { ++ rc = ssam_scsi_io_complete(smsession->smdev, io_req, &task->tmf_resp, ++ sizeof(struct virtio_scsi_ctrl_tmf_resp)); ++ } else { ++ rc = ssam_scsi_io_complete(smsession->smdev, io_req, &task->resp, ++ sizeof(struct virtio_scsi_cmd_resp)); ++ } ++ ++ if (rc != 0) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_scsi_submit_completion_cb; ++ io_wait_r->cb_arg = task; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ ssam_scsi_task_stat_tick(&task->task_stat.complete_end_tsc); ++ ssam_scsi_stat_statistics(task); ++ ++ /* after spdk_task_construct called, put task */ ++ ssam_scsi_task_put(task); ++} ++ ++struct ssam_scsi_dma_data_request_arg { ++ struct spdk_ssam_dev *smdev; ++ struct spdk_ssam_scsi_task *task; ++ struct ssam_dma_request dma_req; ++}; ++ ++static void ++ssam_scsi_dma_data_request_cb(void *arg) ++{ ++ struct ssam_scsi_dma_data_request_arg *cb_arg = (struct ssam_scsi_dma_data_request_arg *)arg; ++ int ret = ssam_dma_data_request(cb_arg->smdev->tid, &cb_arg->dma_req); ++ if (ret == -ENOMEM || ret == -EIO) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ if (io_wait_r == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ io_wait_r->cb_fn = ssam_scsi_dma_data_request_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(cb_arg->smdev, io_wait_r); ++ return; ++ } ++ if (ret < 0) { ++ SPDK_ERRLOG("ssam dma data request failed(%d)\n", ret); ++ cb_arg->task->resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssam_scsi_submit_completion(cb_arg->task); ++ } ++ free(cb_arg); ++ cb_arg = NULL; ++} ++ ++static void ++ssam_scsi_task_dma_request(struct spdk_ssam_scsi_task *task, enum data_request_dma_type data_dir) ++{ ++ struct spdk_ssam_session *smsession = task->smsession; ++ struct ssam_dma_request data_request = {0}; ++ int ret = 0; ++ ++ ssam_scsi_task_stat_tick(&task->task_stat.dma_start_tsc); ++ task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[0]->scsi_stat.dma_count++; ++ ++ switch (data_dir) { ++ case SSAM_REQUEST_DATA_STORE: ++ ssam_scsi_task_dma_request_para(&data_request, task, SSAM_REQUEST_DATA_STORE, 0); 
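++		/*
++		 * For the STORE direction, ssam_scsi_task_dma_request_para() above set
++		 * src to task->iovs.phys.sges (the data just produced by the bdev read)
++		 * and dst to the host buffers at io_cmd->iovs[2..]; vq_idx and task_idx
++		 * are packed into dma_cb so ssam_scsi_response_worker() can find this
++		 * task again when the ssam_dma_rsp comes back.
++		 */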
++ ++ /* dma request: ipu -> Host */ ++ ret = ssam_dma_data_request(smsession->smdev->tid, &data_request); ++ break; ++ ++ case SSAM_REQUEST_DATA_LOAD: ++ ssam_scsi_task_dma_request_para(&data_request, task, SSAM_REQUEST_DATA_LOAD, 0); ++ ++ /* dma request: Host -> ipu */ ++ ret = ssam_dma_data_request(smsession->smdev->tid, &data_request); ++ break; ++ ++ default: ++ SPDK_ERRLOG("Invalid data dir: %u.\n", data_dir); ++ break; ++ } ++ ++ if (ret == -ENOMEM || ret == -EIO) { ++ struct spdk_ssam_session_io_wait_r *io_wait_r = ++ calloc(1, sizeof(struct spdk_ssam_session_io_wait_r)); ++ struct ssam_scsi_dma_data_request_arg *cb_arg = ++ calloc(1, sizeof(struct ssam_scsi_dma_data_request_arg)); ++ if (io_wait_r == NULL || cb_arg == NULL) { ++ SPDK_ERRLOG("calloc for io_wait_r failed\n"); ++ sleep(1); ++ raise(SIGTERM); ++ } ++ cb_arg->smdev = smsession->smdev; ++ cb_arg->dma_req = data_request; ++ cb_arg->task = task; ++ io_wait_r->cb_fn = ssam_scsi_dma_data_request_cb; ++ io_wait_r->cb_arg = cb_arg; ++ ssam_session_insert_io_wait_r(smsession->smdev, io_wait_r); ++ return; ++ } ++ ++ if (ret < 0) { ++ SPDK_ERRLOG("ssam dma data request failed(%d)\n", ret); ++ task->resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssam_scsi_submit_completion(task); ++ } ++} ++ ++static void ++ssam_scsi_task_copy_resp(struct spdk_ssam_scsi_task *task) ++{ ++ struct spdk_scsi_task *scsi_task = &task->scsi_task; ++ ++ if (spdk_unlikely(task->io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL)) { ++ task->tmf_resp.response = scsi_task->status; ++ } else { ++ task->resp.status = scsi_task->status; ++ if (spdk_unlikely(scsi_task->sense_data_len > SSAM_SENSE_DATE_LEN)) { ++ return; ++ } ++ if (scsi_task->status != SPDK_SCSI_STATUS_GOOD) { ++ memcpy(task->resp.sense, scsi_task->sense_data, scsi_task->sense_data_len); ++ task->resp.sense_len = scsi_task->sense_data_len; ++ } ++ ++ if (scsi_task->transfer_len != scsi_task->length) { ++ SPDK_ERRLOG("task transfer_len(%u) not equal length(%u), internel error.\n", ++ scsi_task->transfer_len, scsi_task->length); ++ } ++ ++ task->resp.resid = scsi_task->length - scsi_task->data_transferred; ++ } ++} ++ ++static void ++ssam_scsi_read_task_cpl_cb(struct spdk_scsi_task *scsi_task) ++{ ++ if (spdk_unlikely(spdk_get_shutdown_sig_received())) { ++ /* ++ * In the hot restart process, when this callback is triggered, ++ * the task and bdev_io memory may have been released. ++ * Therefore, task and bdev_io are not released in this scenario. ++ */ ++ return; ++ } ++ struct spdk_ssam_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_ssam_scsi_task, ++ scsi_task); ++ int32_t tgt_id = task->tgt_id; ++ int32_t lun_id = spdk_scsi_lun_get_id(scsi_task->lun); ++ struct spdk_scsi_dev_io_state *io_stat = task->ssmsession->scsi_dev_state[tgt_id].io_stat[lun_id]; ++ ++ /* Second part start of read */ ++ io_stat->submit_tsc = spdk_get_ticks(); ++ ++ ssam_scsi_task_copy_resp(task); ++ ++ ssam_scsi_task_stat_tick(&task->task_stat.bdev_end_tsc); ++ task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[0]->scsi_stat.bdev_complete_count++; ++ ++ /* 1) Read request without data is no need to dma; ++ 2) Read request failed just complete it. 
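++	   3) Otherwise the data now held in task->iovs still has to be copied to
++	      the host, which is what the SSAM_REQUEST_DATA_STORE dma request below does.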
++ */ ++ if (scsi_task->length == 0 || scsi_task->status != SPDK_SCSI_STATUS_GOOD) { ++ ssam_scsi_submit_completion(task); ++ return; ++ } ++ ++ /* Dma data from IPU to HOST */ ++ ssam_scsi_task_dma_request(task, SSAM_REQUEST_DATA_STORE); ++ ++ return; ++} ++ ++static void ++ssam_scsi_write_task_cpl_cb(struct spdk_scsi_task *scsi_task) ++{ ++ if (spdk_unlikely(spdk_get_shutdown_sig_received())) { ++ /* ++ * In the hot restart process, when this callback is triggered, ++ * the task and bdev_io memory may have been released. ++ * Therefore, task and bdev_io are not released in this scenario. ++ */ ++ return; ++ } ++ struct spdk_ssam_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_ssam_scsi_task, ++ scsi_task); ++ int32_t tgt_id = task->tgt_id; ++ int32_t lun_id = spdk_scsi_lun_get_id(scsi_task->lun); ++ struct spdk_scsi_dev_io_state *io_stat = task->ssmsession->scsi_dev_state[tgt_id].io_stat[lun_id]; ++ uint32_t payload_size = task->scsi_task.transfer_len; ++ ++ /* Second part start of write */ ++ io_stat->submit_tsc = spdk_get_ticks(); ++ ++ /* copy result from spdk_scsi_task to spdk_ssam_scsi_task->resp */ ++ ssam_scsi_task_copy_resp(task); ++ ++ ssam_scsi_task_stat_tick(&task->task_stat.bdev_end_tsc); ++ task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[0]->scsi_stat.bdev_complete_count++; ++ ++ ssam_scsi_submit_completion(task); ++ /* Second part end of write */ ++ io_stat->stat.write_latency_ticks += ssam_get_diff_tsc(io_stat->submit_tsc); ++ io_stat->stat.bytes_written += payload_size; ++ io_stat->stat.num_write_ops++; ++ ++ return; ++} ++ ++static void ++ssam_scsi_ctl_task_cpl_cb(struct spdk_scsi_task *scsi_task) ++{ ++ struct spdk_ssam_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_ssam_scsi_task, ++ scsi_task); ++ ++ ssam_scsi_task_copy_resp(task); ++ ++ ssam_scsi_submit_completion(task); ++} ++ ++static void ++ssam_scsi_task_free_cb(struct spdk_scsi_task *scsi_task) ++{ ++ struct spdk_ssam_scsi_task *task = SPDK_CONTAINEROF(scsi_task, struct spdk_ssam_scsi_task, ++ scsi_task); ++ ++ ssam_scsi_task_finish(task); ++} ++ ++static int ++ssam_scsi_task_init_target(struct spdk_ssam_scsi_task *task, const __u8 *lun) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = task->ssmsession; ++ struct spdk_scsi_dev_ssam_state *state = NULL; ++ int32_t lun_id = (((uint16_t)lun[2] << 8) | lun[3]) & 0x3FFF; ++ int32_t tgt_id = lun[1]; ++ ++ if (lun[0] != 1 || tgt_id >= SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("First byte must be 1 and second is target\n"); ++ ssmsession->smsession.smdev->discard_io_num++; ++ return -1; ++ } ++ ++ state = &ssmsession->scsi_dev_state[tgt_id]; ++ task->scsi_dev = state->dev; ++ if (state->dev == NULL || state->status != SSAM_SCSI_DEV_PRESENT) { ++ return -1; ++ } ++ ++ task->tgt_id = tgt_id; ++ task->scsi_task.target_port = spdk_scsi_dev_find_port_by_id(task->scsi_dev, 0); ++ task->scsi_task.lun = spdk_scsi_dev_get_lun(state->dev, lun_id); ++ if (task->scsi_task.lun == NULL) { ++ SPDK_ERRLOG("Failed to init scsi task lun by lun_id(%d)\n", lun_id); ++ return -1; ++ } ++ return 0; ++} ++ ++static void ++ssam_scsi_submit_io_task(struct spdk_ssam_scsi_task *task) ++{ ++ task->resp.response = VIRTIO_SCSI_S_OK; ++ ++ ssam_scsi_task_stat_tick(&task->task_stat.bdev_start_tsc); ++ spdk_scsi_dev_queue_task(task->scsi_dev, &task->scsi_task); ++ task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[0]->scsi_stat.bdev_count++; ++ ssam_scsi_task_stat_tick(&task->task_stat.bdev_func_tsc); ++ ++ SPDK_DEBUGLOG(ssam_blk_data, "====== Task: task_idx %u 
submitted ======\n", task->task_idx); ++} ++ ++static int ++ssam_scsi_task_iovs_memory_get(struct spdk_ssam_scsi_task *task, uint32_t payload_size) ++{ ++ struct ssam_mempool *mp = task->smsession->mp; ++ void *buffer = NULL; ++ uint64_t phys_addr = 0; ++ uint32_t alloc_size; ++ ++ if (payload_size == 0) { /* A little strange */ ++ alloc_size = 1; /* Alloc one iov at least */ ++ } else { ++ alloc_size = payload_size; ++ } ++ ++ buffer = ssam_mempool_alloc(mp, alloc_size, &phys_addr); ++ if (spdk_unlikely(buffer == NULL)) { ++ return -ENOMEM; ++ } ++ ++ /* ssam request max IO size is PAYLOAD_SIZE_MAX, only use one iov to save data */ ++ task->iovs.virt.sges[0].iov_base = buffer; ++ task->iovs.phys.sges[0].iov_base = (void *)phys_addr; ++ task->iovs.virt.sges[0].iov_len = payload_size; ++ task->iovs.phys.sges[0].iov_len = payload_size; ++ task->iovcnt = 1; ++ ++ return 0; ++} ++ ++static void ++scsi_mgmt_task_submit(struct spdk_ssam_scsi_task *task, enum spdk_scsi_task_func func) ++{ ++ task->tmf_resp.response = VIRTIO_SCSI_S_OK; ++ task->scsi_task.function = func; ++ spdk_scsi_dev_queue_mgmt_task(task->scsi_dev, &task->scsi_task); ++} ++ ++static void ++ssam_scsi_process_ctl_task(struct spdk_ssam_session *smsession, struct spdk_ssam_scsi_task *task) ++{ ++ struct virtio_scsi_ctrl_tmf_req *ctrl_req = (struct virtio_scsi_ctrl_tmf_req *) ++ task->io_req->req.cmd.header; ++ int32_t lun_id = spdk_scsi_lun_get_id(task->scsi_task.lun); ++ struct spdk_scsi_dev_io_state *io_stat = ++ task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[lun_id]; ++ int ret = 0; ++ ++ spdk_scsi_task_construct(&task->scsi_task, ssam_scsi_ctl_task_cpl_cb, ssam_scsi_task_free_cb); ++ ret = ssam_scsi_task_init_target(task, ctrl_req->lun); ++ if (ret < 0) { ++ task->tmf_resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssam_scsi_submit_completion(task); ++ return; ++ } ++ ++ switch (ctrl_req->type) { ++ case VIRTIO_SCSI_T_TMF: ++ /* Check if we are processing a valid request */ ++ if (task->scsi_dev == NULL) { ++ task->tmf_resp.response = VIRTIO_SCSI_S_BAD_TARGET; ++ ssam_scsi_submit_completion(task); ++ break; ++ } ++ ++ switch (ctrl_req->subtype) { ++ case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET: ++ /* Handle LUN reset */ ++ SPDK_DEBUGLOG(ssam_scsi, "%s: LUN reset\n", smsession->name); ++ ++ scsi_mgmt_task_submit(task, SPDK_SCSI_TASK_FUNC_LUN_RESET); ++ return; ++ default: ++ task->tmf_resp.response = VIRTIO_SCSI_S_ABORTED; ++ ssam_scsi_submit_completion(task); ++ /* Unsupported command */ ++ SPDK_DEBUGLOG(ssam_scsi, "%s: unsupported TMF command %x\n", ++ smsession->name, ctrl_req->subtype); ++ break; ++ } ++ break; ++ ++ case VIRTIO_SCSI_T_AN_QUERY: ++ case VIRTIO_SCSI_T_AN_SUBSCRIBE: ++ task->tmf_resp.response = VIRTIO_SCSI_S_ABORTED; ++ ssam_scsi_submit_completion(task); ++ break; ++ ++ default: ++ SPDK_DEBUGLOG(ssam_scsi, "%s: Unsupported control command %x\n", ++ smsession->name, ctrl_req->type); ++ io_stat->scsi_stat.fatal_ios++; ++ break; ++ } ++} ++ ++static void ++ssam_scsi_io_task_construct(struct spdk_ssam_scsi_task *task) ++{ ++ struct spdk_scsi_task *scsi_task = &task->scsi_task; ++ struct ssam_io_message *io_cmd = &task->io_req->req.cmd; ++ ++ if (io_cmd->writable) { /* read io */ ++ spdk_scsi_task_construct(scsi_task, ssam_scsi_read_task_cpl_cb, ssam_scsi_task_free_cb); ++ } else { /* write io */ ++ spdk_scsi_task_construct(scsi_task, ssam_scsi_write_task_cpl_cb, ssam_scsi_task_free_cb); ++ } ++} ++ ++static int32_t ++ssam_scsi_io_task_setup(struct spdk_ssam_scsi_task *task) ++{ ++ struct spdk_scsi_task *scsi_task 
= &task->scsi_task; ++ struct ssam_io_message *io_cmd = &task->io_req->req.cmd; ++ struct virtio_scsi_cmd_req *req = (struct virtio_scsi_cmd_req *)io_cmd->header; ++ uint32_t payload_size; ++ int ret; ++ ++ ssam_scsi_io_task_construct(task); ++ ++ ret = ssam_scsi_get_payload_size(task->io_req, &payload_size); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ ret = ssam_scsi_task_init_target(task, req->lun); ++ if (ret < 0) { ++ return ret; ++ } ++ ++ scsi_task->dxfer_dir = (io_cmd->writable ? SPDK_SCSI_DIR_FROM_DEV : SPDK_SCSI_DIR_TO_DEV); ++ scsi_task->iovs = task->iovs.virt.sges; ++ scsi_task->cdb = req->cdb; ++ scsi_task->transfer_len = payload_size; ++ scsi_task->length = payload_size; ++ ++ ret = ssam_scsi_task_iovs_memory_get(task, payload_size); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_scsi_process_io_task(struct spdk_ssam_session *smsession, struct spdk_ssam_scsi_task *task) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev_io_state *io_stat; ++ uint64_t cur_tsc; ++ int32_t lun_id; ++ ++ ssmsession->scsi_dev_state[task->tgt_id].flight_io++; ++ ++ if (spdk_unlikely(task->scsi_task.lun == NULL)) { ++ spdk_scsi_task_process_null_lun(&task->scsi_task); ++ task->resp.response = VIRTIO_SCSI_S_OK; ++ ssam_scsi_submit_completion(task); ++ return; ++ } ++ ++ lun_id = spdk_scsi_lun_get_id(task->scsi_task.lun); ++ io_stat = ssmsession->scsi_dev_state[task->tgt_id].io_stat[lun_id]; ++ if (io_stat == NULL) { ++ SPDK_ERRLOG("No io_stat with tgt %d lun %d\n", task->tgt_id, lun_id); ++ task->resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssam_scsi_submit_completion(task); ++ return; ++ } ++ /* First part start of read and write */ ++ cur_tsc = spdk_get_ticks(); ++ io_stat->submit_tsc = cur_tsc; ++ memset(&task->task_stat, 0, sizeof(task->task_stat)); ++ task->task_stat.start_tsc = cur_tsc; ++ io_stat->scsi_stat.start_count++; ++ ++ switch (task->scsi_task.dxfer_dir) { ++ case SPDK_SCSI_DIR_FROM_DEV: /* read: read data from backend to ipu, then dma to host */ ++ ssam_scsi_submit_io_task(task); ++ /* First part end of read */ ++ uint8_t rw_type = task->scsi_task.cdb[0]; ++ if (rw_type == SPDK_SBC_READ_6 || rw_type == SPDK_SBC_READ_10 || ++ rw_type == SPDK_SBC_READ_12 || rw_type == SPDK_SBC_READ_16) { ++ io_stat->stat.read_latency_ticks += ssam_get_diff_tsc(io_stat->submit_tsc); ++ io_stat->stat.bytes_read += task->scsi_task.transfer_len; ++ io_stat->stat.num_read_ops++; ++ } ++ break; ++ ++ case SPDK_SCSI_DIR_TO_DEV: /* write: dma data from host to ipu, then submit to backend */ ++ ssam_scsi_task_dma_request(task, SSAM_REQUEST_DATA_LOAD); ++ break; ++ ++ default: ++ SPDK_ERRLOG("scsi task dxfer dir error, dir is %u.\n", task->scsi_task.dxfer_dir); ++ io_stat->scsi_stat.fatal_ios++; ++ break; ++ } ++} ++ ++static void ++ssam_scsi_pre_process_io_task(struct spdk_ssam_session *smsession, struct spdk_ssam_scsi_task *task) ++{ ++ int ret; ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ ++ ret = ssam_scsi_io_task_setup(task); ++ if (ret != 0) { ++ if (ret == -ENOMEM) { ++ ssam_session_insert_io_wait(smsession, &task->session_io_wait); ++ return; ++ } ++ task->resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssmsession->scsi_dev_state[task->tgt_id].flight_io++; ++ ssam_scsi_submit_completion(task); ++ return; ++ } ++ ++ ssam_scsi_process_io_task(smsession, task); ++} ++ ++static void ++ssam_scsi_process_request(struct spdk_ssam_session *smsession, struct ssam_request 
*io_req, ++ uint16_t vq_idx) ++{ ++ struct spdk_ssam_scsi_task *task = NULL; ++ struct spdk_ssam_virtqueue *vq = &smsession->virtqueue[vq_idx]; ++ ++ if (spdk_unlikely(vq->use_num >= vq->num)) { ++ SPDK_ERRLOG("Session:%s vq(%hu) task_cnt(%u) limit(%u).\n", smsession->name, vq_idx, vq->use_num, ++ vq->num); ++ ssam_scsi_req_complete(smsession->smdev, io_req, VIRTIO_SCSI_S_FAILURE); ++ return; ++ } ++ ++ uint32_t index = vq->index[vq->index_r]; ++ task = &((struct spdk_ssam_scsi_task *)vq->tasks)[index]; ++ if (spdk_unlikely(task->used)) { ++ SPDK_ERRLOG("%s: vq(%hu) task_idx(%hu) is already pending.\n", smsession->name, vq_idx, ++ task->task_idx); ++ ssam_scsi_req_complete(smsession->smdev, io_req, VIRTIO_SCSI_S_FAILURE); ++ return; ++ } ++ ++ smsession->task_cnt++; ++ vq->index_r = (vq->index_r + 1) & 0xFF; ++ vq->use_num++; ++ ssam_scsi_task_init(task); ++ task->io_req = io_req; ++ ++ if (spdk_unlikely(io_req->type == VMIO_TYPE_VIRTIO_SCSI_CTRL)) { ++ ssam_scsi_process_ctl_task(smsession, task); ++ } else { ++ ssam_scsi_pre_process_io_task(smsession, task); ++ } ++ ++ return; ++} ++ ++static void ++ssam_scsi_request_worker(struct spdk_ssam_session *smsession, void *arg) ++{ ++ struct ssam_request *io_req = (struct ssam_request *)arg; ++ struct ssam_io_message *io_cmd = &io_req->req.cmd; ++ struct spdk_ssam_dev *smdev = smsession->smdev; ++ struct virtio_scsi_cmd_req *req = (struct virtio_scsi_cmd_req *)io_cmd->header; ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ uint16_t vq_idx = io_cmd->virtio.vq_idx; ++ uint32_t tgt_id = req->lun[1]; ++ ++ smdev->io_num++; ++ ++ if (vq_idx >= smsession->max_queues) { ++ SPDK_ERRLOG("vq_idx out of range, need less than %u, actually %u\n", ++ smsession->max_queues, vq_idx); ++ goto err; ++ } ++ ++ if (io_req->status != SSAM_IO_STATUS_OK) { ++ SPDK_WARNLOG("%s: ssam request status invalid, but still process, status=%d\n", ++ smsession->name, io_req->status); ++ goto err; ++ } ++ ++ if (ssmsession->scsi_dev_state[tgt_id].status != SSAM_SCSI_DEV_PRESENT) { ++ /* If dev has been deleted, return io err */ ++ goto err; ++ } ++ ++ ssam_scsi_process_request(smsession, io_req, vq_idx); ++ ++ return; ++ ++err: ++ ssam_scsi_req_complete(smsession->smdev, io_req, VIRTIO_SCSI_S_FAILURE); ++ return; ++} ++ ++static void ++ssam_scsi_response_worker(struct spdk_ssam_session *smsession, void *arg) ++{ ++ struct ssam_dma_rsp *dma_rsp = (struct ssam_dma_rsp *)arg; ++ const struct spdk_ssam_dma_cb *dma_cb = (const struct spdk_ssam_dma_cb *)&dma_rsp->cb; ++ struct spdk_ssam_scsi_task *task = NULL; ++ uint16_t vq_idx = dma_cb->vq_idx; ++ uint16_t task_idx = dma_cb->task_idx; ++ uint8_t req_dir = dma_cb->req_dir; ++ ++ if (spdk_unlikely(vq_idx >= smsession->max_queues)) { ++ smsession->smdev->discard_io_num++; ++ SPDK_ERRLOG("vq_idx out of range, need less than %u, actually %u\n", ++ smsession->max_queues, vq_idx); ++ return; ++ } ++ ++ task = &((struct spdk_ssam_scsi_task *)smsession->virtqueue[vq_idx].tasks)[task_idx]; ++ if (dma_rsp->status != 0) { ++ task->resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssam_scsi_submit_completion(task); ++ SPDK_ERRLOG("dma data process failed!\n"); ++ return; ++ } ++ if (dma_rsp->last_flag == 0) { ++ task->resp.response = VIRTIO_SCSI_S_FAILURE; ++ ssam_scsi_submit_completion(task); ++ SPDK_ERRLOG("last_flag should not equal 0!\n"); ++ return; ++ } ++ int32_t tgt_id = task->tgt_id; ++ int32_t lun_id = spdk_scsi_lun_get_id(task->scsi_task.lun); ++ struct spdk_scsi_dev_io_state *io_stat = 
task->ssmsession->scsi_dev_state[tgt_id].io_stat[lun_id]; ++ ++ ssam_scsi_task_stat_tick(&task->task_stat.dma_end_tsc); ++ task->ssmsession->scsi_dev_state[task->tgt_id].io_stat[0]->scsi_stat.dma_complete_count++; ++ ++ if (req_dir == SSAM_REQUEST_DATA_LOAD) { ++ /* Write: write data ready, submit task to backend */ ++ ssam_scsi_submit_io_task(task); ++ /* First part end of write */ ++ io_stat->stat.write_latency_ticks += ssam_get_diff_tsc(io_stat->submit_tsc); ++ } else if (req_dir == SSAM_REQUEST_DATA_STORE) { ++ /* Read: data have been read by user, complete the task */ ++ task->resp.response = VIRTIO_SCSI_S_OK; ++ ssam_scsi_submit_completion(task); ++ /* Second part end of read */ ++ io_stat->stat.read_latency_ticks += ssam_get_diff_tsc(io_stat->submit_tsc); ++ } else { ++ io_stat->scsi_stat.fatal_ios++; ++ } ++} ++ ++static void ++ssam_scsi_destroy_bdev_device(struct spdk_ssam_session *smsession, void *args) ++{ ++ unsigned scsi_tgt_num = (unsigned)(uintptr_t)(args); ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ ++ ssam_remove_scsi_tgt(ssmsession, scsi_tgt_num); ++} ++ ++static void ++ssam_free_scsi_task_pool(struct spdk_ssam_scsi_session *ssmsession) ++{ ++ struct spdk_ssam_session *smsession = &ssmsession->smsession; ++ struct spdk_ssam_virtqueue *vq = NULL; ++ uint16_t max_queues = smsession->max_queues; ++ uint16_t i; ++ ++ if (max_queues > SPDK_SSAM_MAX_VQUEUES) { ++ return; ++ } ++ ++ for (i = 0; i < max_queues; i++) { ++ vq = &smsession->virtqueue[i]; ++ if (vq->tasks != NULL) { ++ spdk_free(vq->tasks); ++ vq->tasks = NULL; ++ } ++ ++ if (vq->index != NULL) { ++ spdk_free(vq->index); ++ vq->index = NULL; ++ } ++ } ++} ++ ++static int ++ssam_alloc_scsi_task_pool(struct spdk_ssam_scsi_session *ssmsession) ++{ ++ struct spdk_ssam_session *smsession = &ssmsession->smsession; ++ struct spdk_ssam_virtqueue *vq = NULL; ++ struct spdk_ssam_scsi_task *task = NULL; ++ uint16_t max_queues = smsession->max_queues; ++ uint32_t task_cnt = smsession->queue_size; ++ uint16_t i; ++ uint32_t j; ++ ++ if ((max_queues > SPDK_SSAM_MAX_VQUEUES) || (max_queues == 0)) { ++ SPDK_ERRLOG("%s: max_queues %u invalid\n", smsession->name, max_queues); ++ return -EINVAL; ++ } ++ ++ if ((task_cnt == 0) || (task_cnt > SPDK_SSAM_MAX_VQ_SIZE)) { ++ SPDK_ERRLOG("%s: virtuque size %u invalid\n", smsession->name, task_cnt); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < max_queues; i++) { ++ vq = &smsession->virtqueue[i]; ++ vq->smsession = smsession; ++ vq->num = task_cnt; ++ vq->use_num = 0; ++ vq->index_l = 0; ++ vq->index_r = 0; ++ vq->tasks = spdk_zmalloc(sizeof(struct spdk_ssam_scsi_task) * task_cnt, ++ SPDK_CACHE_LINE_SIZE, NULL, ++ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); ++ vq->index = spdk_zmalloc(sizeof(uint32_t) * task_cnt, ++ SPDK_CACHE_LINE_SIZE, NULL, ++ SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA); ++ if (vq->tasks == NULL || vq->index == NULL) { ++ SPDK_ERRLOG("%s: failed to allocate %"PRIu32" tasks for virtqueue %"PRIu16"\n", ++ smsession->name, task_cnt, i); ++ ssam_free_scsi_task_pool(ssmsession); ++ return -ENOMEM; ++ } ++ ++ for (j = 0; j < task_cnt; j++) { ++ task = &((struct spdk_ssam_scsi_task *)vq->tasks)[j]; ++ task->ssmsession = ssmsession; ++ task->smsession = &ssmsession->smsession; ++ task->vq_idx = i; ++ task->task_idx = j; ++ vq->index[j] = j; ++ } ++ } ++ ++ return 0; ++} ++ ++static void ++ssam_scsi_print_stuck_io_info(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_scsi_task *tasks; ++ struct spdk_ssam_scsi_task *task; ++ int 
i, j; ++ ++ for (i = 0; i < smsession->max_queues; i++) { ++ for (j = 0; j < smsession->queue_size; j++) { ++ tasks = (struct spdk_ssam_scsi_task *)smsession->virtqueue[i].tasks; ++ task = &tasks[j]; ++ if (task == NULL) { ++ continue; ++ } ++ if (task->used) { ++ SPDK_INFOLOG(ssam_scsi, "%s: stuck io payload_size %u, vq_idx %u, task_idx %u\n", ++ smsession->name, task->scsi_task.length, task->vq_idx, task->task_idx); ++ } ++ } ++ } ++} ++ ++static int ++ssam_scsi_start_cb(struct spdk_ssam_session *smsession, void **unused) ++{ ++ SPDK_NOTICELOG("SCSI controller %s created with queues %u\n", ++ smsession->name, smsession->max_queues); ++ ++ ssam_session_start_done(smsession, 0); ++ ++ return 0; ++} ++ ++static int ++ssam_scsi_start(struct spdk_ssam_session *smsession) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = false, ++ .need_rsp = true, ++ }; ++ int rc = ssam_alloc_scsi_task_pool(ssmsession); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: failed to alloc task pool.\n", smsession->name); ++ return rc; ++ } ++ return ssam_send_event_to_session(smsession, ssam_scsi_start_cb, NULL, send_event_flag, NULL); ++} ++ ++static int ++ssam_scsi_session_connect(struct spdk_ssam_session *smsession, uint16_t queues) ++{ ++ uint16_t queue_cnt = queues; ++ ++ if (queue_cnt == 0) { ++ queue_cnt = SPDK_SSAM_SCSI_DEFAULT_VQUEUES; ++ } ++ ++ smsession->max_queues = queue_cnt; ++ smsession->queue_size = SPDK_SSAM_DEFAULT_VQ_SIZE; ++ ++ return ssam_scsi_start(smsession); ++} ++ ++int ++ssam_scsi_construct(struct spdk_ssam_session_reg_info *info) ++{ ++ struct spdk_ssam_session *smsession = NULL; ++ struct spdk_ssam_scsi_session *ssmsession = NULL; ++ uint32_t session_ctx_size = sizeof(struct spdk_ssam_scsi_session) - sizeof( ++ struct spdk_ssam_session); ++ uint16_t tid; ++ int rc = 0; ++ ++ ssam_lock(); ++ ++ tid = ssam_get_tid(); ++ if (tid == SPDK_INVALID_TID) { ++ ssam_unlock(); ++ return -EINVAL; ++ } ++ ++ info->tid = tid; ++ info->backend = &g_ssam_scsi_session_backend; ++ info->session_ctx_size = session_ctx_size; ++ snprintf(info->type_name, SPDK_SESSION_TYPE_MAX_LEN, "%s", SPDK_SESSION_TYPE_SCSI); ++ rc = ssam_session_register(info, &smsession); ++ if (rc != 0) { ++ ssam_unlock(); ++ return rc; ++ } ++ smsession->started = true; ++ ++ ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ ssmsession->registered = true; ++ ssmsession->dbdf = strdup(info->dbdf); ++ if (ssmsession->dbdf == NULL) { ++ ssam_session_unregister(smsession); ++ ssam_unlock(); ++ return -EINVAL; ++ } ++ ++ rc = ssam_scsi_session_connect(smsession, info->queues); ++ if (rc != 0) { ++ ssam_session_unreg_response_cb(smsession); ++ ssam_session_unregister(smsession); ++ ssam_unlock(); ++ return -EINVAL; ++ } ++ ++ ssam_unlock(); ++ ++ return 0; ++} ++ ++static int ++ssam_get_scsi_tgt_num(struct spdk_ssam_scsi_session *ssmsession, int *scsi_tgt_num_out) ++{ ++ int scsi_tgt_num = *scsi_tgt_num_out; ++ if (scsi_tgt_num < 0) { ++ for (scsi_tgt_num = 0; scsi_tgt_num < SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; scsi_tgt_num++) { ++ if (ssmsession->scsi_dev_state[scsi_tgt_num].dev == NULL) { ++ break; ++ } ++ } ++ ++ if (scsi_tgt_num == SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("%s: all SCSI target slots are already in use.\n", ssmsession->smsession.name); ++ return -ENOSPC; ++ } ++ } else { ++ if (scsi_tgt_num >= SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("%s: SCSI target number is too big (got %d, max %d)\n", ++ 
ssmsession->smsession.name, scsi_tgt_num, SPDK_SSAM_SCSI_CTRLR_MAX_DEVS - 1); ++ return -EINVAL; ++ } ++ } ++ *scsi_tgt_num_out = scsi_tgt_num; ++ return 0; ++} ++ ++static int ++ssam_scsi_dev_param_changed(struct spdk_ssam_scsi_session *ssmsession, ++ unsigned scsi_tgt_num) ++{ ++ struct spdk_scsi_dev_ssam_state *state = &ssmsession->scsi_dev_state[scsi_tgt_num]; ++ ++ if (state->dev == NULL) { ++ return 0; ++ } ++ int rc = ssam_scsi_send_event(&ssmsession->smsession, scsi_tgt_num, VIRTIO_SCSI_T_PARAM_CHANGE, ++ 0x2a | (0x09 << 0x8)); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: tgt %d resize send action failed\n", ssmsession->smsession.name, scsi_tgt_num); ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static unsigned ++ssam_get_scsi_dev_num(const struct spdk_ssam_scsi_session *ssmsession, ++ const struct spdk_scsi_lun *lun) ++{ ++ const struct spdk_scsi_dev *scsi_dev; ++ unsigned scsi_dev_num; ++ ++ scsi_dev = spdk_scsi_lun_get_dev(lun); ++ for (scsi_dev_num = 0; scsi_dev_num < SPDK_SSAM_SCSI_CTRLR_MAX_DEVS; scsi_dev_num++) { ++ if (ssmsession->scsi_dev_state[scsi_dev_num].dev == scsi_dev) { ++ break; ++ } ++ } ++ return scsi_dev_num; ++} ++ ++static void ++ssam_scsi_lun_resize(const struct spdk_scsi_lun *lun, void *arg) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = arg; ++ unsigned scsi_dev_num; ++ ++ scsi_dev_num = ssam_get_scsi_dev_num(ssmsession, lun); ++ if (scsi_dev_num == SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ /* The entire device has been already removed. */ ++ return; ++ } ++ ++ (void)ssam_scsi_dev_param_changed(ssmsession, scsi_dev_num); ++} ++ ++static void ++ssam_scsi_lun_hotremove(const struct spdk_scsi_lun *lun, void *arg) ++{ ++ struct ssam_scsi_tgt_hotplug_ctx *ctx; ++ struct spdk_ssam_scsi_session *ssmsession = arg; ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = false, ++ .need_rsp = false, ++ }; ++ unsigned scsi_dev_num; ++ ++ scsi_dev_num = ssam_get_scsi_dev_num(ssmsession, lun); ++ if (scsi_dev_num == SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ /* The entire device has been already removed. 
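No transport-reset event needs to be delivered in that case. 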
*/ ++ return; ++ } ++ ++ ctx = calloc(1, sizeof(*ctx)); ++ if (ctx == NULL) { ++ SPDK_ERRLOG("calloc failed\n"); ++ return; ++ } ++ ++ ctx->scsi_tgt_num = scsi_dev_num; ++ ssam_send_event_to_session(&ssmsession->smsession, ssam_scsi_dev_hot_remove_tgt, ++ NULL, send_event_flag, ctx); ++} ++ ++static int ++ssam_scsi_session_add_tgt(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct ssam_add_tgt_ev_ctx *args = (struct ssam_add_tgt_ev_ctx *)(*ctx); ++ unsigned scsi_tgt_num = args->tgt_num; ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ int rc; ++ ++ rc = spdk_scsi_dev_allocate_io_channels(ssmsession->scsi_dev_state[scsi_tgt_num].dev); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: Couldn't allocate io channnel for SCSI target %u.\n", ++ smsession->name, scsi_tgt_num); ++ } ++ ++ rc = ssam_scsi_send_event(smsession, scsi_tgt_num, VIRTIO_SCSI_T_TRANSPORT_RESET, ++ VIRTIO_SCSI_EVT_RESET_RESCAN); ++ if (rc != 0) { ++ SPDK_WARNLOG("%s: send event %d(reason %d) to target %hu failed, ret: %d, host maynot boot.\n", ++ smsession->name, VIRTIO_SCSI_T_TRANSPORT_RESET, VIRTIO_SCSI_EVT_RESET_RESCAN, scsi_tgt_num, rc); ++ if (rc == -ENOSPC) { ++ spdk_scsi_dev_free_io_channels(ssmsession->scsi_dev_state[scsi_tgt_num].dev); ++ ssam_scsi_destruct_tgt(ssmsession, scsi_tgt_num); ++ return rc; ++ } ++ } ++ ++ ssmsession->scsi_dev_state[scsi_tgt_num].status = SSAM_SCSI_DEV_PRESENT; ++ ssmsession->scsi_dev_state[scsi_tgt_num].flight_io = 0; ++ ++ return 0; ++} ++ ++static void ++ssam_scsi_dev_add_tgt_cpl_cb(struct spdk_ssam_session *smsession, void **ctx) ++{ ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct ssam_add_tgt_ev_ctx *args = (struct ssam_add_tgt_ev_ctx *)(*ctx); ++ unsigned scsi_tgt_num = args->tgt_num; ++ ssmsession->ref++; ++ ++ SPDK_NOTICELOG("SCSI controller %s target %u added with bdev %s\n", ++ smsession->name, scsi_tgt_num, args->bdev_name); ++ ++ free(args->bdev_name); ++ args->bdev_name = NULL; ++ free(args); ++} ++ ++struct ssam_scsi_session_remove_tgt_arg { ++ struct spdk_ssam_session *smsession; ++ unsigned scsi_tgt_num; ++}; ++ ++static void ++ssam_scsi_session_remove_tgt_cpl(struct spdk_ssam_session *smsession, void **_ctx) ++{ ++ struct ssam_scsi_tgt_hotplug_ctx *ctx = *_ctx; ++ unsigned scsi_tgt_num = ctx->scsi_tgt_num; ++ int rc; ++ rc = ssam_umount_normal(smsession, ssam_scsi_tgtid_to_lunid(scsi_tgt_num)); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: function umount failed when remove scsi tgt:%s.\n", ++ smsession->name, strerror(-rc)); ++ } ++ free(ctx); ++} ++ ++static int ++ssam_scsi_session_remove_tgt(struct spdk_ssam_session *smsession, void **_ctx) ++{ ++ struct ssam_scsi_tgt_hotplug_ctx *ctx = *_ctx; ++ unsigned scsi_tgt_num = ctx->scsi_tgt_num; ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct spdk_scsi_dev_ssam_state *state = &ssmsession->scsi_dev_state[scsi_tgt_num]; ++ int rc = 0; ++ ++ if (state->status != SSAM_SCSI_DEV_PRESENT) { ++ SPDK_WARNLOG("%s: SCSI target %u is not present, skip.\n", smsession->name, scsi_tgt_num); ++ rc = -ENODEV; ++ goto out; ++ } ++ ++ if (ssmsession->scsi_dev_state[scsi_tgt_num].flight_io != 0) { ++ SPDK_ERRLOG("%s: SCSI target %u is busy.\n", smsession->name, scsi_tgt_num); ++ rc = -EBUSY; ++ goto out; ++ } ++ ++ state->status = SSAM_SCSI_DEV_REMOVING; ++ ++ SPDK_NOTICELOG("%s: target %d is removing\n", smsession->name, scsi_tgt_num); ++ ++ rc = ssam_scsi_send_event(smsession, scsi_tgt_num, 
VIRTIO_SCSI_T_TRANSPORT_RESET, ++ VIRTIO_SCSI_EVT_RESET_REMOVED); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: scsi send remove event failed\n", smsession->name); ++ if (rc == -ENOSPC) { ++ state->status = SSAM_SCSI_DEV_PRESENT; ++ goto out; ++ } ++ } ++ ++ spdk_scsi_dev_free_io_channels(state->dev); ++ ++ ssam_send_dev_destroy_msg(smsession, (void *)(uintptr_t)scsi_tgt_num); ++ ++ /* free ctx see ssam_scsi_session_remove_tgt_cpl() */ ++ return rc; ++ ++out: ++ free(ctx); ++ ++ return rc; ++} ++ ++static int ++ssam_scsi_construct_tgt(struct spdk_ssam_scsi_session *ssmsession, int scsi_tgt_num, ++ const char *bdev_name) ++{ ++ struct spdk_scsi_dev_ssam_state *state = NULL; ++ char target_name[SPDK_SCSI_DEV_MAX_NAME] = {0}; ++ int lun_id_list[SSAM_SPDK_SCSI_DEV_MAX_LUN]; ++ const char *bdev_names_list[SSAM_SPDK_SCSI_DEV_MAX_LUN]; ++ int rc; ++ ++ state = &ssmsession->scsi_dev_state[scsi_tgt_num]; ++ if (state->dev != NULL) { ++ SPDK_ERRLOG("%s: SCSI target %u already occupied\n", ssmsession->smsession.name, scsi_tgt_num); ++ return -EEXIST; ++ } ++ ++ (void)snprintf(target_name, sizeof(target_name), "Target %u", scsi_tgt_num); ++ lun_id_list[0] = 0; ++ bdev_names_list[0] = (char *)bdev_name; ++ ++ state->status = SSAM_SCSI_DEV_ADDING; ++ rc = ssam_scsi_iostat_construct(ssmsession, scsi_tgt_num, lun_id_list, SSAM_SPDK_SCSI_DEV_MAX_LUN); ++ if (rc != 0) { ++ return rc; ++ } ++ ++ state->dev = spdk_scsi_dev_construct_ext(target_name, bdev_names_list, lun_id_list, ++ SSAM_SPDK_SCSI_DEV_MAX_LUN, ++ SPDK_SPC_PROTOCOL_IDENTIFIER_SAS, ++ ssam_scsi_lun_resize, ssmsession, ++ ssam_scsi_lun_hotremove, ssmsession); ++ if (state->dev == NULL) { ++ SPDK_ERRLOG("%s: couldn't create SCSI target %u using bdev '%s'\n", ++ ssmsession->smsession.name, scsi_tgt_num, bdev_name); ++ rc = -EINVAL; ++ goto dev_fail; ++ } ++ ++ rc = spdk_scsi_dev_add_port(state->dev, 0, "ssam"); ++ if (rc != 0) { ++ goto port_fail; ++ } ++ ++ return rc; ++ ++port_fail: ++ spdk_scsi_dev_destruct(state->dev, NULL, NULL); ++ ++dev_fail: ++ ssam_scsi_iostat_destruct(state); ++ ++ return rc; ++} ++ ++static void ++ssam_scsi_destruct_tgt(struct spdk_ssam_scsi_session *ssmsession, int scsi_tgt_num) ++{ ++ struct spdk_scsi_dev_ssam_state *state = NULL; ++ state = &ssmsession->scsi_dev_state[scsi_tgt_num]; ++ ++ if (state->dev) { ++ spdk_scsi_dev_delete_port(state->dev, 0); ++ spdk_scsi_dev_destruct(state->dev, NULL, NULL); ++ state->dev = NULL; ++ } ++ ssam_scsi_iostat_destruct(state); ++ ++ state->status = SSAM_SCSI_DEV_EMPTY; ++} ++ ++int ++ssam_scsi_dev_add_tgt(struct spdk_ssam_session *smsession, int scsi_tgt_num, ++ const char *bdev_name) ++{ ++ int rc; ++ struct spdk_ssam_scsi_session *ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ struct ssam_add_tgt_ev_ctx *ctx = calloc(1, sizeof(struct ssam_add_tgt_ev_ctx)); ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = false, ++ .need_rsp = true, ++ }; ++ ++ if (ctx == NULL) { ++ SPDK_ERRLOG("calloc ssam_add_tgt_ev_ctx failed\n"); ++ return -ENOMEM; ++ } ++ ++ if (bdev_name == NULL) { ++ SPDK_ERRLOG("No lun name specified\n"); ++ free(ctx); ++ return -EINVAL; ++ } ++ ++ ctx->bdev_name = spdk_sprintf_alloc("%s", bdev_name); ++ if (ctx->bdev_name == NULL) { ++ SPDK_ERRLOG("calloc ssam_add_tgt_ev_ctx bdev_name failed\n"); ++ free(ctx); ++ return -ENOMEM; ++ } ++ ++ rc = ssam_get_scsi_tgt_num(ssmsession, &scsi_tgt_num); ++ if (rc < 0) { ++ free(ctx->bdev_name); ++ free(ctx); ++ return rc; ++ } ++ ++ rc = ssam_mount_normal(smsession, 
ssam_scsi_tgtid_to_lunid(scsi_tgt_num)); ++ if (rc != SSAM_MOUNT_OK) { ++ SPDK_ERRLOG("%s: mount ssam volume failed, tgt id %d\n", smsession->name, scsi_tgt_num); ++ free(ctx->bdev_name); ++ free(ctx); ++ return rc; ++ } ++ ++ rc = ssam_scsi_construct_tgt(ssmsession, scsi_tgt_num, bdev_name); ++ if (rc != 0) { ++ free(ctx->bdev_name); ++ free(ctx); ++ return rc; ++ } ++ ++ ctx->tgt_num = scsi_tgt_num; ++ rc = ssam_send_event_to_session(&ssmsession->smsession, ssam_scsi_session_add_tgt, ++ ssam_scsi_dev_add_tgt_cpl_cb, send_event_flag, (void *)ctx); ++ if (rc != 0) { ++ ssam_scsi_destruct_tgt(ssmsession, scsi_tgt_num); ++ free(ctx->bdev_name); ++ free(ctx); ++ return rc; ++ } ++ ++ SPDK_INFOLOG(ssam_scsi, "%s: added SCSI target %u using bdev '%s'\n", ++ ssmsession->smsession.name, scsi_tgt_num, bdev_name); ++ ++ return 0; ++} ++ ++static int ++ssam_scsi_dev_hot_remove_tgt(struct spdk_ssam_session *smsession, void **_ctx) ++{ ++ int rc = 0; ++ struct ssam_scsi_tgt_hotplug_ctx *ctx = *_ctx; ++ struct spdk_ssam_scsi_session *ssmsession; ++ ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ unsigned scsi_tgt_num = ctx->scsi_tgt_num; ++ if (!ssmsession) { ++ SPDK_ERRLOG("invalid SCSI device"); ++ rc = -EINVAL; ++ goto out; ++ } ++ ++ struct spdk_scsi_dev_ssam_state *scsi_dev_state = &ssmsession->scsi_dev_state[scsi_tgt_num]; ++ if (scsi_dev_state->dev == NULL) { ++ /* Nothing to do */ ++ SPDK_WARNLOG("%s: There is no need to remove scsi target\n", smsession->name); ++ rc = -ENODEV; ++ goto out; ++ } ++ ++ if (scsi_dev_state->status != SSAM_SCSI_DEV_PRESENT) { ++ SPDK_INFOLOG(ssam_scsi, "%s: SCSI target %u is being removed\n", smsession->name, scsi_tgt_num); ++ rc = 0; ++ goto out; ++ } ++ ++ scsi_dev_state->status = SSAM_SCSI_DEV_REMOVING; ++ ++ SPDK_NOTICELOG("%s: target %d is hot removing\n", smsession->name, scsi_tgt_num); ++ ++ rc = ssam_scsi_send_event(smsession, scsi_tgt_num, VIRTIO_SCSI_T_TRANSPORT_RESET, ++ VIRTIO_SCSI_EVT_RESET_REMOVED); ++ if (rc != 0) { ++ SPDK_ERRLOG("%s: scsi send remove event failed\n", smsession->name); ++ if (rc == -ENOSPC) { ++ scsi_dev_state->status = SSAM_SCSI_DEV_PRESENT; ++ goto out; ++ } ++ } ++ ++ spdk_scsi_dev_free_io_channels(scsi_dev_state->dev); ++ ++ ssam_send_dev_destroy_msg(smsession, (void *)(uintptr_t)scsi_tgt_num); ++ ++out: ++ free(ctx); ++ return rc; ++} ++ ++int ++ssam_scsi_dev_remove_tgt(struct spdk_ssam_session *smsession, unsigned scsi_tgt_num, ++ spdk_ssam_session_rsp_fn cb_fn, void *cb_arg) ++{ ++ struct spdk_ssam_scsi_session *ssmsession; ++ struct ssam_scsi_tgt_hotplug_ctx *ctx; ++ struct spdk_ssam_send_event_flag send_event_flag = { ++ .need_async = false, ++ .need_rsp = true, ++ }; ++ ++ if (scsi_tgt_num >= SPDK_SSAM_SCSI_CTRLR_MAX_DEVS) { ++ SPDK_ERRLOG("%s: invalid SCSI target number %d\n", smsession->name, scsi_tgt_num); ++ return -EINVAL; ++ } ++ ++ ssmsession = (struct spdk_ssam_scsi_session *)smsession; ++ if (!ssmsession) { ++ SPDK_ERRLOG("An invalid SCSI device that removing from a SCSI target."); ++ return -EINVAL; ++ } ++ ++ ctx = calloc(1, sizeof(*ctx)); ++ if (ctx == NULL) { ++ SPDK_ERRLOG("calloc failed\n"); ++ return -ENOMEM; ++ } ++ ++ ctx->scsi_tgt_num = scsi_tgt_num; ++ ++ ssam_send_event_to_session(smsession, ssam_scsi_session_remove_tgt, ++ ssam_scsi_session_remove_tgt_cpl, send_event_flag, ctx); ++ ++ return 0; ++} ++ ++SPDK_LOG_REGISTER_COMPONENT(ssam_scsi) +diff --git a/mk/spdk.lib_deps.mk b/mk/spdk.lib_deps.mk +index 7f04dab..efac42e 100644 +--- a/mk/spdk.lib_deps.mk ++++ b/mk/spdk.lib_deps.mk +@@ 
+ DEPDIRS-event_iobuf := init log thread util $(JSON_LIBS)
+ DEPDIRS-event_keyring := init json keyring
+ DEPDIRS-event_fsdev := init fsdev
++DEPDIRS-event_ssam := init ssam event_scsi
+ 
+ # module/vfu_device
+ 
+diff --git a/module/event/subsystems/Makefile b/module/event/subsystems/Makefile
+index aafcec8..d8ec186 100644
+--- a/module/event/subsystems/Makefile
++++ b/module/event/subsystems/Makefile
+@@ -16,6 +16,7 @@ endif
+ endif
+ 
+ DIRS-$(CONFIG_VHOST) += vhost_blk vhost_scsi
++DIRS-$(CONFIG_SSAM) += ssam
+ DIRS-$(CONFIG_VFIO_USER) += vfu_tgt
+ DIRS-$(CONFIG_FSDEV) += fsdev
+ 
+@@ -31,6 +32,7 @@ DEPDIRS-ublk := bdev
+ DEPDIRS-nvmf := bdev
+ DEPDIRS-scsi := bdev
+ DEPDIRS-vhost_scsi := scsi
++DEPDIRS-ssam := scsi
+ 
+ .PHONY: all clean $(DIRS-y)
+ 
+diff --git a/module/event/subsystems/ssam/Makefile b/module/event/subsystems/ssam/Makefile
+new file mode 100644
+index 0000000..77f74a8
+--- /dev/null
++++ b/module/event/subsystems/ssam/Makefile
+@@ -0,0 +1,17 @@
++# SPDX-License-Identifier: BSD-3-Clause
++# Copyright (C) 2021-2025 Huawei Technologies Co.
++# All rights reserved.
++#
++
++SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
++include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
++
++SO_VER := 3
++SO_MINOR := 0
++
++C_SRCS = ssam.c
++LIBNAME = event_ssam
++
++SPDK_MAP_FILE = $(SPDK_ROOT_DIR)/mk/spdk_blank.map
++
++include $(SPDK_ROOT_DIR)/mk/spdk.lib.mk
+diff --git a/module/event/subsystems/ssam/ssam.c b/module/event/subsystems/ssam/ssam.c
+new file mode 100644
+index 0000000..5291b34
+--- /dev/null
++++ b/module/event/subsystems/ssam/ssam.c
+@@ -0,0 +1,45 @@
++/* SPDX-License-Identifier: BSD-3-Clause
++ * Copyright (C) 2021-2025 Huawei Technologies Co.
++ * All rights reserved.
++ */
++
++#include "spdk/stdinc.h"
++
++#include "spdk/ssam.h"
++
++#include "spdk_internal/event.h"
++#include "spdk_internal/init.h"
++
++static void
++ssam_subsystem_init_done(int rc)
++{
++	spdk_subsystem_init_next(rc);
++}
++
++static void
++ssam_subsystem_init(void)
++{
++	spdk_ssam_subsystem_init(ssam_subsystem_init_done);
++}
++
++static void
++ssam_subsystem_fini_done(void)
++{
++	spdk_subsystem_fini_next();
++}
++
++static void
++ssam_subsystem_fini(void)
++{
++	spdk_ssam_subsystem_fini(ssam_subsystem_fini_done);
++}
++
++static struct spdk_subsystem g_spdk_subsystem_ssam = {
++	.name = SSAM_SERVER_NAME,
++	.init = ssam_subsystem_init,
++	.fini = ssam_subsystem_fini,
++	.write_config_json = spdk_ssam_config_json,
++};
++
++SPDK_SUBSYSTEM_REGISTER(g_spdk_subsystem_ssam);
++SPDK_SUBSYSTEM_DEPEND(ssam, scsi)
+diff --git a/python/spdk/rpc/__init__.py b/python/spdk/rpc/__init__.py
+index 59bfe71..470d859 100644
+--- a/python/spdk/rpc/__init__.py
++++ b/python/spdk/rpc/__init__.py
+@@ -30,6 +30,7 @@ from . import nvmf
+ from . import subsystem
+ from . import trace
+ from . import vhost
++from . import ssam
+ from . import vmd
+ from . import sock
+ from . import vfio_user
+diff --git a/python/spdk/rpc/ssam.py b/python/spdk/rpc/ssam.py
+new file mode 100644
+index 0000000..b3b9d72
+--- /dev/null
++++ b/python/spdk/rpc/ssam.py
+@@ -0,0 +1,271 @@
++# SPDX-License-Identifier: BSD-3-Clause
++# Copyright (C) 2021-2025 Huawei Technologies Co.
++# All rights reserved.
++
++from .helpers import deprecated_alias
++from getpass import getuser
++
++
++def log_command_info(client, event):
++    """log event info.
++    Args:
++        user_name: name of the user who issued the command
++        event: name of the RPC command being logged
++        src_addr: source address of the command
++    """
++    params = {
++        'user_name': getuser(),
++        'event': event,
++        'src_addr': "localhost",
++    }
++    return client.call('log_command_info', params)
++
++
++def log_info(func):
++    def wrapper_log_info(arg, *args, **kw):
++        log_command_info(arg.client, func.__name__)
++        return func(arg, *args, **kw)
++    return wrapper_log_info
++
++
++def create_blk_controller(client, dev_name, index, readonly=None, serial=None, vqueue=None):
++    """Create ssam BLK controller.
++    Args:
++        dev_name: device name to add to controller
++        index: function id or dbdf of PCI device
++        readonly: set controller as read-only
++        serial: set volume id
++        vqueue: set virtio queue num
++    """
++    params = {
++        'dev_name': dev_name,
++        'index': index,
++    }
++    if readonly:
++        params['readonly'] = readonly
++    if serial:
++        params['serial'] = serial
++    if vqueue is not None:
++        params['vqueue'] = vqueue
++    return client.call('create_blk_controller', params)
++
++
++def get_controllers(client, function_id=None, dbdf=None):
++    """Get information about configured ssam controllers.
++
++    Args:
++        function_id: function id of PCI device
++        dbdf: dbdf of PCI device
++
++    Returns:
++        List of ssam controllers.
++    """
++    params = {}
++    if function_id is not None:
++        params['function_id'] = function_id
++    if dbdf is not None:
++        params['dbdf'] = dbdf
++    return client.call('get_controllers', params)
++
++
++def get_scsi_controllers(client, name=None):
++    """Get information about configured ssam scsi controllers.
++
++    Args:
++        name: name of scsi controller
++
++    Returns:
++        List of ssam scsi controllers.
++    """
++    params = {}
++    if name is not None:
++        params['name'] = name
++    return client.call('get_scsi_controllers', params)
++
++
++def delete_controller(client, index):
++    """Delete ssam controller from configuration.
++    Args:
++        index: function id or dbdf of PCI device
++    """
++    params = {'index': index}
++    return client.call('delete_controller', params)
++
++
++def delete_scsi_controller(client, name):
++    """Delete ssam controller from configuration.
++    Args:
++        name: scsi controller name to be deleted
++    """
++    params = {'name': name}
++    return client.call('delete_scsi_controller', params)
++
++
++def controller_get_iostat(client, function_id=None, dbdf=None):
++    """Get iostat about configured ssam controllers.
++
++    Args:
++        function_id: function id of PCI device
++        dbdf: dbdf of PCI device
++
++    Returns:
++        List of iostat of ssam controllers.
++    """
++    params = {}
++    if function_id is not None:
++        params['function_id'] = function_id
++    if dbdf is not None:
++        params['dbdf'] = dbdf
++    return client.call('controller_get_iostat', params)
++
++
++def blk_device_iostat(client, index, tid=None, vq_idx=None):
++    """Get iostat about blk device.
++
++    Args:
++        index: function id or dbdf of PCI device
++        tid: tid to filter by (optional)
++        vq_idx: vqueue index (optional)
++
++    Returns:
++        List of iostat of the blk device.
++    """
++    params = {
++        'index': index,
++    }
++    if tid is not None:
++        params['tid'] = tid
++    if vq_idx is not None:
++        params['vq_idx'] = vq_idx
++    return client.call('blk_device_iostat', params)
++
++
++def controller_clear_iostat(client):
++    """Clear iostat about configured ssam controllers.
++    """
++    return client.call('controller_clear_iostat')
++
++
++def bdev_resize(client, function_id, new_size_in_mb):
++    """Resize bdev in the system.
++    Args:
++        function_id: function id of PCI device
++        new_size_in_mb: new bdev size for resize operation. The unit is MiB
++    """
++    params = {
++        'function_id': function_id,
++        'new_size_in_mb': new_size_in_mb,
++    }
++    return client.call('bdev_resize', params)
++
++
++def scsi_bdev_resize(client, name, tgt_id, new_size_in_mb):
++    """Resize scsi bdev in the system.
++    Args:
++        name: controller name of the PCI device
++        tgt_id: tgt id of bdev
++        new_size_in_mb: new bdev size for resize operation. The unit is MiB
++    """
++    params = {
++        'name': name,
++        'tgt_id': tgt_id,
++        'new_size_in_mb': new_size_in_mb,
++    }
++    return client.call('scsi_bdev_resize', params)
++
++
++def bdev_aio_resize(client, name, new_size_in_mb):
++    """Resize aio bdev in the system.
++    Args:
++        name: aio bdev name
++        new_size_in_mb: new bdev size for resize operation. The unit is MiB
++    """
++    params = {
++        'name': name,
++        'new_size_in_mb': new_size_in_mb,
++    }
++    return client.call('bdev_aio_resize', params)
++
++
++def os_ready(client):
++    """Write ready flag for booting OS.
++
++    """
++    return client.call('os_ready')
++
++
++def os_not_ready(client):
++    """Write not ready flag for booting OS.
++
++    """
++    return client.call('os_not_ready')
++
++
++def create_scsi_controller(client, dbdf, name):
++    """Create ssam scsi controller.
++    Args:
++        dbdf: the pci dbdf of virtio scsi controller
++        name: controller name to be created
++    """
++    params = {
++        'dbdf': dbdf,
++        'name': name,
++    }
++
++    return client.call('create_scsi_controller', params)
++
++
++def scsi_controller_add_target(client, name, scsi_tgt_num, bdev_name):
++    """Add LUN to ssam scsi controller target.
++    Args:
++        name: controller name where the lun is added
++        scsi_tgt_num: target number to use
++        bdev_name: name of bdev to add to target
++    """
++    params = {
++        'name': name,
++        'scsi_tgt_num': scsi_tgt_num,
++        'bdev_name': bdev_name,
++    }
++    return client.call('scsi_controller_add_target', params)
++
++
++def scsi_controller_remove_target(client, name, scsi_tgt_num):
++    """Remove LUN from ssam scsi controller target.
++    Args:
++        name: controller name to remove the lun from
++        scsi_tgt_num: target number to use
++    """
++    params = {
++        'name': name,
++        'scsi_tgt_num': scsi_tgt_num,
++    }
++    return client.call('scsi_controller_remove_target', params)
++
++
++def scsi_device_iostat(client, name, scsi_tgt_num):
++    """Get iostat about scsi device.
++
++    Args:
++        name: controller name
++        scsi_tgt_num: target number
++
++    Returns:
++        List of iostat of the scsi device.
++    """
++    params = {
++        'name': name,
++        'scsi_tgt_num': scsi_tgt_num,
++    }
++    return client.call('scsi_device_iostat', params)
++
++
++def device_pcie_list(client):
++    """Show storage device pcie list.
++
++    Returns:
++        List of storage device pcie.
++    """
++
++    return client.call('device_pcie_list')
+diff --git a/scripts/hw_dpu_rpc.py b/scripts/hw_dpu_rpc.py
+new file mode 100644
+index 0000000..f5fa36e
+--- /dev/null
++++ b/scripts/hw_dpu_rpc.py
+@@ -0,0 +1,324 @@
++#!/usr/bin/env python3
++# SPDX-License-Identifier: BSD-3-Clause
++# Copyright (C) 2021-2025 Huawei Technologies Co.
++# All rights reserved.
++#
++
++import argparse
++import logging
++import sys
++import os
++import stat
++import pwd
++import grp
++import json
++
++sys.path.append(os.path.dirname(__file__) + '/../python')
++
++import spdk.rpc as rpc  # noqa
++from spdk.rpc.client import print_dict, JSONRPCException  # noqa
++from spdk.rpc.helpers import deprecated_aliases  # noqa
++
++
++def get_parser():
++    parser = argparse.ArgumentParser(
++        description='SPDK RPC command line interface', usage='%(prog)s [options]', add_help=False)
++
++    parser.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    parser.add_argument('-r', dest='conn_retries',
++                        help='Retry connecting to the RPC server N times with 0.2s interval. Default: 0',
++                        default=0, type=int)
++    parser.add_argument('-t', dest='timeout',
++                        help='Timeout as a floating point number expressed in seconds, waiting for response. Default: 60.0',
++                        default=60.0, type=float)
++
++    parser.set_defaults(is_server=False)
++    parser.set_defaults(dry_run=False)
++    parser.set_defaults(port=5260)
++    parser.set_defaults(verbose="ERROR")
++    parser.set_defaults(server_addr='/var/tmp/spdk.sock')
++    return parser
++
++
++def change_queues_num(client, number):
++    if not (1 <= number <= 64):
++        print("the queue number is invalid; it must satisfy 1 <= number <= 64")
++        return
++    path = "/etc/dpak/ssam/parameter.json"
++    with open(path, 'r') as file:
++        try:
++            data = json.load(file)
++        except json.JSONDecodeError:
++            print("JSON file is malformed")
++            return
++
++    if "queues" not in data:
++        print("JSON file does not contain 'queues'")
++        return
++
++    data["queues"] = number
++    with open(path, 'w') as file:
++        json.dump(data, file, indent=4)
++
++
++def init_rpc_func():
++    parser = get_parser()
++    subparsers = parser.add_subparsers(help='RPC methods', dest='called_rpc_name', metavar='')
++
++    @rpc.ssam.log_info
++    def create_blk_controller(args):
++        rpc.ssam.create_blk_controller(args.client,
++                                       dev_name=args.dev_name,
++                                       index=args.index,
++                                       readonly=args.readonly,
++                                       serial=args.serial,
++                                       vqueue=args.vqueue)
++
++    p = subparsers.add_parser('create_blk_controller',
++                              help='Add a new block controller', add_help=False)
++    p.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    p.add_argument('dev_name', help='Name of block device')
++    p.add_argument('index', help='Function ID or dbdf')
++    p.add_argument("-r", "--readonly", action='store_true', help='Set controller as read-only')
++    p.add_argument("-s", "--serial", help='Set volume ID')
++    p.add_argument("-q", "--vqueue", help='Set virtio queue num with a range of [1, 32]', type=int, required=False)
++    p.set_defaults(func=create_blk_controller)
++
++    @rpc.ssam.log_info
++    def get_controllers(args):
++        print_dict(rpc.ssam.get_controllers(args.client, args.function_id, args.dbdf))
++
++    p = subparsers.add_parser('get_controllers',
++                              help='List all or specific controller(s)', add_help=False)
++    p.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    p.add_argument('-f', '--function_id', help="Function ID of PCI device", type=int, required=False)
++    p.add_argument('-d', '--dbdf', help="Dbdf of PCI device", required=False)
++    p.set_defaults(func=get_controllers)
++
++    @rpc.ssam.log_info
++    def get_scsi_controllers(args):
++        print_dict(rpc.ssam.get_scsi_controllers(args.client, args.name))
++
++    p = subparsers.add_parser('get_scsi_controllers', aliases=['scsi_controller_list'],
++                              help='List all or specific scsi controller(s)', add_help=False)
++    p.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    p.add_argument('-n', '--name', help="Name of controller", required=False)
++    p.set_defaults(func=get_scsi_controllers)
++
++    @rpc.ssam.log_info
++    def delete_controller(args):
++        rpc.ssam.delete_controller(args.client, index=args.index)
++
++    p = subparsers.add_parser('delete_controller',
++                              help='Delete a controller', add_help=False)
++    p.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    p.add_argument('index', help='Function ID or dbdf of PCI device')
++    p.set_defaults(func=delete_controller)
++
++    @rpc.ssam.log_info
++    def delete_scsi_controller(args):
++        rpc.ssam.delete_scsi_controller(args.client, name=args.name)
++
++    p = subparsers.add_parser('delete_scsi_controller', aliases=['scsi_controller_delete'],
++                              help='Delete a scsi controller', add_help=False)
++    p.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    p.add_argument('name', help='Name of controller to be deleted', type=str)
++    p.set_defaults(func=delete_scsi_controller)
++
++    @rpc.ssam.log_info
++    def bdev_resize(args):
++        rpc.ssam.bdev_resize(args.client,
++                             function_id=args.function_id,
++                             new_size_in_mb=args.new_size_in_mb)
++
++    p = subparsers.add_parser('bdev_resize',
++                              help='Resize a blk bdev by blk controller', add_help=False)
++    p.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    p.add_argument('function_id', help='Function ID of PCI device', type=int)
++    p.add_argument('new_size_in_mb', help='New size of bdev for resize operation. The unit is MiB', type=int)
++    p.set_defaults(func=bdev_resize)
++
++    @rpc.ssam.log_info
++    def scsi_bdev_resize(args):
++        rpc.ssam.scsi_bdev_resize(args.client,
++                                  name=args.name,
++                                  tgt_id=args.tgt_id,
++                                  new_size_in_mb=args.new_size_in_mb)
++
++    p = subparsers.add_parser('scsi_bdev_resize',
++                              help='Resize a scsi bdev by scsi controller', add_help=False)
++    p.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    p.add_argument('name', help='Name of controller for the PCI device', type=str)
++    p.add_argument('tgt_id', help='Tgt ID of bdev', type=int)
++    p.add_argument('new_size_in_mb', help='New size of bdev for resize operation. The unit is MiB', type=int)
++    p.set_defaults(func=scsi_bdev_resize)
++
++    @rpc.ssam.log_info
++    def bdev_aio_resize(args):
++        rpc.ssam.bdev_aio_resize(args.client,
++                                 name=args.name,
++                                 new_size_in_mb=args.new_size_in_mb)
++
++    p = subparsers.add_parser('bdev_aio_resize',
++                              help='Resize a bdev by bdev name', add_help=False)
++    p.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    p.add_argument('name', help='Name of aio bdev', type=str)
++    p.add_argument('new_size_in_mb', help='New size of bdev for resize operation. The unit is MiB', type=int)
++    p.set_defaults(func=bdev_aio_resize)
++
++    @rpc.ssam.log_info
++    def set_queues_num(args):
++        change_queues_num(args.client,
++                          number=args.number)
++    p = subparsers.add_parser('set_queues_num',
++                              help='Set the number of ssam queues', add_help=False)
++    p.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    p.add_argument('number', help='Number of queues', type=int)
++    p.set_defaults(func=set_queues_num)
++
++    @rpc.ssam.log_info
++    def os_ready(args):
++        rpc.ssam.os_ready(args.client)
++
++    p = subparsers.add_parser('os_ready',
++                              help='Write ready flag for booting OS', add_help=False)
++    p.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    p.set_defaults(func=os_ready)
++
++    @rpc.ssam.log_info
++    def os_not_ready(args):
++        rpc.ssam.os_not_ready(args.client)
++
++    p = subparsers.add_parser('os_not_ready',
++                              help='Write not ready flag for booting OS', add_help=False)
++    p.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    p.set_defaults(func=os_not_ready)
++
++    @rpc.ssam.log_info
++    def controller_get_iostat(args):
++        print_dict(rpc.ssam.controller_get_iostat(args.client, args.function_id, args.dbdf))
++
++    p = subparsers.add_parser('controller_get_iostat',
++                              help='Show all or specific controller(s) iostat', add_help=False)
++    p.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    p.add_argument('-f', '--function_id', help="Function ID of PCI device", type=int, required=False)
++    p.add_argument('-d', '--dbdf', help="Dbdf of PCI device", required=False)
++    p.set_defaults(func=controller_get_iostat)
++
++    @rpc.ssam.log_info
++    def blk_device_iostat(args):
++        print_dict(rpc.ssam.blk_device_iostat(args.client,
++                                              index=args.index,
++                                              tid=args.tid,
++                                              vq_idx=args.vq_idx))
++
++    p = subparsers.add_parser('blk_device_iostat',
++                              help='Show iostat of blk device', add_help=False)
++    p.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    p.add_argument('index', help='Function ID or dbdf')
++    p.add_argument('-t', "--tid", help='Tid', type=int, required=False)
++    p.add_argument("-q", "--vq_idx", help='Index of vqueue', type=int, required=False)
++    p.set_defaults(func=blk_device_iostat)
++
++    @rpc.ssam.log_info
++    def controller_clear_iostat(args):
++        rpc.ssam.controller_clear_iostat(args.client)
++
++    p = subparsers.add_parser('controller_clear_iostat',
++                              help='Clear all controllers iostat', add_help=False)
++    p.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    p.set_defaults(func=controller_clear_iostat)
++
++    @rpc.ssam.log_info
++    def create_scsi_controller(args):
++        rpc.ssam.create_scsi_controller(args.client,
++                                        dbdf=args.dbdf,
++                                        name=args.name)
++
++    p = subparsers.add_parser('create_scsi_controller', aliases=['scsi_controller_create'],
++                              help='Add a new scsi controller', add_help=False)
++    p.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    p.add_argument('dbdf', help='The pci dbdf of virtio scsi controller, which is obtained by \'device_pcie_list\'', type=str)
++    p.add_argument('name', help='Name of controller to be created', type=str)
++    p.set_defaults(func=create_scsi_controller)
++
++    @rpc.ssam.log_info
++    def scsi_controller_add_target(args):
++        rpc.ssam.scsi_controller_add_target(args.client,
++                                            name=args.name,
++                                            scsi_tgt_num=int(args.scsi_tgt_num),
++                                            bdev_name=args.bdev_name)
++
++    p = subparsers.add_parser('scsi_controller_add_target',
++                              help='Add LUN to ssam scsi controller target', add_help=False)
++    p.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    p.add_argument('name', help='Name of controller where lun is added', type=str)
++    p.add_argument('scsi_tgt_num', help='ID of target to use')
++    p.add_argument('bdev_name', help='Name of bdev to be added to target')
++    p.set_defaults(func=scsi_controller_add_target)
++
++    @rpc.ssam.log_info
++    def scsi_controller_remove_target(args):
++        rpc.ssam.scsi_controller_remove_target(args.client,
++                                               name=args.name,
++                                               scsi_tgt_num=int(args.scsi_tgt_num))
++
++    p = subparsers.add_parser('scsi_controller_remove_target',
++                              help='Remove LUN from ssam scsi controller target', add_help=False)
++    p.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    p.add_argument('name', help='Name of controller to remove lun', type=str)
++    p.add_argument('scsi_tgt_num', help='ID of target to use')
++    p.set_defaults(func=scsi_controller_remove_target)
++
++    @rpc.ssam.log_info
++    def scsi_device_iostat(args):
++        print_dict(rpc.ssam.scsi_device_iostat(args.client,
++                                               name=args.name,
++                                               scsi_tgt_num=int(args.scsi_tgt_num)))
++
++    p = subparsers.add_parser('scsi_device_iostat',
++                              help='Show iostat of scsi device', add_help=False)
++    p.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    p.add_argument('name', help='Name of controller', type=str)
++    p.add_argument('scsi_tgt_num', help='ID of target', type=int)
++    p.set_defaults(func=scsi_device_iostat)
++
++    @rpc.ssam.log_info
++    def device_pcie_list(args):
++        print_dict(rpc.ssam.device_pcie_list(args.client))
++
++    p = subparsers.add_parser('device_pcie_list',
++                              help='Show storage device pcie list', add_help=False)
++    p.add_argument('-h', '--help', action='help', help='Show this help message and exit')
++    p.set_defaults(func=device_pcie_list)
++
++    return parser
++
++
++if __name__ == "__main__":
++    def call_rpc_func(args):
++        args.func(args)
++        check_called_name(args.called_rpc_name)
++
++    def check_called_name(name):
++        if name in deprecated_aliases:
++            print("{} is deprecated, use {} instead.".format(name, deprecated_aliases[name]), file=sys.stderr)
++
++    parser = init_rpc_func()
++    args = parser.parse_args()
++
++    if sys.stdin.isatty() and not hasattr(args, 'func'):
++        # No arguments and no data piped through stdin
++        parser.print_help()
++        exit(1)
++
++    if args.called_rpc_name != "get_version":
++        args.client = rpc.client.JSONRPCClient(args.server_addr, args.port, args.timeout,
++                                               log_level=getattr(logging, args.verbose.upper()),
++                                               conn_retries=args.conn_retries)
++
++    try:
++        call_rpc_func(args)
++    except JSONRPCException as ex:
++        print(ex.message)
++        exit(1)
+diff --git a/scripts/parameter.json b/scripts/parameter.json
+new file mode 100644
+index 0000000..b6958a6
+--- /dev/null
++++ b/scripts/parameter.json
+@@ -0,0 +1,5 @@
++{
++    "mempool_size_mb": 1024,
++    "queues": 16,
++    "mode": "default"
++}
+diff --git a/test/unit/lib/event/app.c/app_ut.c b/test/unit/lib/event/app.c/app_ut.c
+index 36aee1a..d59e01f 100644
+--- a/test/unit/lib/event/app.c/app_ut.c
++++ b/test/unit/lib/event/app.c/app_ut.c
+@@ -22,7 +22,7 @@ DEFINE_STUB_V(spdk_rpc_register_alias_deprecated, (const char *method, const cha
+ DEFINE_STUB_V(spdk_rpc_set_state, (uint32_t state));
+ DEFINE_STUB(spdk_rpc_get_state, uint32_t, (void), SPDK_RPC_RUNTIME);
+ DEFINE_STUB(spdk_rpc_initialize, int, (const char *listen_addr,
+-		const struct spdk_rpc_opts *opts), 0);
++		const struct spdk_rpc_opts *opts, int interval), 0);
+ DEFINE_STUB_V(spdk_rpc_set_allowlist, (const char **rpc_allowlist));
+ DEFINE_STUB_V(spdk_rpc_finish, (void));
+ DEFINE_STUB_V(spdk_rpc_server_finish, (const char *listen_addr));
+diff --git a/test/unit/lib/init/rpc.c/rpc_ut.c b/test/unit/lib/init/rpc.c/rpc_ut.c
+index 8ffebfb..ec095a1 100644
+--- a/test/unit/lib/init/rpc.c/rpc_ut.c
++++ b/test/unit/lib/init/rpc.c/rpc_ut.c
+@@ -84,12 +84,12 @@ initialize_servers(void)
+ 
+ 	CU_ASSERT(STAILQ_EMPTY(&g_init_rpc_servers));
+ 
+-	rc = spdk_rpc_initialize(g_test_addr1, NULL);
++	rc = spdk_rpc_initialize(g_test_addr1, NULL, RPC_SELECT_INTERVAL);
+ 	CU_ASSERT(rc == 0);
+ 	CU_ASSERT(server_exists(g_test_addr1));
+ 	CU_ASSERT(server_paused(g_test_addr1) == false);
+ 
+-	rc = spdk_rpc_initialize(g_test_addr2, NULL);
++	rc = spdk_rpc_initialize(g_test_addr2, NULL, RPC_SELECT_INTERVAL);
+ 	CU_ASSERT(rc == 0);
+ 	CU_ASSERT(server_exists(g_test_addr2));
+ 	CU_ASSERT(server_paused(g_test_addr2) == false);
+@@ -137,13 +137,13 @@ test_rpc_set_spdk_log_opts(void)
+ 	server2_opts.log_level = SPDK_LOG_ERROR;
+ 	server2_opts.size = sizeof(server2_opts);
+ 
+-	spdk_rpc_initialize(g_test_addr1, &server1_opts);
++	spdk_rpc_initialize(g_test_addr1, &server1_opts, RPC_SELECT_INTERVAL);
+ 	CU_ASSERT(g_test_log_file == server1_opts.log_file);
+ 	CU_ASSERT(g_test_log_level == server1_opts.log_level);
+ 	CU_ASSERT(g_test_log_file_set_count == 1);
+ 	CU_ASSERT(g_test_log_level_set_count == 1);
+ 
+-	spdk_rpc_initialize(g_test_addr2, &server2_opts);
++	spdk_rpc_initialize(g_test_addr2, &server2_opts, RPC_SELECT_INTERVAL);
+ 	CU_ASSERT(g_test_log_file == server2_opts.log_file);
+ 	CU_ASSERT(g_test_log_level == server2_opts.log_level);
+ 	CU_ASSERT(g_test_log_file_set_count == 2);
+@@ -160,13 +160,13 @@ test_rpc_set_spdk_log_default_opts(void)
+ 
+ 	reset_global_counters();
+ 
+-	spdk_rpc_initialize(g_test_addr1, NULL);
++	spdk_rpc_initialize(g_test_addr1, NULL, RPC_SELECT_INTERVAL);
+ 	CU_ASSERT(g_test_log_file == test_log_file_default);
+ 	CU_ASSERT(g_test_log_level == test_log_level_default);
+ 	CU_ASSERT(g_test_log_file_set_count == 1);
+ 	CU_ASSERT(g_test_log_level_set_count == 1);
+ 
+-	spdk_rpc_initialize(g_test_addr2, NULL);
++	spdk_rpc_initialize(g_test_addr2, NULL, RPC_SELECT_INTERVAL);
+ 	CU_ASSERT(g_test_log_file == test_log_file_default);
+ 	CU_ASSERT(g_test_log_level == test_log_level_default);
+ 	CU_ASSERT(g_test_log_file_set_count == 1);
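
Illustrative note (not part of the patch): the python/spdk/rpc/ssam.py wrappers added above are plain JSON-RPC helpers, so they can be driven from any script that opens the SPDK socket. A minimal sketch, assuming the ssam application is listening on the default /var/tmp/spdk.sock and that the bdev name "Malloc0", the function index "0" and the queue count are made-up example values:

    import sys
    sys.path.append('python')  # path to the SPDK python package; adjust to your checkout

    import spdk.rpc.ssam as ssam
    from spdk.rpc.client import JSONRPCClient, print_dict

    # Connect the same way scripts/hw_dpu_rpc.py does (addr, port, timeout).
    client = JSONRPCClient('/var/tmp/spdk.sock', 5260, 60.0)

    # Create a blk controller on function 0 backed by the hypothetical bdev "Malloc0".
    ssam.create_blk_controller(client, dev_name='Malloc0', index='0', vqueue=4)

    # List controllers and fetch their iostat.
    print_dict(ssam.get_controllers(client))
    print_dict(ssam.controller_get_iostat(client, function_id=0))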
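
Another sketch (again not part of the patch): the RPC-level counterpart of the ssam_scsi_dev_add_tgt()/ssam_scsi_dev_remove_tgt() flow above, i.e. hot-adding and hot-removing a SCSI target through the new scsi_controller_* RPCs. The controller name "ssam_scsi0", the dbdf "0000:03:00.0", target number 0 and bdev "Malloc1" are placeholder values.

    import spdk.rpc.ssam as ssam
    from spdk.rpc.client import JSONRPCClient, print_dict

    client = JSONRPCClient('/var/tmp/spdk.sock', 5260, 60.0)

    # Create a SCSI controller on a PCI function reported by device_pcie_list.
    ssam.create_scsi_controller(client, dbdf='0000:03:00.0', name='ssam_scsi0')

    # Hot-add target 0 backed by the hypothetical bdev "Malloc1", then inspect it.
    ssam.scsi_controller_add_target(client, name='ssam_scsi0', scsi_tgt_num=0, bdev_name='Malloc1')
    print_dict(ssam.scsi_device_iostat(client, name='ssam_scsi0', scsi_tgt_num=0))

    # Hot-remove the target again (exercises the VIRTIO_SCSI_T_TRANSPORT_RESET event path).
    ssam.scsi_controller_remove_target(client, name='ssam_scsi0', scsi_tgt_num=0)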
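
For reference, a standalone sketch of the read-modify-write that change_queues_num() in scripts/hw_dpu_rpc.py performs on /etc/dpak/ssam/parameter.json (the path and the 1..64 bound are taken from the script); raising exceptions instead of printing is a stylistic choice of this sketch, not of the patch.

    import json

    def set_ssam_queue_count(path, number):
        # Same validity window as change_queues_num() in hw_dpu_rpc.py.
        if not 1 <= number <= 64:
            raise ValueError('queue count must satisfy 1 <= number <= 64')
        with open(path, 'r') as f:
            data = json.load(f)
        if 'queues' not in data:
            raise KeyError("parameter file has no 'queues' key")
        data['queues'] = number
        with open(path, 'w') as f:
            json.dump(data, f, indent=4)

    # Example (hypothetical): set_ssam_queue_count('/etc/dpak/ssam/parameter.json', 32)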