diff --git a/LICENSE b/LICENSE
index c1d45370dd1c1665b7665b06168af41e716854e3..5be3b9026a3251184059781c680a1cef38449984 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,5 +1,6 @@
(1) The directories below are licensed under GPL-2.0-or-later.
./newip/
+ ./tzdriver/
./xpm/
./qos_auth/
./ucollection/
diff --git a/OAT.xml b/OAT.xml
index c024cf3707521158847114f2773a341c988a4214..ed4c568bab84406b687f1644d68cef71d871f5d6 100644
--- a/OAT.xml
+++ b/OAT.xml
@@ -64,6 +64,7 @@ Note:If the text contains special characters, please escape them according to th
+
@@ -73,6 +74,8 @@ Note:If the text contains special characters, please escape them according to th
+
+
@@ -93,6 +96,7 @@ Note:If the text contains special characters, please escape them according to th
+
diff --git a/tzdriver/CMakeLists.txt b/tzdriver/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8f454c86ae63b4a3ce5fc524d2ab329cfed29ad1
--- /dev/null
+++ b/tzdriver/CMakeLists.txt
@@ -0,0 +1,2 @@
+add_device_ko(LOCAL_MODULE tzdriver
+ KO_SRC_FOLDER ${CMAKE_CURRENT_SOURCE_DIR})
\ No newline at end of file
diff --git a/tzdriver/Kconfig b/tzdriver/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..3e1d3356398559be87f79333f2be00e279765121
--- /dev/null
+++ b/tzdriver/Kconfig
@@ -0,0 +1,37 @@
+menu "TEE OS"
+
+config TZDRIVER
+ tristate "Secure Execution Communicator driver"
+ default n
+ help
+ Provides a communication interface between user space and the
+ TrustZone operating environment.
+
+config SECBOOT_IMG
+ bool "tzdriver split secboot img into modem and ap"
+ default n
+ depends on KERNEL_CLIENT
+ help
+ Macro defined for splitting the secboot image into modem and AP images
+
+config SECBOOT_IMG_V2
+ bool "tzdriver split modem and ap for v2"
+ default n
+ depends on KERNEL_CLIENT
+ help
+ Macro defined for splitting the modem and AP images, v2
+
+config ASAN_DEBUG
+ bool "ASAN debug version"
+ default n
+ help
+ Macro defined for the ASAN debug version
+
+source "drivers/tzdriver/auth/Kconfig"
+source "drivers/tzdriver/core/Kconfig"
+source "drivers/tzdriver/tlogger/Kconfig"
+source "drivers/tzdriver/agent_rpmb/Kconfig"
+source "drivers/tzdriver/ion/Kconfig"
+source "drivers/tzdriver/tui/Kconfig"
+endmenu
+
diff --git a/tzdriver/Makefile b/tzdriver/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..3f9ef62cdb7a14394dc9b427e38928c9e4e38d00
--- /dev/null
+++ b/tzdriver/Makefile
@@ -0,0 +1,14 @@
+ifeq ($(CONFIG_TZDRIVER),y)
+KERNEL_DIR := $(srctree)
+
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/../../../../third_party/bounds_checking_function/include/
+
+obj-$(CONFIG_TZDRIVER) += agent_rpmb/
+obj-$(CONFIG_TZDRIVER) += auth/
+obj-$(CONFIG_TZDRIVER) += core/
+obj-$(CONFIG_TZDRIVER) += tlogger/
+obj-$(CONFIG_TZDRIVER) += ion/
+obj-$(CONFIG_TZDRIVER) += tui/
+obj-$(CONFIG_TZDRIVER) += whitelist/
+
+endif
diff --git a/tzdriver/README.md b/tzdriver/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..116db16c31b24881dd6e7724cdab65eda84499ba
--- /dev/null
+++ b/tzdriver/README.md
@@ -0,0 +1,78 @@
+# Tzdriver Driver
+
+## Introduction
+
+Tzdriver is a kernel driver deployed on the REE side that supports communication between the REE and the TEE. Tzdriver processes commands from the TEE Client and issues the instructions that switch execution from the REE to the TEE. It also manages shared memory to support data sharing between the REE and the TEE.
+
+Tzdriver includes the following main modules:
+
+smc: Sends SMC instructions to switch the CPU from the REE side to the TEE side.
+
+session_manager: Manages communication sessions between the REE and the TEE.
+
+mailbox: Shares data between the REE and the TEE through the mailbox memory.
+
+cmd_monitor: Monitors the execution of SMC instructions and provides a timeout detection mechanism.
+
+tzdebug: Creates debugfs nodes to help developers debug TEE functionality.
+
+tlogger: TEE log driver module, supporting TEE log recording and printing.
+
+Figure 1: Tzdriver architecture diagram
+
+
+
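+As a quick orientation, the sketch below shows how this path can be driven from kernel space when CONFIG_KERNEL_CLIENT is enabled. It is a minimal sketch assuming the TEEK_* kernel client API declared in teek_client_api.h from the tee_client repository; the UUID and command ID are placeholders, and the exact signatures should be checked against that header:
+
+```
+#include "teek_client_api.h"
+
+/* placeholder UUID of the target TA (illustrative only) */
+static const TEEC_UUID demo_uuid = {
+    0x79b77788, 0x9789, 0x4a7a,
+    { 0xa2, 0xbe, 0xb6, 0x01, 0x55, 0xee, 0xf5, 0xf3 }
+};
+
+static int demo_invoke(void)
+{
+    TEEC_Context ctx;
+    TEEC_Session sess;
+    TEEC_Operation op = { 0 };
+    uint32_t origin = 0;
+
+    if (TEEK_InitializeContext(NULL, &ctx) != TEEC_SUCCESS) /* attach to tzdriver */
+        return -1;
+
+    op.started = 1; /* no parameters for this demo command */
+    /* session_manager tracks this session on the REE side */
+    if (TEEK_OpenSession(&ctx, &sess, &demo_uuid, TEEC_LOGIN_IDENTIFY,
+                         NULL, &op, &origin) == TEEC_SUCCESS) {
+        /* each invoke sends an SMC instruction to switch into the TEE */
+        (void)TEEK_InvokeCommand(&sess, 0, &op, &origin);
+        TEEK_CloseSession(&sess);
+    }
+    TEEK_FinalizeContext(&ctx);
+    return 0;
+}
+```
+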
+## Directory
+
+```
+/kernel/linux/common_modules/tzdriver
+├── core
+│   ├── cmdmonitor.c        # SMC instruction execution monitoring
+│   ├── gp_ops.c            # GP TEE specification processing logic
+│   ├── mailbox_mempool.c   # REE and TEE shared memory management
+│   ├── session_manager.c   # Session management for CA access to TA
+│   ├── smc_smp.c           # Sends SMC instructions to switch to the TEE
+│   └── tzdebug.c           # Debugging module
+├── tlogger                 # TEE log driver
+```
+
+## Configuration Options
+
+To enable the Tzdriver driver, modify the device's defconfig file in the Linux kernel code repository and add the Tzdriver configuration options:
+
+```
+#
+# TEEOS
+#
+CONFIG_TZDRIVER=y
+CONFIG_CPU_AFF_NR=1
+CONFIG_KERNEL_CLIENT=y
+CONFIG_TEELOG=y
+CONFIG_PAGES_MEM=y
+CONFIG_THIRDPARTY_COMPATIBLE=y
+```
+
+The meaning of each option is shown in the table below:
+
+**Table 1** Configuration Options Description
+
+| Parameters | Description |
+| ---------------------------- | ------------------------------------------------------------ |
+| CONFIG_TZDRIVER | Tzdriver module switch. |
+| CONFIG_CPU_AFF_NR | CA core-binding function. A non-zero value N restricts TEE entry to CPUs whose ID is less than N; 0 means no restriction. Tzdriver currently only supports running on core 0, so the value is 1. |
+| CONFIG_KERNEL_CLIENT | Support for kernel CAs. |
+| CONFIG_TEELOG | TEE log switch; enabling it is recommended. |
+| CONFIG_PAGES_MEM | TEE log memory management; enabling it is recommended. |
+| CONFIG_THIRDPARTY_COMPATIBLE | Compatibility with third-party OP-TEE; for example, the RK3568 chip requires this option. |
+
+## Compile Command
+
+Tzdriver is compiled together with the kernel. Taking the RK3568 chip as an example, boot_linux.img can be built on its own with the following command:
+
+```
+./build.sh --product-name rk3568 --ccache --build-target kernel --gn-args linux_kernel_version=\"linux-5.10\"
+```
+
+## Related Code Repository
+
+[tee_client](https://gitee.com/openharmony/tee_tee_client)
diff --git a/tzdriver/README_zh.md b/tzdriver/README_zh.md
new file mode 100644
index 0000000000000000000000000000000000000000..957bd2a18c4b2d56835ee25c213bae344309c641
--- /dev/null
+++ b/tzdriver/README_zh.md
@@ -0,0 +1,78 @@
+# Tzdriver Driver
+
+## Introduction
+
+Tzdriver is a kernel driver deployed on the REE side that supports communication between the REE and the TEE. Tzdriver processes commands from the TEE Client and issues the instructions that switch execution from the REE to the TEE. It also manages shared memory to support data sharing between the REE and the TEE.
+
+Tzdriver includes the following main modules:
+
+smc: Sends SMC instructions to switch the CPU from the REE side to the TEE side.
+
+session_manager: Manages communication sessions between the REE and the TEE.
+
+mailbox: Shares data between the REE and the TEE through the mailbox memory.
+
+cmd_monitor: Monitors the execution of SMC instructions and provides a timeout detection mechanism.
+
+tzdebug: Creates debugfs nodes to help developers debug TEE functionality.
+
+tlogger: TEE log driver module, supporting TEE log recording and printing.
+
+Figure 1: Tzdriver architecture diagram
+
+
+
+## Directory
+
+```
+/kernel/linux/common_modules/tzdriver
+├── core
+│   ├── cmdmonitor.c        # SMC instruction execution monitoring
+│   ├── gp_ops.c            # GP TEE specification processing logic
+│   ├── mailbox_mempool.c   # REE and TEE shared memory management
+│   ├── session_manager.c   # Session management for CA access to TA
+│   ├── smc_smp.c           # Sends SMC instructions to switch to the TEE
+│   └── tzdebug.c           # Debugging module
+├── tlogger                 # TEE log driver
+```
+
+## Configuration Options
+
+To enable the Tzdriver driver, modify the device's defconfig file in the Linux kernel code repository and add the Tzdriver configuration options:
+
+```
+#
+# TEEOS
+#
+CONFIG_TZDRIVER=y
+CONFIG_CPU_AFF_NR=1
+CONFIG_KERNEL_CLIENT=y
+CONFIG_TEELOG=y
+CONFIG_PAGES_MEM=y
+CONFIG_THIRDPARTY_COMPATIBLE=y
+```
+
+The meaning of each option is shown in the table below:
+
+**Table 1** Configuration Options Description
+
+| Parameters | Description |
+| ---------------------------- | ------------------------------------------------------------ |
+| CONFIG_TZDRIVER | Tzdriver module switch. |
+| CONFIG_CPU_AFF_NR | CA core-binding function. A non-zero value N restricts TEE entry to CPUs whose ID is less than N; 0 means no restriction. Tzdriver currently only supports running on core 0, so the value is 1. |
+| CONFIG_KERNEL_CLIENT | Support for kernel CAs. |
+| CONFIG_TEELOG | TEE log switch; enabling it is recommended. |
+| CONFIG_PAGES_MEM | TEE log memory management; enabling it is recommended. |
+| CONFIG_THIRDPARTY_COMPATIBLE | Compatibility with third-party OP-TEE; for example, adapting to the RK3568 chip requires this option. |
+
+## Compile Command
+
+Tzdriver is compiled together with the kernel. Taking the RK3568 chip as an example, boot_linux.img can be built on its own with the following command:
+
+```
+./build.sh --product-name rk3568 --ccache --build-target kernel --gn-args linux_kernel_version=\"linux-5.10\"
+```
+
+## Related Code Repository
+
+[tee_client](https://gitee.com/openharmony/tee_tee_client)
diff --git a/tzdriver/agent.h b/tzdriver/agent.h
new file mode 120000
index 0000000000000000000000000000000000000000..295b146dea8443325a41bc30398c33cfba293d59
--- /dev/null
+++ b/tzdriver/agent.h
@@ -0,0 +1 @@
+core/agent.h
\ No newline at end of file
diff --git a/tzdriver/agent_rpmb/Kconfig b/tzdriver/agent_rpmb/Kconfig
new file mode 100755
index 0000000000000000000000000000000000000000..710dd61325b02752cd73e78e1628993856380a7a
--- /dev/null
+++ b/tzdriver/agent_rpmb/Kconfig
@@ -0,0 +1,6 @@
+config RPMB_AGENT
+ bool "Tzdriver Rpmb Agent"
+ default n
+ depends on TZDRIVER
+ help
+ Tzdriver Rpmb Agent
\ No newline at end of file
diff --git a/tzdriver/agent_rpmb/Makefile b/tzdriver/agent_rpmb/Makefile
new file mode 100755
index 0000000000000000000000000000000000000000..3cbd7b78e61efec18414f28d7c0b998aaccb48f0
--- /dev/null
+++ b/tzdriver/agent_rpmb/Makefile
@@ -0,0 +1,35 @@
+KERNEL_DIR := $(srctree)
+
+ifneq ($(TARGET_BUILD_VARIANT),user)
+ ccflags-y += -DDEF_ENG
+endif
+
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/platform_drivers/tzdriver
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/platform_drivers/tzdriver/core
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/../../../../third_party/bounds_checking_function/include
+
+ifeq ($(CONFIG_MEDIATEK_SOLUTION),y)
+ MTK_PLATFORM := $(subst ",,$(CONFIG_MTK_PLATFORM))
+ EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/mmc/core
+ EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/mmc/card
+ EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/misc/mediatek/include
+ EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/misc/mediatek/include/mt-plat/$(MTK_PLATFORM)/include
+ EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/misc/mediatek/include/mt-plat
+ EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/misc/mediatek/base/power/$(MTK_PLATFORM)
+ EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/misc/mediatek/base/power/include
+ EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/devfreq
+ EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/scsi/ufs
+ EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/mmc/host/mediatek/ComboA
+ ifeq ($(CONFIG_MTK_PLATFORM), "mt6761")
+ EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/mmc/host/mediatek/ComboA/mt6765
+ else
+ EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/mmc/host/mediatek/ComboA/$(MTK_PLATFORM)
+ endif
+ EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/mmc/host/mediatek/$(MTK_PLATFORM)
+ EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/mmc/host/mediatek/$(MTK_PLATFORM)/$(MTK_PLATFORM)
+ EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/platform_drivers/tzdriver/agent_rpmb/mplat
+ obj-$(CONFIG_RPMB_AGENT) += core/agent_rpmb.o mplat/rpmb_driver.o
+else
+ EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/platform_drivers/tzdriver/agent_rpmb/generic
+ obj-$(CONFIG_RPMB_AGENT) += core/agent_rpmb.o generic/rpmb_driver.o
+endif
\ No newline at end of file
diff --git a/tzdriver/agent_rpmb/core/agent_rpmb.c b/tzdriver/agent_rpmb/core/agent_rpmb.c
new file mode 100755
index 0000000000000000000000000000000000000000..faba0eca48acb1eb5db6dfd6f4f9ceaed7594882
--- /dev/null
+++ b/tzdriver/agent_rpmb/core/agent_rpmb.c
@@ -0,0 +1,331 @@
+/*
+ * agent_rpmb.c
+ *
+ * rpmb agent manager function, such as register
+ *
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "agent_rpmb.h"
+#include <linux/err.h>
+#include <linux/mmc/ioctl.h> /* for struct mmc_ioc_rpmb */
+#include <linux/mmc/card.h> /* for struct mmc_card */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <securec.h>
+
+#include "teek_client_constants.h"
+#include "teek_ns_client.h"
+#include "agent.h"
+#include "tc_ns_log.h"
+#include "smc_smp.h"
+#include "rpmb_driver.h"
+
+enum rpmb_cmd {
+ SEC_GET_DEVINFO,
+ SEC_SEND_IOCMD,
+ SEC_RPMB_LOCK,
+ SEC_RPMB_UNLOCK,
+ SEC_RPMB_ABNORMAL,
+};
+
+#define RPMB_EMMC_CID_SIZE 32
+
+struct rpmb_devinfo {
+ uint8_t cid[RPMB_EMMC_CID_SIZE]; /* eMMC card ID */
+
+ uint8_t rpmb_size_mult; /* EXT CSD-slice 168 "RPMB Size" */
+ uint8_t rel_wr_sec_cnt; /* EXT CSD-slice 222 "Reliable Write Sector Count" */
+ uint8_t tmp[2];
+ uint32_t blk_size; /* RPMB blocksize */
+
+ uint32_t max_blk_idx; /* The highest block index supported by current device */
+ uint32_t access_start_blk_idx; /* The start block index SecureOS can access */
+
+ uint32_t access_total_blk; /* The total blocks SecureOS can access */
+ uint32_t tmp2;
+
+ uint32_t mdt; /* 1: EMMC 2: UFS */
+
+ /* the device's support bitmap; for example, if it supports operations 1, 2 and 32, the value is 0x80000003 */
+ uint32_t support_bit_map;
+
+ uint32_t version;
+ uint32_t tmp3;
+};
+struct rpmb_ioc {
+ struct storage_blk_ioc_rpmb_data ioc_rpmb; /* sizeof() = 72 */
+
+ uint32_t buf_offset[STORAGE_IOC_MAX_RPMB_CMD];
+ uint32_t tmp;
+};
+
+#define RPMB_CTRL_MAGIC 0x5A5A5A5A
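+/* control block shared with the TEE side; buf_start marks the start of the in-band payload area */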
+struct rpmb_ctrl_t {
+ uint32_t magic;
+ uint32_t cmd_sn;
+ uint8_t lock_flag;
+ uint8_t tmp[3];
+ enum rpmb_op_type op_type;
+ union __args {
+ struct rpmb_devinfo get_devinfo;
+ struct rpmb_ioc send_ioccmd;
+ } args;
+ enum rpmb_cmd cmd;
+ uint32_t reserved;
+ uint32_t buf_len;
+ int32_t ret;
+ uint32_t reserved2;
+ uint32_t buf_start[0];
+}; /* sizeof() = 8 * 16 = 128 */
+
+static struct rpmb_ctrl_t *m_rpmb_ctrl = NULL;
+/*
+ * the data_ptr from SecureOS is a physical address,
+ * so we MUST convert it to a virtual address;
+ * otherwise we get a segmentation fault
+ */
+static void update_dataptr(struct rpmb_ctrl_t *trans_ctrl)
+{
+ uint32_t i;
+ uint32_t offset = 0;
+ uint8_t *dst = NULL;
+
+ if (trans_ctrl == NULL)
+ return;
+
+ for (i = 0; i < STORAGE_IOC_MAX_RPMB_CMD; i++) {
+ offset = trans_ctrl->args.send_ioccmd.buf_offset[i];
+ if (offset > trans_ctrl->buf_len)
+ continue;
+ if (trans_ctrl->args.send_ioccmd.ioc_rpmb.data[i].buf != NULL) {
+ dst = (uint8_t *)trans_ctrl->buf_start + offset;
+ /* update the data_ptr */
+ trans_ctrl->args.send_ioccmd.ioc_rpmb.data[i].buf = dst;
+ }
+ }
+}
+
+struct rpmb_agent_lock_info {
+ unsigned int dev_id;
+ bool lock_need_free;
+};
+static struct rpmb_agent_lock_info lock_info = { 0 };
+
+static u64 g_tee_rpmb_lock_done = 0;
+static u64 g_tee_rpmb_lock_release = 0;
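+/* 800 ms, expressed in nanoseconds */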
+#define RPMB_TIMEOUT_TIME_TEE 800000000
+
+
+static void process_rpmb_lock(const struct tee_agent_kernel_ops *agent_instance)
+{
+ struct smc_event_data *event_data = NULL;
+
+ rpmb_driver_counter_lock();
+ g_tee_rpmb_lock_done = dfx_getcurtime();
+
+ tlogd("obtain rpmb device lock\n");
+
+ event_data = find_event_control(agent_instance->agent_id);
+ if (event_data != NULL) {
+ lock_info.dev_id = event_data->cmd.dev_file_id;
+ lock_info.lock_need_free = true;
+ tlogd("rpmb counter lock context: dev_id=%d\n",
+ lock_info.dev_id);
+ }
+ put_agent_event(event_data);
+}
+
+static void process_rpmb_unlock(int operation)
+{
+ u64 temp_cost_time;
+
+ /* clear the lock info */
+ lock_info.dev_id = 0;
+ lock_info.lock_need_free = false;
+ rpmb_driver_counter_unlock();
+
+ g_tee_rpmb_lock_release = dfx_getcurtime();
+ temp_cost_time = g_tee_rpmb_lock_release - g_tee_rpmb_lock_done;
+ if (temp_cost_time > RPMB_TIMEOUT_TIME_TEE) {
+ tloge("rpmb tee cost time is more than 800ms, start[%llu], unlock[%llu], cost[%llu], operation[%d]\n",
+ g_tee_rpmb_lock_done, g_tee_rpmb_lock_release,
+ temp_cost_time, operation);
+ tee_report_rpmb();
+ }
+ tlogd("free rpmb device lock\n");
+}
+
+#define GET_RPMB_LOCK_MASK 0x01
+#define FREE_RPMB_LOCK_MASK 0x02
+static void send_ioccmd(const struct tee_agent_kernel_ops *agent_instance)
+{
+ uint8_t lock_flag;
+ int32_t ret;
+
+ if (agent_instance == NULL || m_rpmb_ctrl == NULL) {
+ tloge("bad parameters\n");
+ return;
+ }
+
+ lock_flag = m_rpmb_ctrl->lock_flag;
+
+ if (lock_flag & GET_RPMB_LOCK_MASK)
+ process_rpmb_lock(agent_instance);
+
+ ret = rpmb_ioctl_cmd(RPMB_FUNC_ID_SECURE_OS, m_rpmb_ctrl->op_type,
+ &m_rpmb_ctrl->args.send_ioccmd.ioc_rpmb);
+ if (ret)
+ tloge("rpmb ioctl failed: %d\n", ret);
+
+ if (lock_flag & FREE_RPMB_LOCK_MASK)
+ process_rpmb_unlock(m_rpmb_ctrl->op_type);
+ m_rpmb_ctrl->ret = ret;
+}
+
+static int rpmb_check_data(struct rpmb_ctrl_t *trans_ctrl)
+{
+ if (trans_ctrl == NULL)
+ return 0;
+
+ if (trans_ctrl->magic != RPMB_CTRL_MAGIC) {
+ tloge("rpmb check magic error, now is 0x%x\n",
+ trans_ctrl->magic);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void rpmb_handle_cmd(struct tee_agent_kernel_ops *agent_instance)
+{
+ switch (m_rpmb_ctrl->cmd) {
+ case SEC_SEND_IOCMD:
+ tlogd("rpmb agent cmd is send ioc\n");
+ send_ioccmd(agent_instance);
+ break;
+ case SEC_RPMB_LOCK:
+ tlogd("rpmb agent cmd is lock\n");
+ process_rpmb_lock(agent_instance);
+ m_rpmb_ctrl->ret = 0;
+ break;
+ case SEC_RPMB_UNLOCK:
+ tlogd("rpmb agent cmd is unlock\n");
+ process_rpmb_unlock(SEC_RPMB_UNLOCK);
+ m_rpmb_ctrl->ret = 0;
+ break;
+ default:
+ tloge("rpmb agent cmd not supported 0x%x\n", m_rpmb_ctrl->cmd);
+ break;
+ }
+}
+
+static int rpmb_agent_work(struct tee_agent_kernel_ops *agent_instance)
+{
+ struct rpmb_ctrl_t *trans_ctrl = NULL;
+ errno_t rc = EOK;
+ uint32_t copy_len;
+
+ if (agent_instance == NULL || agent_instance->agent_buff == NULL) {
+ tloge("agent buff invalid\n");
+ return -1;
+ }
+
+ trans_ctrl = (struct rpmb_ctrl_t *)agent_instance->agent_buff;
+ if (rpmb_check_data(trans_ctrl) != 0) {
+ trans_ctrl->ret = TEEC_ERROR_BAD_FORMAT;
+ return -1;
+ }
+
+ if (m_rpmb_ctrl == NULL) {
+ m_rpmb_ctrl = kzalloc(agent_instance->agent_buff_size,
+ GFP_KERNEL);
+ if (m_rpmb_ctrl == NULL) {
+ tloge("memory alloc failed\n");
+ trans_ctrl->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ return -1;
+ }
+ }
+ rc = memcpy_s((void *)m_rpmb_ctrl,
+ agent_instance->agent_buff_size, (void *)trans_ctrl,
+ sizeof(*m_rpmb_ctrl) + trans_ctrl->buf_len);
+ if (rc != EOK) {
+ tloge("memcpy_s failed: 0x%x\n", rc);
+ trans_ctrl->ret = TEEC_ERROR_SECURITY;
+ goto clean;
+ }
+ update_dataptr(m_rpmb_ctrl);
+ rpmb_handle_cmd(agent_instance);
+ copy_len = agent_instance->agent_buff_size -
+ offsetof(struct rpmb_ctrl_t, buf_start);
+ rc = memcpy_s((void *)trans_ctrl->buf_start, copy_len,
+ (void *)m_rpmb_ctrl->buf_start, copy_len);
+ if (rc != EOK) {
+ tloge("memcpy_s 2 failed: 0x%x\n", rc);
+ trans_ctrl->ret = TEEC_ERROR_SECURITY;
+ goto clean;
+ }
+ trans_ctrl->ret = m_rpmb_ctrl->ret;
+
+ return 0;
+clean:
+ trans_ctrl->ret = TEEC_ERROR_SECURITY;
+ kfree(m_rpmb_ctrl);
+ m_rpmb_ctrl = NULL;
+ return -1;
+}
+
+static int rpmb_agent_exit(struct tee_agent_kernel_ops *agent_instance)
+{
+ tloge("rpmb agent is exit is being invoked\n");
+
+ if (m_rpmb_ctrl != NULL) {
+ kfree(m_rpmb_ctrl);
+ m_rpmb_ctrl = NULL;
+ }
+
+ return 0;
+}
+
+static int rpmb_agent_crash_work(
+ struct tee_agent_kernel_ops *agent_instance,
+ struct tc_ns_client_context *context, unsigned int dev_file_id)
+{
+ (void)agent_instance;
+ (void)context;
+ tlogd("check free lock or not, dev_id=%d\n", dev_file_id);
+ if (lock_info.lock_need_free && (lock_info.dev_id == dev_file_id)) {
+ tloge("CA crash, need to free lock\n");
+ process_rpmb_unlock(SEC_RPMB_ABNORMAL);
+ }
+ return 0;
+}
+
+static struct tee_agent_kernel_ops rpmb_agent_ops = {
+ .agent_name = "rpmb",
+ .agent_id = TEE_RPMB_AGENT_ID,
+ .tee_agent_init = NULL,
+ .tee_agent_work = rpmb_agent_work,
+ .tee_agent_exit = rpmb_agent_exit,
+ .tee_agent_crash_work = rpmb_agent_crash_work,
+ .agent_buff_size = 8 * PAGE_SIZE,
+ .list = LIST_HEAD_INIT(rpmb_agent_ops.list)
+};
+
+int rpmb_agent_register(void)
+{
+ tee_agent_kernel_register(&rpmb_agent_ops);
+ return 0;
+}
+EXPORT_SYMBOL(rpmb_agent_register);
\ No newline at end of file
diff --git a/tzdriver/agent_rpmb/core/agent_rpmb.h b/tzdriver/agent_rpmb/core/agent_rpmb.h
new file mode 100755
index 0000000000000000000000000000000000000000..2cf0ce7f7d362ed609f3a820fb37af5a9aefc776
--- /dev/null
+++ b/tzdriver/agent_rpmb/core/agent_rpmb.h
@@ -0,0 +1,29 @@
+/*
+ * agent_rpmb.h
+ *
+ * rpmb agent manager function, such as register
+ *
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef AGENT_RPMB_H
+#define AGENT_RPMB_H
+
+#ifdef CONFIG_RPMB_AGENT
+int rpmb_agent_register(void);
+#else
+static inline int rpmb_agent_register(void)
+{
+ return 0;
+}
+#endif
+
+#endif
\ No newline at end of file
diff --git a/tzdriver/agent_rpmb/generic/rpmb_driver.c b/tzdriver/agent_rpmb/generic/rpmb_driver.c
new file mode 100755
index 0000000000000000000000000000000000000000..08617f2f53e105edc50f297ea88b13caf86e9265
--- /dev/null
+++ b/tzdriver/agent_rpmb/generic/rpmb_driver.c
@@ -0,0 +1,26 @@
+/*
+ * rpmb_driver.c
+ *
+ * rpmb driver function, such as ioctl
+ *
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "rpmb_driver.h"
+
+int rpmb_ioctl_cmd(enum func_id id, enum rpmb_op_type operation,
+ struct storage_blk_ioc_rpmb_data *storage_data)
+{
+ if (storage_data == NULL)
+ return -1;
+
+ return vendor_rpmb_ioctl_cmd(id, operation, storage_data);
+}
\ No newline at end of file
diff --git a/tzdriver/agent_rpmb/generic/rpmb_driver.h b/tzdriver/agent_rpmb/generic/rpmb_driver.h
new file mode 100755
index 0000000000000000000000000000000000000000..dff0bac4d3050604371ebdd5b453e1c44a2878e2
--- /dev/null
+++ b/tzdriver/agent_rpmb/generic/rpmb_driver.h
@@ -0,0 +1,65 @@
+/*
+ * rpmb_driver.h
+ *
+ * rpmb driver function, such as ioctl
+ *
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __RPMB_DRIVER_H
+#define __RPMB_DRIVER_H
+
+#include <linux/mutex.h>
+#include <linux/mmc/rpmb.h> /* assumed vendor header for enum func_id and struct storage_blk_ioc_rpmb_data */
+
+#ifdef CONFIG_VENDOR_RPMB
+int vendor_rpmb_ioctl_cmd(enum func_id id, enum rpmb_op_type operation,
+ struct storage_blk_ioc_rpmb_data *storage_data);
+
+static inline void tee_report_rpmb(void)
+{
+ rpmb_dump_io_latency();
+}
+#else
+static inline int vendor_rpmb_ioctl_cmd(enum func_id id, enum rpmb_op_type operation,
+ struct storage_blk_ioc_rpmb_data *storage_data)
+{
+ return 0xFF08;
+}
+
+static inline void tee_report_rpmb(void)
+{
+}
+#endif
+
+#if defined(CONFIG_VENDOR_RPMB) && !defined(CONFIG_RPMB_REQ_LOCK_DISABLE)
+static inline void rpmb_driver_counter_lock(void)
+{
+ mutex_lock(&rpmb_counter_lock);
+}
+
+static inline void rpmb_driver_counter_unlock(void)
+{
+ mutex_unlock(&rpmb_counter_lock);
+}
+#else
+static inline void rpmb_driver_counter_lock(void)
+{
+}
+
+static inline void rpmb_driver_counter_unlock(void)
+{
+}
+#endif
+
+int rpmb_ioctl_cmd(enum func_id id, enum rpmb_op_type operation,
+ struct storage_blk_ioc_rpmb_data *storage_data);
+#endif
\ No newline at end of file
diff --git a/tzdriver/agent_rpmb/mdc/rpmb.h b/tzdriver/agent_rpmb/mdc/rpmb.h
new file mode 100755
index 0000000000000000000000000000000000000000..481705de04f185653d2d924bd55a4ae5550b8dd6
--- /dev/null
+++ b/tzdriver/agent_rpmb/mdc/rpmb.h
@@ -0,0 +1,76 @@
+/*
+ * rpmb.h
+ *
+ * rpmb base data and struct definitions
+ *
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __RPMB_H__
+#define __RPMB_H__
+
+#include <linux/types.h>
+#include <linux/bsg.h> /* for struct sg_io_v4 */
+#include <linux/mutex.h>
+
+#define MAX_CDB_CMD_LENGTH 16
+#define UFS_IOC_MAX_RPMB_CMD 3
+#define STORAGE_IOC_MAX_RPMB_CMD 3
+#define MAX_IOC_RPMB_BYTES (4 * 1024)
+
+enum rpmb_op_type {
+ RPMB_OP_RD = 0,
+ RPMB_OP_WR_DATA = 1,
+ RPMB_OP_WR_CNT = 2
+};
+
+enum func_id {
+ RPMB_FUNC_ID_RESERVED,
+ RPMB_FUNC_ID_SE,
+ RPMB_FUNC_ID_SECURE_OS,
+ RPMB_FUNC_ID_MAX,
+};
+
+enum rpmb_version {
+ RPMB_VER_INVALID = 0,
+ RPMB_VER_UFS_21 = 21,
+ RPMB_VER_UFS_30 = 30,
+ RPMB_VER_MAX = 999
+};
+
+struct storage_blk_ioc_data {
+ unsigned char *buf;
+ u64 buf_bytes;
+ u32 blocks;
+};
+
+struct ufs_blk_ioc_data {
+ struct sg_io_v4 siv;
+ unsigned char *buf;
+ u64 buf_bytes;
+};
+
+struct storage_blk_ioc_rpmb_data {
+ struct storage_blk_ioc_data data[STORAGE_IOC_MAX_RPMB_CMD];
+};
+
+struct ufs_blk_ioc_rpmb_data {
+ struct ufs_blk_ioc_data data[UFS_IOC_MAX_RPMB_CMD];
+ u8 sdb_command[UFS_IOC_MAX_RPMB_CMD][MAX_CDB_CMD_LENGTH];
+};
+
+extern struct mutex rpmb_counter_lock;
+
+extern int vendor_rpmb_ioctl_cmd(
+ enum func_id id,
+ enum rpmb_op_type operation,
+ struct storage_blk_ioc_rpmb_data *storage_data);
+
+#endif /* __RPMB_H__ */
\ No newline at end of file
diff --git a/tzdriver/agent_rpmb/mdc/rpmb_driver.c b/tzdriver/agent_rpmb/mdc/rpmb_driver.c
new file mode 100755
index 0000000000000000000000000000000000000000..198d290fdd2964166010ff75a93f2e524c1b15d8
--- /dev/null
+++ b/tzdriver/agent_rpmb/mdc/rpmb_driver.c
@@ -0,0 +1,46 @@
+/*
+ * rpmb_driver.c
+ *
+ * rpmb driver function, such as ioctl
+ *
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "rpmb_driver.h"
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include "tc_ns_log.h"
+
+typedef int (rpmb_ioctl_func)(enum func_id id, enum rpmb_op_type operation,
+ struct storage_blk_ioc_rpmb_data *storage_data);
+
+int rpmb_ioctl_cmd(enum func_id id, enum rpmb_op_type operation,
+ struct storage_blk_ioc_rpmb_data *storage_data)
+{
+ static rpmb_ioctl_func *rpmb_ioctl = NULL;
+
+ if (storage_data == NULL)
+ return -1;
+
+ if (rpmb_ioctl == NULL) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
+ rpmb_ioctl =
+ (rpmb_ioctl_func *)(uintptr_t)__symbol_get("vendor_rpmb_ioctl_cmd");
+#else
+ rpmb_ioctl =
+ (rpmb_ioctl_func *)(uintptr_t)kallsyms_lookup_name("vendor_rpmb_ioctl_cmd");
+#endif
+ if (rpmb_ioctl == NULL) {
+ tloge("fail to find symbol vendor_rpmb_ioctl_cmd\n");
+ return -1;
+ }
+ }
+ return rpmb_ioctl(id, operation, storage_data);
+}
diff --git a/tzdriver/agent_rpmb/mdc/rpmb_driver.h b/tzdriver/agent_rpmb/mdc/rpmb_driver.h
new file mode 100755
index 0000000000000000000000000000000000000000..44a223165ca5ed7983943bba6b12cad77a6c2926
--- /dev/null
+++ b/tzdriver/agent_rpmb/mdc/rpmb_driver.h
@@ -0,0 +1,34 @@
+/*
+ * rpmb_driver.h
+ *
+ * rpmb driver function, such as ioctl
+ *
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __RPMB_DRIVER_H
+#define __RPMB_DRIVER_H
+
+#include "rpmb.h"
+
+static inline void rpmb_driver_counter_lock(void)
+{
+}
+
+static inline void rpmb_driver_counter_unlock(void)
+{
+}
+
+int rpmb_ioctl_cmd(enum func_id id, enum rpmb_op_type operation,
+ struct storage_blk_ioc_rpmb_data *storage_data);
+
+#endif
\ No newline at end of file
diff --git a/tzdriver/agent_rpmb/mplat/rpmb_driver.c b/tzdriver/agent_rpmb/mplat/rpmb_driver.c
new file mode 100755
index 0000000000000000000000000000000000000000..bba8deba3200dfa6402940a26ab1cfd7e71add2d
--- /dev/null
+++ b/tzdriver/agent_rpmb/mplat/rpmb_driver.c
@@ -0,0 +1,511 @@
+/*
+ * rpmb_driver.c
+ *
+ * rpmb driver function, such as ioctl
+ *
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "rpmb_driver.h"
+#include <securec.h>
+
+#include <linux/mmc/card.h> /* for struct mmc_card */
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_MTK_UFS_SUPPORT
+#include "ufs-mtk.h"
+#endif
+#include <mt-plat/mtk_boot_common.h> /* assumed provider of get_boot_type() and BOOTDEV_* */
+#include "core.h"
+#include "card.h"
+#include "mmc_ops.h"
+#include "mtk_sd.h"
+#include "tc_ns_log.h"
+#include "queue.h"
+
+#define IOC_CMD_0 0
+#define IOC_CMD_1 1
+#define IOC_CMD_2 2
+#define STORAGE_IOC_MAX_RPMB_CMD 3
+#define RPMB_EMMC_CID_SIZE 32
+#define RPMB_CTRL_MAGIC 0x5A5A5A5A
+#define RPMB_REQ 1 /* RPMB request mark */
+#define RPMB_RESP (1 << 1) /* RPMB response mark*/
+#define RPMB_PROGRAM_KEY 0x1 /* Program RPMB Authentication Key */
+#define RPMB_GET_WRITE_COUNTER 0x2 /* Read RPMB write counter */
+#define RPMB_WRITE_DATA 0x3 /* Write data to RPMB partition */
+#define RPMB_READ_DATA 0x4 /* Read data from RPMB partition */
+#define RPMB_RESULT_READ 0x5 /* Read result request (Internal) */
+
+struct emmc_rpmb_blk_data {
+ spinlock_t lock;
+ struct device *parent;
+ struct gendisk *disk;
+ struct mmc_queue queue;
+ struct list_head part;
+ uint32_t flags;
+ uint32_t usage;
+ uint32_t read_only;
+ uint32_t part_type;
+ uint32_t reset_done;
+ uint32_t part_curr; // keep curr partition
+ struct device_attribute force_ro;
+ struct device_attribute power_ro_lock;
+ int32_t area_type;
+};
+
+static int32_t emmc_rpmb_switch(struct mmc_card *card,
+ struct emmc_rpmb_blk_data *md)
+{
+ int32_t ret;
+ struct emmc_rpmb_blk_data *main_md = NULL;
+
+ if (card == NULL)
+ return -1;
+
+ main_md = dev_get_drvdata(&card->dev);
+ if (main_md == NULL)
+ return -1;
+
+ if (main_md->part_curr == md->part_type)
+ return 0;
+
+#if defined(CONFIG_MTK_EMMC_CQ_SUPPORT) || defined(CONFIG_MTK_EMMC_HW_CQ)
+ if (mmc_card_cmdq(card)) {
+ ret = mmc_cmdq_disable(card);
+ if (ret) {
+ tloge("CQ disabled failed!!! ret: 0x%x\n", ret);
+ return ret;
+ }
+ }
+#endif
+
+ if (mmc_card_mmc(card) != 0) {
+ uint8_t cfg = card->ext_csd.part_config;
+
+ cfg &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+ cfg |= md->part_type;
+
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_PART_CONFIG,
+ cfg, card->ext_csd.part_time);
+ if (ret)
+ return ret;
+
+ card->ext_csd.part_config = cfg;
+ }
+
+#if defined(CONFIG_MTK_EMMC_CQ_SUPPORT) || defined(CONFIG_MTK_EMMC_HW_CQ)
+ /* enable cmdq at user partition */
+ if (!mmc_card_cmdq(card) && (md->part_type <= 0)) {
+ ret = mmc_cmdq_enable(card);
+ if (ret)
+ tloge("%s enable CMDQ error %d, so just work without\n",
+ mmc_hostname(card->host), ret);
+ }
+#endif
+
+#if defined(CONFIG_MTK_EMMC_HW_CQ)
+ card->part_curr = md->part_type;
+#endif
+ main_md->part_curr = md->part_type;
+ return 0;
+}
+
+#define RPMB_BLOCK_SIZE 512
+static void set_sbc(__u16 blks, __u16 type, u8 req_type,
+ struct mmc_command *sbc)
+{
+ sbc->opcode = MMC_SET_BLOCK_COUNT;
+ sbc->arg = blks;
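+ /* RPMB write-type requests set bit 31 of the CMD23 argument (Reliable Write) */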
+ if ((req_type == RPMB_REQ && type == RPMB_WRITE_DATA) ||
+ type == RPMB_PROGRAM_KEY)
+ sbc->arg |= 1 << 31;
+ sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
+}
+
+static void rpmb_send_req_cmd(struct mmc_card *card,
+ struct storage_blk_ioc_rpmb_data *storage_data,
+ __u16 blks, __u16 type, struct mmc_request *request)
+{
+ request->cmd->opcode = MMC_WRITE_MULTIPLE_BLOCK;
+ request->data->flags |= MMC_DATA_WRITE;
+ if (type == RPMB_RESULT_READ) {
+ /* this is the step2 for write data cmd and write key cmd */
+ sg_copy_from_buffer(request->data->sg, 1,
+ storage_data->data[IOC_CMD_1].buf, RPMB_BLOCK_SIZE * blks);
+ } else {
+ /* this is step 1 for read data and read counter */
+ sg_copy_from_buffer(request->data->sg, 1,
+ storage_data->data[IOC_CMD_0].buf, RPMB_BLOCK_SIZE * blks);
+ }
+ mmc_set_data_timeout(request->data, card);
+ mmc_wait_for_req(card->host, request);
+}
+
+static void resp_get_sg(struct storage_blk_ioc_rpmb_data *storage_data,
+ __u16 blks, __u16 type, struct scatterlist *sg)
+{
+ bool read_type = (type == RPMB_READ_DATA) ||
+ (type == RPMB_GET_WRITE_COUNTER);
+ bool write_type = (type == RPMB_WRITE_DATA) ||
+ (type == RPMB_PROGRAM_KEY);
+ if (read_type) {
+ if (storage_data->data[IOC_CMD_1].buf != NULL)
+ sg_copy_to_buffer(sg, 1, storage_data->data[IOC_CMD_1].buf,
+ RPMB_BLOCK_SIZE * blks);
+ else
+ tloge("invalid data1buff, is null\n");
+ } else if (write_type) {
+ if (storage_data->data[IOC_CMD_2].buf != NULL)
+ sg_copy_to_buffer(sg, 1, storage_data->data[IOC_CMD_2].buf,
+ RPMB_BLOCK_SIZE * blks);
+ else
+ tloge("invalid data1buff, is null\n");
+ } else {
+ /* do nothing */
+ tloge("invalid reqtype %d\n", type);
+ }
+}
+
+static void rpmb_send_resp_cmd(struct mmc_card *card,
+ struct storage_blk_ioc_rpmb_data *storage_data,
+ __u16 blks, __u16 type, struct mmc_request *request)
+{
+ request->cmd->opcode = MMC_READ_MULTIPLE_BLOCK;
+ request->data->flags |= MMC_DATA_READ;
+ mmc_set_data_timeout(request->data, card);
+ mmc_wait_for_req(card->host, request);
+ resp_get_sg(storage_data, blks, type, request->data->sg);
+}
+
+static int emmc_rpmb_send_command(struct mmc_card *card,
+ struct storage_blk_ioc_rpmb_data *storage_data,
+ __u16 blks, __u16 type, u8 req_type)
+{
+ struct mmc_command cmd = {0};
+ struct mmc_command sbc = {0};
+ struct mmc_data data = {0};
+ struct mmc_request request = {NULL};
+ struct scatterlist sg;
+ u8 *transfer_buf = NULL;
+
+ if (blks == 0) {
+ tloge("Invalid blks: 0\n");
+ return -EINVAL;
+ }
+
+ set_sbc(blks, type, req_type, &sbc);
+ request.sbc = &sbc;
+
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+ request.cmd = &cmd;
+
+ data.blksz = RPMB_BLOCK_SIZE;
+ data.blocks = blks;
+ data.sg = &sg;
+ data.sg_len = 1;
+ request.data = &data;
+
+ request.stop = NULL;
+
+ transfer_buf = kzalloc(RPMB_BLOCK_SIZE * blks, GFP_KERNEL);
+ if (transfer_buf == NULL)
+ return -ENOMEM;
+
+ sg_init_one(&sg, transfer_buf, RPMB_BLOCK_SIZE * blks);
+
+ if (req_type == RPMB_REQ)
+ rpmb_send_req_cmd(card, storage_data, blks, type, &request);
+ else
+ rpmb_send_resp_cmd(card, storage_data, blks, type, &request);
+
+ kfree(transfer_buf);
+
+ if (cmd.error)
+ return cmd.error;
+ else if (data.error)
+ return data.error;
+ else
+ return 0;
+}
+
+static int emmc_rpmb_cmd_proc(struct mmc_card *card, unsigned short type,
+ struct storage_blk_ioc_rpmb_data *storage_data)
+{
+ int err = 0;
+
+ /* STEP 1: send request to RPMB partition */
+ if (type == RPMB_WRITE_DATA) {
+ err = emmc_rpmb_send_command(card, storage_data,
+ storage_data->data[IOC_CMD_0].blocks, type, RPMB_REQ);
+ } else {
+ /* assemble the frame */
+ storage_data->data[IOC_CMD_0].blocks = storage_data->data[IOC_CMD_1].blocks;
+ err = emmc_rpmb_send_command(card, storage_data,
+ 1, type, RPMB_REQ);
+ }
+ if (err != 0) {
+ tloge("step 1, request failed err-%d\n", err);
+ goto out;
+ }
+
+ /* STEP 2: check write result. Only for WRITE_DATA or Program key */
+ if (type == RPMB_WRITE_DATA || type == RPMB_PROGRAM_KEY) {
+ err = emmc_rpmb_send_command(card, storage_data,
+ 1, RPMB_RESULT_READ, RPMB_REQ);
+ if (err != 0) {
+ tloge("step 2, request result failed err-%d\n", err);
+ goto out;
+ }
+ }
+
+ /* STEP 3: get response from RPMB partition */
+ if (type == RPMB_READ_DATA)
+ err = emmc_rpmb_send_command(card, storage_data,
+ storage_data->data[IOC_CMD_0].blocks, type, RPMB_RESP);
+ else
+ err = emmc_rpmb_send_command(card, storage_data, 1,
+ type, RPMB_RESP);
+ if (err != 0)
+ tloge("step 3, response failed err-%d\n", err);
+
+out:
+ return err;
+}
+
+static int rpmb_operation_emmc(enum rpmb_op_type operation,
+ struct storage_blk_ioc_rpmb_data *storage_data)
+{
+ struct emmc_rpmb_blk_data *part_md = NULL;
+ int ret;
+ struct emmc_rpmb_blk_data *md = NULL;
+
+ struct mmc_card *card = get_card_from_mtk_msdc_host();
+ if (card == NULL)
+ return -1;
+
+ md = dev_get_drvdata(&card->dev);
+ if (md == NULL)
+ return -1;
+
+ list_for_each_entry(part_md, &md->part, part) {
+ if (part_md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
+ break;
+ }
+
+ if (part_md->part_type != EXT_CSD_PART_CONFIG_ACC_RPMB)
+ return -1;
+
+ mmc_get_card(card);
+ ret = emmc_rpmb_switch(card, part_md);
+ if (ret != 0) {
+ tloge("emmc switch to rpmb failed ret-%x\n", ret);
+ goto error;
+ }
+
+ switch (operation) {
+ case RPMB_OP_RD:
+ ret = emmc_rpmb_cmd_proc(card, RPMB_READ_DATA, storage_data);
+ break;
+ case RPMB_OP_WR_CNT:
+ ret = emmc_rpmb_cmd_proc(card, RPMB_GET_WRITE_COUNTER,
+ storage_data);
+ break;
+ case RPMB_OP_WR_DATA:
+ ret = emmc_rpmb_cmd_proc(card, RPMB_WRITE_DATA, storage_data);
+ break;
+ default:
+ tloge("receive an unknown operation %d\n", operation);
+ goto error;
+ }
+ if (ret != 0)
+ tloge("emmc rpmb cmd proc failed ret-%x\n", ret);
+
+error:
+ ret = emmc_rpmb_switch(card, dev_get_drvdata(&card->dev));
+ if (ret != 0)
+ tloge("emmc switch to main failed ret-%x\n", ret);
+
+ mmc_put_card(card);
+
+ return ret;
+}
+
+static int rpmb_req_read_data_ufs(
+ struct storage_blk_ioc_rpmb_data *storage_data)
+{
+ struct rpmb_data data;
+ struct rpmb_dev *rawdev_ufs_rpmb = NULL;
+ int ret;
+ uint16_t blk_cnt;
+
+ rawdev_ufs_rpmb = ufs_mtk_rpmb_get_raw_dev();
+
+ blk_cnt = storage_data->data[1].blocks;
+ tlogd("rpmb read data ufs, blk_cnt: %u\n", blk_cnt);
+
+ data.req_type = RPMB_READ_DATA;
+ data.icmd.nframes = 1;
+ data.icmd.frames = (struct rpmb_frame *)storage_data->data[IOC_CMD_0].buf;
+
+ /*
+ * We need to fill-in block_count by ourselves for UFS case.
+ */
+ data.icmd.frames->block_count = cpu_to_be16(blk_cnt);
+
+ data.ocmd.nframes = blk_cnt;
+ data.ocmd.frames = (struct rpmb_frame *)storage_data->data[IOC_CMD_1].buf;
+
+ ret = rpmb_cmd_req(rawdev_ufs_rpmb, &data);
+ if (ret != 0)
+ tloge("rpmb req ufs error, ret:0x%x\n", ret);
+
+ tlogd("result 0x%x\n", cpu_to_be16(data.ocmd.frames->result));
+
+ return ret;
+}
+
+static int rpmb_req_write_data_ufs(
+ struct storage_blk_ioc_rpmb_data *storage_data)
+{
+ struct rpmb_data data;
+ struct rpmb_dev *rawdev_ufs_rpmb = NULL;
+ int ret;
+ uint16_t blk_cnt;
+
+ rawdev_ufs_rpmb = ufs_mtk_rpmb_get_raw_dev();
+
+ blk_cnt = storage_data->data[IOC_CMD_0].blocks;
+
+ tlogd("blk_cnt: %d\n", blk_cnt);
+
+ /*
+ * Alloc output frame to avoid overwriting input frame
+ * buffer provided by TEE
+ */
+ data.ocmd.frames = kzalloc(sizeof(struct rpmb_frame), 0);
+ if (data.ocmd.frames == NULL)
+ return RPMB_ALLOC_ERROR;
+
+ data.ocmd.nframes = 1;
+
+ data.req_type = RPMB_WRITE_DATA;
+ data.icmd.nframes = blk_cnt;
+ data.icmd.frames = (struct rpmb_frame *)storage_data->data[IOC_CMD_0].buf;
+
+ ret = rpmb_cmd_req(rawdev_ufs_rpmb, &data);
+ if (ret != 0)
+ tloge("rpmb_req write_data_ufs error, ret:0x%x\n", ret);
+
+ /*
+ * Microtrust TEE will check write counter in the first frame,
+ * thus we copy response frame to the first frame.
+ */
+ if (storage_data->data[IOC_CMD_2].buf == NULL) {
+ ret = -1;
+ goto free;
+ }
+
+ ret = memcpy_s(storage_data->data[IOC_CMD_2].buf,
+ storage_data->data[IOC_CMD_2].buf_bytes,
+ data.ocmd.frames, sizeof(*(data.ocmd.frames)));
+ if (ret != EOK)
+ tloge("frames copy fail, ret:0x%x\n", ret);
+
+ tlogd("result 0x%x\n", cpu_to_be16(data.ocmd.frames->result));
+
+free:
+ kfree(data.ocmd.frames);
+
+ return ret;
+}
+
+static int rpmb_req_get_wc_ufs(u8 *key, u32 *wc,
+ struct storage_blk_ioc_rpmb_data *storage_data)
+{
+ struct rpmb_data data;
+ struct rpmb_dev *rawdev_ufs_rpmb = NULL;
+ int ret;
+
+ tlogd("rpmb_req_get_wc_ufs start!!!\n");
+
+ rawdev_ufs_rpmb = ufs_mtk_rpmb_get_raw_dev();
+
+ /*
+ * Initial frame buffers
+ */
+ data.icmd.frames = (struct rpmb_frame *)storage_data->data[IOC_CMD_0].buf;
+ data.ocmd.frames = (struct rpmb_frame *)storage_data->data[IOC_CMD_1].buf;
+
+ /*
+ * Prepare frame contents.
+ * Input frame (in view of device) only needs nonce
+ */
+ data.req_type = RPMB_GET_WRITE_COUNTER;
+ data.icmd.nframes = 1;
+
+ /* Output frame (in view of device) */
+ data.ocmd.nframes = 1;
+ ret = rpmb_cmd_req(rawdev_ufs_rpmb, &data);
+ if (ret != 0)
+ tloge("rpmb_req_get_wc_ufs error!!! ret:0x%x\n", ret);
+
+ tlogd("end\n");
+
+ return ret;
+}
+
+#ifdef CONFIG_MTK_UFS_SUPPORT
+static int rpmb_operation_ufs(enum rpmb_op_type operation,
+ struct storage_blk_ioc_rpmb_data *storage_data)
+{
+ int ret = -1;
+
+ switch (operation) {
+ case RPMB_OP_RD:
+ ret = rpmb_req_read_data_ufs(storage_data);
+ break;
+ case RPMB_OP_WR_CNT:
+ ret = rpmb_req_get_wc_ufs(NULL, NULL, storage_data);
+ break;
+ case RPMB_OP_WR_DATA:
+ ret = rpmb_req_write_data_ufs(storage_data);
+ break;
+ default:
+ tloge("receive an unknown command id %d.\n", operation);
+ break;
+ }
+
+ return ret;
+}
+#endif
+
+int rpmb_ioctl_cmd(enum func_id id, enum rpmb_op_type operation,
+ struct storage_blk_ioc_rpmb_data *storage_data)
+{
+ int ret = 0;
+ int boot_type;
+
+ if (storage_data == NULL)
+ return -1;
+
+ boot_type = get_boot_type();
+ if (boot_type == BOOTDEV_SDMMC)
+ ret = rpmb_operation_emmc(operation, storage_data);
+#ifdef CONFIG_MTK_UFS_SUPPORT
+ else if (boot_type == BOOTDEV_UFS)
+ ret = rpmb_operation_ufs(operation, storage_data);
+#endif
+ return ret;
+}
\ No newline at end of file
diff --git a/tzdriver/agent_rpmb/mplat/rpmb_driver.h b/tzdriver/agent_rpmb/mplat/rpmb_driver.h
new file mode 100755
index 0000000000000000000000000000000000000000..3d86f17839f12923e85bd2ea04fa72b8a792833c
--- /dev/null
+++ b/tzdriver/agent_rpmb/mplat/rpmb_driver.h
@@ -0,0 +1,34 @@
+/*
+ * rpmb_driver.h
+ *
+ * rpmb driver function, such as ioctl
+ *
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __RPMB_DRIVER_H
+#define __RPMB_DRIVER_H
+
+#include <linux/rpmb.h> /* assumed MTK header providing enum func_id, struct storage_blk_ioc_rpmb_data and rpmb_cmd_req() */
+
+static inline void rpmb_driver_counter_lock(void)
+{
+}
+
+static inline void rpmb_driver_counter_unlock(void)
+{
+}
+
+int rpmb_ioctl_cmd(enum func_id id, enum rpmb_op_type operation,
+ struct storage_blk_ioc_rpmb_data *storage_data);
+
+#endif
\ No newline at end of file
diff --git a/tzdriver/apply_tzdriver.sh b/tzdriver/apply_tzdriver.sh
new file mode 100755
index 0000000000000000000000000000000000000000..bf5ec7a35f8ac5e24c0abe79deff4b2c2d5f40dc
--- /dev/null
+++ b/tzdriver/apply_tzdriver.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2022 Huawei Device Co., Ltd.
+#
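+# Usage: apply_tzdriver.sh <OHOS_SOURCE_ROOT> <KERNEL_BUILD_ROOT> <PRODUCT_NAME> <KERNEL_VERSION>
+# Links the tzdriver sources into the kernel build tree at drivers/tzdriver.
+#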
+
+set -e
+
+OHOS_SOURCE_ROOT=$1
+KERNEL_BUILD_ROOT=$2
+PRODUCT_NAME=$3
+KERNEL_VERSION=$4
+TZDRIVER_SOURCE_ROOT=$OHOS_SOURCE_ROOT/kernel/linux/common_modules/tzdriver
+
+function main()
+{
+ pushd .
+
+ if [ ! -d "$KERNEL_BUILD_ROOT/drivers/tzdriver" ]; then
+ mkdir $KERNEL_BUILD_ROOT/drivers/tzdriver
+ fi
+
+ cd $KERNEL_BUILD_ROOT/drivers/tzdriver
+ ln -s -f $(realpath --relative-to=$KERNEL_BUILD_ROOT/drivers/tzdriver/ $TZDRIVER_SOURCE_ROOT)/* ./
+
+ popd
+}
+
+main
diff --git a/tzdriver/auth/Kconfig b/tzdriver/auth/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..6ab72c04c832fd0b9abe54ec38f4f8616f8e721c
--- /dev/null
+++ b/tzdriver/auth/Kconfig
@@ -0,0 +1,28 @@
+# Auth Configuration
+config CLIENT_AUTH
+ bool "Client Application Hash Auth"
+ default n
+ depends on TZDRIVER
+ help
+ TEEOS CA code hash auth
+
+config ANDROID_HIDL
+ bool "Android Hidl Adapt"
+ default n
+ depends on CLIENT_AUTH
+ help
+ TEEOS hidl proc auth
+
+config CADAEMON_AUTH
+ bool "Teec Daemon Path Hash Auth"
+ default n
+ depends on TZDRIVER
+ help
+ TEEOS TEECD path hash auth
+
+config TZDRIVER_OHOS
+ bool "Is in OH"
+ default n
+ depends on TZDRIVER
+ help
+ OH Cadaemon uid
diff --git a/tzdriver/auth/Makefile b/tzdriver/auth/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..0e68fe89777f932e4607f2d4f86379c428fdb696
--- /dev/null
+++ b/tzdriver/auth/Makefile
@@ -0,0 +1,30 @@
+KERNEL_DIR := $(srctree)
+
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/core
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/tlogger
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/../../../../third_party/bounds_checking_function/include
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/include
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/../../../../base/security/selinux/include
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/../../../../base/security/selinux
+
+EXTRA_CFLAGS += -DSELINUX_CA_HIDL_LABEL=\"u:r:hal_libteec_default:s0\"
+EXTRA_CFLAGS += -DSELINUX_TEECD_LABEL=\"u:r:tee:s0\"
+ifneq ($(CONFIG_TZDRIVER_OHOS),y)
+EXTRA_CFLAGS += -DCONFIG_SELINUX_AUTH_ENABLE
+endif
+ifeq ($(CONFIG_CADAEMON_AUTH),y)
+EXTRA_CFLAGS += -DCADAEMON_PATH_UID_AUTH_CTX=\"/system/bin/sa_main:6668\"
+EXTRA_CFLAGS += -DSELINUX_CADAEMON_LABEL=NULL
+endif
+ifeq ($(CONFIG_TZDRIVER_OHOS),y)
+EXTRA_CFLAGS += -DTEECD_PATH_UID_AUTH_CTX=\"/vendor/bin/teecd:6668\"
+else
+EXTRA_CFLAGS += -DTEECD_PATH_UID_AUTH_CTX=\"/vendor/bin/teecd:0\"
+endif
+
+obj-$(CONFIG_CLIENT_AUTH) += client_hash_auth.o
+
+ifeq ($(findstring y, $(CONFIG_TEECD_AUTH) $(CONFIG_CLIENT_AUTH)), y)
+ obj-y += auth_base_impl.o
+endif
diff --git a/tzdriver/auth/auth_base_impl.c b/tzdriver/auth/auth_base_impl.c
new file mode 100644
index 0000000000000000000000000000000000000000..f70a857c904ace44078c0042627f713255c484da
--- /dev/null
+++ b/tzdriver/auth/auth_base_impl.c
@@ -0,0 +1,426 @@
+/*
+ * auth_base_impl.c
+ *
+ * function for base hash operation
+ *
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "auth_base_impl.h"
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/crypto.h>
+
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/uaccess.h>
+#include <crypto/hash.h>
+#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE)
+#include <linux/sched/mm.h>
+#endif
+#if defined (CONFIG_SELINUX_AUTH_ENABLE) && defined (CONFIG_SECURITY_SELINUX)
+#include <linux/security.h>
+#endif
+#include <securec.h>
+#include "tc_ns_log.h"
+#include "tc_ns_client.h"
+#include "agent.h" /* for get_proc_dpath */
+#include "ko_adapt.h"
+
+/* for crypto */
+struct crypto_shash *g_shash_handle;
+bool g_shash_handle_state = false;
+struct mutex g_shash_handle_lock;
+
+void init_crypto_hash_lock(void)
+{
+ mutex_init(&g_shash_handle_lock);
+}
+
+void mutex_crypto_hash_lock(void)
+{
+ mutex_lock(&g_shash_handle_lock);
+}
+
+void mutex_crypto_hash_unlock(void)
+{
+ mutex_unlock(&g_shash_handle_lock);
+}
+
+/* begin: prepare crypto context */
+struct crypto_shash *get_shash_handle(void)
+{
+ return g_shash_handle;
+}
+
+void free_shash_handle(void)
+{
+ if (g_shash_handle) {
+ crypto_free_shash(g_shash_handle);
+ g_shash_handle_state = false;
+ g_shash_handle = NULL;
+ }
+}
+
+int tee_init_shash_handle(char *hash_type)
+{
+ long rc;
+
+ if (!hash_type) {
+ tloge("tee init crypto: error input parameter\n");
+ return -EFAULT;
+ }
+
+ mutex_crypto_hash_lock();
+ if (g_shash_handle_state) {
+ mutex_crypto_hash_unlock();
+ return 0;
+ }
+
+ g_shash_handle = crypto_alloc_shash(hash_type, 0, 0);
+ if (IS_ERR_OR_NULL(g_shash_handle)) {
+ rc = PTR_ERR(g_shash_handle);
+ tloge("Can not allocate %s reason: %ld\n", hash_type, rc);
+ mutex_crypto_hash_unlock();
+ return rc;
+ }
+ g_shash_handle_state = true;
+
+ mutex_crypto_hash_unlock();
+ return 0;
+}
+/* end: prepare crypto context */
+
+/* begin: Calculate the SHA256 file digest */
+static int prepare_desc(struct sdesc **desc)
+{
+ size_t size;
+ size_t shash_size;
+
+ shash_size = crypto_shash_descsize(g_shash_handle);
+ size = sizeof((*desc)->shash) + shash_size;
+ if (size < sizeof((*desc)->shash) || size < shash_size) {
+ tloge("size flow\n");
+ return -ENOMEM;
+ }
+
+ *desc = kzalloc(size, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)(*desc))) {
+ tloge("alloc desc failed\n");
+ return -ENOMEM;
+ }
+
+ return EOK;
+}
+
+#define PINED_PAGE_NUMBER 1
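+/* pin one page of the target task's code segment; the get_user_pages* helpers changed signature across kernel releases, hence the version ladder below */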
+static int get_proc_user_pages(struct mm_struct *mm, unsigned long start_code,
+ struct page **ptr_page, struct task_struct *cur_struct)
+{
+#if (KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE)
+ (void)cur_struct;
+ return get_user_pages_remote(mm, start_code,
+ (unsigned long)PINED_PAGE_NUMBER, FOLL_FORCE, ptr_page, NULL, NULL);
+#elif (KERNEL_VERSION(4, 9, 0) <= LINUX_VERSION_CODE)
+ return get_user_pages_remote(cur_struct, mm, start_code,
+ (unsigned long)PINED_PAGE_NUMBER, FOLL_FORCE, ptr_page, NULL, NULL);
+#elif (KERNEL_VERSION(4, 4, 197) == LINUX_VERSION_CODE)
+ return get_user_pages_locked(cur_struct, mm, start_code,
+ (unsigned long)PINED_PAGE_NUMBER, FOLL_FORCE, ptr_page, NULL);
+#else
+ return get_user_pages_locked(cur_struct, mm, start_code,
+ (unsigned long)PINED_PAGE_NUMBER, 0, 1, ptr_page, NULL);
+#endif
+}
+
+static int update_task_hash(struct mm_struct *mm,
+ struct task_struct *cur_struct, struct shash_desc *shash)
+{
+ int rc = -1;
+ unsigned long in_size;
+ struct page *ptr_page = NULL;
+ void *ptr_base = NULL;
+
+ unsigned long start_code = mm->start_code;
+ unsigned long end_code = mm->end_code;
+ unsigned long code_size = end_code - start_code;
+ if (code_size == 0) {
+ tloge("bad code size\n");
+ return -EINVAL;
+ }
+
+ while (start_code < end_code) {
+ /* Get a handle of the page we want to read */
+ rc = get_proc_user_pages(mm, start_code, &ptr_page, cur_struct);
+ if (rc != PINED_PAGE_NUMBER) {
+ tloge("get user pages error[0x%x]\n", rc);
+ rc = -EFAULT;
+ break;
+ }
+
+ ptr_base = kmap_atomic(ptr_page);
+ if (!ptr_base) {
+ rc = -EFAULT;
+ put_page(ptr_page);
+ break;
+ }
+
+ in_size = (code_size > PAGE_SIZE) ? PAGE_SIZE : code_size;
+ rc = crypto_shash_update(shash, ptr_base, in_size);
+ if (rc) {
+ kunmap_atomic(ptr_base);
+ put_page(ptr_page);
+ break;
+ }
+
+ kunmap_atomic(ptr_base);
+ put_page(ptr_page);
+ start_code += in_size;
+ code_size = end_code - start_code;
+ }
+ return rc;
+}
+
+int calc_task_hash(unsigned char *digest, uint32_t dig_len,
+ struct task_struct *cur_struct, uint32_t pub_key_len)
+{
+ struct mm_struct *mm = NULL;
+ struct sdesc *desc = NULL;
+ bool check_value = false;
+ int rc;
+
+ check_value = (!cur_struct || !digest ||
+ dig_len != SHA256_DIGEST_LENTH);
+ if (check_value) {
+ tloge("tee hash: input param is error\n");
+ return -EFAULT;
+ }
+
+ mm = get_task_mm(cur_struct);
+ if (!mm) {
+ if (memset_s(digest, dig_len, 0, MAX_SHA_256_SZ))
+ return -EFAULT;
+ tloge("kernel proc need not check\n");
+ return EOK;
+ }
+
+ if (pub_key_len != sizeof(uint32_t)) {
+ tloge("apk need not check\n");
+ mmput(mm);
+ return EOK;
+ }
+
+ if (prepare_desc(&desc) != EOK) {
+ mmput(mm);
+ tloge("prepare desc failed\n");
+ return -ENOMEM;
+ }
+
+ desc->shash.tfm = g_shash_handle;
+ if (crypto_shash_init(&desc->shash)) {
+ tloge("shash init failed\n");
+ rc = -ENOMEM;
+ goto free_res;
+ }
+
+ down_read(&mm_sem_lock(mm));
+ if (update_task_hash(mm, cur_struct, &desc->shash)) {
+ up_read(&mm_sem_lock(mm));
+ rc = -ENOMEM;
+ goto free_res;
+ }
+ up_read(&mm_sem_lock(mm));
+
+ rc = crypto_shash_final(&desc->shash, digest);
+free_res:
+ mmput(mm);
+ kfree(desc);
+ return rc;
+}
+/* end: Calculate the SHA256 file digest */
+
+#if defined(CONFIG_SELINUX_AUTH_ENABLE) && defined (CONFIG_SECURITY_SELINUX)
+static int check_proc_selinux_access(const char *s_ctx)
+{
+ if (s_ctx == NULL) {
+ tloge("bad params\n");
+ return CHECK_ACCESS_FAIL;
+ }
+
+ int rc;
+ u32 sid;
+ u32 tid;
+ u32 s_ctx_len = strnlen(s_ctx, MAX_SCTX_LEN);
+ if (s_ctx_len == 0 || s_ctx_len >= MAX_SCTX_LEN) {
+ tloge("invalid selinux ctx\n");
+ return CHECK_ACCESS_FAIL;
+ }
+
+ security_task_getsecid(current, &sid);
+ rc = security_secctx_to_secid(s_ctx, s_ctx_len, &tid);
+ if (rc != 0) {
+ tloge("secctx to sid failed, rc %d", rc);
+ return CHECK_ACCESS_FAIL;
+ }
+ if (sid != tid) {
+ tloge("check selinux label failed\n");
+ return CHECK_ACCESS_FAIL;
+ }
+
+ return EOK;
+}
+#else
+static int check_proc_selinux_access(const char *s_ctx)
+{
+ (void)s_ctx;
+ return 0;
+}
+#endif
+
+static int get_proc_uid(uid_t *proc_uid)
+{
+#ifdef CONFIG_LIBLINUX
+ if (current->cred == NULL) {
+ tloge("cred is NULL\n");
+ return CHECK_ACCESS_FAIL;
+ }
+ *proc_uid = current->cred->uid.val;
+#else
+ const struct cred *cred = NULL;
+ get_task_struct(current);
+ cred = koadpt_get_task_cred(current);
+ if (cred == NULL) {
+ tloge("cred is NULL\n");
+ put_task_struct(current);
+ return CHECK_ACCESS_FAIL;
+ }
+
+ *proc_uid = cred->uid.val;
+ put_cred(cred);
+ put_task_struct(current);
+#endif
+ return CHECK_ACCESS_SUCC;
+}
+
+static int check_proc_uid_path(const char *auth_ctx)
+{
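+ /* auth_ctx has the form "<executable path>:<uid>", e.g. "/vendor/bin/teecd:6668"; expected values come from the *_AUTH_CTX defines in auth/Makefile */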
+ int ret = 0;
+ char str_path_uid[MAX_PATH_SIZE] = { 0 };
+ char *pro_dpath = NULL;
+ char *k_path = NULL;
+ u32 auth_ctx_len;
+ uid_t proc_uid;
+
+ if (auth_ctx == NULL) {
+ tloge("bad params\n");
+ return CHECK_ACCESS_FAIL;
+ }
+
+ auth_ctx_len = (u32)strnlen(auth_ctx, MAX_PATH_SIZE);
+ if (auth_ctx_len == 0 || auth_ctx_len >= MAX_PATH_SIZE) {
+ tloge("invalid uid path\n");
+ return CHECK_ACCESS_FAIL;
+ }
+
+ k_path = kmalloc(MAX_PATH_SIZE, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)k_path)) {
+ tloge("path kmalloc fail\n");
+ return CHECK_ACCESS_FAIL;
+ }
+
+ pro_dpath = get_proc_dpath(k_path, MAX_PATH_SIZE);
+ if (IS_ERR_OR_NULL(pro_dpath)) {
+ kfree(k_path);
+ return CHECK_ACCESS_FAIL;
+ }
+
+ ret = get_proc_uid(&proc_uid);
+ if (ret != CHECK_ACCESS_SUCC) {
+ tloge("get proc uid failed\n");
+ goto clean;
+ }
+
+ if (snprintf_s(str_path_uid, MAX_PATH_SIZE, MAX_PATH_SIZE - 1, "%s:%u",
+ pro_dpath, (unsigned int)proc_uid) < 0) {
+ tloge("snprintf_s path uid failed, ret %d\n", ret);
+ ret = CHECK_ACCESS_FAIL;
+ goto clean;
+ }
+
+ if (strnlen(str_path_uid, MAX_PATH_SIZE) != auth_ctx_len || strncmp(str_path_uid, auth_ctx, auth_ctx_len) != 0)
+ ret = ENTER_BYPASS_CHANNEL;
+ else
+ ret = CHECK_ACCESS_SUCC;
+
+clean:
+ kfree(k_path);
+ return ret;
+}
+
+#ifdef CONFIG_CADAEMON_AUTH
+int check_cadaemon_auth(void)
+{
+ int ret = check_proc_uid_path(CADAEMON_PATH_UID_AUTH_CTX);
+ if (ret != 0) {
+ tloge("check cadaemon path failed, ret %d\n", ret);
+ return ret;
+ }
+ ret = check_proc_selinux_access(SELINUX_CADAEMON_LABEL);
+ if (ret != 0) {
+ tloge("check cadaemon selinux label failed!, ret %d\n", ret);
+ return -EACCES;
+ }
+ return 0;
+}
+#endif
+
+int check_hidl_auth(void)
+{
+ int ret = check_proc_uid_path(CA_HIDL_PATH_UID_AUTH_CTX);
+ if (ret != CHECK_ACCESS_SUCC)
+ return ret;
+
+#if defined(CONFIG_SELINUX_AUTH_ENABLE) && defined (CONFIG_SECURITY_SELINUX)
+ ret = check_proc_selinux_access(SELINUX_CA_HIDL_LABEL);
+ if (ret != EOK) {
+ tloge("check hidl selinux label failed, ret %d\n", ret);
+ return CHECK_SECLABEL_FAIL;
+ }
+#endif
+
+ return CHECK_ACCESS_SUCC;
+}
+
+#ifdef CONFIG_TEECD_AUTH
+int check_teecd_auth(void)
+{
+ int ret = check_proc_uid_path(TEECD_PATH_UID_AUTH_CTX);
+ if (ret != 0) {
+ tloge("check teecd path failed, ret %d\n", ret);
+ return ret;
+ }
+
+#if defined(CONFIG_SELINUX_AUTH_ENABLE) && defined (CONFIG_SECURITY_SELINUX)
+ ret = check_proc_selinux_access(SELINUX_TEECD_LABEL);
+ if (ret != 0) {
+ tloge("check teecd selinux label failed!, ret %d\n", ret);
+ return -EACCES;
+ }
+#endif
+ return CHECK_ACCESS_SUCC;
+}
+#endif
\ No newline at end of file
diff --git a/tzdriver/auth/auth_base_impl.h b/tzdriver/auth/auth_base_impl.h
new file mode 100644
index 0000000000000000000000000000000000000000..8fbd7f44bf76a634ee66ee411997d9101f0140e2
--- /dev/null
+++ b/tzdriver/auth/auth_base_impl.h
@@ -0,0 +1,102 @@
+/*
+ * auth_base_impl.h
+ *
+ * function definition for base hash operation
+ *
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef AUTH_BASE_IMPL_H
+#define AUTH_BASE_IMPL_H
+
+#ifndef SELINUX_CA_HIDL_LABEL
+#define SELINUX_CA_HIDL_LABEL ""
+#endif
+
+#ifndef SELINUX_TEECD_LABEL
+#define SELINUX_TEECD_LABEL ""
+#endif
+
+#ifndef CA_HIDL_PATH_UID_AUTH_CTX
+#define CA_HIDL_PATH_UID_AUTH_CTX ""
+#endif
+
+#ifndef TEECD_PATH_UID_AUTH_CTX
+#define TEECD_PATH_UID_AUTH_CTX ""
+#endif
+
+#ifndef CADAEMON_PATH_UID_AUTH_CTX
+#define CADAEMON_PATH_UID_AUTH_CTX ""
+#endif
+
+#if ((defined CONFIG_CLIENT_AUTH) || (defined CONFIG_TEECD_AUTH))
+#include <linux/version.h>
+#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE)
+#include <linux/sched/mm.h>
+#endif
+#include <linux/mutex.h>
+#include <crypto/hash.h>
+
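+/*
+ * Result codes for the CA auth checks; ENTER_BYPASS_CHANNEL means the
+ * caller is not the privileged service and takes the normal CA path.
+ */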
+#define CHECK_ACCESS_SUCC 0
+#define CHECK_ACCESS_FAIL 0xffff
+#define CHECK_PATH_HASH_FAIL 0xff01
+#define CHECK_SECLABEL_FAIL 0xff02
+#define CHECK_CODE_HASH_FAIL 0xff03
+#define ENTER_BYPASS_CHANNEL 0xff04
+
+#define BUF_MAX_SIZE 1024
+#define MAX_PATH_SIZE 512
+#define SHA256_DIGEST_LENTH 32
+#define MAX_SCTX_LEN 128
+
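+/* holds a shash descriptor together with its flexible-length context */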
+struct sdesc {
+ struct shash_desc shash;
+ char ctx[];
+};
+
+int calc_path_hash(bool is_hidl_srvc, unsigned char *digest, unsigned int dig_len);
+int calc_task_hash(unsigned char *digest, uint32_t dig_len,
+ struct task_struct *cur_struct, uint32_t pub_key_len);
+
+int tee_init_shash_handle(char *hash_type);
+void free_shash_handle(void);
+struct crypto_shash *get_shash_handle(void);
+
+void init_crypto_hash_lock(void);
+void mutex_crypto_hash_lock(void);
+void mutex_crypto_hash_unlock(void);
+int check_hidl_auth(void);
+int check_teecd_auth(void);
+#else
+
+static inline void free_shash_handle(void)
+{
+ return;
+}
+
+static inline void init_crypto_hash_lock(void)
+{
+ return;
+}
+
+static inline int check_teecd_auth(void)
+{
+ return 0;
+}
+
+#endif /* CLIENT_AUTH || TEECD_AUTH */
+
+#ifdef CONFIG_CADAEMON_AUTH
+int check_cadaemon_auth(void);
+#endif
+
+#endif
+
diff --git a/tzdriver/auth/client_hash_auth.c b/tzdriver/auth/client_hash_auth.c
new file mode 100644
index 0000000000000000000000000000000000000000..819a3892e5b422062eed059c62ce9d0c7ed07c66
--- /dev/null
+++ b/tzdriver/auth/client_hash_auth.c
@@ -0,0 +1,595 @@
+/*
+ * client_hash_auth.c
+ *
+ * function for CA code hash auth
+ *
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "client_hash_auth.h"
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/version.h>
+#ifdef CONFIG_AUTH_SUPPORT_UNAME
+#include <linux/fs.h>
+#endif
+#ifdef CONFIG_CLIENT_AUTH
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/task.h>
+#include <linux/pid.h>
+#include <crypto/hash.h>
+#endif
+#ifdef CONFIG_AUTH_HASH
+#include <crypto/hash.h>
+#endif
+#include <securec.h>
+
+#include "tc_ns_log.h"
+#include "auth_base_impl.h"
+
+#ifdef CONFIG_AUTH_HASH
+#define SHA256_DIGEST_LENGTH 32
+#define FIXED_PKG_NAME_LENGTH 256
+struct sdesc_hash {
+ struct shash_desc shash;
+ char ctx[];
+};
+#endif
+
+#if defined (CONFIG_ANDROID_HIDL) || defined (CONFIG_MDC_HAL_AUTH)
+
+static int check_proc_state(bool is_hidl, struct task_struct **hidl_struct,
+ const struct tc_ns_client_context *context)
+{
+ bool check_value = false;
+
+ if (is_hidl) {
+ rcu_read_lock();
+ *hidl_struct = pid_task(find_vpid(context->calling_pid),
+ PIDTYPE_PID);
+ check_value = !*hidl_struct ||
+ (*hidl_struct)->state == TASK_DEAD;
+ if (check_value) {
+ tloge("task is dead\n");
+ rcu_read_unlock();
+ return -EFAULT;
+ }
+
+ get_task_struct(*hidl_struct);
+ rcu_read_unlock();
+ return EOK;
+ }
+
+ return EOK;
+}
+
+static int get_hidl_client_task(bool is_hidl_task, struct tc_ns_client_context *context,
+ struct task_struct **cur_struct)
+{
+ int ret;
+ struct task_struct *hidl_struct = NULL;
+
+ ret = check_proc_state(is_hidl_task, &hidl_struct, context);
+ if (ret)
+ return ret;
+
+ if (hidl_struct)
+ *cur_struct = hidl_struct;
+ else
+ *cur_struct = current;
+
+ return EOK;
+}
+
+#endif
+
+#ifdef CONFIG_CLIENT_AUTH
+#define LIBTEEC_CODE_PAGE_SIZE 8
+#define DEFAULT_TEXT_OFF 0
+#define LIBTEEC_NAME_MAX_LEN 50
+
+const char g_libso[KIND_OF_SO][LIBTEEC_NAME_MAX_LEN] = {
+ "libteec_vendor.so",
+#ifndef CONFIG_CMS_CAHASH_AUTH
+#ifndef CONFIG_CADAEMON_AUTH
+ "libteec.huawei.so",
+#else
+ "libteec.so",
+#endif
+#endif
+};
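+/* libteec variants whose executable pages are hashed for CA auth */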
+
+static int find_lib_code_area(struct mm_struct *mm,
+ struct vm_area_struct **lib_code_area, int so_index)
+{
+ struct vm_area_struct *vma = NULL;
+ bool is_valid_vma = false;
+ bool is_so_exists = false;
+ bool param_check = (!mm || !mm->mmap ||
+ !lib_code_area || so_index >= KIND_OF_SO);
+
+ if (param_check) {
+ tloge("illegal input params\n");
+ return -EFAULT;
+ }
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ is_valid_vma = (vma->vm_file &&
+ vma->vm_file->f_path.dentry &&
+ vma->vm_file->f_path.dentry->d_name.name);
+ if (is_valid_vma) {
+ is_so_exists = !strcmp(g_libso[so_index],
+ vma->vm_file->f_path.dentry->d_name.name);
+ if (is_so_exists && (vma->vm_flags & VM_EXEC)) {
+ *lib_code_area = vma;
+ tlogd("so name is %s\n",
+ vma->vm_file->f_path.dentry->d_name.name);
+ return EOK;
+ }
+ }
+ }
+ return -EFAULT;
+}
+
+struct get_code_info {
+ unsigned long code_start;
+ unsigned long code_end;
+ unsigned long code_size;
+};
+static int update_so_hash(struct mm_struct *mm,
+ struct task_struct *cur_struct, struct shash_desc *shash, int so_index)
+{
+ struct vm_area_struct *vma = NULL;
+ int rc = -EFAULT;
+ struct get_code_info code_info;
+ unsigned long in_size;
+ struct page *ptr_page = NULL;
+ void *ptr_base = NULL;
+
+ if (find_lib_code_area(mm, &vma, so_index)) {
+ tlogd("get lib code vma area failed\n");
+ return -EFAULT;
+ }
+
+ code_info.code_start = vma->vm_start;
+ code_info.code_end = vma->vm_end;
+ code_info.code_size = code_info.code_end - code_info.code_start;
+
+ while (code_info.code_start < code_info.code_end) {
+ // Get a handle of the page we want to read
+#if (KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE)
+ rc = get_user_pages_remote(mm, code_info.code_start,
+ 1, FOLL_FORCE, &ptr_page, NULL, NULL);
+#else
+ rc = get_user_pages_remote(cur_struct, mm, code_info.code_start,
+ 1, FOLL_FORCE, &ptr_page, NULL, NULL);
+#endif
+ if (rc != 1) {
+ tloge("get user pages locked error[0x%x]\n", rc);
+ rc = -EFAULT;
+ break;
+ }
+
+ ptr_base = kmap_atomic(ptr_page);
+ if (!ptr_base) {
+ rc = -EFAULT;
+ put_page(ptr_page);
+ break;
+ }
+ in_size = (code_info.code_size > PAGE_SIZE) ? PAGE_SIZE : code_info.code_size;
+
+ rc = crypto_shash_update(shash, ptr_base, in_size);
+ if (rc) {
+ kunmap_atomic(ptr_base);
+ put_page(ptr_page);
+ break;
+ }
+ kunmap_atomic(ptr_base);
+ put_page(ptr_page);
+ code_info.code_start += in_size;
+ code_info.code_size = code_info.code_end - code_info.code_start;
+ }
+ return rc;
+}
+
+/* Calculate the SHA256 library digest */
+static int calc_task_so_hash(unsigned char *digest, uint32_t dig_len,
+ struct task_struct *cur_struct, int so_index)
+{
+ struct mm_struct *mm = NULL;
+ int rc;
+ size_t size;
+ size_t shash_size;
+ struct sdesc *desc = NULL;
+
+ if (!digest || dig_len != SHA256_DIGEST_LENTH) {
+ tloge("tee hash: digest is NULL\n");
+ return -EFAULT;
+ }
+
+ shash_size = crypto_shash_descsize(get_shash_handle());
+ size = sizeof(desc->shash) + shash_size;
+ if (size < sizeof(desc->shash) || size < shash_size) {
+ tloge("size overflow\n");
+ return -ENOMEM;
+ }
+
+ desc = kzalloc(size, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)desc)) {
+ tloge("alloc desc failed\n");
+ return -ENOMEM;
+ }
+
+ desc->shash.tfm = get_shash_handle();
+ if (crypto_shash_init(&desc->shash)) {
+ kfree(desc);
+ return -EFAULT;
+ }
+
+ mm = get_task_mm(cur_struct);
+ if (!mm) {
+ tloge("so does not have mm struct\n");
+ if (memset_s(digest, MAX_SHA_256_SZ, 0, dig_len))
+ tloge("memset digest failed\n");
+ kfree(desc);
+ return -EFAULT;
+ }
+
+ down_read(&mm_sem_lock(mm));
+ rc = update_so_hash(mm, cur_struct, &desc->shash, so_index);
+ up_read(&mm_sem_lock(mm));
+ mmput(mm);
+ if (!rc)
+ rc = crypto_shash_final(&desc->shash, digest);
+ kfree(desc);
+ return rc;
+}
+
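+/*
+ * session->auth_hash_buf layout: NUM_OF_SO slots of MAX_SHA_256_SZ bytes
+ * for the so hashes, followed by one more slot for the CA binary hash.
+ */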
+static int proc_calc_hash(uint8_t kernel_api, struct tc_ns_session *session,
+ struct task_struct *cur_struct, uint32_t pub_key_len)
+{
+ int rc, i;
+ int so_found = 0;
+
+ mutex_crypto_hash_lock();
+ if (kernel_api == TEE_REQ_FROM_USER_MODE) {
+ for (i = 0; so_found < NUM_OF_SO && i < KIND_OF_SO; i++) {
+ rc = calc_task_so_hash(session->auth_hash_buf + MAX_SHA_256_SZ * so_found,
+ (uint32_t)SHA256_DIGEST_LENTH, cur_struct, i);
+ if (!rc)
+ so_found++;
+ }
+ if (so_found != NUM_OF_SO)
+ tlogd("so library found: %d\n", so_found);
+ } else {
+ tlogd("request from kernel\n");
+ }
+
+#ifdef CONFIG_ASAN_DEBUG
+ tloge("so auth disabled for ASAN debug\n");
+ uint32_t so_hash_len = MAX_SHA_256_SZ * NUM_OF_SO;
+ errno_t sret = memset_s(session->auth_hash_buf, so_hash_len, 0, so_hash_len);
+ if (sret) {
+ mutex_crypto_hash_unlock();
+ tloge("memset so hash failed\n");
+ return -EFAULT;
+ }
+#endif
+
+ rc = calc_task_hash(session->auth_hash_buf + MAX_SHA_256_SZ * NUM_OF_SO,
+ (uint32_t)SHA256_DIGEST_LENTH, cur_struct, pub_key_len);
+ if (rc) {
+ mutex_crypto_hash_unlock();
+ tloge("tee calc ca hash failed\n");
+ return -EFAULT;
+ }
+ mutex_crypto_hash_unlock();
+ return EOK;
+}
+
+int calc_client_auth_hash(struct tc_ns_dev_file *dev_file,
+ struct tc_ns_client_context *context, struct tc_ns_session *session)
+{
+ int ret;
+ struct task_struct *cur_struct = NULL;
+ bool check = false;
+#if defined(CONFIG_ANDROID_HIDL) || defined(CONFIG_MDC_HAL_AUTH)
+ bool is_hidl_srvc = false;
+#endif
+ check = (!dev_file || !context || !session);
+ if (check) {
+ tloge("bad params\n");
+ return -EFAULT;
+ }
+
+ if (tee_init_shash_handle("sha256")) {
+ tloge("init code hash error\n");
+ return -EFAULT;
+ }
+
+#if defined(CONFIG_ANDROID_HIDL) || defined(CONFIG_MDC_HAL_AUTH)
+ if (!current->mm) {
+ tlogd("kernel thread need not check\n");
+ ret = ENTER_BYPASS_CHANNEL;
+ } else {
+#ifdef CONFIG_CADAEMON_AUTH
+ /* for OH */
+ ret = check_cadaemon_auth();
+#else
+ /* for HO and MDC/DC */
+ ret = check_hidl_auth();
+#endif
+ }
+ if (ret != CHECK_ACCESS_SUCC) {
+ if (ret != ENTER_BYPASS_CHANNEL) {
+ tloge("hidl service may be exploited ret 0x%x\n", ret);
+ return -EACCES;
+ }
+ /* native/kernel CA tasks take this branch */
+ } else {
+ /* android hidl/mdc secmgr (libteec/kms) tasks take this branch */
+ is_hidl_srvc = true;
+ }
+ ret = get_hidl_client_task(is_hidl_srvc, context, &cur_struct);
+ if (ret)
+ return -EFAULT;
+#else
+ cur_struct = current;
+#endif
+
+ ret = proc_calc_hash(dev_file->kernel_api, session, cur_struct, dev_file->pub_key_len);
+#if defined(CONFIG_ANDROID_HIDL) || defined(CONFIG_MDC_HAL_AUTH)
+ if (is_hidl_srvc)
+ put_task_struct(cur_struct);
+#endif
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_AUTH_HASH
+#define UID_LEN 16
+static int construct_hashdata(struct tc_ns_dev_file *dev_file,
+ uint8_t *buf, uint32_t buf_len)
+{
+ int ret;
+ ret = memcpy_s(buf, buf_len, dev_file->pkg_name, dev_file->pkg_name_len);
+ if (ret) {
+ tloge("memcpy_s failed\n");
+ goto error;
+ }
+ buf += dev_file->pkg_name_len;
+ buf_len -= dev_file->pkg_name_len;
+ ret = memcpy_s(buf, buf_len, dev_file->pub_key, dev_file->pub_key_len);
+ if (ret) {
+ tloge("memcpy_s failed\n");
+ goto error;
+ }
+ return 0;
+error:
+ return -EFAULT;
+}
+
+static struct sdesc_hash *init_sdesc(struct crypto_shash *alg)
+{
+ struct sdesc_hash *sdesc;
+ size_t size;
+
+ size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
+ sdesc = kmalloc(size, GFP_KERNEL);
+ if (sdesc == NULL)
+ return ERR_PTR(-ENOMEM);
+ sdesc->shash.tfm = alg;
+ return sdesc;
+}
+
+static int calc_hash(struct crypto_shash *alg,
+ const unsigned char *data, unsigned int datalen, unsigned char *digest)
+{
+ struct sdesc_hash *sdesc;
+ int ret;
+
+ sdesc = init_sdesc(alg);
+ if (IS_ERR(sdesc)) {
+ pr_info("can't alloc sdesc\n");
+ return PTR_ERR(sdesc);
+ }
+
+ ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
+ kfree(sdesc);
+ return ret;
+}
+
+static int do_sha256(const unsigned char *data, uint32_t datalen,
+ unsigned char *out_digest, uint8_t digest_len)
+{
+ int ret;
+ struct crypto_shash *alg;
+ const char *hash_alg_name = "sha256";
+ if (digest_len != SHA256_DIGEST_LENGTH) {
+ tloge("error digest_len\n");
+ return -1;
+ }
+
+ alg = crypto_alloc_shash(hash_alg_name, 0, 0);
+ if (IS_ERR_OR_NULL(alg)) {
+ tloge("can't alloc alg %s, PTR_ERR alg is %ld\n", hash_alg_name, PTR_ERR(alg));
+ return alg ? PTR_ERR(alg) : -ENOMEM;
+ }
+ ret = calc_hash(alg, data, datalen, out_digest);
+ if (ret != 0) {
+ tloge("calc hash failed\n");
+ crypto_free_shash(alg);
+ alg = NULL;
+ return -1;
+ }
+ crypto_free_shash(alg);
+ alg = NULL;
+ return 0;
+}
+
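+/*
+ * Store SHA-256(pkg_name || pub_key) back into dev_file->pkg_name; with
+ * CONFIG_AUTH_SUPPORT_UNAME the username part is zero-padded to
+ * FIXED_PKG_NAME_LENGTH bytes before hashing.
+ */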
+int set_login_information_hash(struct tc_ns_dev_file *hash_dev_file)
+{
+ int ret = 0;
+ uint8_t *indata = NULL;
+ if (hash_dev_file == NULL) {
+ tloge("wrong caller info, cal hash stopped\n");
+ return -1;
+ }
+ mutex_lock(&hash_dev_file->cainfo_hash_setup_lock);
+
+ if (!(hash_dev_file->cainfo_hash_setup)) {
+ unsigned char digest[SHA256_DIGEST_LENGTH] = {0};
+ uint8_t digest_len = sizeof(digest);
+
+ uint32_t indata_len;
+#ifdef CONFIG_AUTH_SUPPORT_UNAME
+ /* username using fixed length to cal hash */
+ if (hash_dev_file->pub_key_len >= FIXED_PKG_NAME_LENGTH) {
+ tloge("username is too loog\n");
+ ret = -1;
+ goto error;
+ }
+ indata_len = hash_dev_file->pkg_name_len + FIXED_PKG_NAME_LENGTH;
+#else
+ indata_len = hash_dev_file->pkg_name_len + hash_dev_file->pub_key_len;
+#endif
+ indata = kzalloc(indata_len, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)indata)) {
+ tloge("indata kmalloc fail\n");
+ ret = -1;
+ goto error;
+ }
+
+ ret = construct_hashdata(hash_dev_file, indata, indata_len);
+ if (ret != 0) {
+ tloge("construct hashdata failed\n");
+ goto error;
+ }
+
+ ret = do_sha256((unsigned char *)indata, indata_len, digest, digest_len);
+ if (ret != 0) {
+ tloge("do sha256 failed\n");
+ goto error;
+ }
+
+ ret = memcpy_s(hash_dev_file->pkg_name, MAX_PACKAGE_NAME_LEN, digest, digest_len);
+ if (ret != 0) {
+ tloge("memcpy_s failed\n");
+ goto error;
+ }
+ hash_dev_file->pkg_name_len = SHA256_DIGEST_LENGTH;
+ hash_dev_file->cainfo_hash_setup = true;
+ }
+
+error:
+ if (!ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)indata))
+ kfree(indata);
+
+ mutex_unlock(&hash_dev_file->cainfo_hash_setup_lock);
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_AUTH_SUPPORT_UNAME
+#define PASSWD_FILE "/etc/passwd"
+#define UID_POS 2U
+#define DECIMAL 10
+static int uid_compare(uint32_t uid, const char* uid_str, uint32_t uid_len)
+{
+ uint32_t uid_num = 0;
+ for (uint32_t i = 0; i < uid_len; i++) {
+ bool is_number = uid_str[i] >= '0' && uid_str[i] <= '9';
+ if (!is_number) {
+ tloge("passwd info wrong format: uid missing\n");
+ return -1;
+ }
+ uid_num = DECIMAL * uid_num + (uid_str[i] - '0');
+ }
+ return (uid_num == uid) ? 0 : -1;
+}
+
+/* "username:[encrypted password]:uid:gid:[comments]:home directory:login shell" */
+static int parse_uname(uint32_t uid, char *username, int buffer_len)
+{
+ char *str = username;
+ char *token = strsep(&str, ":");
+ char *temp_name = token; // first token is username, need to check uid
+ int index = 0;
+ while (token != NULL && index < UID_POS) {
+ token = strsep(&str, ":");
+ index++;
+ }
+ if (token == NULL)
+ return -1;
+ if (uid_compare(uid, token, strlen(token)) != 0)
+ return -1;
+ if (strcpy_s(username, buffer_len, temp_name) != EOK)
+ return -1;
+ return (int)strlen(temp_name);
+}
+static int read_line(char *buf, int buf_len, struct file *fp, loff_t *offset)
+{
+ if (offset == NULL) {
+ tloge("offset is null while read file\n");
+ return -1;
+ }
+ ssize_t ret = kernel_read(fp, buf, buf_len, offset);
+ if (ret <= 0) /* error or end of file */
+ return -1;
+ ssize_t i = 0;
+ /* read buf_len, need to find first '\n' */
+ while (i < ret) {
+ if (i >= buf_len)
+ break;
+ if (buf[i] == '\n')
+ break;
+ i++;
+ }
+ /* rewind offset to just past the '\n' so the next call reads the next line */
+ if (i < ret)
+ *offset -= (loff_t)(ret - i - 1);
+ if (i < buf_len)
+ buf[i] = '\0';
+ return 0;
+}
+
+/*
+ * get username by uid;
+ * on linux, user info is stored in the system file "/etc/passwd",
+ * each line represents a user, fields are separated by ':',
+ * formatted as: "username:[encrypted password]:uid:gid:[comments]:home directory:login shell"
+ */
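+/* e.g. the line "root:x:0:0:root:/root:/bin/sh" resolves uid 0 to "root" */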
+int tc_ns_get_uname(uint32_t uid, char *username, int buffer_len, uint32_t *out_len)
+{
+ if (username == NULL || out_len == NULL || buffer_len != FIXED_PKG_NAME_LENGTH) {
+ tloge("params is null\n");
+ return -1;
+ }
+ struct file *f = NULL;
+ loff_t offset = 0;
+ f = filp_open(PASSWD_FILE, O_RDONLY, 0);
+ if (IS_ERR(f)) {
+ tloge("kernel open passwd file failed\n");
+ return -1;
+ }
+ while (read_line(username, buffer_len, f, &offset) == 0) {
+ int ret = parse_uname(uid, username, buffer_len);
+ if (ret >= 0) {
+ *out_len = (uint32_t)ret;
+ filp_close(f, NULL);
+ return 0;
+ }
+ }
+ filp_close(f, NULL);
+ return -1;
+}
+#endif
\ No newline at end of file
diff --git a/tzdriver/auth/client_hash_auth.h b/tzdriver/auth/client_hash_auth.h
new file mode 100644
index 0000000000000000000000000000000000000000..f8b0d724c6dd6f4b86825bf54b0c3a9108c5a95a
--- /dev/null
+++ b/tzdriver/auth/client_hash_auth.h
@@ -0,0 +1,53 @@
+/*
+ * client_hash_auth.h
+ *
+ * function definition for CA code hash auth
+ *
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CLIENT_HASH_CALC_H
+#define CLIENT_HASH_CALC_H
+
+#include "tc_ns_client.h"
+#include "teek_ns_client.h"
+
+#ifdef CONFIG_CLIENT_AUTH
+#include "auth_base_impl.h"
+
+int calc_client_auth_hash(struct tc_ns_dev_file *dev_file,
+ struct tc_ns_client_context *context, struct tc_ns_session *session);
+
+#else
+
+static inline int calc_client_auth_hash(struct tc_ns_dev_file *dev_file,
+ struct tc_ns_client_context *context, struct tc_ns_session *session)
+{
+ (void)dev_file;
+ (void)context;
+ (void)session;
+ return 0;
+}
+
+#endif
+
+#ifdef CONFIG_AUTH_SUPPORT_UNAME
+#define MAX_NAME_LENGTH 256
+
+int tc_ns_get_uname(uint32_t uid, char *username, int buffer_len, uint32_t *out_len);
+#endif
+
+#ifdef CONFIG_AUTH_HASH
+int set_login_information_hash(struct tc_ns_dev_file *hash_dev_file);
+#endif
+
+#endif
diff --git a/tzdriver/core/Kconfig b/tzdriver/core/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..1903c3e2dc8ee7f0d83637d2117614efef831a63
--- /dev/null
+++ b/tzdriver/core/Kconfig
@@ -0,0 +1,70 @@
+# Framework Configuration
+config CPU_AFF_NR
+ int "Default CPU Affinity"
+ default 0
+ depends on TZDRIVER
+ help
+ Default CPU affinity for tzdriver threads.
+
+config DRM_ADAPT
+ bool "Drm Feature Adapt"
+ default n
+ depends on TZDRIVER
+ help
+ Drm Feature Adapt
+
+config TA_AFFINITY
+ bool "TA affinity"
+ default n
+ depends on TZDRIVER
+ help
+ TA CPU affinity bind range; consistent with CONFIG_MAX_NUM_NODES in the TEE.
+
+config TA_AFFINITY_CPU_NUMS
+ int "TA affinity max support cpus"
+ default 8
+ depends on TA_AFFINITY
+ help
+ consistent with CONFIG_MAX_NUM_NODES in TEE
+
+config TEECD_AUTH
+ bool "Teec Daemon Path Hash Auth"
+ default n
+ depends on TZDRIVER
+ help
+ Path hash based authentication of the teecd daemon.
+
+config TEE_AUDIT
+ bool "Audit TA"
+ default n
+ depends on AUTH_ENHANCE
+ help
+ Audit TAs to guard against malicious TAs.
+
+config KERNEL_CLIENT
+ bool "Kernel Client Interface"
+ default n
+ depends on TZDRIVER
+ help
+ Expose the TEE client interface to kernel-space callers.
+
+config BIG_SESSION
+ bool "open more sessions"
+ default n
+ depends on TZDRIVER
+ help
+ TEEOS open more sessions
+
+config FFA_SUPPORT
+ bool "FFA Support Enable"
+ default n
+ depends on TZDRIVER
+ help
+ Communicate with the TEE through the Arm FF-A (Firmware Framework for Arm) ABI instead of raw SMC calls.
+
+config THIRDPARTY_COMPATIBLE
+ bool "Compatible with OPTEE"
+ default n
+ depends on TZDRIVER
+ help
+ Compatibility with third-party OP-TEE based TEE OSes.
diff --git a/tzdriver/core/Makefile b/tzdriver/core/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..6edab430d76f0c20e37d74373fa1cf19e988f134
--- /dev/null
+++ b/tzdriver/core/Makefile
@@ -0,0 +1,32 @@
+KERNEL_DIR := $(srctree)
+
+ifneq ($(TARGET_BUILD_VARIANT), user)
+ ccflags-y += -DDEF_ENG
+endif
+
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/../../../../third_party/bounds_checking_function/include
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/agent_rpmb/core
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/auth
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/tlogger
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/tui
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/ion
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/core
+EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/include
+
+ifeq ($(CONFIG_TZDRIVER_INTERNAL), y)
+ include $(KERNEL_DIR)/drivers/tzdriver/tzdriver_internal/internal.mk
+endif
+
+obj-y += teek_client_api.o
+obj-y += smc_smp.o tc_client_driver.o session_manager.o mailbox_mempool.o teek_app_load.o
+obj-y += agent.o gp_ops.o mem.o cmdmonitor.o tzdebug.o tz_spi_notify.o tz_pm.o tee_compat_check.o
+obj-y += reserved_mempool.o
+obj-y += teek_client_ext.o
+obj-y += shared_mem.o
+
+ifdef CONFIG_FFA_SUPPORT
+obj-y += ffa_abi.o
+else
+obj-y += smc_abi.o
+endif
diff --git a/tzdriver/core/agent.c b/tzdriver/core/agent.c
new file mode 100644
index 0000000000000000000000000000000000000000..57fa5d070e25954b3399505556e77d808aa0fbde
--- /dev/null
+++ b/tzdriver/core/agent.c
@@ -0,0 +1,1368 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: agent manager functions, such as register and send cmd
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "agent.h"
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/atomic.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE)
+#include <linux/sched/mm.h>
+#include <linux/sched/task.h>
+#endif
+#if (KERNEL_VERSION(5, 4, 0) <= LINUX_VERSION_CODE)
+#include <linux/sched/signal.h>
+#else
+#include <linux/signal.h>
+#endif
+#include <securec.h>
+#ifdef CONFIG_MM_VLTMM
+#include
+#endif
+#ifdef CONFIG_MEMORY_VLTMM
+#include
+#endif
+#include "teek_client_constants.h"
+#include "teek_ns_client.h"
+#include "smc_smp.h"
+#include "mem.h"
+#include "tui.h"
+#include "tc_ns_log.h"
+#include "mailbox_mempool.h"
+#include "tc_client_driver.h"
+#include "cmdmonitor.h"
+#include "agent_rpmb.h"
+#include "ko_adapt.h"
+#include "internal_functions.h"
+#include "auth_base_impl.h"
+
+#ifdef CONFIG_CMS_CAHASH_AUTH
+#define HASH_FILE_MAX_SIZE CONFIG_HASH_FILE_SIZE
+#else
+#define HASH_FILE_MAX_SIZE (16 * 1024)
+#endif
+#define AGENT_BUFF_SIZE (4 * 1024)
+#define AGENT_MAX 32
+#define PAGE_ORDER_RATIO 2
+
+static struct list_head g_tee_agent_list;
+
+struct agent_control {
+ spinlock_t lock;
+ struct list_head agent_list;
+};
+
+static struct agent_control g_agent_control;
+
+int __attribute__((weak)) is_allowed_agent_ca(const struct ca_info *ca,
+ bool check_agent_id)
+{
+ (void)ca;
+ (void)check_agent_id;
+
+ return -EFAULT;
+}
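+/* weak default: products can override this hook to allow specific agent CAs */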
+
+static int check_mm_struct(struct mm_struct *mm)
+{
+ if (!mm)
+ return -EINVAL;
+
+ if (!mm->exe_file) {
+ mmput(mm);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_LIBLINUX
+char *get_proc_dpath(char *path, int path_len)
+{
+ int rc;
+ char cmdstring[MAX_PATH_SIZE] = { 0 };
+
+ if (!path || path_len != MAX_PATH_SIZE) {
+ tloge("bad params\n");
+ return NULL;
+ }
+
+ if (memset_s(path, path_len, '\0', MAX_PATH_SIZE) != 0) {
+ tloge("memset error\n");
+ return NULL;
+ }
+
+ rc = sprintf_s(cmdstring, MAX_PATH_SIZE, "/proc/%d/exe", current->tgid);
+ if (rc < 0) {
+ tloge("set path in get proc dpath failed\n");
+ return NULL;
+ }
+
+ if (liblinux_pal_vfs_readlink(cmdstring, path, MAX_PATH_SIZE) == 0) {
+ tloge("get CA realpath in get proc dpath failed\n");
+ return NULL;
+ }
+
+ return path;
+}
+#else
+char *get_proc_dpath(char *path, int path_len)
+{
+ char *dpath = NULL;
+ struct path base_path = {
+ .mnt = NULL,
+ .dentry = NULL
+ };
+ struct mm_struct *mm = NULL;
+ struct file *exe_file = NULL;
+
+ if (!path || path_len != MAX_PATH_SIZE) {
+ tloge("bad params\n");
+ return NULL;
+ }
+
+ if (memset_s(path, path_len, '\0', MAX_PATH_SIZE) != 0) {
+ tloge("memset error\n");
+ return NULL;
+ }
+
+ mm = get_task_mm(current);
+ if (check_mm_struct(mm) != 0) {
+ tloge("check mm_struct failed\n");
+ return NULL;
+ }
+#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
+ exe_file = mm->exe_file;
+#else
+ exe_file = get_mm_exe_file(mm);
+#endif
+ if (!exe_file) {
+ mmput(mm);
+ return NULL;
+ }
+
+ base_path = exe_file->f_path;
+ path_get(&base_path);
+ dpath = d_path(&base_path, path, MAX_PATH_SIZE);
+ path_put(&base_path);
+#if (KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE)
+ fput(exe_file);
+#endif
+ mmput(mm);
+
+ return dpath;
+}
+#endif
+
+static int get_ca_path_and_uid(struct ca_info *ca)
+{
+ char *path = NULL;
+ const struct cred *cred = NULL;
+ int message_size;
+ char *tpath = NULL;
+
+ tpath = kmalloc(MAX_PATH_SIZE, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)tpath)) {
+ tloge("tpath kmalloc fail\n");
+ return -ENOMEM;
+ }
+
+ path = get_proc_dpath(tpath, MAX_PATH_SIZE);
+ if (IS_ERR_OR_NULL(path)) {
+ tloge("get process path failed\n");
+ kfree(tpath);
+ return -ENOMEM;
+ }
+
+ message_size = snprintf_s(ca->path, MAX_PATH_SIZE,
+ MAX_PATH_SIZE - 1, "%s", path);
+ if (message_size <= 0) {
+ tloge("pack path failed\n");
+ kfree(tpath);
+ return -EFAULT;
+ }
+
+ get_task_struct(current);
+ cred = koadpt_get_task_cred(current);
+ if (!cred) {
+ tloge("cred is NULL\n");
+ kfree(tpath);
+ put_task_struct(current);
+ return -EACCES;
+ }
+
+ ca->uid = cred->uid.val;
+ tlogd("ca_task->comm is %s, path is %s, ca uid is %u\n",
+ current->comm, path, cred->uid.val);
+
+ put_cred(cred);
+ put_task_struct(current);
+ kfree(tpath);
+ return 0;
+}
+
+int check_ext_agent_access(uint32_t agent_id)
+{
+ int ret;
+ struct ca_info agent_ca = { {0}, 0, 0 };
+
+ ret = get_ca_path_and_uid(&agent_ca);
+ if (ret != 0) {
+ tloge("get cp path or uid failed\n");
+ return ret;
+ }
+ agent_ca.agent_id = agent_id;
+
+ return is_allowed_agent_ca(&agent_ca, true);
+}
+
+static int get_buf_len(const uint8_t *inbuf, uint32_t *buf_len)
+{
+ if (copy_from_user(buf_len, inbuf, sizeof(*buf_len))) {
+ tloge("copy from user failed\n");
+ return -EFAULT;
+ }
+
+ if (*buf_len > HASH_FILE_MAX_SIZE) {
+ tloge("ERROR: file size[0x%x] too big\n", *buf_len);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int send_set_smc_cmd(struct mb_cmd_pack *mb_pack,
+ struct tc_ns_smc_cmd *smc_cmd, unsigned int cmd_id,
+ const uint8_t *buf_to_tee, uint32_t buf_len)
+{
+ int ret = 0;
+
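+ /* the buffer's physical address is split into a low word (value.a)
+ * and a high word (value.b) shifted by ADDR_TRANS_NUM bits */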
+ mb_pack->operation.paramtypes = TEE_PARAM_TYPE_VALUE_INPUT |
+ (TEE_PARAM_TYPE_VALUE_INPUT << TEE_PARAM_NUM);
+ mb_pack->operation.params[0].value.a =
+ (unsigned int)mailbox_virt_to_phys((uintptr_t)buf_to_tee);
+ mb_pack->operation.params[0].value.b =
+ (uint64_t)mailbox_virt_to_phys((uintptr_t)buf_to_tee) >> ADDR_TRANS_NUM;
+ mb_pack->operation.params[1].value.a = buf_len;
+ smc_cmd->cmd_type = CMD_TYPE_GLOBAL;
+ smc_cmd->cmd_id = cmd_id;
+ smc_cmd->operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation);
+ smc_cmd->operation_h_phys =
+ (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM;
+ if (tc_ns_smc(smc_cmd) != 0) {
+ ret = -EPERM;
+ tloge("set native hash failed\n");
+ }
+
+ return ret;
+}
+
+int tc_ns_set_native_hash(unsigned long arg, unsigned int cmd_id)
+{
+ int ret;
+ struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };
+ uint8_t *inbuf = (uint8_t *)(uintptr_t)arg;
+ uint32_t buf_len = 0;
+ uint8_t *buf_to_tee = NULL;
+ struct mb_cmd_pack *mb_pack = NULL;
+
+ ret = check_teecd_auth();
+#ifdef CONFIG_CADAEMON_AUTH
+ if (ret != 0)
+ ret = check_cadaemon_auth();
+#endif
+ if (ret != 0) {
+ tloge("teecd or cadaemon auth failed, ret %d\n", ret);
+ return -EACCES;
+ }
+
+ if (!inbuf)
+ return -EINVAL;
+
+ if (get_buf_len(inbuf, &buf_len) != 0)
+ return -EFAULT;
+
+ buf_to_tee = mailbox_alloc(buf_len, 0);
+ if (!buf_to_tee) {
+ tloge("failed to alloc memory!\n");
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(buf_to_tee, inbuf, buf_len)) {
+ tloge("copy from user failed\n");
+ mailbox_free(buf_to_tee);
+ return -EFAULT;
+ }
+
+ mb_pack = mailbox_alloc_cmd_pack();
+ if (!mb_pack) {
+ tloge("alloc cmd pack failed\n");
+ mailbox_free(buf_to_tee);
+ return -ENOMEM;
+ }
+
+ ret = send_set_smc_cmd(mb_pack, &smc_cmd, cmd_id, buf_to_tee, buf_len);
+ mailbox_free(buf_to_tee);
+ mailbox_free(mb_pack);
+
+ return ret;
+}
+
+int tc_ns_late_init(unsigned long arg)
+{
+ int ret = 0;
+ struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };
+ uint32_t index = (uint32_t)arg; /* index is uint32, no truncate risk */
+ struct mb_cmd_pack *mb_pack = NULL;
+
+ mb_pack = mailbox_alloc_cmd_pack();
+ if (!mb_pack) {
+ tloge("alloc cmd pack failed\n");
+ return -ENOMEM;
+ }
+
+ mb_pack->operation.paramtypes = TEE_PARAM_TYPE_VALUE_INPUT;
+ mb_pack->operation.params[0].value.a = index;
+
+ smc_cmd.cmd_type = CMD_TYPE_GLOBAL;
+ smc_cmd.cmd_id = GLOBAL_CMD_ID_LATE_INIT;
+ smc_cmd.operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation);
+ smc_cmd.operation_h_phys =
+ (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM;
+
+ if (tc_ns_smc(&smc_cmd)) {
+ ret = -EPERM;
+ tloge("late int failed\n");
+ }
+ mailbox_free(mb_pack);
+
+ return ret;
+}
+
+void send_event_response_single(const struct tc_ns_dev_file *dev_file)
+{
+ struct smc_event_data *event_data = NULL;
+ struct smc_event_data *tmp = NULL;
+ unsigned long flags;
+ unsigned int agent_id = 0;
+
+ if (!dev_file)
+ return;
+
+ spin_lock_irqsave(&g_agent_control.lock, flags);
+ list_for_each_entry_safe(event_data, tmp, &g_agent_control.agent_list,
+ head) {
+ if (event_data->owner == dev_file) {
+ agent_id = event_data->agent_id;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&g_agent_control.lock, flags);
+ if (agent_id != 0)
+ send_event_response(agent_id);
+ return;
+}
+
+struct smc_event_data *find_event_control(unsigned int agent_id)
+{
+ struct smc_event_data *event_data = NULL;
+ struct smc_event_data *tmp_data = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&g_agent_control.lock, flags);
+ list_for_each_entry(event_data, &g_agent_control.agent_list, head) {
+ if (event_data->agent_id == agent_id) {
+ tmp_data = event_data;
+ get_agent_event(event_data);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&g_agent_control.lock, flags);
+
+ return tmp_data;
+}
+
+static void unmap_agent_buffer(struct smc_event_data *event_data)
+{
+ if (!event_data) {
+ tloge("event data is NULL\n");
+ return;
+ }
+
+ if (IS_ERR_OR_NULL(event_data->agent_buff_user))
+ return;
+
+ if (vm_munmap((unsigned long)(uintptr_t)event_data->agent_buff_user,
+ event_data->agent_buff_size) != 0)
+ tloge("unmap failed\n");
+
+ event_data->agent_buff_user = NULL;
+}
+
+static void free_event_control(unsigned int agent_id)
+{
+ struct smc_event_data *event_data = NULL;
+ struct smc_event_data *tmp_event = NULL;
+ unsigned long flags;
+ bool find = false;
+
+ spin_lock_irqsave(&g_agent_control.lock, flags);
+ list_for_each_entry_safe(event_data, tmp_event, &g_agent_control.agent_list, head) {
+ if (event_data->agent_id == agent_id) {
+ list_del(&event_data->head);
+ find = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&g_agent_control.lock, flags);
+
+ if (!find)
+ return;
+
+ unmap_agent_buffer(event_data);
+ mailbox_free(event_data->agent_buff_kernel);
+ event_data->agent_buff_kernel = NULL;
+ put_agent_event(event_data);
+}
+
+static int init_agent_context(unsigned int agent_id,
+ const struct tc_ns_smc_cmd *smc_cmd,
+ struct smc_event_data **event_data)
+{
+ *event_data = find_event_control(agent_id);
+ if (!(*event_data)) {
+ tloge("agent %u not exist\n", agent_id);
+ return -EINVAL;
+ }
+ tlogd("agent-0x%x: returning client command", agent_id);
+
+ /* store the TUI working device so the TUI can be terminated when the device is closed */
+ if (is_tui_agent(agent_id)) {
+ tloge("TEE_TUI_AGENT_ID: pid-%d", current->pid);
+ set_tui_caller_info(smc_cmd->dev_file_id, current->pid);
+ }
+
+ isb();
+ wmb();
+
+ return 0;
+}
+
+static int wait_agent_response(struct smc_event_data *event_data)
+{
+ int ret = 0;
+ /* only userspace CA need freeze */
+ bool need_freeze = !(current->flags & PF_KTHREAD);
+ bool sig_pending = !sigisemptyset(&current->pending.signal);
+ bool answered = true;
+ int rc;
+
+ do {
+ answered = true;
+ /*
+ * wait_event_freezable will be interrupted by a signal or by the
+ * freezer, which is invoked to freeze a userspace task on suspend.
+ * Freezing a task means waking it up via fake_signal_wake_up and
+ * letting it have an opportunity to enter the 'refrigerator'
+ * through try_to_freeze used in wait_event_freezable.
+ *
+ * Which scenarios can be frozen?
+ * 1. CA is waiting agent -> suspend -- OK
+ * 2. suspend -> CA start agent request -- OK
+ * 3. CA is waiting agent -> CA is killed -> suspend -- NOK
+ */
+ if (need_freeze && !sig_pending) {
+ rc = wait_event_freezable(event_data->ca_pending_wq,
+ atomic_read(&event_data->ca_run));
+ if (rc != -ERESTARTSYS)
+ continue;
+ if (!sigisemptyset(&current->pending.signal))
+ sig_pending = true;
+ tloge("agent wait event is interrupted by %s\n",
+ sig_pending ? "signal" : "freezer");
+ /*
+ * When freezing a userspace task, fake_signal_wake_up
+ * only set TIF_SIGPENDING but not set a real signal.
+ * After task thawed, CA need wait agent response again
+ * so TIF_SIGPENDING need to be cleared.
+ */
+ if (!sig_pending)
+ clear_thread_flag(TIF_SIGPENDING);
+ answered = false;
+ } else {
+ rc = wait_event_timeout(event_data->ca_pending_wq,
+ atomic_read(&event_data->ca_run),
+ (long)(RESLEEP_TIMEOUT * HZ));
+ if (rc)
+ continue;
+ tloge("agent wait event is timeout\n");
+ /* if no kill signal, just resleep before agent wake */
+ if (!sigkill_pending(current)) {
+ answered = false;
+ } else {
+ tloge("CA is killed, no need to \
+wait agent response\n");
+ event_data->ret_flag = 0;
+ ret = -EFAULT;
+ }
+ }
+ } while (!answered);
+
+ return ret;
+}
+
+int agent_process_work(const struct tc_ns_smc_cmd *smc_cmd,
+ unsigned int agent_id)
+{
+ struct smc_event_data *event_data = NULL;
+ int ret;
+
+ if (!smc_cmd) {
+ tloge("smc_cmd is null\n");
+ return -EINVAL;
+ }
+
+ if (init_agent_context(agent_id, smc_cmd, &event_data))
+ return -EINVAL;
+
+ if (atomic_read(&event_data->agent_ready) == AGENT_CRASHED) {
+ tloge("agent 0x%x is killed and restarting\n", agent_id);
+ put_agent_event(event_data);
+ return -EFAULT;
+ }
+
+ event_data->ret_flag = 1;
+ /* Wake up the agent that will process the command */
+ tlogd("agent process work: wakeup the agent");
+ wake_up(&event_data->wait_event_wq);
+ tlogd("agent 0x%x request, goto sleep, pe->run=%d\n",
+ agent_id, atomic_read(&event_data->ca_run));
+
+ ret = wait_agent_response(event_data);
+ atomic_set(&event_data->ca_run, 0);
+ put_agent_event(event_data);
+
+ /*
+ * when agent work is done, reset the cmd monitor time and
+ * bump the agent call count, since it's a new smc cmd.
+ */
+ cmd_monitor_reset_context();
+ return ret;
+}
+
+int is_agent_alive(unsigned int agent_id)
+{
+ struct smc_event_data *event_data = NULL;
+
+ event_data = find_event_control(agent_id);
+ if (event_data) {
+ put_agent_event(event_data);
+ return AGENT_ALIVE;
+ }
+
+ return AGENT_DEAD;
+}
+
+int tc_ns_wait_event(unsigned int agent_id)
+{
+ int ret = -EINVAL;
+ struct smc_event_data *event_data = NULL;
+
+ tlogd("agent %u waits for command\n", agent_id);
+
+ event_data = find_event_control(agent_id);
+ if (event_data) {
+ /* only when agent wait event, it's in ready state to work */
+ atomic_set(&(event_data->agent_ready), AGENT_READY);
+ ret = wait_event_interruptible(event_data->wait_event_wq, event_data->ret_flag);
+ put_agent_event(event_data);
+ }
+
+ return ret;
+}
+
+int tc_ns_sync_sys_time(const struct tc_ns_client_time *tc_ns_time)
+{
+ struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };
+ int ret = 0;
+ struct mb_cmd_pack *mb_pack = NULL;
+
+ if (!tc_ns_time) {
+ tloge("tc_ns_time is NULL input buffer\n");
+ return -EINVAL;
+ }
+
+ mb_pack = mailbox_alloc_cmd_pack();
+ if (!mb_pack) {
+ tloge("alloc mb pack failed\n");
+ return -ENOMEM;
+ }
+
+ mb_pack->operation.paramtypes = TEE_PARAM_TYPE_VALUE_INPUT;
+ mb_pack->operation.params[0].value.a = tc_ns_time->seconds;
+ mb_pack->operation.params[0].value.b = tc_ns_time->millis;
+
+ smc_cmd.cmd_type = CMD_TYPE_GLOBAL;
+ smc_cmd.cmd_id = GLOBAL_CMD_ID_ADJUST_TIME;
+ smc_cmd.operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation);
+ smc_cmd.operation_h_phys =
+ (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM;
+ if (tc_ns_smc(&smc_cmd)) {
+ tloge("tee adjust time failed, return error\n");
+ ret = -EPERM;
+ }
+ mailbox_free(mb_pack);
+
+ return ret;
+}
+
+int sync_system_time_from_user(const struct tc_ns_client_time *user_time)
+{
+ int ret = 0;
+ struct tc_ns_client_time time = { 0 };
+
+ if (!user_time) {
+ tloge("user time is NULL input buffer\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&time, user_time, sizeof(time))) {
+ tloge("copy from user failed\n");
+ return -EFAULT;
+ }
+
+ ret = tc_ns_sync_sys_time(&time);
+ if (ret != 0)
+ tloge("sync system time from user failed, ret = 0x%x\n", ret);
+
+ return ret;
+}
+
+void sync_system_time_from_kernel(void)
+{
+ int ret = 0;
+ struct tc_ns_client_time time = { 0 };
+
+ struct time_spec kernel_time = {0};
+ get_time_spec(&kernel_time);
+
+ time.seconds = (uint32_t)kernel_time.ts.tv_sec;
+ time.millis = (uint32_t)(kernel_time.ts.tv_nsec / MS_TO_NS);
+
+ ret = tc_ns_sync_sys_time(&time);
+ if (ret != 0)
+ tloge("sync system time from kernel failed, ret = 0x%x\n", ret);
+
+ return;
+}
+
+static struct smc_event_data *check_response_access(unsigned int agent_id)
+{
+ struct smc_event_data *event_data = find_event_control(agent_id);
+
+ if (!event_data) {
+ tloge("Can't get event_data\n");
+ return NULL;
+ }
+ return event_data;
+}
+
+static void process_send_event_response(struct smc_event_data *event_data)
+{
+ if (event_data->ret_flag == 0)
+ return;
+
+ event_data->ret_flag = 0;
+ /* Send the command back to the TA session waiting for it */
+ tlogd("agent wakeup ca\n");
+ atomic_set(&event_data->ca_run, 1);
+ /* make sure ca_run is set before waking up the CA */
+ wake_up(&event_data->ca_pending_wq);
+}
+
+int tc_ns_send_event_response(unsigned int agent_id)
+{
+ struct smc_event_data *event_data = NULL;
+
+ event_data = check_response_access(agent_id);
+ if (!event_data) {
+ tlogd("agent %u pre-check failed\n", agent_id);
+ return -EINVAL;
+ }
+
+ tlogd("agent %u sends answer back\n", agent_id);
+ process_send_event_response(event_data);
+ put_agent_event(event_data);
+
+ return 0;
+}
+
+void send_event_response(unsigned int agent_id)
+{
+ struct smc_event_data *event_data = find_event_control(agent_id);
+
+ if (!event_data) {
+ tloge("Can't get event_data\n");
+ return;
+ }
+
+ tlogi("agent 0x%x sends answer back\n", agent_id);
+ atomic_set(&event_data->agent_ready, AGENT_CRASHED);
+ process_send_event_response(event_data);
+ put_agent_event(event_data);
+}
+
+static void init_restart_agent_node(struct tc_ns_dev_file *dev_file,
+ struct smc_event_data *event_data)
+{
+ tlogi("agent: 0x%x restarting\n", event_data->agent_id);
+ event_data->ret_flag = 0;
+ event_data->owner = dev_file;
+ atomic_set(&event_data->agent_ready, AGENT_REGISTERED);
+ init_waitqueue_head(&(event_data->wait_event_wq));
+ init_waitqueue_head(&(event_data->send_response_wq));
+ init_waitqueue_head(&(event_data->ca_pending_wq));
+ atomic_set(&(event_data->ca_run), 0);
+}
+
+static int create_new_agent_node(struct tc_ns_dev_file *dev_file,
+ struct smc_event_data **event_data, unsigned int agent_id,
+ void **agent_buff, uint32_t agent_buff_size)
+{
+ *agent_buff = mailbox_alloc(agent_buff_size, MB_FLAG_ZERO);
+ if (!(*agent_buff)) {
+ tloge("alloc agent buff failed\n");
+ return -ENOMEM;
+ }
+ *event_data = kzalloc(sizeof(**event_data), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)(*event_data))) {
+ mailbox_free(*agent_buff);
+ *agent_buff = NULL;
+ *event_data = NULL;
+ tloge("alloc event data failed\n");
+ return -ENOMEM;
+ }
+ (*event_data)->agent_id = agent_id;
+ (*event_data)->ret_flag = 0;
+ (*event_data)->agent_buff_kernel = *agent_buff;
+ (*event_data)->agent_buff_size = agent_buff_size;
+ (*event_data)->owner = dev_file;
+ atomic_set(&(*event_data)->agent_ready, AGENT_REGISTERED);
+ init_waitqueue_head(&(*event_data)->wait_event_wq);
+ init_waitqueue_head(&(*event_data)->send_response_wq);
+ INIT_LIST_HEAD(&(*event_data)->head);
+ init_waitqueue_head(&(*event_data)->ca_pending_wq);
+ atomic_set(&(*event_data)->ca_run, 0);
+
+ return 0;
+}
+
+#ifdef CONFIG_LIBLINUX
+static unsigned long agent_buffer_map(unsigned long buffer, uint32_t size)
+{
+ unsigned long user_addr = 0;
+ int ret;
+
+ void *priv = NULL;
+ pgprot_t pro;
+ pro.pgprot = VM_READ | VM_WRITE;
+
+ size = PAGE_ALIGN(size);
+ if (!size)
+ return -ENOMEM;
+
+ user_addr = liblinux_pal_usermap_prepare(user_addr, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, &priv);
+ if (IS_ERR_OR_NULL((const void *)user_addr)) {
+ tloge("agent usermap prepare failed\n");
+ return user_addr;
+ }
+ /* prepare succeeded at this point, so report success to the finish hook */
+ liblinux_pal_usermap_finish((const void *)priv, true);
+
+ ret = remap_pfn_range(NULL, user_addr, buffer >> PAGE_SHIFT, size, pro);
+ if (ret) {
+ tloge("remap agent buffer failed, err=%d", ret);
+ goto err_out;
+ }
+
+ return user_addr;
+err_out:
+ if (vm_munmap(user_addr, size))
+ tloge("munmap failed\n");
+ return -EFAULT;
+}
+#else
+static unsigned long agent_buffer_map(unsigned long buffer, uint32_t size)
+{
+ struct vm_area_struct *vma = NULL;
+ unsigned long user_addr;
+ int ret;
+
+ user_addr = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, 0);
+ if (IS_ERR_VALUE((uintptr_t)user_addr)) {
+ tloge("vm mmap failed\n");
+ return user_addr;
+ }
+
+ down_read(&mm_sem_lock(current->mm));
+ vma = find_vma(current->mm, user_addr);
+ if (!vma) {
+ tloge("user_addr is not valid in vma");
+ goto err_out;
+ }
+
+ ret = remap_pfn_range(vma, user_addr, buffer >> PAGE_SHIFT, size,
+ vma->vm_page_prot);
+ if (ret != 0) {
+ tloge("remap agent buffer failed, err=%d", ret);
+ goto err_out;
+ }
+
+ up_read(&mm_sem_lock(current->mm));
+ return user_addr;
+err_out:
+ up_read(&mm_sem_lock(current->mm));
+ if (vm_munmap(user_addr, size))
+ tloge("munmap failed\n");
+ return -EFAULT;
+}
+#endif
+
+static bool is_valid_agent(unsigned int agent_id,
+ unsigned int buffer_size, bool user_agent)
+{
+ (void)agent_id;
+ if (user_agent && (buffer_size > SZ_4K)) {
+ tloge("size: %u of user agent's shared mem is invalid\n",
+ buffer_size);
+ return false;
+ }
+
+ return true;
+}
+
+static int is_agent_already_exist(unsigned int agent_id,
+ struct smc_event_data **event_data, struct tc_ns_dev_file *dev_file, bool *find_flag)
+{
+ unsigned long flags;
+ bool flag = false;
+ struct smc_event_data *agent_node = NULL;
+
+ spin_lock_irqsave(&g_agent_control.lock, flags);
+ list_for_each_entry(agent_node, &g_agent_control.agent_list, head) {
+ if (agent_node->agent_id == agent_id) {
+ if (atomic_read(&agent_node->agent_ready) != AGENT_CRASHED) {
+ tloge("no allow agent proc to reg twice\n");
+ spin_unlock_irqrestore(&g_agent_control.lock, flags);
+ return -EINVAL;
+ }
+ flag = true;
+ get_agent_event(agent_node);
+ /*
+ * We find the agent event_data aready in agent_list, it indicate agent
+ * didn't unregister normally, so the event_data will be reused.
+ */
+ init_restart_agent_node(dev_file, agent_node);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&g_agent_control.lock, flags);
+ *find_flag = flag;
+ if (flag)
+ *event_data = agent_node;
+ return 0;
+}
+
+static void add_event_node_to_list(struct smc_event_data *event_data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&g_agent_control.lock, flags);
+ list_add_tail(&event_data->head, &g_agent_control.agent_list);
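+ /* the agent list holds the initial reference; dropped in free_event_control() */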
+ atomic_set(&event_data->usage, 1);
+ spin_unlock_irqrestore(&g_agent_control.lock, flags);
+}
+
+static int register_agent_to_tee(unsigned int agent_id, const void *agent_buff, uint32_t agent_buff_size)
+{
+ int ret = 0;
+ struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };
+ struct mb_cmd_pack *mb_pack = NULL;
+
+ mb_pack = mailbox_alloc_cmd_pack();
+ if (!mb_pack) {
+ tloge("alloc mailbox failed\n");
+ return -ENOMEM;
+ }
+
+ mb_pack->operation.paramtypes = TEE_PARAM_TYPE_VALUE_INPUT |
+ (TEE_PARAM_TYPE_VALUE_INPUT << TEE_PARAM_NUM);
+ mb_pack->operation.params[0].value.a =
+ mailbox_virt_to_phys((uintptr_t)agent_buff);
+ mb_pack->operation.params[0].value.b =
+ (uint64_t)mailbox_virt_to_phys((uintptr_t)agent_buff) >> ADDR_TRANS_NUM;
+ mb_pack->operation.params[1].value.a = agent_buff_size;
+ smc_cmd.cmd_type = CMD_TYPE_GLOBAL;
+ smc_cmd.cmd_id = GLOBAL_CMD_ID_REGISTER_AGENT;
+ smc_cmd.operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation);
+ smc_cmd.operation_h_phys =
+ (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM;
+ smc_cmd.agent_id = agent_id;
+
+ if (tc_ns_smc(&smc_cmd)) {
+ ret = -EPERM;
+ tloge("register agent to tee failed\n");
+ }
+ mailbox_free(mb_pack);
+
+ return ret;
+}
+
+static int get_agent_buffer(struct smc_event_data *event_data,
+ bool user_agent, void **buffer)
+{
+ /* agent first start or restart, both need a remap */
+ if (user_agent) {
+ event_data->agent_buff_user =
+ (void *)(uintptr_t)agent_buffer_map(
+ mailbox_virt_to_phys((uintptr_t)event_data->agent_buff_kernel),
+ event_data->agent_buff_size);
+ if (IS_ERR(event_data->agent_buff_user)) {
+ tloge("vm map agent buffer failed\n");
+ return -EFAULT;
+ }
+ *buffer = event_data->agent_buff_user;
+ } else {
+ *buffer = event_data->agent_buff_kernel;
+ }
+
+ return 0;
+}
+
+int tc_ns_register_agent(struct tc_ns_dev_file *dev_file,
+ unsigned int agent_id, unsigned int buffer_size,
+ void **buffer, bool user_agent)
+{
+ struct smc_event_data *event_data = NULL;
+ int ret = -EINVAL;
+ bool find_flag = false;
+ void *agent_buff = NULL;
+ uint32_t size_align;
+
+ /* dev can be null */
+ if (!buffer)
+ return ret;
+
+ if (!is_valid_agent(agent_id, buffer_size, user_agent))
+ return ret;
+
+ size_align = ALIGN(buffer_size, SZ_4K);
+
+ if (is_agent_already_exist(agent_id, &event_data, dev_file, &find_flag))
+ return ret;
+ if (!find_flag) {
+ ret = create_new_agent_node(dev_file, &event_data,
+ agent_id, &agent_buff, size_align);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (get_agent_buffer(event_data, user_agent, buffer))
+ goto release_rsrc;
+
+ /* find_flag is false means it's a new agent register */
+ if (!find_flag) {
+ /*
+ * Obtain share memory which is released
+ * in tc_ns_unregister_agent
+ */
+ ret = register_agent_to_tee(agent_id, agent_buff, size_align);
+ if (ret != 0) {
+ unmap_agent_buffer(event_data);
+ goto release_rsrc;
+ }
+ add_event_node_to_list(event_data);
+ }
+ if (find_flag)
+ put_agent_event(event_data); /* match get action */
+ return 0;
+
+release_rsrc:
+ if (find_flag)
+ put_agent_event(event_data); /* match get action */
+ else
+ kfree(event_data); /* here event_data can never be NULL */
+
+ if (agent_buff)
+ mailbox_free(agent_buff);
+ return ret;
+}
+
+int tc_ns_unregister_agent(unsigned int agent_id)
+{
+ struct smc_event_data *event_data = NULL;
+ int ret = 0;
+ struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };
+ struct mb_cmd_pack *mb_pack = NULL;
+
+ event_data = find_event_control(agent_id);
+ if (!event_data || !event_data->agent_buff_kernel) {
+ tloge("agent is not found or kaddr is not allocated\n");
+ return -EINVAL;
+ }
+
+ mb_pack = mailbox_alloc_cmd_pack();
+ if (!mb_pack) {
+ tloge("alloc mailbox failed\n");
+ put_agent_event(event_data);
+ return -ENOMEM;
+ }
+ mb_pack->operation.paramtypes = TEE_PARAM_TYPE_VALUE_INPUT |
+ (TEE_PARAM_TYPE_VALUE_INPUT << TEE_PARAM_NUM);
+ mb_pack->operation.params[0].value.a =
+ mailbox_virt_to_phys((uintptr_t)event_data->agent_buff_kernel);
+ mb_pack->operation.params[0].value.b =
+ (uint64_t)mailbox_virt_to_phys((uintptr_t)event_data->agent_buff_kernel) >> ADDR_TRANS_NUM;
+ mb_pack->operation.params[1].value.a = SZ_4K;
+ smc_cmd.cmd_type = CMD_TYPE_GLOBAL;
+ smc_cmd.cmd_id = GLOBAL_CMD_ID_UNREGISTER_AGENT;
+ smc_cmd.operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation);
+ smc_cmd.operation_h_phys =
+ (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM;
+ smc_cmd.agent_id = agent_id;
+ tlogd("unregistering agent 0x%x\n", agent_id);
+
+ if (tc_ns_smc(&smc_cmd) == 0) {
+ free_event_control(agent_id);
+ } else {
+ ret = -EPERM;
+ tloge("unregister agent failed\n");
+ }
+ put_agent_event(event_data);
+ mailbox_free(mb_pack);
+ return ret;
+}
+
+bool is_system_agent(const struct tc_ns_dev_file *dev_file)
+{
+ struct smc_event_data *event_data = NULL;
+ struct smc_event_data *tmp = NULL;
+ bool system_agent = false;
+ unsigned long flags;
+
+ if (!dev_file)
+ return system_agent;
+
+ spin_lock_irqsave(&g_agent_control.lock, flags);
+ list_for_each_entry_safe(event_data, tmp, &g_agent_control.agent_list,
+ head) {
+ if (event_data->owner == dev_file) {
+ system_agent = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&g_agent_control.lock, flags);
+
+ return system_agent;
+}
+
+void send_crashed_event_response_all(const struct tc_ns_dev_file *dev_file)
+{
+ struct smc_event_data *event_data = NULL;
+ struct smc_event_data *tmp = NULL;
+ unsigned int agent_id[AGENT_MAX] = {0};
+ unsigned int i = 0;
+ unsigned long flags;
+
+ if (!dev_file)
+ return;
+
+ spin_lock_irqsave(&g_agent_control.lock, flags);
+ list_for_each_entry_safe(event_data, tmp, &g_agent_control.agent_list,
+ head) {
+ if (event_data->owner == dev_file && i < AGENT_MAX)
+ agent_id[i++] = event_data->agent_id;
+ }
+ spin_unlock_irqrestore(&g_agent_control.lock, flags);
+
+ for (i = 0; i < AGENT_MAX; i++) {
+ if (agent_id[i] != 0)
+ send_event_response(agent_id[i]);
+ }
+
+ return;
+}
+
+void tee_agent_clear_dev_owner(const struct tc_ns_dev_file *dev_file)
+{
+ struct smc_event_data *event_data = NULL;
+ struct smc_event_data *tmp = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&g_agent_control.lock, flags);
+ list_for_each_entry_safe(event_data, tmp, &g_agent_control.agent_list,
+ head) {
+ if (event_data->owner == dev_file) {
+ event_data->owner = NULL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&g_agent_control.lock, flags);
+}
+
+
+static int def_tee_agent_work(void *instance)
+{
+ int ret = 0;
+ struct tee_agent_kernel_ops *agent_instance = NULL;
+
+ agent_instance = instance;
+ while (!kthread_should_stop()) {
+ tlogd("%s agent loop++++\n", agent_instance->agent_name);
+ ret = tc_ns_wait_event(agent_instance->agent_id);
+ if (ret != 0) {
+ tloge("%s wait event fail\n",
+ agent_instance->agent_name);
+ break;
+ }
+ if (agent_instance->tee_agent_work) {
+ ret = agent_instance->tee_agent_work(agent_instance);
+ if (ret != 0)
+ tloge("%s agent work fail\n",
+ agent_instance->agent_name);
+ }
+ ret = tc_ns_send_event_response(agent_instance->agent_id);
+ if (ret != 0) {
+ tloge("%s send event response fail\n",
+ agent_instance->agent_name);
+ break;
+ }
+ tlogd("%s agent loop----\n", agent_instance->agent_name);
+ }
+
+ return ret;
+}
+
+static int def_tee_agent_run(struct tee_agent_kernel_ops *agent_instance)
+{
+ struct tc_ns_dev_file dev = {0};
+ int ret;
+
+ /* 1. Register agent buffer to TEE */
+ ret = tc_ns_register_agent(&dev, agent_instance->agent_id,
+ agent_instance->agent_buff_size, &agent_instance->agent_buff,
+ false);
+ if (ret != 0) {
+ tloge("register agent buffer fail,ret =0x%x\n", ret);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* 2. Create thread to run agent */
+ agent_instance->agent_thread =
+ kthread_create(def_tee_agent_work, agent_instance,
+ "agent_%s", agent_instance->agent_name);
+ if (IS_ERR_OR_NULL(agent_instance->agent_thread)) {
+ tloge("kthread create fail\n");
+ ret = PTR_ERR(agent_instance->agent_thread);
+ agent_instance->agent_thread = NULL;
+ goto out;
+ }
+ tz_kthread_bind_mask(agent_instance->agent_thread);
+ wake_up_process(agent_instance->agent_thread);
+ return 0;
+
+out:
+ return ret;
+}
+
+static int def_tee_agent_stop(struct tee_agent_kernel_ops *agent_instance)
+{
+ int ret;
+
+ if (tc_ns_send_event_response(agent_instance->agent_id) != 0)
+ tloge("failed to send response for agent %u\n",
+ agent_instance->agent_id);
+ ret = tc_ns_unregister_agent(agent_instance->agent_id);
+ if (ret != 0)
+ tloge("failed to unregister agent %u\n",
+ agent_instance->agent_id);
+ if (!IS_ERR_OR_NULL(agent_instance->agent_thread))
+ kthread_stop(agent_instance->agent_thread);
+
+ return 0;
+}
+
+static struct tee_agent_kernel_ops g_def_tee_agent_ops = {
+ .agent_name = "default",
+ .agent_id = 0,
+ .tee_agent_init = NULL,
+ .tee_agent_run = def_tee_agent_run,
+ .tee_agent_work = NULL,
+ .tee_agent_exit = NULL,
+ .tee_agent_stop = def_tee_agent_stop,
+ .tee_agent_crash_work = NULL,
+ .agent_buff_size = PAGE_SIZE,
+ .list = LIST_HEAD_INIT(g_def_tee_agent_ops.list)
+};
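+/* fallback hooks used by tee_agent_kernel_init/exit when a registered agent
+ * leaves the corresponding hook NULL */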
+
+static int tee_agent_kernel_init(void)
+{
+ struct tee_agent_kernel_ops *agent_ops = NULL;
+ int ret = 0;
+
+ list_for_each_entry(agent_ops, &g_tee_agent_list, list) {
+ /* Check the agent validity */
+ if (!agent_ops->agent_id ||
+ !agent_ops->agent_name ||
+ !agent_ops->tee_agent_work) {
+ tloge("agent is invalid\n");
+ continue;
+ }
+ tlogd("ready to init %s agent, id=0x%x\n",
+ agent_ops->agent_name, agent_ops->agent_id);
+
+ /* Set agent buff size */
+ if (!agent_ops->agent_buff_size)
+ agent_ops->agent_buff_size =
+ g_def_tee_agent_ops.agent_buff_size;
+
+ /* Initialize the agent */
+ if (agent_ops->tee_agent_init)
+ ret = agent_ops->tee_agent_init(agent_ops);
+ else if (g_def_tee_agent_ops.tee_agent_init)
+ ret = g_def_tee_agent_ops.tee_agent_init(agent_ops);
+ else
+ tlogw("agent id %u has no init function\n",
+ agent_ops->agent_id);
+ if (ret != 0) {
+ tloge("tee_agent_init %s failed\n",
+ agent_ops->agent_name);
+ continue;
+ }
+
+ /* Run the agent */
+ if (agent_ops->tee_agent_run)
+ ret = agent_ops->tee_agent_run(agent_ops);
+ else if (g_def_tee_agent_ops.tee_agent_run)
+ ret = g_def_tee_agent_ops.tee_agent_run(agent_ops);
+ else
+ tlogw("agent id %u has no run function\n",
+ agent_ops->agent_id);
+
+ if (ret != 0) {
+ tloge("tee_agent_run %s failed\n",
+ agent_ops->agent_name);
+ if (agent_ops->tee_agent_exit)
+ agent_ops->tee_agent_exit(agent_ops);
+ continue;
+ }
+ }
+
+ return 0;
+}
+
+static void tee_agent_kernel_exit(void)
+{
+ struct tee_agent_kernel_ops *agent_ops = NULL;
+
+ list_for_each_entry(agent_ops, &g_tee_agent_list, list) {
+ /* Stop the agent */
+ if (agent_ops->tee_agent_stop)
+ agent_ops->tee_agent_stop(agent_ops);
+ else if (g_def_tee_agent_ops.tee_agent_stop)
+ g_def_tee_agent_ops.tee_agent_stop(agent_ops);
+ else
+ tlogw("agent id %u has no stop function\n",
+ agent_ops->agent_id);
+
+ /* Uninitialize the agent */
+ if (agent_ops->tee_agent_exit)
+ agent_ops->tee_agent_exit(agent_ops);
+ else if (g_def_tee_agent_ops.tee_agent_exit)
+ g_def_tee_agent_ops.tee_agent_exit(agent_ops);
+ else
+ tlogw("agent id %u has no exit function\n",
+ agent_ops->agent_id);
+ }
+}
+
+int tee_agent_clear_work(struct tc_ns_client_context *context,
+ unsigned int dev_file_id)
+{
+ struct tee_agent_kernel_ops *agent_ops = NULL;
+
+ list_for_each_entry(agent_ops, &g_tee_agent_list, list) {
+ if (agent_ops->tee_agent_crash_work)
+ agent_ops->tee_agent_crash_work(agent_ops,
+ context, dev_file_id);
+ }
+ return 0;
+}
+
+int tee_agent_kernel_register(struct tee_agent_kernel_ops *new_agent)
+{
+ if (!new_agent)
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&new_agent->list);
+ list_add_tail(&new_agent->list, &g_tee_agent_list);
+
+ return 0;
+}
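+
+/*
+ * Usage sketch (illustrative only, not part of the driver): a kernel
+ * module can plug its own agent into the list before agent_init() runs.
+ * The agent id, name and work callback below are hypothetical.
+ *
+ * static int my_agent_work(struct tee_agent_kernel_ops *agent)
+ * {
+ *     // handle one request placed in agent->agent_buff
+ *     return 0;
+ * }
+ *
+ * static struct tee_agent_kernel_ops my_agent_ops = {
+ *     .agent_name = "my_agent",        // hypothetical name
+ *     .agent_id = 0x4d594147,          // hypothetical id
+ *     .tee_agent_work = my_agent_work,
+ * };
+ *
+ * tee_agent_kernel_register(&my_agent_ops);
+ *
+ * tee_agent_kernel_init() falls back to the g_def_tee_agent_ops defaults
+ * (buffer size, run/stop) for any callback left NULL.
+ */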
+
+void agent_init(void)
+{
+ spin_lock_init(&g_agent_control.lock);
+ INIT_LIST_HEAD(&g_agent_control.agent_list);
+ INIT_LIST_HEAD(&g_tee_agent_list);
+
+ rpmb_agent_register();
+#if defined(CONFIG_MM_VLTMM) || defined(CONFIG_MEMORY_VLTMM)
+ (void)vltmm_agent_register();
+#endif
+ if (tee_agent_kernel_init())
+ tloge("tee agent kernel init failed\n");
+ return;
+}
+
+void free_agent(void)
+{
+ struct smc_event_data *event_data = NULL;
+ struct smc_event_data *temp = NULL;
+ unsigned long flags;
+
+ tee_agent_kernel_exit();
+
+ spin_lock_irqsave(&g_agent_control.lock, flags);
+ list_for_each_entry_safe(event_data, temp, &g_agent_control.agent_list, head) {
+ list_del(&event_data->head);
+ unmap_agent_buffer(event_data);
+ mailbox_free(event_data->agent_buff_kernel);
+ event_data->agent_buff_kernel = NULL;
+ kfree(event_data);
+ }
+ spin_unlock_irqrestore(&g_agent_control.lock, flags);
+}
diff --git a/tzdriver/core/agent.h b/tzdriver/core/agent.h
new file mode 100644
index 0000000000000000000000000000000000000000..1b0b8c253c715a75b7962c7aa6b4b08109414e28
--- /dev/null
+++ b/tzdriver/core/agent.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: agent manager function definitions, such as register and send cmd
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef AGENT_H
+#define AGENT_H
+#include <linux/list.h>
+#include "teek_ns_client.h"
+
+#define MAX_PATH_SIZE 512
+#define AGENT_FS_ID 0x46536673 /* FSfs */
+#define AGENT_MISC_ID 0x4d495343 /* MISC */
+
+#ifdef CONFIG_RPMB_AGENT
+#define TEE_RPMB_AGENT_ID 0x4abe6198 /* RPMB */
+#endif
+
+#define AGENT_SOCKET_ID 0x69e85664 /* socket */
+#define SECFILE_LOAD_AGENT_ID 0x4c4f4144 /* SECFILE-LOAD-AGENT */
+#define TEE_SECE_AGENT_ID 0x53656345 /* npu agent id */
+#define TEE_FACE_AGENT1_ID 0x46616365 /* face agent id */
+#define TEE_FACE_AGENT2_ID 0x46616345 /* face agent id */
+#define TEE_VLTMM_AGENT_ID 0x564c544d /* vltmm agent id */
+#define SYSTEM_UID 1000
+#define MS_TO_NS 1000000
+
+enum agent_state_type {
+ AGENT_CRASHED = 0,
+ AGENT_REGISTERED,
+ AGENT_READY,
+};
+
+enum agent_status {
+ AGENT_ALIVE = 1,
+ AGENT_DEAD = 0,
+};
+
+/* for secure agent */
+struct smc_event_data {
+ unsigned int agent_id;
+ atomic_t agent_ready;
+ wait_queue_head_t wait_event_wq;
+ int ret_flag; /* indicate whether agent is returned from TEE */
+ wait_queue_head_t send_response_wq;
+ struct list_head head;
+ struct tc_ns_smc_cmd cmd;
+ struct tc_ns_dev_file *owner;
+ void *agent_buff_kernel;
+ void *agent_buff_user; /* used for unmap */
+ unsigned int agent_buff_size;
+ atomic_t usage;
+ wait_queue_head_t ca_pending_wq;
+ /* indicate whether agent is allowed to return to TEE */
+ atomic_t ca_run;
+};
+
+struct tee_agent_kernel_ops {
+ const char *agent_name;
+ unsigned int agent_id;
+ int (*tee_agent_init)(struct tee_agent_kernel_ops *agent_instance);
+ int (*tee_agent_run)(struct tee_agent_kernel_ops *agent_instance);
+ int (*tee_agent_work)(struct tee_agent_kernel_ops *agent_instance);
+ int (*tee_agent_stop)(struct tee_agent_kernel_ops *agent_instance);
+ int (*tee_agent_exit)(struct tee_agent_kernel_ops *agent_instance);
+ int (*tee_agent_crash_work)(
+ struct tee_agent_kernel_ops *agent_instance,
+ struct tc_ns_client_context *context,
+ unsigned int dev_file_id);
+ struct task_struct *agent_thread;
+ void *agent_data;
+ void *agent_buff;
+ unsigned int agent_buff_size;
+ struct list_head list;
+};
+
+struct ca_info {
+ char path[MAX_PATH_SIZE];
+ uint32_t uid;
+ uint32_t agent_id;
+};
+
+static inline void get_agent_event(struct smc_event_data *event_data)
+{
+ if (event_data)
+ atomic_inc(&event_data->usage);
+}
+
+static inline void put_agent_event(struct smc_event_data *event_data)
+{
+ if (event_data) {
+ if (atomic_dec_and_test(&event_data->usage))
+ kfree(event_data);
+ }
+}
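+
+/*
+ * Reference-counting sketch (illustrative): every lookup that takes a
+ * reference with get_agent_event() must be balanced by put_agent_event(),
+ * which frees the object once the usage count drops to zero, e.g.:
+ *
+ * struct smc_event_data *ev = find_event_control(agent_id);
+ * if (ev) {
+ *     // ... use ev ...
+ *     put_agent_event(ev); // assuming find_event_control() took a reference
+ * }
+ */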
+
+int is_allowed_agent_ca(const struct ca_info *ca,
+ bool check_agent_id);
+void agent_init(void);
+void free_agent(void);
+struct smc_event_data *find_event_control(unsigned int agent_id);
+void send_event_response(unsigned int agent_id);
+int agent_process_work(const struct tc_ns_smc_cmd *smc_cmd, unsigned int agent_id);
+int is_agent_alive(unsigned int agent_id);
+int tc_ns_set_native_hash(unsigned long arg, unsigned int cmd_id);
+int tc_ns_late_init(unsigned long arg);
+int tc_ns_register_agent(struct tc_ns_dev_file *dev_file, unsigned int agent_id,
+ unsigned int buffer_size, void **buffer, bool user_agent);
+int tc_ns_unregister_agent(unsigned int agent_id);
+void send_crashed_event_response_all(const struct tc_ns_dev_file *dev_file);
+int tc_ns_wait_event(unsigned int agent_id);
+int tc_ns_send_event_response(unsigned int agent_id);
+void send_event_response_single(const struct tc_ns_dev_file *dev_file);
+int sync_system_time_from_user(const struct tc_ns_client_time *user_time);
+void sync_system_time_from_kernel(void);
+int tee_agent_clear_work(struct tc_ns_client_context *context,
+ unsigned int dev_file_id);
+int tee_agent_kernel_register(struct tee_agent_kernel_ops *new_agent);
+bool is_system_agent(const struct tc_ns_dev_file *dev_file);
+void tee_agent_clear_dev_owner(const struct tc_ns_dev_file *dev_file);
+char *get_proc_dpath(char *path, int path_len);
+int check_ext_agent_access(uint32_t agent_id);
+
+#endif
diff --git a/tzdriver/core/cmdmonitor.c b/tzdriver/core/cmdmonitor.c
new file mode 100644
index 0000000000000000000000000000000000000000..b7d7b2897442b4a4541f7c4c1afb6f7091c5c6a2
--- /dev/null
+++ b/tzdriver/core/cmdmonitor.c
@@ -0,0 +1,613 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: cmdmonitor functions, monitoring every cmd sent to the TEE.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "cmdmonitor.h"
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/kthread.h>
+#include <linux/pid.h>
+#include <linux/version.h>
+#include <securec.h>
+#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE)
+#include <linux/sched/task.h>
+#endif
+
+#ifdef CONFIG_TEE_LOG_EXCEPTION
+#include <huawei_platform/log/imonitor.h>
+#define IMONITOR_TA_CRASH_EVENT_ID 901002003
+#define IMONITOR_MEMSTAT_EVENT_ID 940007001
+#define IMONITOR_TAMEMSTAT_EVENT_ID 940007002
+#endif
+
+#include "tc_ns_log.h"
+#include "smc_smp.h"
+#include "internal_functions.h"
+#include "mailbox_mempool.h"
+#include "tlogger.h"
+#include "log_cfg_api.h"
+#include "tui.h"
+
+static int g_cmd_need_archivelog;
+static LIST_HEAD(g_cmd_monitor_list);
+static int g_cmd_monitor_list_size;
+/* report every 2 hours */
+static const long long g_memstat_report_freq = 2 * 60 * 60 * 1000;
+#define MAX_CMD_MONITOR_LIST 200
+#define MAX_AGENT_CALL_COUNT 5000
+static DEFINE_MUTEX(g_cmd_monitor_lock);
+
+/* independent wq to avoid blocking system_wq */
+static struct workqueue_struct *g_cmd_monitor_wq;
+static struct delayed_work g_cmd_monitor_work;
+static struct delayed_work g_cmd_monitor_work_archive;
+static struct delayed_work g_mem_stat;
+static int g_tee_detect_ta_crash;
+static struct tc_uuid g_crashed_ta_uuid;
+
+void get_time_spec(struct time_spec *time)
+{
+ if (!time)
+ return;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
+ time->ts = current_kernel_time();
+#else
+ ktime_get_coarse_ts64(&time->ts);
+#endif
+}
+
+static void schedule_memstat_work(struct delayed_work *work,
+ unsigned long delay)
+{
+ schedule_delayed_work(work, delay);
+}
+
+static void schedule_cmd_monitor_work(struct delayed_work *work,
+ unsigned long delay)
+{
+ if (g_cmd_monitor_wq)
+ queue_delayed_work(g_cmd_monitor_wq, work, delay);
+ else
+ schedule_delayed_work(work, delay);
+}
+
+void tzdebug_memstat(void)
+{
+ schedule_memstat_work(&g_mem_stat, usecs_to_jiffies(S_TO_MS));
+}
+
+void tzdebug_archivelog(void)
+{
+ schedule_cmd_monitor_work(&g_cmd_monitor_work_archive,
+ usecs_to_jiffies(0));
+}
+
+void cmd_monitor_ta_crash(int32_t type, const uint8_t *ta_uuid, uint32_t uuid_len)
+{
+ g_tee_detect_ta_crash = type;
+ if (g_tee_detect_ta_crash != TYPE_CRASH_TEE &&
+ ta_uuid != NULL && uuid_len == sizeof(struct tc_uuid))
+ (void)memcpy_s(&g_crashed_ta_uuid, sizeof(g_crashed_ta_uuid),
+ ta_uuid, uuid_len);
+ tzdebug_archivelog();
+ fault_monitor_start(type);
+}
+
+static int get_pid_name(pid_t pid, char *comm, size_t size)
+{
+ struct task_struct *task = NULL;
+ int sret;
+
+ if (size <= TASK_COMM_LEN - 1 || !comm)
+ return -1;
+
+ rcu_read_lock();
+
+#ifndef CONFIG_TZDRIVER_MODULE
+ task = find_task_by_vpid(pid);
+#else
+ task = pid_task(find_vpid(pid), PIDTYPE_PID);
+#endif
+ if (task)
+ get_task_struct(task);
+ rcu_read_unlock();
+ if (!task) {
+ tloge("get task failed\n");
+ return -1;
+ }
+
+ sret = strncpy_s(comm, size, task->comm, strlen(task->comm));
+ if (sret != 0)
+ tloge("strncpy failed: errno = %d\n", sret);
+ put_task_struct(task);
+
+ return sret;
+}
+
+bool is_thread_reported(pid_t tid)
+{
+ bool ret = false;
+ struct cmd_monitor *monitor = NULL;
+
+ mutex_lock(&g_cmd_monitor_lock);
+ list_for_each_entry(monitor, &g_cmd_monitor_list, list) {
+ if (monitor->tid == tid && !is_tui_in_use(monitor->tid)) {
+ ret = (monitor->is_reported ||
+ monitor->agent_call_count >
+ MAX_AGENT_CALL_COUNT);
+ break;
+ }
+ }
+ mutex_unlock(&g_cmd_monitor_lock);
+ return ret;
+}
+
+#ifdef CONFIG_TEE_LOG_EXCEPTION
+#define FAIL_RET (-1)
+#define SUCC_RET 0
+
+static int send_memstat_packet(const struct tee_mem *meminfo)
+{
+ struct imonitor_eventobj *memstat = NULL;
+ uint32_t result = 0;
+ struct time_spec nowtime;
+ int ret;
+ get_time_spec(&nowtime);
+
+ memstat = imonitor_create_eventobj(IMONITOR_MEMSTAT_EVENT_ID);
+ if (!memstat) {
+ tloge("create eventobj failed\n");
+ return FAIL_RET;
+ }
+
+ result |= (uint32_t)imonitor_set_param_integer_v2(memstat, "totalmem", meminfo->total_mem);
+ result |= (uint32_t)imonitor_set_param_integer_v2(memstat, "mem", meminfo->pmem);
+ result |= (uint32_t)imonitor_set_param_integer_v2(memstat, "freemem", meminfo->free_mem);
+ result |= (uint32_t)imonitor_set_param_integer_v2(memstat, "freememmin", meminfo->free_mem_min);
+ result |= (uint32_t)imonitor_set_param_integer_v2(memstat, "tanum", meminfo->ta_num);
+ result |= (uint32_t)imonitor_set_time(memstat, nowtime.ts.tv_sec);
+ if (result) {
+ tloge("set param integer1 failed ret=%u\n", result);
+ imonitor_destroy_eventobj(memstat);
+ return FAIL_RET;
+ }
+
+ ret = imonitor_send_event(memstat);
+ imonitor_destroy_eventobj(memstat);
+ if (ret <= 0) {
+ tloge("imonitor send memstat packet failed\n");
+ return FAIL_RET;
+ }
+ return SUCC_RET;
+}
+
+void report_imonitor(const struct tee_mem *meminfo)
+{
+ int ret;
+ uint32_t result = 0;
+ uint32_t i;
+ struct imonitor_eventobj *pamemobj = NULL;
+ struct time_spec nowtime;
+ get_time_spec(&nowtime);
+
+ if (!meminfo)
+ return;
+
+ if (meminfo->ta_num > MEMINFO_TA_MAX)
+ return;
+
+ if (send_memstat_packet(meminfo))
+ return;
+
+ for (i = 0; i < meminfo->ta_num; i++) {
+ pamemobj = imonitor_create_eventobj(IMONITOR_TAMEMSTAT_EVENT_ID);
+ if (!pamemobj) {
+ tloge("create obj failed\n");
+ break;
+ }
+
+ result |= (uint32_t)imonitor_set_param_string_v2(pamemobj, "NAME", meminfo->ta_mem_info[i].ta_name);
+ result |= (uint32_t)imonitor_set_param_integer_v2(pamemobj, "MEM", meminfo->ta_mem_info[i].pmem);
+ result |= (uint32_t)imonitor_set_param_integer_v2(pamemobj, "MEMMAX", meminfo->ta_mem_info[i].pmem_max);
+ result |= (uint32_t)imonitor_set_param_integer_v2(pamemobj, "MEMLIMIT", meminfo->ta_mem_info[i].pmem_limit);
+ result |= (uint32_t)imonitor_set_time(pamemobj, nowtime.ts.tv_sec);
+ if (result) {
+ tloge("set param integer2 failed ret=%d\n", result);
+ imonitor_destroy_eventobj(pamemobj);
+ return;
+ }
+ ret = imonitor_send_event(pamemobj);
+ imonitor_destroy_eventobj(pamemobj);
+ if (ret <= 0) {
+ tloge("imonitor send pamem packet failed\n");
+ break;
+ }
+ }
+}
+#endif
+
+static void memstat_report(void)
+{
+ int ret;
+ struct tee_mem *meminfo = NULL;
+
+ meminfo = mailbox_alloc(sizeof(*meminfo), MB_FLAG_ZERO);
+ if (!meminfo) {
+ tloge("mailbox alloc failed\n");
+ return;
+ }
+
+ ret = get_tee_meminfo(meminfo);
+#ifdef CONFIG_TEE_LOG_EXCEPTION
+ if (ret == 0) {
+ tlogd("report imonitor\n");
+ report_imonitor(meminfo);
+ }
+#endif
+ if (ret != 0)
+ tlogd("get meminfo failed\n");
+
+ mailbox_free(meminfo);
+}
+
+static void memstat_work(struct work_struct *work)
+{
+ (void)(work);
+ memstat_report();
+}
+
+void cmd_monitor_reset_context(void)
+{
+ struct cmd_monitor *monitor = NULL;
+ pid_t pid = current->tgid;
+ pid_t tid = current->pid;
+
+ mutex_lock(&g_cmd_monitor_lock);
+ list_for_each_entry(monitor, &g_cmd_monitor_list, list) {
+ if (monitor->pid == pid && monitor->tid == tid) {
+ get_time_spec(&monitor->sendtime);
+ if (monitor->agent_call_count + 1 < 0)
+ tloge("agent call count add overflow\n");
+ else
+ monitor->agent_call_count++;
+ break;
+ }
+ }
+ mutex_unlock(&g_cmd_monitor_lock);
+}
+
+#ifdef CONFIG_TEE_LOG_EXCEPTION
+static struct time_spec g_memstat_check_time;
+static bool g_after_loader = false;
+
+static void auto_report_memstat(void)
+{
+ long long timedif;
+ struct time_spec nowtime;
+ get_time_spec(&nowtime);
+
+ /*
+ * get the time difference (timedif = nowtime - sendtime);
+ * we do not care about overflow: 1 year in ms is
+ * 1000 * (60*60*24*365) = 0x757B12C00, only 5 bytes,
+ * so it will not overflow
+ */
+ timedif = S_TO_MS * (nowtime.ts.tv_sec - g_memstat_check_time.ts.tv_sec) +
+ (nowtime.ts.tv_nsec - g_memstat_check_time.ts.tv_nsec) / S_TO_US;
+ if (timedif > g_memstat_report_freq && g_after_loader) {
+ tlogi("cmdmonitor auto report memstat\n");
+ memstat_report();
+ g_memstat_check_time = nowtime;
+ }
+
+ if (!g_after_loader) {
+ g_memstat_check_time = nowtime;
+ g_after_loader = true;
+ }
+}
+#endif
+
+/*
+ * if one session times out, the monitor prints timedifs at the step[n]
+ * intervals (in seconds); once it has lasted more than 360s, it prints
+ * timedifs every 360s.
+ */
+const int32_t g_timer_step[] = {1, 1, 1, 2, 5, 10, 40, 120, 180, 360};
+const int32_t g_timer_nums = sizeof(g_timer_step) / sizeof(int32_t);
+static void show_timeout_cmd_info(struct cmd_monitor *monitor)
+{
+ long long timedif, timedif2;
+ struct time_spec nowtime;
+ int32_t time_in_sec;
+ get_time_spec(&nowtime);
+
+ /*
+ * 1 year in ms is 1000 * (60*60*24*365) = 0x757B12C00, only 5 bytes,
+ * so timedif (timedif = nowtime - sendtime) will not overflow
+ */
+ timedif = S_TO_MS * (nowtime.ts.tv_sec - monitor->sendtime.ts.tv_sec) +
+ (nowtime.ts.tv_nsec - monitor->sendtime.ts.tv_nsec) / S_TO_US;
+
+ /* after a 10s timeout, dump the teeos log and report */
+ if ((timedif > CMD_MAX_EXECUTE_TIME * S_TO_MS) && (!monitor->is_reported)) {
+ monitor->is_reported = true;
+ tloge("[cmd_monitor_tick] pid=%d,pname=%s,tid=%d, "
+ "tname=%s, lastcmdid=%u, agent call count:%d, "
+ "running with timedif=%lld ms and report\n",
+ monitor->pid, monitor->pname, monitor->tid,
+ monitor->tname, monitor->lastcmdid,
+ monitor->agent_call_count, timedif);
+ /* threads outside the whitelist need an info dump */
+ tloge("monitor: pid-%d\n", monitor->pid);
+ if (!is_tui_in_use(monitor->tid)) {
+ show_cmd_bitmap();
+ g_cmd_need_archivelog = 1;
+ wakeup_tc_siq(SIQ_DUMP_TIMEOUT);
+ }
+ }
+
+ timedif2 = S_TO_MS * (nowtime.ts.tv_sec - monitor->lasttime.ts.tv_sec) +
+ (nowtime.ts.tv_nsec - monitor->lasttime.ts.tv_nsec) / S_TO_US;
+ time_in_sec = monitor->timer_index >= g_timer_nums ?
+ g_timer_step[g_timer_nums - 1] : g_timer_step[monitor->timer_index];
+ if (timedif2 > time_in_sec * S_TO_MS) {
+ monitor->lasttime = nowtime;
+ monitor->timer_index = monitor->timer_index >= g_timer_nums ?
+ g_timer_nums : (monitor->timer_index + 1);
+ tlogi("[cmd_monitor_tick] pid=%d,pname=%s,tid=%d, "
+ "lastcmdid=%u,agent call count:%d,timedif=%lld ms\n",
+ monitor->pid, monitor->pname, monitor->tid,
+ monitor->lastcmdid, monitor->agent_call_count,
+ timedif);
+ }
+}
+
+static void cmd_monitor_tick(void)
+{
+ struct cmd_monitor *monitor = NULL;
+ struct cmd_monitor *tmp = NULL;
+
+ mutex_lock(&g_cmd_monitor_lock);
+ list_for_each_entry_safe(monitor, tmp, &g_cmd_monitor_list, list) {
+ if (monitor->returned) {
+ g_cmd_monitor_list_size--;
+ tlogd("[cmd_monitor_tick] pid=%d,pname=%s,tid=%d, "
+ "tname=%s,lastcmdid=%u,count=%d,agent call count=%d, "
+ "timetotal=%lld us returned, remained command(s)=%d\n",
+ monitor->pid, monitor->pname, monitor->tid, monitor->tname,
+ monitor->lastcmdid, monitor->count, monitor->agent_call_count,
+ monitor->timetotal, g_cmd_monitor_list_size);
+ list_del(&monitor->list);
+ kfree(monitor);
+ continue;
+ }
+ show_timeout_cmd_info(monitor);
+ }
+
+ /* if there are still cmds in the monitor list, keep ticking */
+ if (g_cmd_monitor_list_size > 0)
+ schedule_cmd_monitor_work(&g_cmd_monitor_work, usecs_to_jiffies(S_TO_US));
+ mutex_unlock(&g_cmd_monitor_lock);
+#ifdef CONFIG_TEE_LOG_EXCEPTION
+ auto_report_memstat();
+#endif
+}
+
+static void cmd_monitor_tickfn(struct work_struct *work)
+{
+ (void)(work);
+ cmd_monitor_tick();
+ /* check whether tlogcat has new logs */
+ tz_log_write();
+}
+
+#define MAX_CRASH_INFO_LEN 100
+static void cmd_monitor_archivefn(struct work_struct *work)
+{
+ (void)(work);
+
+ if (tlogger_store_msg(CONFIG_TEE_LOG_ACHIVE_PATH,
+ sizeof(CONFIG_TEE_LOG_ACHIVE_PATH)) < 0)
+ tloge("[cmd_monitor_tick]tlogger store lastmsg failed\n");
+
+ if (g_tee_detect_ta_crash == TYPE_CRASH_TEE) {
+ tloge("detect teeos crash, panic\n");
+ report_log_system_panic();
+ } else if (g_tee_detect_ta_crash == TYPE_CRASH_TA ||
+ g_tee_detect_ta_crash == TYPE_KILLED_TA) {
+#ifdef CONFIG_TEE_LOG_EXCEPTION
+ const char crash_prefix[] = "ta crash: ";
+ const char killed_prefix[] = "ta timeout and killed: ";
+ const char crash_info_get_failed[] = "ta crash: get uuid failed";
+ char buffer[MAX_CRASH_INFO_LEN] = {0};
+ const char *crash_info = buffer;
+ int ret = snprintf_s(buffer, sizeof(buffer), sizeof(buffer) - 1,
+ "%s%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
+ (g_tee_detect_ta_crash == TYPE_CRASH_TA) ? crash_prefix : killed_prefix,
+ g_crashed_ta_uuid.time_low, g_crashed_ta_uuid.time_mid,
+ g_crashed_ta_uuid.timehi_and_version,
+ g_crashed_ta_uuid.clockseq_and_node[0], g_crashed_ta_uuid.clockseq_and_node[1],
+ g_crashed_ta_uuid.clockseq_and_node[2], g_crashed_ta_uuid.clockseq_and_node[3],
+ g_crashed_ta_uuid.clockseq_and_node[4], g_crashed_ta_uuid.clockseq_and_node[5],
+ g_crashed_ta_uuid.clockseq_and_node[6], g_crashed_ta_uuid.clockseq_and_node[7]);
+ if (ret <= 0) {
+ tlogw("append crash info failed\n");
+ crash_info = crash_info_get_failed;
+ }
+ if (teeos_log_exception_archive(IMONITOR_TA_CRASH_EVENT_ID, crash_info) < 0)
+ tloge("log exception archive failed\n");
+ (void)memset_s(&g_crashed_ta_uuid, sizeof(g_crashed_ta_uuid), 0, sizeof(g_crashed_ta_uuid));
+#endif
+ }
+
+ g_tee_detect_ta_crash = 0;
+}
+
+static struct cmd_monitor *init_monitor_locked(void)
+{
+ struct cmd_monitor *newitem = NULL;
+
+ newitem = kzalloc(sizeof(*newitem), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)newitem)) {
+ tloge("[cmd_monitor_tick]kzalloc failed\n");
+ return NULL;
+ }
+
+ get_time_spec(&newitem->sendtime);
+ newitem->lasttime = newitem->sendtime;
+ newitem->timer_index = 0;
+ newitem->count = 1;
+ newitem->agent_call_count = 0;
+ newitem->returned = false;
+ newitem->is_reported = false;
+ newitem->pid = current->tgid;
+ newitem->tid = current->pid;
+ if (get_pid_name(newitem->pid, newitem->pname,
+ sizeof(newitem->pname)) != 0)
+ newitem->pname[0] = '\0';
+ if (get_pid_name(newitem->tid, newitem->tname,
+ sizeof(newitem->tname)) != 0)
+ newitem->tname[0] = '\0';
+ INIT_LIST_HEAD(&newitem->list);
+ list_add_tail(&newitem->list, &g_cmd_monitor_list);
+ g_cmd_monitor_list_size++;
+ return newitem;
+}
+
+struct cmd_monitor *cmd_monitor_log(const struct tc_ns_smc_cmd *cmd)
+{
+ bool found_flag = false;
+ pid_t pid;
+ pid_t tid;
+ struct cmd_monitor *monitor = NULL;
+
+ if (!cmd)
+ return NULL;
+
+ pid = current->tgid;
+ tid = current->pid;
+ mutex_lock(&g_cmd_monitor_lock);
+ do {
+ list_for_each_entry(monitor, &g_cmd_monitor_list, list) {
+ if (monitor->pid == pid && monitor->tid == tid) {
+ found_flag = true;
+ /* restart */
+ get_time_spec(&monitor->sendtime);
+ monitor->lasttime = monitor->sendtime;
+ monitor->timer_index = 0;
+ monitor->count++;
+ monitor->returned = false;
+ monitor->is_reported = false;
+ monitor->lastcmdid = cmd->cmd_id;
+ monitor->agent_call_count = 0;
+ monitor->timetotal = 0;
+ break;
+ }
+ }
+
+ if (!found_flag) {
+#ifndef CONFIG_BIG_SESSION
+ if (g_cmd_monitor_list_size > MAX_CMD_MONITOR_LIST - 1) {
+ tloge("monitor reach max node num\n");
+ monitor = NULL;
+ break;
+ }
+#endif
+ monitor = init_monitor_locked();
+ if (!monitor) {
+ tloge("init monitor failed\n");
+ break;
+ }
+ monitor->lastcmdid = cmd->cmd_id;
+ /* the first cmd in the list starts the tick timer */
+ if (g_cmd_monitor_list_size == 1)
+ schedule_cmd_monitor_work(&g_cmd_monitor_work,
+ usecs_to_jiffies(S_TO_US));
+ }
+ } while (0);
+ mutex_unlock(&g_cmd_monitor_lock);
+
+ return monitor;
+}
+
+void cmd_monitor_logend(struct cmd_monitor *item)
+{
+ struct time_spec nowtime;
+ long long timedif;
+
+ if (!item)
+ return;
+
+ get_time_spec(&nowtime);
+ /*
+ * get the time difference (timedif = nowtime - sendtime) in
+ * microseconds; we do not care about overflow: 1 year in us is
+ * 1000000 * (60*60*24*365) = 0x1CAE8C13E000, only 6 bytes,
+ * so it will not overflow
+ */
+ timedif = S_TO_US * (nowtime.ts.tv_sec - item->sendtime.ts.tv_sec) +
+ (nowtime.ts.tv_nsec - item->sendtime.ts.tv_nsec) / S_TO_MS;
+ item->timetotal += timedif;
+ item->returned = true;
+}
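+
+/*
+ * Typical call pattern (illustrative): the SMC dispatch path brackets a
+ * command with the monitor so a hung TEE call shows up in the tick log;
+ * 'smc_cmd' and the send step are placeholders for the real dispatch code.
+ *
+ * struct cmd_monitor *item = cmd_monitor_log(smc_cmd);
+ * // ... send the command to the TEE and wait for its answer ...
+ * cmd_monitor_logend(item);
+ */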
+
+void do_cmd_need_archivelog(void)
+{
+ if (g_cmd_need_archivelog == 1) {
+ g_cmd_need_archivelog = 0;
+ schedule_cmd_monitor_work(&g_cmd_monitor_work_archive,
+ usecs_to_jiffies(S_TO_US));
+ }
+}
+
+void init_cmd_monitor(void)
+{
+ g_cmd_monitor_wq = alloc_workqueue("tz_cmd_monitor_wq",
+ WQ_UNBOUND, TZ_WQ_MAX_ACTIVE);
+ if (!g_cmd_monitor_wq)
+ tloge("alloc cmd monitor wq failed\n");
+ else
+ tz_workqueue_bind_mask(g_cmd_monitor_wq, 0);
+
+ INIT_DEFERRABLE_WORK((struct delayed_work *)
+ (uintptr_t)&g_cmd_monitor_work, cmd_monitor_tickfn);
+ INIT_DEFERRABLE_WORK((struct delayed_work *)
+ (uintptr_t)&g_cmd_monitor_work_archive, cmd_monitor_archivefn);
+ INIT_DEFERRABLE_WORK((struct delayed_work *)
+ (uintptr_t)&g_mem_stat, memstat_work);
+}
+
+void free_cmd_monitor(void)
+{
+ struct cmd_monitor *monitor = NULL;
+ struct cmd_monitor *tmp = NULL;
+
+ mutex_lock(&g_cmd_monitor_lock);
+ list_for_each_entry_safe(monitor, tmp, &g_cmd_monitor_list, list) {
+ list_del(&monitor->list);
+ kfree(monitor);
+ }
+ mutex_unlock(&g_cmd_monitor_lock);
+
+ flush_delayed_work(&g_cmd_monitor_work);
+ flush_delayed_work(&g_cmd_monitor_work_archive);
+ flush_delayed_work(&g_mem_stat);
+ if (g_cmd_monitor_wq) {
+ flush_workqueue(g_cmd_monitor_wq);
+ destroy_workqueue(g_cmd_monitor_wq);
+ g_cmd_monitor_wq = NULL;
+ }
+}
diff --git a/tzdriver/core/cmdmonitor.h b/tzdriver/core/cmdmonitor.h
new file mode 100644
index 0000000000000000000000000000000000000000..cab0bfc631ce1c291e17ab4bcb308a2106cd072c
--- /dev/null
+++ b/tzdriver/core/cmdmonitor.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: cmdmonitor function declarations
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef CMD_MONITOR_H
+#define CMD_MONITOR_H
+
+#include "tzdebug.h"
+#include "teek_ns_client.h"
+#include "smc_smp.h"
+#include <linux/version.h>
+
+#if (KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE)
+#define TASK_COMM_LEN 16
+#endif
+
+enum {
+ TYPE_CRASH_TEE = 1,
+ TYPE_CRASH_TA = 2,
+ TYPE_KILLED_TA = 3,
+};
+
+/*
+ * when a cmd executes in the TEE for more than CMD_MAX_EXECUTE_TIME
+ * seconds it is reported; the cmd is terminated when the CA is killed
+ */
+#define CMD_MAX_EXECUTE_TIME 10U
+#define S_TO_MS 1000
+#define S_TO_US 1000000
+
+struct time_spec {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
+ struct timespec ts;
+#else
+ struct timespec64 ts;
+#endif
+};
+
+struct cmd_monitor {
+ struct list_head list;
+ struct time_spec sendtime;
+ struct time_spec lasttime;
+ int32_t timer_index;
+ int count;
+ bool returned;
+ bool is_reported;
+ pid_t pid;
+ pid_t tid;
+ char pname[TASK_COMM_LEN];
+ char tname[TASK_COMM_LEN];
+ unsigned int lastcmdid;
+ long long timetotal;
+ int agent_call_count;
+};
+
+struct cmd_monitor *cmd_monitor_log(const struct tc_ns_smc_cmd *cmd);
+void cmd_monitor_reset_context(void);
+void cmd_monitor_logend(struct cmd_monitor *item);
+void init_cmd_monitor(void);
+void free_cmd_monitor(void);
+void do_cmd_need_archivelog(void);
+bool is_thread_reported(pid_t tid);
+void tzdebug_archivelog(void);
+void cmd_monitor_ta_crash(int32_t type, const uint8_t *ta_uuid, uint32_t uuid_len);
+void tzdebug_memstat(void);
+void get_time_spec(struct time_spec *time);
+#endif
diff --git a/tzdriver/core/ffa_abi.c b/tzdriver/core/ffa_abi.c
new file mode 100644
index 0000000000000000000000000000000000000000..ebe6c631be877915f14bd5acc6edc664ae736711
--- /dev/null
+++ b/tzdriver/core/ffa_abi.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: functions for FF-A settings
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/arm_ffa.h>
+#include "ffa_abi.h"
+#include "teek_ns_client.h"
+#include "tz_pm.h"
+#include "smc_call.h"
+
+const struct ffa_ops *g_ffa_ops = NULL;
+struct ffa_device *g_ffa_dev = NULL;
+
+static void ffa_remove(struct ffa_device *ffa_dev)
+{
+ tlogd("stub remove ffa driver!\n");
+}
+
+static int ffa_probe(struct ffa_device *ffa_dev)
+{
+ g_ffa_ops = ffa_dev->ops;
+ g_ffa_dev = ffa_dev;
+ if (!g_ffa_ops) {
+ tloge("failed to get ffa_ops!\n");
+ return -ENOENT;
+ }
+
+ g_ffa_ops->mode_32bit_set(ffa_dev);
+ return 0;
+}
+
+/* two SP UUIDs can be the same */
+const struct ffa_device_id tz_ffa_device_id[] = {
+ /* uuid = <0xe0786148 0xe311f8e7 0x02005ebc 0x1bc5d5a5> */
+ {0x48, 0x61, 0x78, 0xe0, 0xe7, 0xf8, 0x11, 0xe3, 0xbc, 0x5e, 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b},
+ {}
+};
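+
+/*
+ * Note: each 32-bit word of the UUID above is stored little-endian in the
+ * byte array, e.g. 0xe0786148 becomes {0x48, 0x61, 0x78, 0xe0}.
+ */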
+
+static struct ffa_driver tz_ffa_driver = {
+ .name = "iTrustee",
+ .probe = ffa_probe,
+ .remove = ffa_remove,
+ .id_table = tz_ffa_device_id,
+};
+
+int ffa_abi_register(void)
+{
+ return ffa_register(&tz_ffa_driver);
+}
+
+void ffa_abi_unregister(void)
+{
+ ffa_unregister(&tz_ffa_driver);
+}
+
+void smc_req(struct smc_in_params *in, struct smc_out_params *out, uint8_t wait)
+{
+ ffa_forward_call(in, out, wait);
+}
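+
+/*
+ * Calling convention sketch (illustrative): callers put the SMC function
+ * id in x0 and its arguments in x1-x4 of struct smc_in_params (the fields
+ * used by the convert_* helpers below), then issue the request:
+ *
+ * struct smc_in_params in = { .x0 = smc_id, .x1 = arg0 }; // hypothetical values
+ * struct smc_out_params out = { 0 };
+ * smc_req(&in, &out, 0);
+ */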
+
+static void convert_smc_param_to_ffa_param(struct smc_in_params *in_param, struct ffa_send_direct_data *ffa_param)
+{
+ ffa_param->data0 = in_param->x1;
+ ffa_param->data1 = in_param->x2;
+ ffa_param->data2 = in_param->x3;
+ ffa_param->data3 = in_param->x4;
+ /* x0 (smc id) must be passed through so the TEE can handle it directly */
+ ffa_param->data4 = in_param->x0;
+}
+
+static void convert_ffa_param_to_smc_param(struct ffa_send_direct_data *ffa_param, struct smc_out_params *out_param)
+{
+ out_param->ret = ffa_param->data4;
+ out_param->exit_reason = ffa_param->data0;
+ out_param->ta = ffa_param->data1;
+ out_param->target = ffa_param->data2;
+}
+
+int ffa_forward_call(struct smc_in_params *in_param, struct smc_out_params *out_param, uint8_t wait)
+{
+ if (in_param == NULL || out_param == NULL) {
+ tloge("invalid parameter ffa forward!\n");
+ return -1;
+ }
+
+ int ret;
+ struct ffa_send_direct_data ffa_param = {};
+ convert_smc_param_to_ffa_param(in_param, &ffa_param);
+
+ do {
+ ret = g_ffa_ops->sync_send_receive(g_ffa_dev, &ffa_param);
+ convert_ffa_param_to_smc_param(&ffa_param, out_param);
+ } while (out_param->ret == TSP_REQUEST && wait != 0);
+
+ if (ret != 0)
+ tloge("failed to call! ret is %d\n", ret);
+ return ret;
+}
\ No newline at end of file
diff --git a/tzdriver/core/ffa_abi.h b/tzdriver/core/ffa_abi.h
new file mode 100644
index 0000000000000000000000000000000000000000..05047f661fa1e6d4d59c970e7c0d865f583d5da7
--- /dev/null
+++ b/tzdriver/core/ffa_abi.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: declarations for FF-A functions and useful macros
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef FFA_ABI_H
+#define FFA_ABI_H
+
+#include <linux/bits.h>
+#include "smc_smp.h"
+#include "smc_call.h"
+/*
+ * Normal world sends requests with FFA_MSG_SEND_DIRECT_REQ and
+ * responses are returned with FFA_MSG_SEND_DIRECT_RESP for normal
+ * messages.
+ *
+ * All requests with FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP
+ * are using the AArch32 SMC calling convention with register usage as
+ * defined in FF-A specification:
+ * w0: Function ID (0x8400006F or 0x84000070)
+ * w1: Source/Destination IDs
+ * w2: Reserved (MBZ)
+ * w3-w7: Implementation defined, free to be used below
+ */
+
+#define TZ_FFA_VERSION_MAJOR 1
+#define TZ_FFA_VERSION_MINOR 0
+
+#define TZ_FFA_BLOCKING_CALL(id) (id)
+#define TZ_FFA_YIELDING_CALL_BIT 31
+#define TZ_FFA_YIELDING_CALL(id) ((id) | BIT(TZ_FFA_YIELDING_CALL_BIT))
+
+/*
+ * Returns the API version implemented, currently follows the FF-A version.
+ * Call register usage:
+ * w3: Service ID, TZ_FFA_GET_API_VERSION
+ * w4-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: TZ_FFA_VERSION_MAJOR
+ * w4: TZ_FFA_VERSION_MINOR
+ * w5-w7: Not used (MBZ)
+ */
+#define TZ_FFA_GET_API_VERSION TZ_FFA_BLOCKING_CALL(0)
+
+/*
+ * Returns the revision of iTrustee
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Call register usage:
+ * w3: Service ID, TZ_FFA_GET_OS_VERSION
+ * w4-w7: Unused (MBZ)
+ *
+ * Return register usage:
+ * w3: CFG_TZ_REVISION_MAJOR
+ * w4: CFG_TZ_REVISION_MINOR
+ * w5: TEE_IMPL_GIT_SHA1 (or zero if not supported)
+ */
+#define TZ_FFA_GET_OS_VERSION TZ_FFA_BLOCKING_CALL(1)
+
+/*
+ * Exchange capabilities between normal world and secure world.
+ *
+ * Currently, there are no defined capabilities. When features are added new
+ * capabilities may be added.
+ *
+ * Call register usage:
+ * w3: Service ID, TZ_FFA_EXCHANGE_CAPABILITIES
+ * w4-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: Error code, 0 on success
+ * w4: Bit[7:0]: Number of parameters needed for RPC to be supplied
+ * as the second MSG arg struct for
+ * TZ_FFA_YIELDING_CALL_WITH_ARG.
+ * Bit[31:8]: Reserved (MBZ)
+ * w5-w7: Not used (MBZ)
+ */
+#define TZ_FFA_EXCHANGE_CAPABILITIES TZ_FFA_BLOCKING_CALL(2)
+
+/*
+ * Unregister shared memory
+ *
+ * Call register usage:
+ * w3: Service ID, TZ_FFA_YIELDING_CALL_UNREGISTER_SHM
+ * w4: Shared memory handle, lower bits
+ * w5: Shared memory handle, higher bits
+ * w6-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: Error code, 0 on success
+ * w4-w7: Not used (MBZ)
+ */
+#define TZ_FFA_UNREGISTER_SHM TZ_FFA_BLOCKING_CALL(3)
+
+/*
+ * Call with struct TZ_msg_arg as argument in the supplied shared memory
+ * with a zero internal offset and normal cached memory attributes
+ * Register usage:
+ * w3: Service ID, TZ_FFA_YIELDING_CALL_WITH_ARG
+ * w4: Lower 32 bits of a 64-bit Shared memory handle
+ * w5: Upper 32 bits of a 64-bit Shared memory handle
+ * w6: Offset into shared memory pointing to a struct TZ_msg_arg
+ * right after the parameters of this struct (at offset
+ * TZ_MSG_GET_ARG_SIZE(num_params) follows a struct TZ_msg_arg
+ * for RPC, this struct has reserved space for the number of RPC
+ * parameters as returned by TZ_FFA_EXCHANGE_CAPABILITIES.
+ * w7: Not used (MBZ)
+ * Resume from RPC. Register usage:
+ * w3: Service ID, TZ_FFA_YIELDING_CALL_RESUME
+ * w4-w6: Not used (MBZ)
+ * w7: Resume info
+ *
+ * Normal return (yielding call is completed). Register usage:
+ * w3: Error code, 0 on success
+ * w4: TZ_FFA_YIELDING_CALL_RETURN_DONE
+ * w5-w7: Not used (MBZ)
+ *
+ * RPC interrupt return (RPC from secure world). Register usage:
+ * w3: Error code == 0
+ * w4: Any defined RPC code but TZ_FFA_YIELDING_CALL_RETURN_DONE
+ * w5-w6: Not used (MBZ)
+ * w7: Resume info
+ *
+ * Possible error codes in register w3:
+ * 0: Success
+ * FFA_DENIED: w4 isn't one of TZ_FFA_YIELDING_CALL_START
+ * TZ_FFA_YIELDING_CALL_RESUME
+ *
+ * Possible error codes for TZ_FFA_YIELDING_CALL_START
+ * FFA_BUSY: Number of OP-TEE OS threads exceeded,
+ * try again later
+ * FFA_DENIED: RPC shared memory object not found
+ * FFA_INVALID_PARAMETER: Bad shared memory handle or offset into the memory
+ *
+ * Possible error codes for TZ_FFA_YIELDING_CALL_RESUME
+ * FFA_INVALID_PARAMETER: Bad resume info
+ */
+#define TZ_FFA_YIELDING_CALL_WITH_ARG TZ_FFA_YIELDING_CALL(0)
+#define TZ_FFA_YIELDING_CALL_RESUME TZ_FFA_YIELDING_CALL(1)
+
+#define TZ_FFA_YIELDING_CALL_RETURN_DONE 0
+#define TZ_FFA_YIELDING_CALL_RETURN_RPC_CMD 1
+#define TZ_FFA_YIELDING_CALL_RETURN_INTERRUPT 2
+
+int ffa_abi_register(void);
+void ffa_abi_unregister(void);
+int ffa_forward_call(struct smc_in_params *in_param, struct smc_out_params *out_param, uint8_t wait);
+
+#endif
\ No newline at end of file
diff --git a/tzdriver/core/gp_ops.c b/tzdriver/core/gp_ops.c
new file mode 100644
index 0000000000000000000000000000000000000000..54b0d60ef08a436a64a58bc67d67d3cc82a3bf62
--- /dev/null
+++ b/tzdriver/core/gp_ops.c
@@ -0,0 +1,1303 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: alloc global operation and pass params to the TEE.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "gp_ops.h"
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <securec.h>
+#include "teek_client_constants.h"
+#include "tc_ns_client.h"
+#include "agent.h"
+#include "tc_ns_log.h"
+#include "smc_smp.h"
+#include "mem.h"
+#include "mailbox_mempool.h"
+#include "tc_client_driver.h"
+#include "internal_functions.h"
+#include "reserved_mempool.h"
+#include "tlogger.h"
+#include "dynamic_ion_mem.h"
+
+#define MAX_SHARED_SIZE 0x100000 /* 1 MiB */
+
+static void free_operation(const struct tc_call_params *call_params,
+ struct tc_op_params *op_params);
+
+/* dir: 0 - include input, 1 - include output, 2 - both */
+#define INPUT 0
+#define OUTPUT 1
+#define INOUT 2
+
+static inline bool is_input_type(int dir)
+{
+ if (dir == INPUT || dir == INOUT)
+ return true;
+
+ return false;
+}
+
+static inline bool is_output_type(int dir)
+{
+ if (dir == OUTPUT || dir == INOUT)
+ return true;
+
+ return false;
+}
+
+static inline bool teec_value_type(unsigned int type, int dir)
+{
+ return ((is_input_type(dir) && type == TEEC_VALUE_INPUT) ||
+ (is_output_type(dir) && type == TEEC_VALUE_OUTPUT) ||
+ type == TEEC_VALUE_INOUT) ? true : false;
+}
+
+static inline bool teec_tmpmem_type(unsigned int type, int dir)
+{
+ return ((is_input_type(dir) && type == TEEC_MEMREF_TEMP_INPUT) ||
+ (is_output_type(dir) && type == TEEC_MEMREF_TEMP_OUTPUT) ||
+ type == TEEC_MEMREF_TEMP_INOUT) ? true : false;
+}
+
+static inline bool teec_memref_type(unsigned int type, int dir)
+{
+ return ((is_input_type(dir) && type == TEEC_MEMREF_PARTIAL_INPUT) ||
+ (is_output_type(dir) && type == TEEC_MEMREF_PARTIAL_OUTPUT) ||
+ type == TEEC_MEMREF_PARTIAL_INOUT) ? true : false;
+}
+
+static int check_user_param(const struct tc_ns_client_context *client_context,
+ unsigned int index)
+{
+ if (!client_context) {
+ tloge("client_context is null\n");
+ return -EINVAL;
+ }
+
+ if (index >= PARAM_NUM) {
+ tloge("index is invalid, index:%x\n", index);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+bool is_tmp_mem(uint32_t param_type)
+{
+ if (param_type == TEEC_MEMREF_TEMP_INPUT ||
+ param_type == TEEC_MEMREF_TEMP_OUTPUT ||
+ param_type == TEEC_MEMREF_TEMP_INOUT)
+ return true;
+
+ return false;
+}
+
+bool is_ref_mem(uint32_t param_type)
+{
+ if (param_type == TEEC_MEMREF_PARTIAL_INPUT ||
+ param_type == TEEC_MEMREF_PARTIAL_OUTPUT ||
+ param_type == TEEC_MEMREF_PARTIAL_INOUT)
+ return true;
+
+ return false;
+}
+
+bool is_val_param(uint32_t param_type)
+{
+ if (param_type == TEEC_VALUE_INPUT ||
+ param_type == TEEC_VALUE_OUTPUT ||
+ param_type == TEEC_VALUE_INOUT ||
+ param_type == TEEC_ION_INPUT ||
+ param_type == TEEC_ION_SGLIST_INPUT)
+ return true;
+
+ return false;
+}
+
+static bool is_mem_param(uint32_t param_type)
+{
+ if (is_tmp_mem(param_type) || is_ref_mem(param_type))
+ return true;
+
+ return false;
+}
+
+/* Check that the size and buffer addresses are valid userspace addresses */
+static bool is_usr_refmem_valid(const union tc_ns_client_param *client_param)
+{
+ uint32_t size = 0;
+ uint64_t size_addr = client_param->memref.size_addr |
+ ((uint64_t)client_param->memref.size_h_addr << ADDR_TRANS_NUM);
+ uint64_t buffer_addr = client_param->memref.buffer |
+ ((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 18) || \
+ LINUX_VERSION_CODE == KERNEL_VERSION(4, 19, 71))
+ if (access_ok(VERIFY_READ, (void *)(uintptr_t)size_addr, sizeof(uint32_t)) == 0)
+#else
+ if (access_ok((void *)(uintptr_t)size_addr, sizeof(uint32_t)) == 0)
+#endif
+ return false;
+
+ if (get_user(size, (uint32_t *)(uintptr_t)size_addr) != 0)
+ return false;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 18) || \
+ LINUX_VERSION_CODE == KERNEL_VERSION(4, 19, 71))
+ if (access_ok(VERIFY_READ, (void *)(uintptr_t)buffer_addr, size) == 0)
+#else
+ if (access_ok((void *)(uintptr_t)buffer_addr, size) == 0)
+#endif
+ return false;
+
+ return true;
+}
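+
+/*
+ * Note: client addresses are carried as two 32-bit halves (xxx_addr plus
+ * xxx_h_addr) and recombined by shifting the high half left by
+ * ADDR_TRANS_NUM bits (assumed to be 32); the same pattern recurs
+ * throughout this file.
+ */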
+
+static bool is_usr_valmem_valid(const union tc_ns_client_param *client_param)
+{
+ uint64_t a_addr = client_param->value.a_addr |
+ ((uint64_t)client_param->value.a_h_addr << ADDR_TRANS_NUM);
+ uint64_t b_addr = client_param->value.b_addr |
+ ((uint64_t)client_param->value.b_h_addr << ADDR_TRANS_NUM);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 18) || \
+ LINUX_VERSION_CODE == KERNEL_VERSION(4, 19, 71))
+ if (access_ok(VERIFY_READ, (void *)(uintptr_t)a_addr, sizeof(uint32_t)) == 0)
+#else
+ if (access_ok((void *)(uintptr_t)a_addr, sizeof(uint32_t)) == 0)
+#endif
+ return false;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 18) || \
+ LINUX_VERSION_CODE == KERNEL_VERSION(4, 19, 71))
+ if (access_ok(VERIFY_READ, (void *)(uintptr_t)b_addr, sizeof(uint32_t)) == 0)
+#else
+ if (access_ok((void *)(uintptr_t)b_addr, sizeof(uint32_t)) == 0)
+#endif
+ return false;
+
+ return true;
+}
+
+bool tc_user_param_valid(struct tc_ns_client_context *client_context,
+ unsigned int index)
+{
+ union tc_ns_client_param *client_param = NULL;
+ unsigned int param_type;
+
+ if (check_user_param(client_context, index) != 0)
+ return false;
+
+ client_param = &(client_context->params[index]);
+ param_type = teec_param_type_get(client_context->param_types, index);
+ tlogd("param %u type is %x\n", index, param_type);
+ if (param_type == TEEC_NONE) {
+ tlogd("param type is TEEC_NONE\n");
+ return true;
+ }
+
+ if (is_mem_param(param_type)) {
+ if (!is_usr_refmem_valid(client_param))
+ return false;
+ } else if (is_val_param(param_type)) {
+ if (!is_usr_valmem_valid(client_param))
+ return false;
+ } else {
+ tloge("param types is not supported\n");
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * These functions handle reads from the client. Because the client can
+ * be a kernel client or a user-space client, we must use the proper copy
+ * function for each case.
+ */
+int read_from_client(void *dest, size_t dest_size,
+ const void __user *src, size_t size, uint8_t kernel_api)
+{
+ int ret;
+
+ if (!dest || !src) {
+ tloge("src or dest is NULL input buffer\n");
+ return -EINVAL;
+ }
+
+ if (size > dest_size) {
+ tloge("size is larger than dest_size or size is 0\n");
+ return -EINVAL;
+ }
+ if (size == 0)
+ return 0;
+
+ if (kernel_api != 0) {
+ ret = memcpy_s(dest, dest_size, src, size);
+ if (ret != EOK) {
+ tloge("memcpy fail. line=%d, s_ret=%d\n",
+ __LINE__, ret);
+ return ret;
+ }
+ return ret;
+ }
+ /* buffer is in user space (CA calls the TEE API) */
+ if (copy_from_user(dest, src, size) != 0) {
+ tloge("copy from user failed\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int write_to_client(void __user *dest, size_t dest_size,
+ const void *src, size_t size, uint8_t kernel_api)
+{
+ int ret;
+
+ if (!dest || !src) {
+ tloge("src or dest is NULL input buffer\n");
+ return -EINVAL;
+ }
+
+ if (size > dest_size) {
+ tloge("size is larger than dest_size\n");
+ return -EINVAL;
+ }
+
+ if (size == 0)
+ return 0;
+
+ if (kernel_api != 0) {
+ ret = memcpy_s(dest, dest_size, src, size);
+ if (ret != EOK) {
+ tloge("write to client fail. line=%d, ret=%d\n",
+ __LINE__, ret);
+ return ret;
+ }
+ return ret;
+ }
+
+ /* buffer is in user space (CA calls the TEE API) */
+ if (copy_to_user(dest, src, size) != 0) {
+ tloge("copy to user failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
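+
+/*
+ * Example (illustrative): reading a 32-bit size from a client pointer;
+ * 'user_ptr' and 'dev' are hypothetical. 'kernel_api' selects memcpy_s
+ * for kernel clients and copy_from_user for user-space clients.
+ *
+ * uint32_t size = 0;
+ * if (read_from_client(&size, sizeof(size), user_ptr, sizeof(size),
+ *     dev->kernel_api) != 0)
+ *     return -EFAULT;
+ */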
+
+static bool is_input_tempmem(unsigned int param_type)
+{
+ if (param_type == TEEC_MEMREF_TEMP_INPUT ||
+ param_type == TEEC_MEMREF_TEMP_INOUT)
+ return true;
+
+ return false;
+}
+
+static int update_input_data(const union tc_ns_client_param *client_param,
+ uint32_t buffer_size, void *temp_buf,
+ unsigned int param_type, uint8_t kernel_params)
+{
+ uint64_t buffer_addr;
+ if (!is_input_tempmem(param_type))
+ return 0;
+
+ buffer_addr = client_param->memref.buffer |
+ ((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM);
+ if (read_from_client(temp_buf, buffer_size,
+ (void *)(uintptr_t)buffer_addr,
+ buffer_size, kernel_params) != 0) {
+ tloge("copy memref buffer failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/*
+ * temp buffers we need to allocate/deallocate
+ * for every operation
+ */
+static int alloc_for_tmp_mem(const struct tc_call_params *call_params,
+ struct tc_op_params *op_params, uint8_t kernel_params,
+ uint32_t param_type, unsigned int index)
+{
+ union tc_ns_client_param *client_param = NULL;
+ void *temp_buf = NULL;
+ uint32_t buffer_size = 0;
+ uint64_t size_addr;
+
+ /* this never happens */
+ if (index >= TEE_PARAM_NUM)
+ return -EINVAL;
+
+ /* For compatibility's sake we assume the buffer size to be 32 bits */
+ client_param = &(call_params->context->params[index]);
+ size_addr = client_param->memref.size_addr |
+ ((uint64_t)client_param->memref.size_h_addr << ADDR_TRANS_NUM);
+ if (read_from_client(&buffer_size, sizeof(buffer_size),
+ (uint32_t __user *)(uintptr_t)size_addr,
+ sizeof(uint32_t), kernel_params) != 0) {
+ tloge("copy memref.size_addr failed\n");
+ return -EFAULT;
+ }
+
+ if (buffer_size > MAX_SHARED_SIZE) {
+ tloge("buffer size %u from user is too large\n", buffer_size);
+ return -EFAULT;
+ }
+
+ op_params->mb_pack->operation.params[index].memref.size = buffer_size;
+ /* TEEC_MEMREF_TEMP_INPUT equal to TEE_PARAM_TYPE_MEMREF_INPUT */
+ op_params->trans_paramtype[index] = param_type;
+
+ if (buffer_size == 0) {
+ op_params->local_tmpbuf[index].temp_buffer = NULL;
+ op_params->local_tmpbuf[index].size = 0;
+ op_params->mb_pack->operation.params[index].memref.buffer = 0;
+ op_params->mb_pack->operation.buffer_h_addr[index] = 0;
+ return 0;
+ }
+
+ temp_buf = mailbox_alloc(buffer_size, MB_FLAG_ZERO);
+ if (!temp_buf) {
+ tloge("temp buf malloc failed, i = %u\n", index);
+ return -ENOMEM;
+ }
+ op_params->local_tmpbuf[index].temp_buffer = temp_buf;
+ op_params->local_tmpbuf[index].size = buffer_size;
+
+ if (update_input_data(client_param, buffer_size, temp_buf,
+ param_type, kernel_params) != 0)
+ return -EFAULT;
+
+ op_params->mb_pack->operation.params[index].memref.buffer =
+ mailbox_virt_to_phys((uintptr_t)temp_buf);
+ op_params->mb_pack->operation.buffer_h_addr[index] =
+ (unsigned int)(mailbox_virt_to_phys((uintptr_t)temp_buf) >> ADDR_TRANS_NUM);
+
+ return 0;
+}
+
+static int check_buffer_for_ref(uint32_t *buffer_size,
+ const union tc_ns_client_param *client_param, uint8_t kernel_params)
+{
+ uint64_t size_addr = client_param->memref.size_addr |
+ ((uint64_t)client_param->memref.size_h_addr << ADDR_TRANS_NUM);
+ if (read_from_client(buffer_size, sizeof(*buffer_size),
+ (uint32_t __user *)(uintptr_t)size_addr,
+ sizeof(uint32_t), kernel_params) != 0) {
+ tloge("copy memref.size_addr failed\n");
+ return -EFAULT;
+ }
+ if (*buffer_size == 0) {
+ tloge("buffer_size from user is 0\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static bool is_refmem_offset_valid(const struct tc_ns_shared_mem *shared_mem,
+ const union tc_ns_client_param *client_param, uint32_t buffer_size)
+{
+ /*
+ * an arbitrary CA can control the offset via ioctl, so the offset
+ * must be checked here to avoid integer overflow.
+ */
+ if (((shared_mem->len - client_param->memref.offset) >= buffer_size) &&
+ (shared_mem->len > client_param->memref.offset))
+ return true;
+ tloge("Unexpected size %u vs %u", shared_mem->len, buffer_size);
+ return false;
+}
+
+static bool is_phyaddr_valid(const struct tc_ns_operation *operation, int index)
+{
+ /*
+ * on a device with 8G of physical memory, there is a chance that
+ * operation->params[i].memref.buffer could be all 0, but
+ * buffer_h_addr cannot be 0 at the same time.
+ */
+ if ((operation->params[index].memref.buffer == 0) &&
+ (operation->buffer_h_addr[index]) == 0) {
+ tloge("can not find shared buffer, exit\n");
+ return false;
+ }
+
+ return true;
+}
+
+static int set_operation_buffer(const struct tc_ns_shared_mem *shared_mem, void *buffer_addr,
+ uint32_t buffer_size, unsigned int index, struct tc_op_params *op_params)
+{
+ if (shared_mem->mem_type == RESERVED_TYPE) {
+ /* no copy to mailbox */
+ op_params->mb_pack->operation.mb_buffer[index] = buffer_addr;
+ op_params->mb_pack->operation.params[index].memref.buffer =
+ res_mem_virt_to_phys((uintptr_t)buffer_addr);
+ op_params->mb_pack->operation.buffer_h_addr[index] =
+ res_mem_virt_to_phys((uintptr_t)buffer_addr) >> ADDR_TRANS_NUM;
+ } else {
+ void *tmp_buffer_addr = mailbox_copy_alloc(buffer_addr, buffer_size);
+ if (tmp_buffer_addr == NULL)
+ return -ENOMEM;
+
+ op_params->mb_pack->operation.mb_buffer[index] = tmp_buffer_addr;
+ op_params->mb_pack->operation.params[index].memref.buffer =
+ (unsigned int)mailbox_virt_to_phys((uintptr_t)tmp_buffer_addr);
+ op_params->mb_pack->operation.buffer_h_addr[index] =
+ (unsigned int)((uint64_t)mailbox_virt_to_phys((uintptr_t)tmp_buffer_addr) >> ADDR_TRANS_NUM);
+ }
+ return 0;
+}
+
+/*
+ * MEMREF_PARTIAL buffers are already allocated so we just
+ * need to search for the shared_mem ref;
+ * For interface compatibility we assume the buffer size to be 32 bits
+ */
+static int alloc_for_ref_mem(const struct tc_call_params *call_params,
+ struct tc_op_params *op_params, uint8_t kernel_params,
+ uint32_t param_type, unsigned int index)
+{
+ union tc_ns_client_param *client_param = NULL;
+ struct tc_ns_shared_mem *shared_mem = NULL;
+ uint32_t buffer_size = 0;
+ void *buffer_addr = NULL;
+ int ret = 0;
+
+ /* this never happens */
+ if (index >= TEE_PARAM_NUM)
+ return -EINVAL;
+
+ client_param = &(call_params->context->params[index]);
+ if (check_buffer_for_ref(&buffer_size, client_param, kernel_params) != 0)
+ return -EINVAL;
+
+ op_params->mb_pack->operation.params[index].memref.buffer = 0;
+
+ mutex_lock(&call_params->dev->shared_mem_lock);
+ list_for_each_entry(shared_mem,
+ &call_params->dev->shared_mem_list, head) {
+ buffer_addr = (void *)(uintptr_t)(client_param->memref.buffer |
+ ((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM));
+ if (shared_mem->user_addr != buffer_addr)
+ continue;
+ if (!is_refmem_offset_valid(shared_mem, client_param,
+ buffer_size)) {
+ break;
+ }
+ buffer_addr = (void *)(uintptr_t)(
+ (uintptr_t)shared_mem->kernel_addr +
+ client_param->memref.offset);
+
+ ret = set_operation_buffer(shared_mem, buffer_addr, buffer_size, index, op_params);
+ if (ret != 0) {
+ tloge("set operation buffer failed\n");
+ break;
+ }
+ op_params->mb_pack->operation.sharemem[index] = shared_mem;
+ get_sharemem_struct(shared_mem);
+ break;
+ }
+ mutex_unlock(&call_params->dev->shared_mem_lock);
+ if (ret != 0)
+ return ret;
+
+ if (!is_phyaddr_valid(&op_params->mb_pack->operation, index))
+ return -EINVAL;
+
+ op_params->mb_pack->operation.params[index].memref.size = buffer_size;
+ /* Change TEEC_MEMREF_PARTIAL_XXXXX to TEE_PARAM_TYPE_MEMREF_XXXXX */
+ op_params->trans_paramtype[index] = param_type -
+ (TEEC_MEMREF_PARTIAL_INPUT - TEE_PARAM_TYPE_MEMREF_INPUT);
+
+ if (shared_mem->mem_type == RESERVED_TYPE)
+ op_params->trans_paramtype[index] +=
+ (TEE_PARAM_TYPE_RESMEM_INPUT - TEE_PARAM_TYPE_MEMREF_INPUT);
+ return ret;
+}
+
+#ifdef CONFIG_NOCOPY_SHAREDMEM
+static int fill_shared_mem_info(void *start_vaddr, uint32_t pages_no, uint32_t offset, uint32_t buffer_size, void *buff)
+{
+ struct pagelist_info *page_info = NULL;
+ struct page **pages = NULL;
+ uint64_t *phys_addr = NULL;
+ long page_num; /* get_user_pages() returns a signed count */
+ uint32_t i;
+ if (pages_no == 0)
+ return -EFAULT;
+ pages = (struct page **)vmalloc(pages_no * sizeof(uint64_t));
+ if (pages == NULL)
+ return -EFAULT;
+ down_read(&mm_sem_lock(current->mm));
+ page_num = get_user_pages((uintptr_t)start_vaddr, pages_no, FOLL_WRITE, pages, NULL);
+ up_read(&mm_sem_lock(current->mm));
+ if (page_num != pages_no) {
+ tloge("get page phy addr failed\n");
+ if (page_num > 0)
+ release_pages(pages, page_num);
+ vfree(pages);
+ return -EFAULT;
+ }
+ page_info = buff;
+ page_info->page_num = pages_no;
+ page_info->page_size = PAGE_SIZE;
+ page_info->sharedmem_offset = offset;
+ page_info->sharedmem_size = buffer_size;
+ phys_addr = (uint64_t *)buff + (sizeof(*page_info) / sizeof(uint64_t));
+ for (i = 0; i < pages_no; i++) {
+ struct page *page = pages[i];
+ if (page == NULL) {
+ release_pages(pages, page_num);
+ vfree(pages);
+ return -EFAULT;
+ }
+ phys_addr[i] = (uintptr_t)page_to_phys(page);
+ }
+ vfree(pages);
+ return 0;
+}
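+
+/*
+ * Layout of the mailbox buffer filled above: a struct pagelist_info header
+ * (page_num, page_size, sharedmem_offset, sharedmem_size) followed by
+ * page_num 64-bit physical page addresses, one per pinned page.
+ */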
+
+static int check_buffer_for_sharedmem(uint32_t *buffer_size,
+ const union tc_ns_client_param *client_param, uint8_t kernel_params)
+{
+ uint64_t size_addr = client_param->memref.size_addr |
+ ((uint64_t)client_param->memref.size_h_addr << ADDR_TRANS_NUM);
+ uint64_t buffer_addr = client_param->memref.buffer |
+ ((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM);
+ if (read_from_client(buffer_size, sizeof(*buffer_size),
+ (uint32_t __user *)(uintptr_t)size_addr,
+ sizeof(uint32_t), kernel_params)) {
+ tloge("copy size_addr failed\n");
+ return -EFAULT;
+ }
+
+ if (*buffer_size == 0 || *buffer_size > SZ_256M) {
+ tloge("invalid buffer size\n");
+ return -ENOMEM;
+ }
+
+ if ((client_param->memref.offset >= SZ_256M) ||
+ (UINT64_MAX - buffer_addr <= client_param->memref.offset)) {
+ tloge("invalid buff or offset\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int transfer_shared_mem(const struct tc_call_params *call_params,
+ struct tc_op_params *op_params, uint8_t kernel_params,
+ uint32_t param_type, unsigned int index)
+{
+ void *buff = NULL;
+ void *start_vaddr = NULL;
+ union tc_ns_client_param *client_param = NULL;
+ uint32_t buffer_size;
+ uint32_t pages_no;
+ uint32_t offset;
+ uint32_t buff_len;
+ uint64_t buffer_addr;
+
+ if (index >= TEE_PARAM_NUM)
+ return -EINVAL;
+
+ client_param = &(call_params->context->params[index]);
+ if (check_buffer_for_sharedmem(&buffer_size, client_param, kernel_params))
+ return -EINVAL;
+
+ buffer_addr = client_param->memref.buffer |
+ ((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM);
+ buff = (void *)(uint64_t)(buffer_addr + client_param->memref.offset);
+ start_vaddr = (void *)(((uint64_t)buff) & PAGE_MASK);
+ offset = ((uint32_t)(uintptr_t)buff) & (~PAGE_MASK);
+ pages_no = PAGE_ALIGN(offset + buffer_size) / PAGE_SIZE;
+
+ buff_len = sizeof(struct pagelist_info) + (sizeof(uint64_t) * pages_no);
+ buff = mailbox_alloc(buff_len, MB_FLAG_ZERO);
+ if (buff == NULL)
+ return -EFAULT;
+
+ if (fill_shared_mem_info(start_vaddr, pages_no, offset, buffer_size, buff)) {
+ mailbox_free(buff);
+ return -EFAULT;
+ }
+
+ op_params->local_tmpbuf[index].temp_buffer = buff;
+ op_params->local_tmpbuf[index].size = buff_len;
+
+ op_params->mb_pack->operation.params[index].memref.buffer = mailbox_virt_to_phys((uintptr_t)buff);
+ op_params->mb_pack->operation.buffer_h_addr[index] = (uint64_t)mailbox_virt_to_phys((uintptr_t)buff) >> ADDR_TRANS_NUM;
+ op_params->mb_pack->operation.params[index].memref.size = buff_len;
+ op_params->trans_paramtype[index] = param_type;
+ return 0;
+}
+#else
+static int transfer_shared_mem(const struct tc_call_params *call_params,
+ const struct tc_op_params *op_params, uint8_t kernel_params,
+ uint32_t param_type, unsigned int index)
+{
+ (void)call_params;
+ (void)op_params;
+ (void)kernel_params;
+ (void)param_type;
+ (void)index;
+ tloge("invalid shared mem type\n");
+ return -1;
+}
+#endif
+
+static int transfer_client_value(const struct tc_call_params *call_params,
+ struct tc_op_params *op_params, uint8_t kernel_params,
+ uint32_t param_type, unsigned int index)
+{
+ struct tc_ns_operation *operation = &op_params->mb_pack->operation;
+ union tc_ns_client_param *client_param = NULL;
+ uint64_t a_addr, b_addr;
+
+ /* this never happens */
+ if (index >= TEE_PARAM_NUM)
+ return -EINVAL;
+
+ client_param = &(call_params->context->params[index]);
+ a_addr = client_param->value.a_addr |
+ ((uint64_t)client_param->value.a_h_addr << ADDR_TRANS_NUM);
+ b_addr = client_param->value.b_addr |
+ ((uint64_t)client_param->value.b_h_addr << ADDR_TRANS_NUM);
+
+ if (read_from_client(&operation->params[index].value.a,
+ sizeof(operation->params[index].value.a),
+ (void *)(uintptr_t)a_addr,
+ sizeof(operation->params[index].value.a),
+ kernel_params) != 0) {
+ tloge("copy valuea failed\n");
+ return -EFAULT;
+ }
+ if (read_from_client(&operation->params[index].value.b,
+ sizeof(operation->params[index].value.b),
+ (void *)(uintptr_t)b_addr,
+ sizeof(operation->params[index].value.b),
+ kernel_params) != 0) {
+ tloge("copy valueb failed\n");
+ return -EFAULT;
+ }
+
+ /* TEEC_VALUE_INPUT equal to TEE_PARAM_TYPE_VALUE_INPUT */
+ op_params->trans_paramtype[index] = param_type;
+ return 0;
+}
+
+static int alloc_operation(const struct tc_call_params *call_params,
+ struct tc_op_params *op_params)
+{
+ int ret = 0;
+ uint32_t index;
+ uint8_t kernel_params;
+ uint32_t param_type;
+
+ kernel_params = call_params->dev->kernel_api;
+ for (index = 0; index < TEE_PARAM_NUM; index++) {
+ /*
+ * Normally kernel_params equals kernel_api.
+ * But for TC_CALL_LOGIN, params 2/3 are filled by the
+ * kernel, so they have to be set to kernel mode, while
+ * params 0/1 keep the same mode as kernel_api.
+ */
+ if ((call_params->flags & TC_CALL_LOGIN) && (index >= 2))
+ kernel_params = TEE_REQ_FROM_KERNEL_MODE;
+ param_type = teec_param_type_get(
+ call_params->context->param_types, index);
+
+ tlogd("param %u type is %x\n", index, param_type);
+ if (teec_tmpmem_type(param_type, INOUT))
+ ret = alloc_for_tmp_mem(call_params, op_params,
+ kernel_params, param_type, index);
+ else if (teec_memref_type(param_type, INOUT))
+ ret = alloc_for_ref_mem(call_params, op_params,
+ kernel_params, param_type, index);
+ else if (teec_value_type(param_type, INOUT))
+ ret = transfer_client_value(call_params, op_params,
+ kernel_params, param_type, index);
+ else if (param_type == TEEC_ION_INPUT)
+ ret = alloc_for_ion(call_params, op_params,
+ kernel_params, param_type, index);
+ else if (param_type == TEEC_ION_SGLIST_INPUT)
+ ret = alloc_for_ion_sglist(call_params, op_params,
+ kernel_params, param_type, index);
+ else if (param_type == TEEC_MEMREF_SHARED_INOUT)
+ ret = transfer_shared_mem(call_params, op_params,
+ kernel_params, param_type, index);
+ else
+ tlogd("param type = TEEC_NONE\n");
+
+ if (ret != 0)
+ break;
+ }
+ if (ret != 0) {
+ free_operation(call_params, op_params);
+ return ret;
+ }
+ op_params->mb_pack->operation.paramtypes =
+ teec_param_types(op_params->trans_paramtype[0],
+ op_params->trans_paramtype[1],
+ op_params->trans_paramtype[2],
+ op_params->trans_paramtype[3]);
+ op_params->op_inited = true;
+
+ return ret;
+}
+
+static int update_tmp_mem(const struct tc_call_params *call_params,
+ const struct tc_op_params *op_params, unsigned int index, bool is_complete)
+{
+ union tc_ns_client_param *client_param = NULL;
+ uint32_t buffer_size;
+ struct tc_ns_operation *operation = &op_params->mb_pack->operation;
+ uint64_t size_addr, buffer_addr;
+
+ if (index >= TEE_PARAM_NUM) {
+ tloge("tmp buf size or index is invalid\n");
+ return -EFAULT;
+ }
+
+ buffer_size = operation->params[index].memref.size;
+ client_param = &(call_params->context->params[index]);
+ size_addr = client_param->memref.size_addr |
+ ((uint64_t)client_param->memref.size_h_addr << ADDR_TRANS_NUM);
+ buffer_addr = client_param->memref.buffer |
+ ((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM);
+ /* Size is updated all the time */
+ if (write_to_client((void *)(uintptr_t)size_addr,
+ sizeof(buffer_size),
+ &buffer_size, sizeof(buffer_size),
+ call_params->dev->kernel_api) != 0) {
+ tloge("copy tempbuf size failed\n");
+ return -EFAULT;
+ }
+ if (buffer_size > op_params->local_tmpbuf[index].size) {
+ /* incomplete case: if the buffer size is invalid, move on to the next param */
+ if (!is_complete)
+ return 0;
+ /*
+ * complete case: the operation is allocated from the mailbox
+ * and shared with gtask, so its size may have been changed
+ */
+ tloge("memref.size has been changed larger than the initial\n");
+ return -EFAULT;
+ }
+ if (buffer_size == 0)
+ return 0;
+ /* Only update the buffer when the buffer size is valid in complete case */
+ if (write_to_client((void *)(uintptr_t)buffer_addr,
+ operation->params[index].memref.size,
+ op_params->local_tmpbuf[index].temp_buffer,
+ operation->params[index].memref.size,
+ call_params->dev->kernel_api) != 0) {
+ tloge("copy tempbuf failed\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int update_for_ref_mem(const struct tc_call_params *call_params,
+ const struct tc_op_params *op_params, unsigned int index)
+{
+ union tc_ns_client_param *client_param = NULL;
+ uint32_t buffer_size;
+ unsigned int orig_size = 0;
+ struct tc_ns_operation *operation = &op_params->mb_pack->operation;
+ uint64_t size_addr;
+
+ if (index >= TEE_PARAM_NUM) {
+ tloge("index is invalid\n");
+ return -EFAULT;
+ }
+
+ /* update size */
+ buffer_size = operation->params[index].memref.size;
+ client_param = &(call_params->context->params[index]);
+ size_addr = client_param->memref.size_addr |
+ ((uint64_t)client_param->memref.size_h_addr << ADDR_TRANS_NUM);
+
+ if (read_from_client(&orig_size,
+ sizeof(orig_size),
+ (uint32_t __user *)(uintptr_t)size_addr,
+ sizeof(orig_size), call_params->dev->kernel_api) != 0) {
+ tloge("copy orig memref.size_addr failed\n");
+ return -EFAULT;
+ }
+
+ if (write_to_client((void *)(uintptr_t)size_addr,
+ sizeof(buffer_size),
+ &buffer_size, sizeof(buffer_size),
+ call_params->dev->kernel_api) != 0) {
+ tloge("copy buf size failed\n");
+ return -EFAULT;
+ }
+
+ /* reserved memory no need to copy */
+ if (operation->sharemem[index]->mem_type == RESERVED_TYPE)
+ return 0;
+ /* copy from mb_buffer to sharemem */
+ if (operation->mb_buffer[index] && orig_size >= buffer_size) {
+ void *buffer_addr =
+ (void *)(uintptr_t)((uintptr_t)
+ operation->sharemem[index]->kernel_addr +
+ client_param->memref.offset);
+ if (memcpy_s(buffer_addr,
+ operation->sharemem[index]->len -
+ client_param->memref.offset,
+ operation->mb_buffer[index], buffer_size) != 0) {
+ tloge("copy to sharemem failed\n");
+ return -EFAULT;
+ }
+ }
+ return 0;
+}
+
+static int update_for_value(const struct tc_call_params *call_params,
+ const struct tc_op_params *op_params, unsigned int index)
+{
+ union tc_ns_client_param *client_param = NULL;
+ struct tc_ns_operation *operation = &op_params->mb_pack->operation;
+ uint64_t a_addr, b_addr;
+
+ if (index >= TEE_PARAM_NUM) {
+ tloge("index is invalid\n");
+ return -EFAULT;
+ }
+ client_param = &(call_params->context->params[index]);
+ a_addr = client_param->value.a_addr |
+ ((uint64_t)client_param->value.a_h_addr << ADDR_TRANS_NUM);
+ b_addr = client_param->value.b_addr |
+ ((uint64_t)client_param->value.b_h_addr << ADDR_TRANS_NUM);
+
+ if (write_to_client((void *)(uintptr_t)a_addr,
+ sizeof(operation->params[index].value.a),
+ &operation->params[index].value.a,
+ sizeof(operation->params[index].value.a),
+ call_params->dev->kernel_api) != 0) {
+ tloge("inc copy value.a_addr failed\n");
+ return -EFAULT;
+ }
+ if (write_to_client((void *)(uintptr_t)b_addr,
+ sizeof(operation->params[index].value.b),
+ &operation->params[index].value.b,
+ sizeof(operation->params[index].value.b),
+ call_params->dev->kernel_api) != 0) {
+ tloge("inc copy value.b_addr failed\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int update_client_operation(const struct tc_call_params *call_params,
+ const struct tc_op_params *op_params, bool is_complete)
+{
+ int ret = 0;
+ uint32_t param_type;
+ uint32_t index;
+
+ if (!op_params->op_inited)
+ return 0;
+
+ /* if paramTypes is NULL, no need to update */
+ if (call_params->context->param_types == 0)
+ return 0;
+
+ for (index = 0; index < TEE_PARAM_NUM; index++) {
+ param_type = teec_param_type_get(
+ call_params->context->param_types, index);
+ if (teec_tmpmem_type(param_type, OUTPUT))
+ ret = update_tmp_mem(call_params, op_params,
+ index, is_complete);
+ else if (teec_memref_type(param_type, OUTPUT))
+ ret = update_for_ref_mem(call_params,
+ op_params, index);
+ else if (is_complete && teec_value_type(param_type, OUTPUT))
+ ret = update_for_value(call_params, op_params, index);
+ else
+ tlogd("param_type:%u don't need to update\n", param_type);
+ if (ret != 0)
+ break;
+ }
+ return ret;
+}
+
+#ifdef CONFIG_NOCOPY_SHAREDMEM
+static void release_page(void *buf)
+{
+ uint32_t i;
+ uint64_t *phys_addr = NULL;
+ struct pagelist_info *page_info = NULL;
+ struct page *page = NULL;
+
+ page_info = buf;
+ phys_addr = (uint64_t *)buf + (sizeof(*page_info) / sizeof(uint64_t));
+ for (i = 0; i < page_info->page_num; i++) {
+ page = (struct page *)(uintptr_t)phys_to_page(phys_addr[i]);
+ if (page == NULL)
+ continue;
+ set_bit(PG_dirty, &page->flags);
+ put_page(page);
+ }
+}
+#endif
+static void free_operation(const struct tc_call_params *call_params, struct tc_op_params *op_params)
+{
+ uint32_t param_type;
+ uint32_t index;
+ void *temp_buf = NULL;
+ struct tc_ns_temp_buf *local_tmpbuf = op_params->local_tmpbuf;
+ struct tc_ns_operation *operation = &op_params->mb_pack->operation;
+
+ for (index = 0; index < TEE_PARAM_NUM; index++) {
+ param_type = teec_param_type_get(call_params->context->param_types, index);
+ if (is_tmp_mem(param_type)) {
+ /* free temp buffer */
+ temp_buf = local_tmpbuf[index].temp_buffer;
+ tlogd("free temp buf, i = %u\n", index);
+#if (!defined(CONFIG_LIBLINUX)) && (!defined(CONFIG_SHARED_MEM_RESERVED))
+ /* if temp_buf comes from iomap instead of page_alloc, virt_addr_valid will return false */
+ if (!virt_addr_valid((unsigned long)(uintptr_t)temp_buf))
+ continue;
+#endif
+ if (!ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)temp_buf)) {
+ mailbox_free(temp_buf);
+ temp_buf = NULL;
+ }
+ } else if (is_ref_mem(param_type)) {
+ struct tc_ns_shared_mem *shm = operation->sharemem[index];
+ if (shm != NULL && shm->mem_type == RESERVED_TYPE) {
+ put_sharemem_struct(operation->sharemem[index]);
+ continue;
+ }
+ put_sharemem_struct(operation->sharemem[index]);
+ if (operation->mb_buffer[index])
+ mailbox_free(operation->mb_buffer[index]);
+ } else if (param_type == TEEC_ION_SGLIST_INPUT) {
+ temp_buf = local_tmpbuf[index].temp_buffer;
+ tlogd("free ion sglist buf, i = %u\n", index);
+#if (!defined(CONFIG_LIBLINUX)) && (!defined(CONFIG_SHARED_MEM_RESERVED))
+ /* if temp_buf comes from iomap instead of page_alloc, virt_addr_valid will return false */
+ if (!virt_addr_valid((uint64_t)(uintptr_t)temp_buf))
+ continue;
+#endif
+ if (!ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)temp_buf)) {
+ mailbox_free(temp_buf);
+ temp_buf = NULL;
+ }
+ } else if (param_type == TEEC_MEMREF_SHARED_INOUT) {
+#ifdef CONFIG_NOCOPY_SHAREDMEM
+ temp_buf = local_tmpbuf[index].temp_buffer;
+ if (temp_buf != NULL) {
+ release_page(temp_buf);
+ mailbox_free(temp_buf);
+ }
+#endif
+ }
+ }
+}
+
+static bool is_clicall_params_valid(const struct tc_call_params *call_params)
+{
+ if (!call_params) {
+ tloge("call param is null");
+ return false;
+ }
+
+ if (!call_params->dev) {
+ tloge("dev file is null");
+ return false;
+ }
+
+ if (!call_params->context) {
+ tloge("client context is null");
+ return false;
+ }
+
+ return true;
+}
+
+static int alloc_for_client_call(struct tc_op_params *op_params)
+{
+ op_params->smc_cmd = kzalloc(sizeof(*(op_params->smc_cmd)),
+ GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)(op_params->smc_cmd))) {
+ tloge("smc cmd malloc failed\n");
+ return -ENOMEM;
+ }
+
+ op_params->mb_pack = mailbox_alloc_cmd_pack();
+ if (!op_params->mb_pack) {
+ kfree(op_params->smc_cmd);
+ op_params->smc_cmd = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int init_smc_cmd(const struct tc_call_params *call_params,
+ struct tc_op_params *op_params)
+{
+ struct tc_ns_smc_cmd *smc_cmd = op_params->smc_cmd;
+ struct tc_ns_client_context *context = call_params->context;
+ struct tc_ns_operation *operation = &op_params->mb_pack->operation;
+ bool global = call_params->flags & TC_CALL_GLOBAL;
+
+ smc_cmd->cmd_type = global ? CMD_TYPE_GLOBAL : CMD_TYPE_TA;
+ if (memcpy_s(smc_cmd->uuid, sizeof(smc_cmd->uuid),
+ context->uuid, UUID_LEN) != 0) {
+ tloge("memcpy uuid error\n");
+ return -EFAULT;
+ }
+ smc_cmd->cmd_id = context->cmd_id;
+ smc_cmd->dev_file_id = call_params->dev->dev_file_id;
+ smc_cmd->context_id = context->session_id;
+ smc_cmd->err_origin = context->returns.origin;
+ smc_cmd->started = context->started;
+ smc_cmd->ca_pid = current->pid;
+ smc_cmd->pid = current->tgid;
+
+ tlogv("current uid is %u\n", smc_cmd->uid);
+ if (context->param_types != 0) {
+ smc_cmd->operation_phys =
+ mailbox_virt_to_phys((uintptr_t)operation);
+ smc_cmd->operation_h_phys =
+ (uint64_t)mailbox_virt_to_phys((uintptr_t)operation) >> ADDR_TRANS_NUM;
+ } else {
+ smc_cmd->operation_phys = 0;
+ smc_cmd->operation_h_phys = 0;
+ }
+ smc_cmd->login_method = context->login.method;
+
+ /* if smc from kernel CA, set login_method to TEEK_LOGIN_IDENTIFY */
+ if (call_params->dev->kernel_api == TEE_REQ_FROM_KERNEL_MODE)
+ smc_cmd->login_method = TEEK_LOGIN_IDENTIFY;
+
+ return 0;
+}
+
+static bool need_check_login(const struct tc_call_params *call_params,
+ const struct tc_op_params *op_params)
+{
+ if (call_params->dev->pub_key_len == sizeof(uint32_t) &&
+ op_params->smc_cmd->cmd_id == GLOBAL_CMD_ID_OPEN_SESSION &&
+ current->mm && ((call_params->flags & TC_CALL_GLOBAL) != 0))
+ return true;
+
+ return false;
+}
+
+static int check_login_for_encrypt(const struct tc_call_params *call_params,
+ struct tc_op_params *op_params)
+{
+ struct tc_ns_session *sess = call_params->sess;
+ struct tc_ns_smc_cmd *smc_cmd = op_params->smc_cmd;
+ struct mb_cmd_pack *mb_pack = op_params->mb_pack;
+
+ if (need_check_login(call_params, op_params) && sess) {
+ if (memcpy_s(mb_pack->login_data, sizeof(mb_pack->login_data),
+ sess->auth_hash_buf,
+ sizeof(sess->auth_hash_buf)) != 0) {
+ tloge("copy login data failed\n");
+ return -EFAULT;
+ }
+ smc_cmd->login_data_phy = mailbox_virt_to_phys((uintptr_t)mb_pack->login_data);
+ smc_cmd->login_data_h_addr =
+ (uint64_t)mailbox_virt_to_phys((uintptr_t)mb_pack->login_data) >> ADDR_TRANS_NUM;
+ smc_cmd->login_data_len = MAX_SHA_256_SZ * (NUM_OF_SO + 1);
+ } else {
+ smc_cmd->login_data_phy = 0;
+ smc_cmd->login_data_h_addr = 0;
+ smc_cmd->login_data_len = 0;
+ }
+ return 0;
+}
+
+static uint32_t get_uid_for_cmd(void)
+{
+ kuid_t kuid;
+
+ kuid = current_uid();
+ return kuid.val;
+}
+
+static void reset_session_id(const struct tc_call_params *call_params,
+ const struct tc_op_params *op_params, int tee_ret)
+{
+ bool need_reset = false;
+
+ call_params->context->session_id = op_params->smc_cmd->context_id;
+ /*
+ * if tee_ret is an error other than TEEC_PENDING
+ * but context_id has been set, it needs to be reset to 0
+ */
+ need_reset = ((call_params->flags & TC_CALL_GLOBAL) &&
+ call_params->context->cmd_id == GLOBAL_CMD_ID_OPEN_SESSION &&
+ tee_ret && tee_ret != (int)TEEC_PENDING);
+ if (need_reset)
+ call_params->context->session_id = 0;
+ return;
+}
+
+static void pend_ca_thread(struct tc_ns_session *session,
+ const struct tc_ns_smc_cmd *smc_cmd)
+{
+ struct tc_wait_data *wq = NULL;
+
+ if (session)
+ wq = &session->wait_data;
+
+ if (wq) {
+ tlogv("before wait event\n");
+ /*
+ * use wait_event instead of wait_event_interruptible so
+ * that ap suspend will not wake up the TEE wait call
+ */
+ wait_event(wq->send_cmd_wq, wq->send_wait_flag != 0);
+ wq->send_wait_flag = 0;
+ }
+ tlogv("operation start is :%d\n", smc_cmd->started);
+ return;
+}
+
+
+static void release_tc_call_resource(const struct tc_call_params *call_params,
+ struct tc_op_params *op_params, int tee_ret)
+{
+ /* kfree(NULL) is safe, so smc_cmd needs no NULL check below */
+ call_params->context->returns.code = tee_ret;
+ call_params->context->returns.origin = op_params->smc_cmd->err_origin;
+
+ /*
+ * 1. when a CA invokes a command and crashes, gtask releases the
+ * service node and ion deletion won't be triggered, so tzdriver
+ * has to kill the ion here;
+ * 2. when a TA crashes, tzdriver also has to kill the ion;
+ */
+ if (tee_ret == (int)TEE_ERROR_TAGET_DEAD || tee_ret == (int)TEEC_ERROR_GENERIC)
+ kill_ion_by_uuid((struct tc_uuid *)op_params->smc_cmd->uuid);
+
+ if (op_params->op_inited)
+ free_operation(call_params, op_params);
+
+ kfree(op_params->smc_cmd);
+ mailbox_free(op_params->mb_pack);
+}
+
+static int config_smc_cmd_context(const struct tc_call_params *call_params,
+ struct tc_op_params *op_params)
+{
+ int ret;
+
+ ret = init_smc_cmd(call_params, op_params);
+ if (ret != 0)
+ return ret;
+
+ ret = check_login_for_encrypt(call_params, op_params);
+
+ return ret;
+}
+
+static int handle_ta_pending(const struct tc_call_params *call_params,
+ struct tc_op_params *op_params, int *tee_ret)
+{
+ if (*tee_ret != (int)TEEC_PENDING)
+ return 0;
+
+ while (*tee_ret == (int)TEEC_PENDING) {
+ pend_ca_thread(call_params->sess, op_params->smc_cmd);
+ *tee_ret = tc_ns_smc_with_no_nr(op_params->smc_cmd);
+ }
+
+ return 0;
+}
+
+static int post_proc_smc_return(const struct tc_call_params *call_params,
+ struct tc_op_params *op_params, int tee_ret)
+{
+ int ret;
+
+ if (tee_ret != 0) {
+ tloge("smc call ret 0x%x, cmd ret val 0x%x, origin %u\n", tee_ret,
+ op_params->smc_cmd->ret_val, op_params->smc_cmd->err_origin);
+ /* same as libteec_vendor, err from TEE, set ret positive */
+ ret = EFAULT;
+ if (tee_ret == (int)TEEC_CLIENT_INTR)
+ ret = -ERESTARTSYS;
+
+ if (tee_ret == (int)TEEC_ERROR_SHORT_BUFFER)
+ (void)update_client_operation(call_params, op_params, false);
+ } else {
+ tz_log_write();
+ ret = update_client_operation(call_params, op_params, true);
+ }
+
+ return ret;
+}
+
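+/*
+ * main entry of a client call: validate the params, allocate the
+ * smc cmd and mailbox pack, marshal the four GP params into the
+ * operation, issue the SMC, wait out TEEC_PENDING, then write the
+ * results back to the client and release all resources
+ */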
+int tc_client_call(const struct tc_call_params *call_params)
+{
+ int ret;
+ int tee_ret = 0;
+ struct tc_op_params op_params = { NULL, NULL, {{0}}, {0}, false };
+
+ if (!is_clicall_params_valid(call_params))
+ return -EINVAL;
+
+ if (alloc_for_client_call(&op_params) != 0)
+ return -ENOMEM;
+
+ op_params.smc_cmd->err_origin = TEEC_ORIGIN_COMMS;
+ op_params.smc_cmd->uid = get_uid_for_cmd();
+ if (call_params->context->param_types != 0) {
+ ret = alloc_operation(call_params, &op_params);
+ if (ret != 0)
+ goto free_src;
+ }
+
+ ret = config_smc_cmd_context(call_params, &op_params);
+ if (ret != 0)
+ goto free_src;
+
+ tee_ret = tc_ns_smc(op_params.smc_cmd);
+
+ reset_session_id(call_params, &op_params, tee_ret);
+
+ ret = handle_ta_pending(call_params, &op_params, &tee_ret);
+ if (ret != 0)
+ goto free_src;
+
+ ret = post_proc_smc_return(call_params, &op_params, tee_ret);
+
+free_src:
+ if (ret < 0) /* if ret > 0, means err from TEE */
+ op_params.smc_cmd->err_origin = TEEC_ORIGIN_COMMS;
+ release_tc_call_resource(call_params, &op_params, tee_ret);
+ return ret;
+}
diff --git a/tzdriver/core/gp_ops.h b/tzdriver/core/gp_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..32dab319b5a928b701d893316601cef37b24905f
--- /dev/null
+++ b/tzdriver/core/gp_ops.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: function declarations for allocating the global operation and passing params to TEE.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef GP_OPS_H
+#define GP_OPS_H
+#include "tc_ns_client.h"
+#include "teek_ns_client.h"
+
+struct pagelist_info {
+ uint64_t page_num;
+ uint64_t page_size;
+ uint64_t sharedmem_offset;
+ uint64_t sharedmem_size;
+};
+
+int write_to_client(void __user *dest, size_t dest_size,
+ const void *src, size_t size, uint8_t kernel_api);
+int read_from_client(void *dest, size_t dest_size,
+ const void __user *src, size_t size, uint8_t kernel_api);
+bool tc_user_param_valid(struct tc_ns_client_context *client_context,
+ unsigned int index);
+int tc_client_call(const struct tc_call_params *call_params);
+bool is_tmp_mem(uint32_t param_type);
+bool is_ref_mem(uint32_t param_type);
+bool is_val_param(uint32_t param_type);
+
+#endif
diff --git a/tzdriver/core/mailbox_mempool.c b/tzdriver/core/mailbox_mempool.c
new file mode 100644
index 0000000000000000000000000000000000000000..c01c04aae7c671248023426ad8625d0092fd92af
--- /dev/null
+++ b/tzdriver/core/mailbox_mempool.c
@@ -0,0 +1,644 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: mailbox memory management for sharing memory with TEE.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "mailbox_mempool.h"
+#include "shared_mem.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE)
+#include
+#endif
+#include "teek_client_constants.h"
+#include "tc_ns_log.h"
+#include "smc_smp.h"
+#include "ko_adapt.h"
+#include "internal_functions.h"
+
+#define MAILBOX_PAGE_MAX (MAILBOX_POOL_SIZE >> PAGE_SHIFT)
+static int g_max_order;
+
+#define OPT_MODE 0660U
+#define STATE_MODE 0440U
+
+struct mb_page_t {
+ struct list_head node;
+ mailbox_page_t *page;
+ int order;
+ unsigned int count; /* whether the block is in use */
+};
+
+struct mb_free_area_t {
+ struct list_head page_list;
+ int order;
+};
+
+struct mb_zone_t {
+ mailbox_page_t *all_pages;
+ struct mb_page_t pages[MAILBOX_PAGE_MAX];
+ struct mb_free_area_t free_areas[0];
+};
+
+static struct mb_zone_t *g_m_zone;
+static struct mutex g_mb_lock;
+
+static void mailbox_show_status(void)
+{
+ unsigned int i;
+ struct mb_page_t *pos = NULL;
+ struct list_head *head = NULL;
+ unsigned int used = 0;
+
+ if (!g_m_zone) {
+ tloge("zone struct is NULL\n");
+ return;
+ }
+
+ tloge("########################################\n");
+ mutex_lock(&g_mb_lock);
+ for (i = 0; i < MAILBOX_PAGE_MAX; i++) {
+ if (g_m_zone->pages[i].count != 0) {
+ tloge("page[%02d], order=%02d, count=%d\n", i, g_m_zone->pages[i].order, g_m_zone->pages[i].count);
+ used += (1 << (uint32_t)g_m_zone->pages[i].order);
+ }
+ }
+ tloge("total usage:%u/%u\n", used, MAILBOX_PAGE_MAX);
+ tloge("----------------------------------------\n");
+
+ for (i = 0; i < (unsigned int)g_max_order; i++) {
+ head = &g_m_zone->free_areas[i].page_list;
+ if (list_empty(head) != 0) {
+ tloge("order[%02d] is empty\n", i);
+ } else {
+ list_for_each_entry(pos, head, node)
+ tloge("order[%02d]\n", i);
+ }
+ }
+ mutex_unlock(&g_mb_lock);
+
+ tloge("########################################\n");
+}
+
+#define MB_SHOW_LINE 64
+#define BITS_OF_BYTE 8
+static void mailbox_show_details(void)
+{
+ unsigned int i;
+ unsigned int used = 0;
+ unsigned int left = 0;
+ unsigned int order = 0;
+
+ if (!g_m_zone) {
+ tloge("zone struct is NULL\n");
+ return;
+ }
+ tloge("----- show mailbox details -----");
+ mutex_lock(&g_mb_lock);
+ for (i = 0; i < MAILBOX_PAGE_MAX; i++) {
+ if (i % MB_SHOW_LINE == 0) {
+ tloge("\n");
+ tloge("%04d-%04d:", i, i + MB_SHOW_LINE);
+ }
+ if (g_m_zone->pages[i].count != 0) {
+ left = 1 << (uint32_t)g_m_zone->pages[i].order;
+ order = (uint32_t)g_m_zone->pages[i].order;
+ used += (1 << (uint32_t)g_m_zone->pages[i].order);
+ }
+ if (left != 0) {
+ left--;
+ tloge("%01d", order);
+ } else {
+ tloge("X");
+ }
+ if (i > 1 && (i + 1) % (MB_SHOW_LINE / BITS_OF_BYTE) == 0)
+ tloge(" ");
+ }
+ tloge("total usage:%u/%u\n", used, MAILBOX_PAGE_MAX);
+ mutex_unlock(&g_mb_lock);
+}
+
+void *mailbox_alloc(size_t size, unsigned int flag)
+{
+ unsigned int i;
+ struct mb_page_t *pos = (struct mb_page_t *)NULL;
+ struct list_head *head = NULL;
+ int order = get_order(ALIGN(size, SZ_4K));
+ void *addr = NULL;
+
+ if ((size == 0) || !g_m_zone) {
+ tlogw("alloc 0 size mailbox or zone struct is NULL\n");
+ return NULL;
+ }
+
+ if (order > g_max_order || order < 0) {
+ tloge("invalid order %d\n", order);
+ return NULL;
+ }
+ mutex_lock(&g_mb_lock);
+
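+ /*
+ * buddy allocation: scan the free areas from the requested order
+ * upwards, take the first free block found, and split the excess
+ * into lower-order blocks that go back on their free lists
+ */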
+ for (i = (unsigned int)order; i <= (unsigned int)g_max_order; i++) {
+ unsigned int j;
+ head = &g_m_zone->free_areas[i].page_list;
+ if (list_empty(head) != 0)
+ continue;
+ pos = list_first_entry(head, struct mb_page_t, node);
+ pos->count = 1;
+ pos->order = order;
+ /* split and add free list */
+ for (j = (unsigned int)order; j < i; j++) {
+ struct mb_page_t *new_page = NULL;
+ new_page = pos + (1 << j);
+ new_page->count = 0;
+ new_page->order = (int)j;
+ list_add_tail(&new_page->node, &g_m_zone->free_areas[j].page_list);
+ }
+ list_del(&pos->node);
+ addr = (void *)mailbox_page_address(pos->page);
+ break;
+ }
+
+ mutex_unlock(&g_mb_lock);
+ if (addr && ((flag & MB_FLAG_ZERO) != 0)) {
+ if (memset_s(addr, ALIGN(size, SZ_4K), 0, ALIGN(size, SZ_4K)) != 0) {
+ tloge("clean mailbox failed\n");
+ mailbox_free(addr);
+ return NULL;
+ }
+ }
+ return addr;
+}
+
+static void add_max_order_block(unsigned int index)
+{
+ struct mb_page_t *self = NULL;
+
+ if (index != (unsigned int)g_max_order || !g_m_zone)
+ return;
+
+ /*
+ * when index equals the max order, no one is using mailbox
+ * memory, so hang all pages back on the last free area's page list
+ */
+ self = &g_m_zone->pages[0];
+ list_add_tail(&self->node,
+ &g_m_zone->free_areas[g_max_order].page_list);
+}
+
+static bool is_ptr_valid(const mailbox_page_t *page)
+{
+ if (!g_m_zone)
+ return false;
+
+ if (page < g_m_zone->all_pages ||
+ page >= (g_m_zone->all_pages + MAILBOX_PAGE_MAX)) {
+ tloge("invalid ptr to free in mailbox\n");
+ return false;
+ }
+ return true;
+}
+
+void mailbox_free(const void *ptr)
+{
+ unsigned int i;
+ mailbox_page_t *page = NULL;
+ struct mb_page_t *self = NULL;
+ struct mb_page_t *buddy = NULL;
+ unsigned int self_idx;
+ unsigned int buddy_idx;
+
+ if (!ptr || !g_m_zone) {
+ tloge("invalid ptr or zone struct is NULL\n");
+ return;
+ }
+
+ page = mailbox_virt_to_page((uint64_t)(uintptr_t)ptr);
+ if (!is_ptr_valid(page))
+ return;
+ mutex_lock(&g_mb_lock);
+ self_idx = page - g_m_zone->all_pages;
+ self = &g_m_zone->pages[self_idx];
+ if (self->count == 0) {
+ tloge("already freed in mailbox\n");
+ mutex_unlock(&g_mb_lock);
+ return;
+ }
+
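+ /*
+ * buddy coalescing: the buddy of the block at self_idx with
+ * order i sits at index self_idx ^ (1 << i); keep merging
+ * upwards while the buddy is free with the same order, otherwise
+ * park the block on the free list of its current order
+ */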
+ for (i = (unsigned int)self->order; i < (unsigned int)g_max_order; i++) {
+ self_idx = page - g_m_zone->all_pages;
+ buddy_idx = self_idx ^ (uint32_t)(1 << i);
+ self = &g_m_zone->pages[self_idx];
+ buddy = &g_m_zone->pages[buddy_idx];
+ self->count = 0;
+ /* is buddy free */
+ if ((unsigned int)buddy->order == i && buddy->count == 0) {
+ /* release buddy */
+ list_del(&buddy->node);
+ /* combine self and buddy */
+ if (self_idx > buddy_idx) {
+ page = buddy->page;
+ buddy->order = (int)i + 1;
+ self->order = -1;
+ } else {
+ self->order = (int)i + 1;
+ buddy->order = -1;
+ }
+ } else {
+ /* release self */
+ list_add_tail(&self->node,
+ &g_m_zone->free_areas[i].page_list);
+ mutex_unlock(&g_mb_lock);
+ return;
+ }
+ }
+
+ add_max_order_block(i);
+ mutex_unlock(&g_mb_lock);
+}
+
+struct mb_cmd_pack *mailbox_alloc_cmd_pack(void)
+{
+ void *pack = mailbox_alloc(SZ_4K, MB_FLAG_ZERO);
+
+ if (!pack)
+ tloge("alloc mb cmd pack failed\n");
+
+ return (struct mb_cmd_pack *)pack;
+}
+
+void *mailbox_copy_alloc(const void *src, size_t size)
+{
+ void *mb_ptr = NULL;
+
+ if (!src || !size) {
+ tloge("invali src to alloc mailbox copy\n");
+ return NULL;
+ }
+
+ mb_ptr = mailbox_alloc(size, 0);
+ if (!mb_ptr) {
+ tloge("alloc size %zu mailbox failed\n", size);
+ return NULL;
+ }
+
+ if (memcpy_s(mb_ptr, size, src, size) != 0) {
+ tloge("memcpy to mailbox failed\n");
+ mailbox_free(mb_ptr);
+ return NULL;
+ }
+
+ return mb_ptr;
+}
+
+struct mb_dbg_entry {
+ struct list_head node;
+ unsigned int idx;
+ void *ptr;
+};
+
+static LIST_HEAD(mb_dbg_list);
+static DEFINE_MUTEX(mb_dbg_lock);
+static unsigned int g_mb_dbg_entry_count = 1;
+static unsigned int g_mb_dbg_last_res; /* only cache 1 opt result */
+static struct dentry *g_mb_dbg_dentry;
+
+static unsigned int mb_dbg_add_entry(void *ptr)
+{
+ struct mb_dbg_entry *new_entry = NULL;
+ unsigned int index = 0;
+
+ new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)new_entry)) {
+ tloge("alloc entry failed\n");
+ return 0;
+ }
+
+ INIT_LIST_HEAD(&new_entry->node);
+ new_entry->ptr = ptr;
+ mutex_lock(&mb_dbg_lock);
+ new_entry->idx = g_mb_dbg_entry_count;
+
+ if ((g_mb_dbg_entry_count++) == 0)
+ g_mb_dbg_entry_count++;
+ list_add_tail(&new_entry->node, &mb_dbg_list);
+ index = new_entry->idx;
+ mutex_unlock(&mb_dbg_lock);
+
+ return index;
+}
+
+static void mb_dbg_remove_entry(unsigned int idx)
+{
+ struct mb_dbg_entry *pos = NULL;
+ struct mb_dbg_entry *temp = NULL;
+
+ mutex_lock(&mb_dbg_lock);
+ list_for_each_entry_safe(pos, temp, &mb_dbg_list, node) {
+ if (pos->idx == idx) {
+ mailbox_free(pos->ptr);
+ list_del(&pos->node);
+ kfree(pos);
+ mutex_unlock(&mb_dbg_lock);
+ return;
+ }
+ }
+ mutex_unlock(&mb_dbg_lock);
+
+ tloge("entry %u invalid\n", idx);
+}
+
+static void mb_dbg_reset(void)
+{
+ struct mb_dbg_entry *pos = NULL;
+ struct mb_dbg_entry *tmp = NULL;
+
+ mutex_lock(&mb_dbg_lock);
+ list_for_each_entry_safe(pos, tmp, &mb_dbg_list, node) {
+ mailbox_free(pos->ptr);
+ list_del(&pos->node);
+ kfree(pos);
+ }
+ g_mb_dbg_entry_count = 0;
+ mutex_unlock(&mb_dbg_lock);
+}
+
+#define MB_WRITE_SIZE 64
+
+static bool is_opt_write_param_valid(const struct file *filp,
+ const char __user *ubuf, size_t cnt, const loff_t *ppos)
+{
+ if (!filp || !ppos || !ubuf)
+ return false;
+
+ if (cnt >= MB_WRITE_SIZE || cnt == 0)
+ return false;
+
+ return true;
+}
+
+static void alloc_dbg_entry(unsigned int alloc_size)
+{
+ unsigned int idx;
+ void *ptr = NULL;
+
+ ptr = mailbox_alloc(alloc_size, 0);
+ if (!ptr) {
+ tloge("alloc order=%u in mailbox failed\n", alloc_size);
+ return;
+ }
+
+ idx = mb_dbg_add_entry(ptr);
+ if (idx == 0)
+ mailbox_free(ptr);
+ g_mb_dbg_last_res = idx;
+}
+
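+/*
+ * commands accepted by the opt node (debugfs, typically mounted at
+ * /sys/kernel/debug, dir tz_mailbox, eng builds only):
+ * "alloc:<size>" allocate <size> bytes and record a dbg entry
+ * "free:<idx>" free the entry with index <idx>
+ * "reset" free all recorded entries
+ */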
+static ssize_t mb_dbg_opt_write(struct file *filp,
+ const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[MB_WRITE_SIZE] = {0};
+ char *cmd = NULL;
+ char *value = NULL;
+ unsigned int alloc_size;
+ unsigned int free_idx;
+
+ if (!is_opt_write_param_valid(filp, ubuf, cnt, ppos))
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt) != 0)
+ return -EFAULT;
+
+ buf[cnt] = 0;
+ value = buf;
+ if (strncmp(value, "reset", strlen("reset")) == 0) {
+ tlogi("mb dbg reset\n");
+ mb_dbg_reset();
+ return (ssize_t)cnt;
+ }
+
+ cmd = strsep(&value, ":");
+ if (!cmd || !value) {
+ tloge("no valid cmd or value for mb dbg\n");
+ return -EFAULT;
+ }
+
+ if (strncmp(cmd, "alloc", strlen("alloc")) == 0) {
+ if (kstrtou32(value, 10, &alloc_size) == 0)
+ alloc_dbg_entry(alloc_size);
+ else
+ tloge("invalid value format for mb dbg\n");
+ } else if (strncmp(cmd, "free", strlen("free")) == 0) {
+ if (kstrtou32(value, 10, &free_idx) == 0)
+ mb_dbg_remove_entry(free_idx);
+ else
+ tloge("invalid value format for mb dbg\n");
+ } else {
+ tloge("invalid format for mb dbg\n");
+ }
+
+ return (ssize_t)cnt;
+}
+
+static ssize_t mb_dbg_opt_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[16] = {0};
+ ssize_t ret;
+
+ (void)(filp);
+
+ ret = snprintf_s(buf, sizeof(buf), 15, "%u\n", g_mb_dbg_last_res);
+ if (ret < 0) {
+ tloge("snprintf idx failed\n");
+ return -EINVAL;
+ }
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, ret);
+}
+
+static const struct file_operations g_mb_dbg_opt_fops = {
+ .owner = THIS_MODULE,
+ .read = mb_dbg_opt_read,
+ .write = mb_dbg_opt_write,
+};
+
+static ssize_t mb_dbg_state_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ (void)cnt;
+ (void)(filp);
+ (void)(ubuf);
+ (void)(ppos);
+ mailbox_show_status();
+ mailbox_show_details();
+ return 0;
+}
+
+static const struct file_operations g_mb_dbg_state_fops = {
+ .owner = THIS_MODULE,
+ .read = mb_dbg_state_read,
+};
+
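+/*
+ * register the mailbox pool with the TEE: params[0] carries the
+ * pool's physical address split into low/high words and params[1]
+ * its size, sent as a GLOBAL_CMD_ID_REGISTER_MAILBOX command
+ */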
+static int mailbox_register(const void *mb_pool, unsigned int size)
+{
+ struct tc_ns_operation *operation = NULL;
+ struct tc_ns_smc_cmd *smc_cmd = NULL;
+ int ret = 0;
+
+ smc_cmd = kzalloc(sizeof(*smc_cmd), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)smc_cmd)) {
+ tloge("alloc smc_cmd failed\n");
+ return -EIO;
+ }
+
+ operation = (struct tc_ns_operation *)(uintptr_t)get_operation_vaddr();
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)operation)) {
+ tloge("alloc operation failed\n");
+ ret = -EIO;
+ goto free_smc_cmd;
+ }
+
+ operation->paramtypes = TEE_PARAM_TYPE_VALUE_INPUT |
+ (TEE_PARAM_TYPE_VALUE_INPUT << TEE_PARAM_NUM);
+ operation->params[0].value.a = mailbox_virt_to_phys((uintptr_t)mb_pool);
+ operation->params[0].value.b =
+ (uint64_t)mailbox_virt_to_phys((uintptr_t)mb_pool) >> ADDR_TRANS_NUM;
+ operation->params[1].value.a = size;
+
+ smc_cmd->cmd_type = CMD_TYPE_GLOBAL;
+ smc_cmd->cmd_id = GLOBAL_CMD_ID_REGISTER_MAILBOX;
+ smc_cmd->operation_phys = mailbox_virt_to_phys((uintptr_t)operation);
+ smc_cmd->operation_h_phys =
+ (uint64_t)mailbox_virt_to_phys((uintptr_t)operation) >> ADDR_TRANS_NUM;
+
+ if (is_tee_rebooting())
+ ret = send_smc_cmd_rebooting(TSP_REQUEST, 0, 0, smc_cmd);
+ else
+ ret = tc_ns_smc(smc_cmd);
+
+ if (ret != 0) {
+ tloge("resigter mailbox failed\n");
+ ret = -EIO;
+ }
+
+ free_operation((uint64_t)(uintptr_t)operation);
+ operation = NULL;
+free_smc_cmd:
+ kfree(smc_cmd);
+ smc_cmd = NULL;
+ return ret;
+}
+
+static void mailbox_debug_init(void)
+{
+#ifdef DEF_ENG
+ g_mb_dbg_dentry = debugfs_create_dir("tz_mailbox", NULL);
+ debugfs_create_file("opt", OPT_MODE, g_mb_dbg_dentry, NULL, &g_mb_dbg_opt_fops);
+ debugfs_create_file("state", STATE_MODE, g_mb_dbg_dentry, NULL, &g_mb_dbg_state_fops);
+#endif
+}
+
+int re_register_mailbox(void)
+{
+ if (!g_m_zone)
+ return -EFAULT;
+
+ if (g_m_zone->all_pages != NULL) {
+ if (memset_s((void *)mailbox_page_address(g_m_zone->all_pages),
+ MAILBOX_POOL_SIZE, 0, MAILBOX_POOL_SIZE) != EOK) {
+ tloge("memset mailbox failed\n");
+ return -EFAULT;
+ }
+ if (mailbox_register((const void *) mailbox_page_address(g_m_zone->all_pages), MAILBOX_POOL_SIZE) != 0) {
+ tloge("register mailbox failed\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+int mailbox_mempool_init(void)
+{
+ int i;
+ struct mb_page_t *mb_page = NULL;
+ struct mb_free_area_t *area = NULL;
+ mailbox_page_t *all_pages = NULL;
+ size_t zone_len;
+
+ g_max_order = get_order(MAILBOX_POOL_SIZE);
+ tlogi("in this REE, mailbox max order is: %d\n", g_max_order);
+
+ /* zone len is fixed, will not overflow */
+ zone_len = sizeof(*area) * (g_max_order + 1) + sizeof(*g_m_zone);
+ g_m_zone = kzalloc(zone_len, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)g_m_zone)) {
+ tloge("fail to alloc zone struct\n");
+ return -ENOMEM;
+ }
+ all_pages = mailbox_alloc_pages(g_max_order);
+ if (!all_pages) {
+ tloge("fail to alloc mailbox mempool\n");
+ kfree(g_m_zone);
+ g_m_zone = NULL;
+ return -ENOMEM;
+ }
+ if (mailbox_register((const void *) mailbox_page_address(all_pages), MAILBOX_POOL_SIZE) != 0) {
+ tloge("register mailbox failed\n");
+ mailbox_free_pages(all_pages, g_max_order);
+ kfree(g_m_zone);
+ g_m_zone = NULL;
+ return -EIO;
+ }
+ for (i = 0; i < MAILBOX_PAGE_MAX; i++) {
+ g_m_zone->pages[i].order = -1;
+ g_m_zone->pages[i].count = 0;
+ g_m_zone->pages[i].page = &all_pages[i];
+ }
+
+ g_m_zone->pages[0].order = g_max_order;
+ for (i = 0; i <= g_max_order; i++) {
+ area = &g_m_zone->free_areas[i];
+ INIT_LIST_HEAD(&area->page_list);
+ area->order = i;
+ }
+
+ mb_page = &g_m_zone->pages[0];
+ list_add_tail(&mb_page->node, &area->page_list);
+ g_m_zone->all_pages = all_pages;
+ mutex_init(&g_mb_lock);
+ mailbox_debug_init();
+
+ return 0;
+}
+
+void free_mailbox_mempool(void)
+{
+ if (!g_m_zone)
+ return;
+
+ mailbox_free_pages(g_m_zone->all_pages, g_max_order);
+ g_m_zone->all_pages = NULL;
+ kfree(g_m_zone);
+ g_m_zone = NULL;
+
+ if (!g_mb_dbg_dentry)
+ return;
+ debugfs_remove_recursive(g_mb_dbg_dentry);
+ g_mb_dbg_dentry = NULL;
+}
diff --git a/tzdriver/core/mailbox_mempool.h b/tzdriver/core/mailbox_mempool.h
new file mode 100644
index 0000000000000000000000000000000000000000..30df77d553c40e9cc726ebd333ae43219dc0d5ee
--- /dev/null
+++ b/tzdriver/core/mailbox_mempool.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: mailbox memory management for sharing memory with TEE.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef MAILBOX_MEMPOOL_H
+#define MAILBOX_MEMPOOL_H
+
+#include
+#include
+#include "teek_ns_client.h"
+
+#ifndef MAILBOX_POOL_SIZE
+#define MAILBOX_POOL_SIZE SZ_4M
+#endif
+
+/* alloc options */
+#define MB_FLAG_ZERO 0x1 /* set 0 after alloc page */
+#define GLOBAL_UUID_LEN 17 /* first char represent global cmd */
+
+void *mailbox_alloc(size_t size, unsigned int flag);
+void mailbox_free(const void *ptr);
+int mailbox_mempool_init(void);
+void free_mailbox_mempool(void);
+struct mb_cmd_pack *mailbox_alloc_cmd_pack(void);
+void *mailbox_copy_alloc(const void *src, size_t size);
+int re_register_mailbox(void);
+uintptr_t mailbox_virt_to_phys(uintptr_t addr);
+
+#endif
diff --git a/tzdriver/core/mem.c b/tzdriver/core/mem.c
new file mode 100644
index 0000000000000000000000000000000000000000..75f49977f9a1e978fee2d1302f2e379adb5070fd
--- /dev/null
+++ b/tzdriver/core/mem.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: memory operations for gp sharedmem.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "mem.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "smc_smp.h"
+#include "tc_ns_client.h"
+#include "teek_ns_client.h"
+#include "agent.h"
+#include "tc_ns_log.h"
+#include "mailbox_mempool.h"
+#include "internal_functions.h"
+#include "reserved_mempool.h"
+
+void tc_mem_free(struct tc_ns_shared_mem *shared_mem)
+{
+ if (!shared_mem)
+ return;
+ if (shared_mem->mem_type == RESERVED_TYPE) {
+ reserved_mem_free(shared_mem->kernel_addr);
+ kfree(shared_mem);
+ return;
+ }
+
+ if (shared_mem->kernel_addr) {
+#ifndef CONFIG_LIBLINUX
+ vfree(shared_mem->kernel_addr);
+#else
+ kfree(shared_mem->kernel_addr);
+#endif
+ shared_mem->kernel_addr = NULL;
+ }
+ kfree(shared_mem);
+}
+
+static void init_shared_mem(struct tc_ns_shared_mem *sh, void *addr, size_t len)
+{
+ sh->kernel_addr = addr;
+ sh->len = (uint32_t)len;
+ sh->user_addr = INVALID_MAP_ADDR;
+ sh->user_addr_ca = INVALID_MAP_ADDR;
+ atomic_set(&sh->usage, 0);
+}
+struct tc_ns_shared_mem *tc_mem_allocate(size_t len)
+{
+ struct tc_ns_shared_mem *shared_mem = NULL;
+ void *addr = NULL;
+
+ shared_mem = kmalloc(sizeof(*shared_mem), GFP_KERNEL | __GFP_ZERO);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)shared_mem)) {
+ tloge("shared_mem kmalloc failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ shared_mem->mem_type = VMALLOC_TYPE;
+ len = ALIGN(len, SZ_4K);
+ if (exist_res_mem()) {
+ if (len > get_res_mem_slice_size()) {
+ tloge("allocate reserved mem size too large\n");
+ kfree(shared_mem);
+ return ERR_PTR(-EINVAL);
+ }
+ addr = reserved_mem_alloc(len);
+ if (addr) {
+ shared_mem->mem_type = RESERVED_TYPE;
+ init_shared_mem(shared_mem, addr, len);
+ return shared_mem;
+ } else {
+ tlogw("no more reserved memory to alloc so we use system vmalloc.\n");
+ }
+ }
+ if (len > MAILBOX_POOL_SIZE) {
+ tloge("alloc sharemem size %zu is too large\n", len);
+ kfree(shared_mem);
+ return ERR_PTR(-EINVAL);
+ }
+#ifndef CONFIG_LIBLINUX
+ addr = vmalloc_user(len);
+#else
+ addr = kzalloc(len, GFP_KERNEL);
+#endif
+ if (!addr) {
+ tloge("alloc mailbox failed\n");
+ kfree(shared_mem);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init_shared_mem(shared_mem, addr, len);
+ return shared_mem;
+}
diff --git a/tzdriver/core/mem.h b/tzdriver/core/mem.h
new file mode 100644
index 0000000000000000000000000000000000000000..81a68b0f1bd176068a9715d59047b88325a73fbf
--- /dev/null
+++ b/tzdriver/core/mem.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: memory operations for gp sharedmem.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef MEM_H
+#define MEM_H
+#include
+#include "teek_ns_client.h"
+
+#define PRE_ALLOCATE_SIZE (1024*1024)
+#define MEM_POOL_ELEMENT_SIZE (64*1024)
+#define MEM_POOL_ELEMENT_NR (8)
+#define MEM_POOL_ELEMENT_ORDER (4)
+
+struct tc_ns_shared_mem *tc_mem_allocate(size_t len);
+void tc_mem_free(struct tc_ns_shared_mem *shared_mem);
+
+static inline void get_sharemem_struct(struct tc_ns_shared_mem *sharemem)
+{
+ if (sharemem != NULL)
+ atomic_inc(&sharemem->usage);
+}
+
+static inline void put_sharemem_struct(struct tc_ns_shared_mem *sharemem)
+{
+ if (sharemem != NULL) {
+ if (atomic_dec_and_test(&sharemem->usage))
+ tc_mem_free(sharemem);
+ }
+}
+
+#endif
diff --git a/tzdriver/core/reserved_mempool.c b/tzdriver/core/reserved_mempool.c
new file mode 100644
index 0000000000000000000000000000000000000000..2b9f74ca0cc5a3bb4231d13f32429b082d107a93
--- /dev/null
+++ b/tzdriver/core/reserved_mempool.c
@@ -0,0 +1,523 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: memory management for reserved memory shared with TEE.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "reserved_mempool.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include "teek_client_constants.h"
+#include "tc_ns_log.h"
+#include "smc_smp.h"
+
+#define STATE_MODE 0440U
+#define SLICE_RATE 4
+#define MAX_SLICE 0x400000
+#define MIN_RES_MEM_SIZE 0x400000
+
+struct virt_page {
+ unsigned long start;
+};
+
+struct reserved_page_t {
+ struct list_head node;
+ struct virt_page *page;
+ int order;
+ unsigned int count; /* whether the block is in use */
+};
+
+struct reserved_free_area_t {
+ struct list_head page_list;
+ int order;
+};
+
+struct reserved_zone_t {
+ struct virt_page *all_pages;
+ struct reserved_page_t *pages;
+ struct reserved_free_area_t free_areas[0];
+};
+
+static struct reserved_zone_t *g_res_zone;
+static struct mutex g_res_lock;
+static int g_res_max_order;
+static unsigned long g_start_vaddr = 0;
+static unsigned long g_start_paddr;
+static struct dentry *g_res_mem_dbg_dentry;
+static unsigned int g_res_mem_size = 0;
+
+static unsigned int get_res_page_size(void)
+{
+ return g_res_mem_size >> PAGE_SHIFT;
+}
+
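+/*
+ * round rsize down to the largest power of two not exceeding it;
+ * the buddy allocator needs a power-of-two pool size, and a size
+ * that is already a power of two (or zero) is returned unchanged
+ */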
+static unsigned int calc_res_mem_size(unsigned int rsize)
+{
+ unsigned int size = rsize;
+ unsigned int idx = 0;
+
+ if (size == 0 || (size & (size - 1)) == 0)
+ return size;
+
+ while (size != 0) {
+ size = size >> 1;
+ idx++;
+ }
+ return (1 << (idx - 1));
+}
+
+unsigned int get_res_mem_slice_size(void)
+{
+ unsigned int size = (g_res_mem_size >> SLICE_RATE);
+ return (size > MAX_SLICE) ? MAX_SLICE : size;
+}
+
+bool exist_res_mem(void)
+{
+ return (g_start_vaddr != 0) && (g_res_mem_size != 0);
+}
+
+unsigned long res_mem_virt_to_phys(unsigned long vaddr)
+{
+ return vaddr - g_start_vaddr + g_start_paddr;
+}
+
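+/*
+ * look up the "tz_reserved" device tree node, ioremap its memory
+ * region and record the mapping; the usable pool size is rounded
+ * down to a power of two for the buddy allocator
+ */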
+int load_reserved_mem(void)
+{
+ struct device_node *np = NULL;
+ struct resource r;
+ unsigned int res_size;
+ int rc;
+ void *p = NULL;
+
+ np = of_find_compatible_node(NULL, NULL, "tz_reserved");
+ if (np == NULL) {
+ tlogd("can not find reserved memory.\n");
+ return 0;
+ }
+
+ rc = of_address_to_resource(np, 0, &r);
+ if (rc != 0) {
+ tloge("of_address_to_resource error\n");
+ return -ENODEV;
+ }
+
+ res_size = (unsigned int)resource_size(&r);
+ if (res_size < MIN_RES_MEM_SIZE) {
+ tloge("reserved memory size is too small\n");
+ return -EINVAL;
+ }
+
+ p = ioremap(r.start, res_size);
+ if (p == NULL) {
+ tloge("io remap for reserved memory failed\n");
+ return -ENOMEM;
+ }
+ g_start_vaddr = (unsigned long)(uintptr_t)p;
+ g_start_paddr = (unsigned long)r.start;
+ g_res_mem_size = calc_res_mem_size(res_size);
+
+ return 0;
+}
+
+void unmap_res_mem(void)
+{
+ if (exist_res_mem()) {
+ iounmap((void __iomem *)g_start_vaddr);
+ g_start_vaddr = 0;
+ g_res_mem_size = 0;
+ }
+}
+
+static int create_zone(void)
+{
+ size_t zone_len;
+ g_res_max_order = get_order(g_res_mem_size);
+ zone_len = sizeof(struct reserved_free_area_t) * (g_res_max_order + 1) + sizeof(*g_res_zone);
+
+ g_res_zone = kzalloc(zone_len, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)g_res_zone)) {
+ tloge("fail to create zone\n");
+ return -ENOMEM;
+ }
+
+ g_res_zone->pages = kzalloc(sizeof(struct reserved_page_t) * get_res_page_size(), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)g_res_zone->pages)) {
+ tloge("failed to alloc zone pages\n");
+ kfree(g_res_zone);
+ g_res_zone = NULL;
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static struct virt_page *create_virt_pages(void)
+{
+ unsigned int i = 0;
+ struct virt_page *pages = NULL;
+
+ pages = kzalloc(get_res_page_size() * sizeof(struct virt_page), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)pages)) {
+ tloge("alloc pages failed\n");
+ return NULL;
+ }
+
+ for (i = 0; i < get_res_page_size(); i++)
+ pages[i].start = g_start_vaddr + i * PAGE_SIZE;
+ return pages;
+}
+
+void free_reserved_mempool(void)
+{
+ if (!exist_res_mem() || g_res_zone == NULL)
+ return;
+
+ kfree(g_res_zone->all_pages);
+ g_res_zone->all_pages = NULL;
+
+ kfree(g_res_zone->pages);
+ g_res_zone->pages = NULL;
+
+ kfree(g_res_zone);
+ g_res_zone = NULL;
+
+ if (!g_res_mem_dbg_dentry)
+ return;
+ debugfs_remove_recursive(g_res_mem_dbg_dentry);
+ g_res_mem_dbg_dentry = NULL;
+}
+
+static void show_res_mem_info(void)
+{
+ unsigned int i;
+ struct reserved_page_t *pos = NULL;
+ struct list_head *head = NULL;
+ unsigned int used = 0;
+
+ if (g_res_zone == NULL) {
+ tloge("res zone is NULL\n");
+ return;
+ }
+
+ tloge("################## reserved memory info ######################\n");
+ mutex_lock(&g_res_lock);
+ for (i = 0; i < get_res_page_size(); i++) {
+ if (g_res_zone->pages[i].count != 0) {
+ tloge("page[%02d], order=%02d, count=%d\n",
+ i, g_res_zone->pages[i].order,
+ g_res_zone->pages[i].count);
+ used += (1 << (uint32_t)g_res_zone->pages[i].order);
+ }
+ }
+ tloge("reserved memory total usage:%u/%u\n", used, get_res_page_size());
+ tloge("--------------------------------------------------------------\n");
+
+ for (i = 0; i < (unsigned int)g_res_max_order; i++) {
+ head = &g_res_zone->free_areas[i].page_list;
+ if (list_empty(head) != 0) {
+ tloge("order[%02d] is empty\n", i);
+ } else {
+ list_for_each_entry(pos, head, node)
+ tloge("order[%02d]\n", i);
+ }
+ }
+ mutex_unlock(&g_res_lock);
+
+ tloge("#############################################################\n");
+}
+
+static ssize_t mb_res_mem_state_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ (void)(filp);
+ (void)(ubuf);
+ (void)cnt;
+ (void)(ppos);
+ show_res_mem_info();
+ return 0;
+}
+
+static const struct file_operations g_res_mem_dbg_state_fops = {
+ .owner = THIS_MODULE,
+ .read = mb_res_mem_state_read,
+};
+
+static void init_res_mem_dentry(void)
+{
+#ifdef DEF_ENG
+ g_res_mem_dbg_dentry = debugfs_create_dir("tz_res_mem", NULL);
+ debugfs_create_file("state", STATE_MODE, g_res_mem_dbg_dentry, NULL, &g_res_mem_dbg_state_fops);
+#endif
+}
+
+static int res_mem_register(unsigned long paddr, unsigned int size)
+{
+ struct tc_ns_operation *operation = NULL;
+ struct tc_ns_smc_cmd *smc_cmd = NULL;
+ int ret = 0;
+
+ smc_cmd = kzalloc(sizeof(*smc_cmd), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)smc_cmd)) {
+ tloge("alloc smc_cmd failed\n");
+ return -ENOMEM;
+ }
+
+ operation = kzalloc(sizeof(*operation), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)operation)) {
+ tloge("alloc operation failed\n");
+ ret = -ENOMEM;
+ goto free_smc_cmd;
+ }
+
+ operation->paramtypes = TEE_PARAM_TYPE_VALUE_INPUT |
+ (TEE_PARAM_TYPE_VALUE_INPUT << TEE_PARAM_NUM);
+ operation->params[0].value.a = paddr;
+ operation->params[0].value.b = paddr >> ADDR_TRANS_NUM;
+ operation->params[1].value.a = size;
+
+ smc_cmd->cmd_type = CMD_TYPE_GLOBAL;
+ smc_cmd->cmd_id = GLOBAL_CMD_ID_REGISTER_RESMEM;
+ smc_cmd->operation_phys = virt_to_phys(operation);
+ smc_cmd->operation_h_phys = virt_to_phys(operation) >> ADDR_TRANS_NUM;
+
+ if (tc_ns_smc(smc_cmd) != 0) {
+ tloge("resigter res mem failed\n");
+ ret = -EIO;
+ }
+
+ kfree(operation);
+ operation = NULL;
+free_smc_cmd:
+ kfree(smc_cmd);
+ smc_cmd = NULL;
+ return ret;
+}
+
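+/*
+ * initial free-list setup: mark every page as unused, then hang
+ * each max-order block on the highest-order free list; since the
+ * pool size is a power of two this is normally a single block
+ */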
+static void zone_init(struct virt_page *all_pages)
+{
+ int i;
+ struct reserved_free_area_t *area = NULL;
+ int max_order_cnt;
+ struct reserved_page_t *res_page = NULL;
+
+ for (i = 0; i < (int)get_res_page_size(); i++) {
+ g_res_zone->pages[i].order = -1;
+ g_res_zone->pages[i].count = 0;
+ g_res_zone->pages[i].page = &all_pages[i];
+ }
+
+ for (i = 0; i <= g_res_max_order; i++) {
+ area = &g_res_zone->free_areas[i];
+ INIT_LIST_HEAD(&area->page_list);
+ area->order = i;
+ }
+
+ max_order_cnt = (int)(get_res_page_size() / (1 << (unsigned int)g_res_max_order));
+ g_res_zone->all_pages = all_pages;
+ for (i = 0; i < max_order_cnt; i++) {
+ int idx = i * (1 << (unsigned int)g_res_max_order);
+ g_res_zone->pages[idx].order = g_res_max_order;
+ res_page = &g_res_zone->pages[idx];
+ list_add_tail(&res_page->node, &area->page_list);
+ }
+}
+
+int reserved_mempool_init(void)
+{
+ struct virt_page *all_pages = NULL;
+ int ret = 0;
+ unsigned long paddr;
+
+ if (!exist_res_mem())
+ return 0;
+
+ ret = create_zone();
+ if (ret != 0)
+ return ret;
+
+ all_pages = create_virt_pages();
+ if (all_pages == NULL) {
+ kfree(g_res_zone->pages);
+ g_res_zone->pages = NULL;
+ kfree(g_res_zone);
+ g_res_zone = NULL;
+ return -ENOMEM;
+ }
+
+ paddr = g_start_paddr;
+ ret = res_mem_register(paddr, g_res_mem_size);
+ if (ret != 0) {
+ kfree(all_pages);
+ all_pages = NULL;
+ kfree(g_res_zone->pages);
+ g_res_zone->pages = NULL;
+ kfree(g_res_zone);
+ g_res_zone = NULL;
+ return -EIO;
+ }
+
+ zone_init(all_pages);
+
+ mutex_init(&g_res_lock);
+ init_res_mem_dentry();
+ return 0;
+}
+
+void *reserved_mem_alloc(size_t size)
+{
+ int i, j;
+ struct reserved_page_t *pos = NULL;
+ struct list_head *head = NULL;
+ int order = get_order(ALIGN(size, SZ_4K));
+ unsigned long addr = 0;
+
+ bool valid_param = (size > 0 && order <= g_res_max_order && order >= 0);
+ if (!valid_param) {
+ tloge("invalid alloc param, size %d, order %d, max %d\n",(int)size, order, g_res_max_order);
+ return NULL;
+ }
+ mutex_lock(&g_res_lock);
+ for (i = order; i <= g_res_max_order; i++) {
+ head = &g_res_zone->free_areas[i].page_list;
+ if (list_empty(head) != 0)
+ continue;
+
+ pos = list_first_entry(head, struct reserved_page_t, node);
+ pos->count = 1;
+ pos->order = order;
+
+ for (j = order; j < i; j++) {
+ struct reserved_page_t *new_page = NULL;
+ new_page = pos + (1 << (unsigned int)j);
+ new_page->count = 0;
+ new_page->order = j;
+ list_add_tail(&new_page->node, &g_res_zone->free_areas[j].page_list);
+ }
+ list_del(&pos->node);
+ addr = pos->page->start;
+ break;
+ }
+ mutex_unlock(&g_res_lock);
+ return (void *)(uintptr_t)addr;
+}
+
+static int get_virt_page_index(const void *ptr)
+{
+ unsigned long vaddr = (unsigned long)(uintptr_t)ptr;
+ unsigned long offset = vaddr - g_start_vaddr;
+ int pg_idx = offset / (1 << PAGE_SHIFT);
+ if ((unsigned int)pg_idx >= get_res_page_size() || pg_idx < 0)
+ return -1;
+ return pg_idx;
+}
+
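+/*
+ * merge a freed block upwards with its buddy (index ^ (1 << order))
+ * at each order; returns -1 when the block was parked on a
+ * lower-order free list, or 0 with *page_index set when a max-order
+ * block results and the caller must hang it on the top free list
+ */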
+static int buddy_merge(struct virt_page *vpage, int order, unsigned int *page_index)
+{
+ int i;
+ unsigned int cur_idx;
+ unsigned int buddy_idx;
+ struct reserved_page_t *self = NULL;
+ struct reserved_page_t *buddy = NULL;
+
+ for (i = order; i < g_res_max_order; i++) {
+ cur_idx = vpage - g_res_zone->all_pages;
+ buddy_idx = cur_idx ^ (1 << (unsigned int)i);
+ self = &g_res_zone->pages[cur_idx];
+ buddy = &g_res_zone->pages[buddy_idx];
+ self->count = 0;
+ /* is buddy free */
+ if (buddy->order == i && buddy->count == 0) {
+ /* release buddy */
+ list_del(&buddy->node);
+ /* combine self and buddy */
+ if (cur_idx > buddy_idx) {
+ vpage = buddy->page;
+ buddy->order = i + 1;
+ self->order = -1;
+ } else {
+ self->order = i + 1;
+ buddy->order = -1;
+ }
+ } else {
+ /* release self */
+ list_add_tail(&self->node,
+ &g_res_zone->free_areas[i].page_list);
+ return -1;
+ }
+ }
+
+ if (order == g_res_max_order) {
+ cur_idx = vpage - g_res_zone->all_pages;
+ tlogd("no need to find buddy, cur is %u\n", cur_idx);
+ *page_index = cur_idx;
+ return 0;
+ }
+ *page_index = (cur_idx > buddy_idx) ? buddy_idx : cur_idx;
+ return 0;
+}
+
+void reserved_mem_free(const void *ptr)
+{
+ struct reserved_page_t *self = NULL;
+ int self_idx;
+ unsigned int page_index;
+ struct reserved_page_t *max_order_page = NULL;
+
+ if (ptr == NULL) {
+ tloge("invalid ptr\n");
+ return;
+ }
+
+ mutex_lock(&g_res_lock);
+ self_idx = get_virt_page_index(ptr);
+ if (self_idx < 0) {
+ mutex_unlock(&g_res_lock);
+ tloge("invalid page\n");
+ return;
+ }
+ self = &g_res_zone->pages[self_idx];
+ if (self->count == 0) {
+ tloge("already free in reserved mempool\n");
+ mutex_unlock(&g_res_lock);
+ return;
+ }
+
+ if (buddy_merge(self->page, self->order, &page_index) < 0) {
+ mutex_unlock(&g_res_lock);
+ return;
+ }
+
+ max_order_page = &g_res_zone->pages[page_index];
+ list_add_tail(&max_order_page->node,
+ &g_res_zone->free_areas[g_res_max_order].page_list);
+ mutex_unlock(&g_res_lock);
+}
diff --git a/tzdriver/core/reserved_mempool.h b/tzdriver/core/reserved_mempool.h
new file mode 100644
index 0000000000000000000000000000000000000000..1728bb0a482f9e62507d19fb558afb5ba9515472
--- /dev/null
+++ b/tzdriver/core/reserved_mempool.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: reserved memory management for sharing memory with TEE.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef RESERVED_MEMPOOL_H
+#define RESERVED_MEMPOOL_H
+
+#include
+#include
+
+int load_reserved_mem(void);
+void unmap_res_mem(void);
+void *reserved_mem_alloc(size_t size);
+void free_reserved_mempool(void);
+int reserved_mempool_init(void);
+void reserved_mem_free(const void *ptr);
+bool exist_res_mem(void);
+unsigned long res_mem_virt_to_phys(unsigned long vaddr);
+unsigned int get_res_mem_slice_size(void);
+#endif
diff --git a/tzdriver/core/secs_power_ctrl.h b/tzdriver/core/secs_power_ctrl.h
new file mode 100644
index 0000000000000000000000000000000000000000..62b8a0f2168256397c666b2aa2dbfd582ca3970f
--- /dev/null
+++ b/tzdriver/core/secs_power_ctrl.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: function declarations for secs power ctrl.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef SECS_POWER_CTRL_H
+#define SECS_POWER_CTRL_H
+
+#include
+
+#ifdef CONFIG_HISI_SECS_CTRL
+#include
+
+#define SECS_SUSPEND_STATUS 0xA5A5
+unsigned long get_secs_suspend_status(void);
+
+static inline int power_on_cc(void)
+{
+ return hisi_secs_power_on();
+}
+
+static inline int power_down_cc(void)
+{
+ return hisi_secs_power_down();
+}
+
+static inline void secs_suspend_status(uint64_t target)
+{
+ if (get_secs_suspend_status() == SECS_SUSPEND_STATUS)
+ tloge("WARNING irq during suspend! No = %llu\n", target);
+}
+#else
+
+static inline int power_on_cc(void)
+{
+ return 0;
+}
+
+static inline int power_down_cc(void)
+{
+ return 0;
+}
+
+static inline void secs_suspend_status(uint64_t target)
+{
+ (void)target;
+}
+#endif
+
+#endif
diff --git a/tzdriver/core/session_manager.c b/tzdriver/core/session_manager.c
new file mode 100644
index 0000000000000000000000000000000000000000..3063932a566cb5d9ac284f85a46027ad6356d1e2
--- /dev/null
+++ b/tzdriver/core/session_manager.c
@@ -0,0 +1,1459 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: functions for session management.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "session_manager.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE)
+#include
+#include
+#include
+#endif
+#include
+#include
+#include "smc_smp.h"
+#include "mem.h"
+#include "gp_ops.h"
+#include "tc_ns_log.h"
+#include "teek_client_constants.h"
+#include "client_hash_auth.h"
+#include "mailbox_mempool.h"
+#include "tc_client_driver.h"
+#include "internal_functions.h"
+#include "dynamic_ion_mem.h"
+#include "ko_adapt.h"
+
+#ifdef CONFIG_CRL_PATH
+#include "tz_update_crl.h"
+uint8_t g_update_crl_flag = 0;
+#endif
+
+static DEFINE_MUTEX(g_load_app_lock);
+#define MAX_REF_COUNT (255)
+
+/* records all service nodes; guarded by a mutex to avoid races */
+struct list_head g_service_list;
+DEFINE_MUTEX(g_service_list_lock);
+
+void init_srvc_list(void)
+{
+ INIT_LIST_HEAD(&g_service_list);
+}
+
+void get_session_struct(struct tc_ns_session *session)
+{
+ if (!session)
+ return;
+
+ atomic_inc(&session->usage);
+}
+
+void put_session_struct(struct tc_ns_session *session)
+{
+ if (!session || !atomic_dec_and_test(&session->usage))
+ return;
+
+ if (memset_s(session, sizeof(*session), 0, sizeof(*session)) != 0)
+ tloge("Caution, memset failed!\n");
+ kfree(session);
+}
+
+void get_service_struct(struct tc_ns_service *service)
+{
+ if (!service)
+ return;
+
+ atomic_inc(&service->usage);
+ tlogd("service->usage = %d\n", atomic_read(&service->usage));
+}
+
+void put_service_struct(struct tc_ns_service *service)
+{
+ if (!service)
+ return;
+
+ tlogd("service->usage = %d\n", atomic_read(&service->usage));
+ mutex_lock(&g_service_list_lock);
+ if (atomic_dec_and_test(&service->usage)) {
+ tlogd("del service [0x%x] from service list\n",
+ *(uint32_t *)service->uuid);
+ list_del(&service->head);
+ kfree(service);
+ }
+ mutex_unlock(&g_service_list_lock);
+}
+
+static int add_service_to_dev(struct tc_ns_dev_file *dev,
+ struct tc_ns_service *service)
+{
+ uint32_t i;
+
+ if (!dev || !service)
+ return -EINVAL;
+
+ for (i = 0; i < SERVICES_MAX_COUNT; i++) {
+ if (!dev->services[i]) {
+ tlogd("add service %u to %u\n", i, dev->dev_file_id);
+ dev->services[i] = service;
+ dev->service_ref[i] = 1;
+ return 0;
+ }
+ }
+ return -EFAULT;
+}
+
+static void tz_srv_sess_dump(const char *param)
+{
+ struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };
+
+ (void)param;
+ smc_cmd.cmd_id = GLOBAL_CMD_ID_DUMP_SRV_SESS;
+ smc_cmd.cmd_type = CMD_TYPE_GLOBAL;
+
+ livepatch_down_read_sem();
+ if (tc_ns_smc(&smc_cmd))
+ tloge("send dump service session failed\n");
+ livepatch_up_read_sem();
+}
+
+void dump_services_status(const char *param)
+{
+ struct tc_ns_service *service = NULL;
+
+ (void)param;
+ mutex_lock(&g_service_list_lock);
+ tlogi("show service list:\n");
+ list_for_each_entry(service, &g_service_list, head) {
+ tlogi("uuid-%x, usage=%d\n", *(uint32_t *)service->uuid,
+ atomic_read(&service->usage));
+ }
+ mutex_unlock(&g_service_list_lock);
+
+ tz_srv_sess_dump(param);
+}
+
+static void del_service_from_dev(struct tc_ns_dev_file *dev,
+ struct tc_ns_service *service)
+{
+ uint32_t i;
+
+ for (i = 0; i < SERVICES_MAX_COUNT; i++) {
+ if (dev->services[i] == service) {
+ tlogd("dev service ref-%u = %u\n", i,
+ dev->service_ref[i]);
+ if (dev->service_ref[i] == 0) {
+ tloge("Caution! No service to be deleted!\n");
+ break;
+ }
+ dev->service_ref[i]--;
+ if (dev->service_ref[i] == 0) {
+ tlogd("del service %u from %u\n",
+ i, dev->dev_file_id);
+ dev->services[i] = NULL;
+ put_service_struct(service);
+ }
+ break;
+ }
+ }
+}
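+
+/*
+ * Reference counting works on two levels: service->usage counts how many
+ * owners (devs and in-flight lookups) hold the service, while
+ * dev->service_ref[i] counts how many sessions within one dev use it.
+ * put_service_struct() is only called once a dev's local count drops to 0.
+ */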
+
+struct tc_ns_session *tc_find_session_withowner(
+ const struct list_head *session_list,
+ unsigned int session_id, const struct tc_ns_dev_file *dev_file)
+{
+ struct tc_ns_session *session = NULL;
+
+ if (!session_list || !dev_file) {
+ tloge("session list or dev is null\n");
+ return NULL;
+ }
+
+ list_for_each_entry(session, session_list, head) {
+ if (session->session_id == session_id &&
+ session->owner == dev_file)
+ return session;
+ }
+ return NULL;
+}
+
+struct tc_ns_service *tc_find_service_in_dev(const struct tc_ns_dev_file *dev,
+ const unsigned char *uuid, int uuid_size)
+{
+ uint32_t i;
+
+ if (!dev || !uuid || uuid_size != UUID_LEN)
+ return NULL;
+
+ for (i = 0; i < SERVICES_MAX_COUNT; i++) {
+ if (dev->services[i] != NULL &&
+ memcmp(dev->services[i]->uuid, uuid, UUID_LEN) == 0)
+ return dev->services[i];
+ }
+ return NULL;
+}
+
+struct tc_ns_session *tc_find_session_by_uuid(unsigned int dev_file_id,
+ const struct tc_ns_smc_cmd *cmd)
+{
+ struct tc_ns_dev_file *dev_file = NULL;
+ struct tc_ns_service *service = NULL;
+ struct tc_ns_session *session = NULL;
+
+ if (!cmd) {
+ tloge("parameter is null pointer!\n");
+ return NULL;
+ }
+
+ dev_file = tc_find_dev_file(dev_file_id);
+ if (!dev_file) {
+ tloge("can't find dev file!\n");
+ return NULL;
+ }
+
+ mutex_lock(&dev_file->service_lock);
+ service = tc_find_service_in_dev(dev_file, cmd->uuid, UUID_LEN);
+ get_service_struct(service);
+ mutex_unlock(&dev_file->service_lock);
+ if (!service) {
+ tloge("can't find service!\n");
+ return NULL;
+ }
+
+ mutex_lock(&service->session_lock);
+ session = tc_find_session_withowner(&service->session_list,
+ cmd->context_id, dev_file);
+ get_session_struct(session);
+ mutex_unlock(&service->session_lock);
+ put_service_struct(service);
+ if (!session) {
+ tloge("can't find session-0x%x!\n", cmd->context_id);
+ return NULL;
+ }
+ return session;
+}
+
+static int tc_ns_need_load_image(unsigned int file_id,
+ const unsigned char *uuid, unsigned int uuid_len)
+{
+ int ret;
+ int smc_ret;
+ struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };
+ struct mb_cmd_pack *mb_pack = NULL;
+ char *mb_param = NULL;
+
+ if (!uuid || uuid_len != UUID_LEN) {
+ tloge("invalid uuid\n");
+ return -EINVAL;
+ }
+ mb_pack = mailbox_alloc_cmd_pack();
+ if (!mb_pack) {
+ tloge("alloc mb pack failed\n");
+ return -ENOMEM;
+ }
+ mb_param = mailbox_copy_alloc(uuid, uuid_len);
+ if (!mb_param) {
+ tloge("alloc mb param failed\n");
+ ret = -ENOMEM;
+ goto clean;
+ }
+ mb_pack->operation.paramtypes = TEEC_MEMREF_TEMP_INOUT;
+ mb_pack->operation.params[0].memref.buffer =
+ mailbox_virt_to_phys((uintptr_t)mb_param);
+ mb_pack->operation.buffer_h_addr[0] =
+ (uint64_t)mailbox_virt_to_phys((uintptr_t)mb_param) >> ADDR_TRANS_NUM;
+ mb_pack->operation.params[0].memref.size = SZ_4K;
+ smc_cmd.cmd_id = GLOBAL_CMD_ID_NEED_LOAD_APP;
+ smc_cmd.cmd_type = CMD_TYPE_GLOBAL;
+ smc_cmd.dev_file_id = file_id;
+ smc_cmd.context_id = 0;
+ smc_cmd.operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation);
+ smc_cmd.operation_h_phys =
+ (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM;
+
+ smc_ret = tc_ns_smc(&smc_cmd);
+ if (smc_ret != 0) {
+ tloge("smc call returns error ret 0x%x\n", smc_ret);
+ ret = -EFAULT;
+ goto clean;
+ } else {
+ ret = *(int *)mb_param;
+ }
+clean:
+ if (mb_param)
+ mailbox_free(mb_param);
+ mailbox_free(mb_pack);
+
+ return ret;
+}
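+
+/*
+ * The query above follows the usual mailbox pattern: the UUID is copied
+ * into a mailbox buffer passed as an INOUT memref, and after the SMC the
+ * TEE has overwritten the same buffer with an int result (1 means the TA
+ * image still has to be loaded).
+ */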
+
+int tc_ns_load_secfile(struct tc_ns_dev_file *dev_file,
+ void __user *argp, bool is_from_client_node)
+{
+ int ret;
+ struct load_secfile_ioctl_struct ioctl_arg = { {0}, {0}, {NULL} };
+ bool load = true;
+ void *file_addr = NULL;
+
+ if (!dev_file || !argp) {
+ tloge("Invalid params !\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&ioctl_arg, argp, sizeof(ioctl_arg)) != 0) {
+ tloge("copy from user failed\n");
+ return -EFAULT;
+ }
+
+ if (ioctl_arg.sec_file_info.secfile_type >= LOAD_TYPE_MAX ||
+ ioctl_arg.sec_file_info.secfile_type == LOAD_PATCH) {
+ tloge("invalid secfile type: %d!", ioctl_arg.sec_file_info.secfile_type);
+ return -EINVAL;
+ }
+
+ mutex_lock(&g_load_app_lock);
+ if (is_from_client_node) {
+ if (ioctl_arg.sec_file_info.secfile_type != LOAD_TA &&
+ ioctl_arg.sec_file_info.secfile_type != LOAD_LIB) {
+ tloge("this node does not allow this type of file to be loaded\n");
+ mutex_unlock(&g_load_app_lock);
+ return -EINVAL;
+ }
+ }
+
+ if (ioctl_arg.sec_file_info.secfile_type == LOAD_TA) {
+ ret = tc_ns_need_load_image(dev_file->dev_file_id, ioctl_arg.uuid, (unsigned int)UUID_LEN);
+ if (ret != 1) /* 1 means we need to load image */
+ load = false;
+ }
+
+ if (load) {
+ file_addr = (void *)(uintptr_t)(ioctl_arg.memref.file_addr |
+ (((uint64_t)ioctl_arg.memref.file_h_addr) << ADDR_TRANS_NUM));
+ ret = tc_ns_load_image(dev_file, file_addr, &ioctl_arg.sec_file_info, NULL);
+ if (ret != 0)
+ tloge("load TA secfile: %d failed, ret = 0x%x\n",
+ ioctl_arg.sec_file_info.secfile_type, ret);
+ }
+ mutex_unlock(&g_load_app_lock);
+ if (copy_to_user(argp, &ioctl_arg, sizeof(ioctl_arg)) != 0)
+ tloge("copy to user failed\n");
+ return ret;
+}
+
+static uint32_t tc_ns_get_uid(void)
+{
+ struct task_struct *task = NULL;
+ const struct cred *cred = NULL;
+ uint32_t uid;
+
+ rcu_read_lock();
+ task = get_current();
+ get_task_struct(task);
+ rcu_read_unlock();
+ cred = koadpt_get_task_cred(task);
+ if (!cred) {
+ tloge("failed to get uid of the task\n");
+ put_task_struct(task);
+ return (uint32_t)(-1);
+ }
+
+ uid = cred->uid.val;
+ put_cred(cred);
+ put_task_struct(task);
+ tlogd("current uid is %u\n", uid);
+ return uid;
+}
+
+#ifdef CONFIG_AUTH_SUPPORT_UNAME
+static int set_login_information_uname(struct tc_ns_dev_file *dev_file, uint32_t uid)
+{
+ char uname[MAX_NAME_LENGTH] = { 0 };
+ uint32_t username_len = 0;
+ int ret = tc_ns_get_uname(uid, uname, sizeof(uname), &username_len);
+ if (ret < 0 || username_len >= MAX_NAME_LENGTH) {
+ tloge("get user name filed\n");
+ return -EFAULT;
+ }
+ if (memcpy_s(dev_file->pub_key, MAX_PUBKEY_LEN, uname, username_len)) {
+ tloge("failed to copy username, pub key len=%u\n", dev_file->pub_key_len);
+ return -EFAULT;
+ }
+ /* use pub_key to store username info */
+ dev_file->pub_key_len = username_len;
+ return 0;
+}
+#else
+static int set_login_information_uid(struct tc_ns_dev_file *dev_file, uint32_t ca_uid)
+{
+ if (memcpy_s(dev_file->pub_key, MAX_PUBKEY_LEN, &ca_uid, sizeof(ca_uid)) != 0) {
+ tloge("failed to copy pubkey, pub key len=%u\n",
+ dev_file->pub_key_len);
+ return -EFAULT;
+ }
+ dev_file->pub_key_len = sizeof(ca_uid);
+ return 0;
+}
+#endif
+
+/*
+ * Modify the client context so params id 2 and 3 contain temp pointers to the
+ * public key and package name for the open session. This is used for the
+ * TEEC_LOGIN_IDENTIFY open session method
+ */
+static int set_login_information(struct tc_ns_dev_file *dev_file,
+ struct tc_ns_client_context *context)
+{
+ uint64_t size_addr, buffer_addr;
+ /* The daemon failed to get login information, or none was supplied */
+ if (dev_file->pkg_name_len == 0)
+ return -EINVAL;
+ /*
+ * params[3] points at the pkg name buffer kept in the device
+ * file: pass the package name and its length to the TEE.
+ */
+ size_addr = (__u64)(uintptr_t)&dev_file->pkg_name_len;
+ buffer_addr = (__u64)(uintptr_t)dev_file->pkg_name;
+ context->params[3].memref.size_addr = (__u32)size_addr;
+ context->params[3].memref.size_h_addr = (__u32)(size_addr >> ADDR_TRANS_NUM);
+ context->params[3].memref.buffer = (__u32)buffer_addr;
+ context->params[3].memref.buffer_h_addr = (__u32)(buffer_addr >> ADDR_TRANS_NUM);
+
+ /* Set public key len and public key */
+ if (dev_file->pub_key_len == 0) {
+ /* If get public key failed, then get uid in kernel */
+ uint32_t ca_uid = tc_ns_get_uid();
+ if (ca_uid == (uint32_t)(-1)) {
+ tloge("failed to get uid of the task\n");
+ goto error;
+ }
+#ifdef CONFIG_AUTH_SUPPORT_UNAME
+ if (set_login_information_uname(dev_file, ca_uid) != 0)
+ goto error;
+#else
+ if (set_login_information_uid(dev_file, ca_uid) != 0)
+ goto error;
+#endif
+#ifdef CONFIG_AUTH_HASH
+ dev_file->pkg_name_len = strlen((const char *)dev_file->pkg_name);
+#endif
+ }
+ size_addr = (__u64)(uintptr_t)&dev_file->pub_key_len;
+ buffer_addr = (__u64)(uintptr_t)dev_file->pub_key;
+ context->params[2].memref.size_addr = (__u32)size_addr;
+ context->params[2].memref.size_h_addr = (__u32)(size_addr >> ADDR_TRANS_NUM);
+ context->params[2].memref.buffer = (__u32)buffer_addr;
+ context->params[2].memref.buffer_h_addr = (__u32)(buffer_addr >> ADDR_TRANS_NUM);
+ /* Now we mark the 2 parameters as input temp buffers */
+ context->param_types = teec_param_types(
+ teec_param_type_get(context->param_types, 0),
+ teec_param_type_get(context->param_types, 1),
+ TEEC_MEMREF_TEMP_INPUT, TEEC_MEMREF_TEMP_INPUT);
+#ifdef CONFIG_AUTH_HASH
+ if (set_login_information_hash(dev_file) != 0) {
+ tloge("set login information hash failed\n");
+ goto error;
+ }
+#endif
+ return 0;
+error:
+ return -EFAULT;
+}
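+
+/*
+ * Resulting layout after set_login_information, assuming ADDR_TRANS_NUM
+ * is the 32-bit shift used for address splitting throughout this file:
+ * params[2]: pub key (or the username stored in pub_key when
+ * CONFIG_AUTH_SUPPORT_UNAME is set)
+ * params[3]: package name of the CA
+ * Both are marked TEEC_MEMREF_TEMP_INPUT, with the 64-bit kernel
+ * addresses of buffer and length split into low/high 32-bit halves.
+ */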
+
+static int check_login_method(struct tc_ns_dev_file *dev_file,
+ struct tc_ns_client_context *context, uint8_t *flags)
+{
+ int ret;
+
+ if (!dev_file || !context || !flags)
+ return -EFAULT;
+
+ if (is_tee_rebooting()) {
+ context->returns.code = TEE_ERROR_IS_DEAD;
+ /* when ret > 0, use context return code */
+ return EFAULT;
+ }
+
+ if (context->login.method != TEEC_LOGIN_IDENTIFY) {
+ tloge("login method is not supported\n");
+ return -EINVAL;
+ }
+
+ tlogd("login method is IDENTIFY\n");
+ /* check if usr params 0 and 1 are valid */
+ if (dev_file->kernel_api == TEE_REQ_FROM_USER_MODE &&
+ (!tc_user_param_valid(context, (unsigned int)0) ||
+ !tc_user_param_valid(context, (unsigned int)1)))
+ return -EINVAL;
+
+ ret = set_login_information(dev_file, context);
+ if (ret != 0) {
+ tloge("set login information failed ret =%d\n", ret);
+ return ret;
+ }
+ *flags |= TC_CALL_LOGIN;
+
+ return 0;
+}
+
+static struct tc_ns_service *tc_ref_service_in_dev(struct tc_ns_dev_file *dev,
+ const unsigned char *uuid, int uuid_size, bool *is_full)
+{
+ uint32_t i;
+
+ if (uuid_size != UUID_LEN)
+ return NULL;
+
+ for (i = 0; i < SERVICES_MAX_COUNT; i++) {
+ if (dev->services[i] != NULL &&
+ memcmp(dev->services[i]->uuid, uuid, UUID_LEN) == 0) {
+ if (dev->service_ref[i] == MAX_REF_COUNT) {
+ *is_full = true;
+ return NULL;
+ }
+ dev->service_ref[i]++;
+ return dev->services[i];
+ }
+ }
+ return NULL;
+}
+
+static int tc_ns_service_init(const unsigned char *uuid, uint32_t uuid_len,
+ struct tc_ns_service **new_service)
+{
+ int ret = 0;
+ struct tc_ns_service *service = NULL;
+
+ if (!uuid || !new_service || uuid_len != UUID_LEN)
+ return -EINVAL;
+
+ service = kzalloc(sizeof(*service), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)service)) {
+ tloge("kzalloc failed\n");
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ if (memcpy_s(service->uuid, sizeof(service->uuid), uuid, uuid_len) != 0) {
+ kfree(service);
+ return -EFAULT;
+ }
+
+ INIT_LIST_HEAD(&service->session_list);
+ mutex_init(&service->session_lock);
+ list_add_tail(&service->head, &g_service_list);
+ tlogd("add service: 0x%x to service list\n", *(const uint32_t *)uuid);
+ atomic_set(&service->usage, 1);
+ mutex_init(&service->operation_lock);
+ *new_service = service;
+
+ return ret;
+}
+
+static struct tc_ns_service *tc_find_service_from_all(
+ const unsigned char *uuid, uint32_t uuid_len)
+{
+ struct tc_ns_service *service = NULL;
+
+ if (!uuid || uuid_len != UUID_LEN)
+ return NULL;
+
+ list_for_each_entry(service, &g_service_list, head) {
+ if (memcmp(service->uuid, uuid, sizeof(service->uuid)) == 0)
+ return service;
+ }
+
+ return NULL;
+}
+
+static struct tc_ns_service *find_service(struct tc_ns_dev_file *dev_file,
+ const struct tc_ns_client_context *context)
+{
+ int ret;
+ struct tc_ns_service *service = NULL;
+ bool is_full = false;
+
+ mutex_lock(&dev_file->service_lock);
+ service = tc_ref_service_in_dev(dev_file, context->uuid,
+ UUID_LEN, &is_full);
+ /* if service has been opened in this dev or ref cnt is full */
+ if (service || is_full) {
+ /*
+ * If this dev already references the service, the lookup bumps
+ * dev->service_ref[i] to record one more caller from this dev
+ * instead of incrementing service->usage. On close session,
+ * dev->service_ref[i] is decremented, and only when it reaches
+ * 0 is put_service_struct() called.
+ */
+ mutex_unlock(&dev_file->service_lock);
+ return service;
+ }
+ mutex_lock(&g_service_list_lock);
+ service = tc_find_service_from_all(context->uuid, UUID_LEN);
+ /* if service has been opened in other dev */
+ if (service) {
+ get_service_struct(service);
+ mutex_unlock(&g_service_list_lock);
+ goto add_service;
+ }
+ /* Create a new service if we couldn't find it in list */
+ ret = tc_ns_service_init(context->uuid, UUID_LEN, &service);
+ /* unlock after init to make sure find service from all is correct */
+ mutex_unlock(&g_service_list_lock);
+ if (ret != 0) {
+ tloge("service init failed");
+ mutex_unlock(&dev_file->service_lock);
+ return NULL;
+ }
+add_service:
+ ret = add_service_to_dev(dev_file, service);
+ mutex_unlock(&dev_file->service_lock);
+ if (ret != 0) {
+ /*
+ * For a new service this pairs with the initial usage of 1;
+ * for an existing service it pairs with get_service_struct.
+ */
+ put_service_struct(service);
+ service = NULL;
+ tloge("fail to add service to dev\n");
+ return NULL;
+ }
+ return service;
+}
+
+static bool is_valid_ta_size(const char *file_buffer, unsigned int file_size)
+{
+ if (!file_buffer || file_size == 0) {
+ tloge("invalid load ta size\n");
+ return false;
+ }
+
+ if (file_size > SZ_8M) {
+ tloge("not support TA larger than 8M, size=%u\n", file_size);
+ return false;
+ }
+ return true;
+}
+
+static int alloc_for_load_image(struct load_img_params *params)
+{
+ /* we will try any possible to alloc mailbox mem to load TA */
+ for (; params->mb_load_size > 0; params->mb_load_size >>= 1) {
+ params->mb_load_mem = mailbox_alloc(params->mb_load_size, 0);
+ if (params->mb_load_mem)
+ break;
+ tlogw("alloc mem size=%u for TA load mem fail\n",
+ params->mb_load_size);
+ }
+
+ if (!params->mb_load_mem) {
+ tloge("alloc TA load mem failed\n");
+ return -ENOMEM;
+ }
+
+ params->mb_pack = mailbox_alloc_cmd_pack();
+ if (!params->mb_pack) {
+ mailbox_free(params->mb_load_mem);
+ params->mb_load_mem = NULL;
+ tloge("alloc mb pack failed\n");
+ return -ENOMEM;
+ }
+
+ params->uuid_return = mailbox_alloc(sizeof(*(params->uuid_return)), 0);
+ if (!params->uuid_return) {
+ mailbox_free(params->mb_load_mem);
+ params->mb_load_mem = NULL;
+ mailbox_free(params->mb_pack);
+ params->mb_pack = NULL;
+ tloge("alloc uuid failed\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void pack_load_frame_cmd(uint32_t load_size,
+ const struct load_img_params *params, struct tc_ns_smc_cmd *smc_cmd)
+{
+ struct mb_cmd_pack *mb_pack = params->mb_pack;
+ char *mb_load_mem = params->mb_load_mem;
+ struct tc_uuid *uuid_return = params->uuid_return;
+
+ mb_pack->operation.params[0].memref.buffer =
+ mailbox_virt_to_phys((uintptr_t)mb_load_mem);
+ mb_pack->operation.buffer_h_addr[0] =
+ (uint64_t)mailbox_virt_to_phys((uintptr_t)mb_load_mem) >> ADDR_TRANS_NUM;
+ mb_pack->operation.params[0].memref.size = load_size + sizeof(int);
+ mb_pack->operation.params[2].memref.buffer =
+ mailbox_virt_to_phys((uintptr_t)uuid_return);
+ mb_pack->operation.buffer_h_addr[2] =
+ (uint64_t)mailbox_virt_to_phys((uintptr_t)uuid_return) >> ADDR_TRANS_NUM;
+ mb_pack->operation.params[2].memref.size = sizeof(*uuid_return);
+ mb_pack->operation.paramtypes = teec_param_types(TEEC_MEMREF_TEMP_INPUT,
+ TEEC_VALUE_INOUT, TEEC_MEMREF_TEMP_OUTPUT, TEEC_VALUE_INOUT);
+
+ smc_cmd->cmd_type = CMD_TYPE_GLOBAL;
+ smc_cmd->cmd_id = GLOBAL_CMD_ID_LOAD_SECURE_APP;
+ smc_cmd->context_id = 0;
+ smc_cmd->operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation);
+ smc_cmd->operation_h_phys =
+ (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM;
+}
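+
+/*
+ * Frame command layout packed above:
+ * params[0]: INPUT memref - one frame of the TA image in mailbox memory
+ * params[1]: value.a - secfile type (filled in per frame by the caller)
+ * params[2]: OUTPUT memref - UUID reported back by the TEE
+ * params[3]: value.a - frame index (filled in per frame by the caller)
+ */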
+
+static int32_t load_image_copy_file(struct load_img_params *params, uint32_t load_size,
+ int32_t load_flag, uint32_t loaded_size)
+{
+ if (params->dev_file->kernel_api == TEE_REQ_FROM_KERNEL_MODE) {
+ if (memcpy_s(params->mb_load_mem + sizeof(load_flag),
+ params->mb_load_size - sizeof(load_flag),
+ params->file_buffer + loaded_size, load_size) != 0) {
+ tloge("memcpy file buf get fail\n");
+ return -EFAULT;
+ }
+ return 0;
+ }
+ if (copy_from_user(params->mb_load_mem + sizeof(load_flag),
+ (const void __user *)params->file_buffer + loaded_size, load_size)) {
+ tloge("file buf get fail\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int load_image_by_frame(struct load_img_params *params, unsigned int load_times,
+ struct tc_ns_client_return *tee_ret, struct sec_file_info *sec_file_info)
+{
+ char *p = params->mb_load_mem;
+ uint32_t load_size;
+ int load_flag = 1; /* 0: last block, 1: not last block */
+ uint32_t loaded_size = 0;
+ unsigned int index;
+ struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };
+ int smc_ret;
+
+ for (index = 0; index < load_times; index++) {
+ smc_cmd.err_origin = TEEC_ORIGIN_COMMS;
+ if (index == (load_times - 1)) {
+ load_flag = 0;
+ load_size = params->file_size - loaded_size;
+ } else {
+ load_size = params->mb_load_size - sizeof(load_flag);
+ }
+ *(int *)p = load_flag;
+ if (load_size > params->mb_load_size - sizeof(load_flag)) {
+ tloge("invalid load size %u/%u\n", load_size,
+ params->mb_load_size);
+ return -EINVAL;
+ }
+
+ if (load_image_copy_file(params, load_size, load_flag, loaded_size) != 0)
+ return -EFAULT;
+
+ pack_load_frame_cmd(load_size, params, &smc_cmd);
+ params->mb_pack->operation.params[3].value.a = index;
+ params->mb_pack->operation.params[1].value.a = sec_file_info->secfile_type;
+ smc_cmd.dev_file_id = params->dev_file->dev_file_id;
+ smc_ret = tc_ns_smc(&smc_cmd);
+ tlogd("configid=%u, ret=%d, load_flag=%d, index=%u\n",
+ params->mb_pack->operation.params[1].value.a, smc_ret,
+ load_flag, index);
+
+ if (smc_ret != 0) {
+ if (tee_ret != NULL) {
+ tee_ret->code = smc_ret;
+ tee_ret->origin = smc_cmd.err_origin;
+ }
+ sec_file_info->sec_load_err = (int32_t)params->mb_pack->operation.params[3].value.b;
+ return -EFAULT;
+ }
+
+ if (!smc_ret && !load_flag && load_image_for_ion(params, tee_ret ? &tee_ret->origin : NULL))
+ return -EPERM;
+
+ loaded_size += load_size;
+ }
+ return 0;
+}
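+
+/*
+ * Framing used above: each frame starts with an int load_flag (1: more
+ * frames follow, 0: last frame) followed by at most
+ * mb_load_size - sizeof(int) payload bytes, so the TEE can reassemble
+ * the image without knowing the frame count up front.
+ */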
+
+int tc_ns_load_image_with_lock(struct tc_ns_dev_file *dev, const char *file_buffer,
+ unsigned int file_size, enum secfile_type_t type)
+{
+ int ret;
+ struct sec_file_info sec_file = {0, 0, 0};
+
+ if (!dev || !file_buffer) {
+ tloge("dev or file buffer is NULL!\n");
+ return -EINVAL;
+ }
+
+ sec_file.secfile_type = type;
+ sec_file.file_size = file_size;
+
+ mutex_lock(&g_load_app_lock);
+ ret = tc_ns_load_image(dev, file_buffer, &sec_file, NULL);
+ mutex_unlock(&g_load_app_lock);
+
+ return ret;
+}
+
+static void free_load_image_buffer(struct load_img_params *params)
+{
+ mailbox_free(params->mb_load_mem);
+ mailbox_free(params->mb_pack);
+ mailbox_free(params->uuid_return);
+}
+
+int load_image(struct load_img_params *params,
+ struct sec_file_info *sec_file_info, struct tc_ns_client_return *tee_ret)
+{
+ int ret;
+ unsigned int load_times;
+ unsigned int file_size;
+
+ /* tee_ret can be null */
+ if (params == NULL || sec_file_info == NULL)
+ return -1;
+
+ file_size = params->file_size;
+
+ params->mb_load_size = (file_size > (SZ_1M - sizeof(int))) ?
+ SZ_1M : ALIGN(file_size, SZ_4K);
+
+ ret = alloc_for_load_image(params);
+ if (ret != 0) {
+ tloge("Alloc load image buf fail!\n");
+ return ret;
+ }
+
+ if (params->mb_load_size <= sizeof(int)) {
+ tloge("mb load size is too small!\n");
+ free_load_image_buffer(params);
+ return -ENOMEM;
+ }
+
+ load_times = file_size / (params->mb_load_size - sizeof(int));
+ if ((file_size % (params->mb_load_size - sizeof(int))) != 0)
+ load_times += 1;
+
+ ret = load_image_by_frame(params, load_times, tee_ret, sec_file_info);
+ if (ret != 0) {
+ tloge("load image by frame fail!\n");
+ free_load_image_buffer(params);
+ return ret;
+ }
+
+ free_load_image_buffer(params);
+ return 0;
+}
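+
+/*
+ * Sizing sketch for load_image above, with a hypothetical 2.5 MB TA:
+ * mb_load_size caps at SZ_1M, each frame carries SZ_1M - sizeof(int)
+ * bytes, so load_times = 2621440 / 1048572 rounded up = 3 frames.
+ */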
+
+int tc_ns_load_image(struct tc_ns_dev_file *dev, const char *file_buffer,
+ struct sec_file_info *sec_file_info, struct tc_ns_client_return *tee_ret)
+{
+ unsigned int file_size;
+ struct load_img_params params = { dev, file_buffer, 0, NULL, NULL, NULL, 0 };
+
+ if (!dev || !file_buffer || !sec_file_info) {
+ tloge("dev or file buffer or sec_file_info is NULL!\n");
+ return -EINVAL;
+ }
+
+ file_size = sec_file_info->file_size;
+ params.file_size = file_size;
+#ifdef CONFIG_CRL_PATH
+ if (g_update_crl_flag == 0) {
+ if (tz_update_crl(CONFIG_CRL_PATH, dev) != 0) {
+ tloge("tzdriver updates main crl failed\n");
+ if (tz_update_crl(CONFIG_CRL_BAK_PATH, dev) != 0) {
+ tloge("tzdriver updates backup crl failed\n");
+ } else {
+ g_update_crl_flag = 1;
+ tloge("tzdriver updates backup crl successfully\n");
+ }
+ } else {
+ g_update_crl_flag = 1;
+ tloge("tzdriver updates main crl successfully\n");
+ }
+ }
+#endif
+
+ if (!is_valid_ta_size(file_buffer, file_size))
+ return -EINVAL;
+
+ return load_image(&params, sec_file_info, tee_ret);
+}
+
+static int load_ta_image(struct tc_ns_dev_file *dev_file,
+ struct tc_ns_client_context *context)
+{
+ int ret;
+ struct sec_file_info sec_file = {0, 0, 0};
+ struct tc_ns_client_return tee_ret = {0};
+ void *file_addr = NULL;
+
+ tee_ret.origin = TEEC_ORIGIN_COMMS;
+
+ mutex_lock(&g_load_app_lock);
+ ret = tc_ns_need_load_image(dev_file->dev_file_id, context->uuid, (unsigned int)UUID_LEN);
+ if (ret == 1) { /* 1 means we need to load image */
+ if (!context->file_buffer) {
+ tloge("context's file_buffer is NULL");
+ mutex_unlock(&g_load_app_lock);
+ return -1;
+ }
+ file_addr = (void *)(uintptr_t)(context->memref.file_addr |
+ (((uint64_t)context->memref.file_h_addr) << ADDR_TRANS_NUM));
+ sec_file.secfile_type = LOAD_TA;
+ sec_file.file_size = context->file_size;
+ ret = tc_ns_load_image(dev_file, file_addr, &sec_file, &tee_ret);
+ if (ret != 0) {
+ tloge("load image failed, ret=%x", ret);
+ context->returns.code = tee_ret.code;
+ if (tee_ret.origin != TEEC_ORIGIN_COMMS) {
+ context->returns.origin = tee_ret.origin;
+ ret = EFAULT;
+ }
+ mutex_unlock(&g_load_app_lock);
+ return ret;
+ }
+ }
+ mutex_unlock(&g_load_app_lock);
+
+ return ret;
+}
+
+static void init_new_sess_node(struct tc_ns_dev_file *dev_file,
+ const struct tc_ns_client_context *context,
+ struct tc_ns_service *service,
+ struct tc_ns_session *session)
+{
+ session->session_id = context->session_id;
+ atomic_set(&session->usage, 1);
+ session->owner = dev_file;
+
+ session->wait_data.send_wait_flag = 0;
+ init_waitqueue_head(&session->wait_data.send_cmd_wq);
+
+ mutex_lock(&service->session_lock);
+ list_add_tail(&session->head, &service->session_list);
+ mutex_unlock(&service->session_lock);
+}
+
+static int proc_open_session(struct tc_ns_dev_file *dev_file,
+ struct tc_ns_client_context *context, struct tc_ns_service *service,
+ struct tc_ns_session *session, uint8_t flags)
+{
+ int ret;
+ struct tc_call_params params = {
+ dev_file, context, session, flags
+ };
+
+ mutex_lock(&service->operation_lock);
+ ret = load_ta_image(dev_file, context);
+ if (ret != 0) {
+ tloge("load ta image failed\n");
+ mutex_unlock(&service->operation_lock);
+ return ret;
+ }
+
+ ret = tc_client_call(&params);
+ if (ret != 0) {
+ /* Clean this session secure information */
+ kill_ion_by_uuid((struct tc_uuid *)context->uuid);
+ mutex_unlock(&service->operation_lock);
+ tloge("smc call returns error, ret=0x%x\n", ret);
+ return ret;
+ }
+ init_new_sess_node(dev_file, context, service, session);
+ /*
+ * session_id is unique inside the TEE, but under concurrency the
+ * same session_id can briefly coexist in tzdriver; keeping
+ * session_list add/del under service->operation_lock avoids that.
+ */
+ mutex_unlock(&service->operation_lock);
+ return ret;
+}
+
+static void clear_context_param(struct tc_ns_client_context *context)
+{
+ context->params[2].memref.size_addr = 0;
+ context->params[2].memref.size_h_addr = 0;
+ context->params[2].memref.buffer = 0;
+ context->params[2].memref.buffer_h_addr = 0;
+ context->params[3].memref.size_addr = 0;
+ context->params[3].memref.size_h_addr = 0;
+ context->params[3].memref.buffer = 0;
+ context->params[3].memref.buffer_h_addr = 0;
+}
+
+int tc_ns_open_session(struct tc_ns_dev_file *dev_file,
+ struct tc_ns_client_context *context)
+{
+ int ret;
+ struct tc_ns_service *service = NULL;
+ struct tc_ns_session *session = NULL;
+ uint8_t flags = TC_CALL_GLOBAL;
+
+ if (!dev_file || !context) {
+ tloge("invalid dev_file or context\n");
+ return -EINVAL;
+ }
+
+ ret = check_login_method(dev_file, context, &flags);
+ if (ret != 0)
+ goto err_clear_param;
+
+ context->cmd_id = GLOBAL_CMD_ID_OPEN_SESSION;
+
+ service = find_service(dev_file, context);
+ if (!service) {
+ tloge("find service failed\n");
+ ret = -ENOMEM;
+ goto err_clear_param;
+ }
+
+ session = kzalloc(sizeof(*session), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)session)) {
+ tloge("kzalloc failed\n");
+ mutex_lock(&dev_file->service_lock);
+ del_service_from_dev(dev_file, service);
+ mutex_unlock(&dev_file->service_lock);
+ ret = -ENOMEM;
+ goto err_clear_param;
+ }
+ mutex_init(&session->ta_session_lock);
+
+#ifndef CONFIG_LIBLINUX
+ ret = calc_client_auth_hash(dev_file, context, session);
+ if (ret != 0) {
+ tloge("calc client auth hash failed\n");
+ goto err_free_rsrc;
+ }
+#endif
+
+ ret = proc_open_session(dev_file, context, service, session, flags);
+ if (ret == 0)
+ goto err_clear_param;
+err_free_rsrc:
+ mutex_lock(&dev_file->service_lock);
+ del_service_from_dev(dev_file, service);
+ mutex_unlock(&dev_file->service_lock);
+
+ kfree(session);
+err_clear_param:
+ clear_context_param(context);
+ return ret;
+}
+
+static struct tc_ns_session *get_session(struct tc_ns_service *service,
+ const struct tc_ns_dev_file *dev_file,
+ const struct tc_ns_client_context *context)
+{
+ struct tc_ns_session *session = NULL;
+
+ mutex_lock(&service->session_lock);
+ session = tc_find_session_withowner(&service->session_list,
+ context->session_id, dev_file);
+ get_session_struct(session);
+ mutex_unlock(&service->session_lock);
+
+ return session;
+}
+
+static struct tc_ns_service *get_service(struct tc_ns_dev_file *dev_file,
+ const struct tc_ns_client_context *context)
+{
+ struct tc_ns_service *service = NULL;
+
+ mutex_lock(&dev_file->service_lock);
+ service = tc_find_service_in_dev(dev_file, context->uuid, UUID_LEN);
+ get_service_struct(service);
+ mutex_unlock(&dev_file->service_lock);
+
+ return service;
+}
+
+static int close_session(struct tc_ns_dev_file *dev,
+ struct tc_ns_session *session, const unsigned char *uuid,
+ unsigned int uuid_len, unsigned int session_id)
+{
+ struct tc_ns_client_context context;
+ int ret;
+ struct tc_call_params params = {
+ dev, &context, session, 0
+ };
+
+ if (uuid_len != UUID_LEN)
+ return -EINVAL;
+
+ if (memset_s(&context, sizeof(context), 0, sizeof(context)) != 0)
+ return -EFAULT;
+
+ if (memcpy_s(context.uuid, sizeof(context.uuid), uuid, uuid_len) != 0)
+ return -EFAULT;
+
+ context.session_id = session_id;
+ context.cmd_id = GLOBAL_CMD_ID_CLOSE_SESSION;
+ params.flags = TC_CALL_GLOBAL | TC_CALL_SYNC;
+ ret = tc_client_call(&params);
+ if (ret != 0)
+ tloge("close session failed, ret=0x%x\n", ret);
+
+ kill_ion_by_uuid((struct tc_uuid *)context.uuid);
+ return ret;
+}
+
+static void close_session_in_service_list(struct tc_ns_dev_file *dev,
+ struct tc_ns_service *service)
+{
+ struct tc_ns_session *tmp_session = NULL;
+ struct tc_ns_session *session = NULL;
+ int ret;
+
+ list_for_each_entry_safe(session, tmp_session,
+ &service->session_list, head) {
+ if (session->owner != dev)
+ continue;
+ ret = close_session(dev, session, service->uuid,
+ (unsigned int)UUID_LEN, session->session_id);
+ if (ret != 0)
+ tloge("close session smc failed when close fd!\n");
+ mutex_lock(&service->session_lock);
+ list_del(&session->head);
+ mutex_unlock(&service->session_lock);
+
+ put_session_struct(session); /* pair with open session */
+ }
+}
+
+static bool if_exist_unclosed_session(struct tc_ns_dev_file *dev)
+{
+ uint32_t index;
+
+ for (index = 0; index < SERVICES_MAX_COUNT; index++) {
+ if (dev->services[index] != NULL &&
+ list_empty(&dev->services[index]->session_list) == 0)
+ return true;
+ }
+ return false;
+}
+
+static int close_session_thread_fn(void *arg)
+{
+ struct tc_ns_dev_file *dev = arg;
+ uint32_t index;
+ struct tc_ns_service *service = NULL;
+
+ /* close unclosed session */
+ for (index = 0; index < SERVICES_MAX_COUNT; index++) {
+ if (dev->services[index] != NULL &&
+ list_empty(&dev->services[index]->session_list) == 0) {
+ service = dev->services[index];
+
+ mutex_lock(&service->operation_lock);
+ close_session_in_service_list(dev, service);
+ mutex_unlock(&service->operation_lock);
+
+ put_service_struct(service); /* pair with open session */
+ }
+ }
+
+ tlogd("complete close all unclosed session\n");
+ complete(&dev->close_comp);
+ return 0;
+}
+
+void close_unclosed_session_in_kthread(struct tc_ns_dev_file *dev)
+{
+ struct task_struct *close_thread = NULL;
+
+ if (!dev) {
+ tloge("dev is invalid\n");
+ return;
+ }
+
+ if (!if_exist_unclosed_session(dev))
+ return;
+
+ /* when self recovery, release session in reboot interface */
+ if (is_tee_rebooting())
+ return;
+ close_thread = kthread_create(close_session_thread_fn,
+ dev, "close_fn_%6d", dev->dev_file_id);
+ if (unlikely(IS_ERR_OR_NULL(close_thread))) {
+ tloge("fail to create close session thread\n");
+ return;
+ }
+
+ tz_kthread_bind_mask(close_thread);
+ wake_up_process(close_thread);
+ wait_for_completion(&dev->close_comp);
+ tlogd("wait for completion success\n");
+}
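+
+/*
+ * The close runs in a separate kthread bound via tz_kthread_bind_mask()
+ * (presumably so the SMCs execute on a CPU mask the TEE accepts), while
+ * the caller simply blocks on dev->close_comp until every leftover
+ * session is gone.
+ */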
+
+int tc_ns_close_session(struct tc_ns_dev_file *dev_file,
+ struct tc_ns_client_context *context)
+{
+ int ret = -EINVAL;
+ struct tc_ns_service *service = NULL;
+ struct tc_ns_session *session = NULL;
+
+ if (!dev_file || !context) {
+ tloge("invalid dev_file or context\n");
+ return ret;
+ }
+
+ if (is_tee_rebooting()) {
+ context->returns.code = TEE_ERROR_IS_DEAD;
+ return TEE_ERROR_IS_DEAD;
+ }
+
+ service = get_service(dev_file, context);
+ if (!service) {
+ tloge("invalid service\n");
+ return ret;
+ }
+ /*
+ * session_id is unique inside the TEE, but under concurrency the
+ * same session_id can briefly coexist in tzdriver; keeping
+ * session_list add/del under service->operation_lock avoids that.
+ */
+ mutex_lock(&service->operation_lock);
+ session = get_session(service, dev_file, context);
+ if (session) {
+ int ret2;
+ mutex_lock(&session->ta_session_lock);
+ ret2 = close_session(dev_file, session, context->uuid,
+ (unsigned int)UUID_LEN, context->session_id);
+ mutex_unlock(&session->ta_session_lock);
+ if (ret2 != 0)
+ tloge("close session smc failed!\n");
+ mutex_lock(&service->session_lock);
+ list_del(&session->head);
+ mutex_unlock(&service->session_lock);
+
+ put_session_struct(session);
+ put_session_struct(session); /* pair with open session */
+
+ ret = 0;
+ mutex_lock(&dev_file->service_lock);
+ del_service_from_dev(dev_file, service);
+ mutex_unlock(&dev_file->service_lock);
+ } else {
+ tloge("invalid session\n");
+ }
+ mutex_unlock(&service->operation_lock);
+ put_service_struct(service);
+ return ret;
+}
+
+int tc_ns_send_cmd(struct tc_ns_dev_file *dev_file,
+ struct tc_ns_client_context *context)
+{
+ int ret = -EINVAL;
+ struct tc_ns_service *service = NULL;
+ struct tc_ns_session *session = NULL;
+ struct tc_call_params params = {
+ dev_file, context, NULL, 0
+ };
+
+ if (!dev_file || !context) {
+ tloge("invalid dev_file or context\n");
+ return ret;
+ }
+
+ if (is_tee_rebooting()) {
+ context->returns.code = TEE_ERROR_IS_DEAD;
+ return EFAULT;
+ }
+
+ service = get_service(dev_file, context);
+ if (service) {
+ session = get_session(service, dev_file, context);
+ put_service_struct(service);
+ if (session) {
+ tlogd("send cmd find session id %x\n",
+ context->session_id);
+ goto find_session;
+ }
+ tloge("can't find session\n");
+ } else {
+ tloge("can't find service\n");
+ }
+
+ return ret;
+find_session:
+ mutex_lock(&session->ta_session_lock);
+ params.sess = session;
+ ret = tc_client_call(&params);
+ mutex_unlock(&session->ta_session_lock);
+ put_session_struct(session);
+ if (ret != 0)
+ tloge("smc call returns error, ret=0x%x\n", ret);
+ return ret;
+}
+
+static int ioctl_session_send_cmd(struct tc_ns_dev_file *dev_file,
+ struct tc_ns_client_context *context, void *argp)
+{
+ int ret;
+
+ ret = tc_ns_send_cmd(dev_file, context);
+ if (ret != 0)
+ tloge("send cmd failed ret is %d\n", ret);
+ if (copy_to_user(argp, context, sizeof(*context)) != 0) {
+ if (ret == 0)
+ ret = -EFAULT;
+ }
+ return ret;
+}
+
+int tc_client_session_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = -EINVAL;
+ void *argp = (void __user *)(uintptr_t)arg;
+ struct tc_ns_dev_file *dev_file = NULL;
+ struct tc_ns_client_context context;
+
+ if (!argp || !file) {
+ tloge("invalid params\n");
+ return -EINVAL;
+ }
+
+ dev_file = file->private_data;
+ if (copy_from_user(&context, argp, sizeof(context)) != 0) {
+ tloge("copy from user failed\n");
+ return -EFAULT;
+ }
+
+ context.returns.origin = TEEC_ORIGIN_COMMS;
+ switch (cmd) {
+ case TC_NS_CLIENT_IOCTL_SES_OPEN_REQ:
+ ret = tc_ns_open_session(dev_file, &context);
+ if (ret != 0)
+ tloge("open session failed ret is %d\n", ret);
+ if (copy_to_user(argp, &context, sizeof(context)) != 0 && ret == 0)
+ ret = -EFAULT;
+ break;
+ case TC_NS_CLIENT_IOCTL_SES_CLOSE_REQ:
+ ret = tc_ns_close_session(dev_file, &context);
+ break;
+ case TC_NS_CLIENT_IOCTL_SEND_CMD_REQ:
+ tee_trace_add_event(INVOKE_CMD_START, 0);
+ ret = ioctl_session_send_cmd(dev_file, &context, argp);
+ tee_trace_add_event(INVOKE_CMD_END, 0);
+ break;
+ default:
+ tloge("invalid cmd:0x%x!\n", cmd);
+ return ret;
+ }
+ /*
+ * Don't leak ERESTARTSYS to user space.
+ *
+ * CloseSession is not reentrant, so convert it to -EINTR.
+ * In the other cases, restart_syscall().
+ *
+ * It would be better to do this right after the error code
+ * is generated (in tc_client_call), but kernel CAs still
+ * exist as of this writing, and setting TIF flags for the
+ * callers of those CAs is very hard to analyze.
+ *
+ * For kernel CAs, when ERESTARTSYS is seen, loop in the
+ * kernel instead of notifying user space.
+ *
+ * P.S. The ret codes in this function come from a mixed
+ * naming space (see the definition of ret), but the function
+ * never returns its default value, so using -EXXX here is safe.
+ */
+ if (ret == -ERESTARTSYS) {
+ if (cmd == TC_NS_CLIENT_IOCTL_SES_CLOSE_REQ)
+ ret = -EINTR;
+ else
+ return restart_syscall();
+ }
+ return ret;
+}
+
+static void cleanup_session(struct tc_ns_service *service)
+{
+ struct tc_ns_session *session = NULL;
+ struct tc_ns_session *session_tmp = NULL;
+
+ if (!service)
+ return;
+
+ /* close unclosed session */
+ if (list_empty(&service->session_list) == 0) {
+ mutex_lock(&service->operation_lock);
+ list_for_each_entry_safe(session, session_tmp, &service->session_list, head) {
+ tlogd("clean up session %u\n", session->session_id);
+ mutex_lock(&service->session_lock);
+ list_del(&session->head);
+ mutex_unlock(&service->session_lock);
+ put_session_struct(session);
+ }
+ mutex_unlock(&service->operation_lock);
+ }
+ put_service_struct(service);
+
+ return;
+}
+
+void free_all_session(void)
+{
+ struct tc_ns_dev_file *dev_file = NULL;
+ struct tc_ns_dev_file *dev_file_tmp = NULL;
+ struct tc_ns_dev_list *dev_list = NULL;
+ int i;
+
+ dev_list = get_dev_list();
+ if (!dev_list) {
+ tloge("cleanup session, dev list is null\n");
+ return;
+ }
+ mutex_lock(&dev_list->dev_lock);
+ list_for_each_entry_safe(dev_file, dev_file_tmp, &dev_list->dev_file_list, head) {
+ mutex_lock(&dev_file->service_lock);
+ for (i = 0; i < SERVICES_MAX_COUNT; i++) {
+ if (dev_file->services[i] == NULL)
+ continue;
+ get_service_struct(dev_file->services[i]);
+ /* avoid dead lock in close session */
+ mutex_unlock(&dev_file->service_lock);
+ cleanup_session(dev_file->services[i]);
+ mutex_lock(&dev_file->service_lock);
+ dev_file->services[i] = NULL;
+ }
+ mutex_unlock(&dev_file->service_lock);
+ }
+ mutex_unlock(&dev_list->dev_lock);
+ return;
+}
diff --git a/tzdriver/core/session_manager.h b/tzdriver/core/session_manager.h
new file mode 100644
index 0000000000000000000000000000000000000000..f943434184d3c0cc3cd5119381195c03d0f6bffc
--- /dev/null
+++ b/tzdriver/core/session_manager.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: function declarations for session management.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef SESSION_MANAGER_H
+#define SESSION_MANAGER_H
+
+#include
+#include "tc_ns_client.h"
+#include "teek_ns_client.h"
+
+int tc_client_session_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+int tc_ns_open_session(struct tc_ns_dev_file *dev_file,
+ struct tc_ns_client_context *context);
+int tc_ns_close_session(struct tc_ns_dev_file *dev_file,
+ struct tc_ns_client_context *context);
+int tc_ns_send_cmd(struct tc_ns_dev_file *dev_file,
+ struct tc_ns_client_context *context);
+int tc_ns_load_image(struct tc_ns_dev_file *dev, const char *file_buffer,
+ struct sec_file_info *sec_file_info, struct tc_ns_client_return *tee_ret);
+int tc_ns_load_image_with_lock(struct tc_ns_dev_file *dev,
+ const char *file_buffer, unsigned int file_size, enum secfile_type_t type);
+void close_unclosed_session_in_kthread(struct tc_ns_dev_file *dev);
+struct tc_ns_session *tc_find_session_by_uuid(unsigned int dev_file_id,
+ const struct tc_ns_smc_cmd *cmd);
+struct tc_ns_service *tc_find_service_in_dev(const struct tc_ns_dev_file *dev,
+ const unsigned char *uuid, int uuid_size);
+struct tc_ns_session *tc_find_session_withowner(
+ const struct list_head *session_list, unsigned int session_id,
+ const struct tc_ns_dev_file *dev_file);
+int tc_ns_load_secfile(struct tc_ns_dev_file *dev_file,
+ void __user *argp, bool is_from_client_node);
+int load_image(struct load_img_params *params,
+ struct sec_file_info *sec_file_info, struct tc_ns_client_return *tee_ret);
+void get_service_struct(struct tc_ns_service *service);
+void put_service_struct(struct tc_ns_service *service);
+void get_session_struct(struct tc_ns_session *session);
+void put_session_struct(struct tc_ns_session *session);
+void dump_services_status(const char *param);
+void init_srvc_list(void);
+void free_all_session(void);
+
+#endif
diff --git a/tzdriver/core/shared_mem.c b/tzdriver/core/shared_mem.c
new file mode 100644
index 0000000000000000000000000000000000000000..0e81ae47e655f9718136ebcbf341a599cc3f6e5c
--- /dev/null
+++ b/tzdriver/core/shared_mem.c
@@ -0,0 +1,406 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "shared_mem.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "tc_ns_log.h"
+#include "tc_ns_client.h"
+#include "teek_ns_client.h"
+#include "smc_smp.h"
+#include "internal_functions.h"
+#include "mailbox_mempool.h"
+#include "ko_adapt.h"
+
+uint64_t get_reserved_cmd_vaddr_of(phys_addr_t cmd_phys, uint64_t cmd_size)
+{
+ uint64_t cmd_vaddr;
+
+ if (cmd_phys == 0 || cmd_size == 0) {
+ tloge("cmd phys or cmd size is invalid\n");
+ return 0;
+ }
+ cmd_vaddr = (uint64_t)(uintptr_t)ioremap_cache(cmd_phys, cmd_size);
+ if (cmd_vaddr == 0) {
+ tloge("io remap for reserved cmd buffer failed\n");
+ return 0;
+ }
+ (void)memset_s((void *)(uintptr_t)cmd_vaddr, cmd_size, 0, cmd_size);
+ return cmd_vaddr;
+}
+
+#ifdef CONFIG_SHARED_MEM_RESERVED
+
+#define CMD_MEM_MIN_SIZE 0x1000
+#define SPI_MEM_MIN_SIZE 0x1000
+#define OPERATION_MEM_MIN_SIZE 0x1000
+uint64_t g_cmd_mem_paddr;
+uint64_t g_cmd_mem_size;
+uint64_t g_mailbox_paddr;
+uint64_t g_mailbox_size;
+uint64_t g_log_mem_paddr;
+uint64_t g_log_mem_size;
+uint64_t g_spi_mem_paddr;
+uint64_t g_spi_mem_size;
+static mailbox_page_t *g_mailbox_page;
+static uintptr_t g_shmem_start_virt;
+static uintptr_t g_page_offset;
+
+int load_tz_shared_mem(struct device_node *np)
+{
+ int rc;
+
+ rc = of_property_read_u64(np, "tz_shmem_cmd_addr", &g_cmd_mem_paddr);
+ if (rc != 0) {
+ tloge("read tz_shmem_cmd_addr failed\n");
+ return -ENODEV;
+ }
+
+ rc = of_property_read_u64(np, "tz_shmem_cmd_size", &g_cmd_mem_size);
+ if (rc != 0 || g_cmd_mem_size < CMD_MEM_MIN_SIZE) {
+ tloge("read tz_shmem_cmd_size failed or size too short\n");
+ return -ENODEV;
+ }
+
+ rc = of_property_read_u64(np, "tz_shmem_mailbox_addr", &g_mailbox_paddr);
+ if (rc != 0) {
+ tloge("read tz_shmem_mailbox_addr failed\n");
+ return -ENODEV;
+ }
+
+ rc = of_property_read_u64(np, "tz_shmem_mailbox_size", &g_mailbox_size);
+ if (rc != 0 || g_mailbox_size < MAILBOX_POOL_SIZE + OPERATION_MEM_MIN_SIZE) {
+ tloge("read tz_shmem_mailbox_size failed or size too short\n");
+ return -ENODEV;
+ }
+
+ rc = of_property_read_u64(np, "tz_shmem_spi_addr", &g_spi_mem_paddr);
+ if (rc != 0) {
+ tloge("read tz_shmem_spi_addr failed\n");
+ return -ENODEV;
+ }
+
+ rc = of_property_read_u64(np, "tz_shmem_spi_size", &g_spi_mem_size);
+ if (rc != 0 || g_spi_mem_size < SPI_MEM_MIN_SIZE) {
+ tloge("read tz_shmem_spi_size failed or size too short\n");
+ return -ENODEV;
+ }
+
+ rc = of_property_read_u64(np, "tz_shmem_log_addr", &g_log_mem_paddr);
+ if (rc != 0) {
+ tloge("read tz_shmem_log_addr failed\n");
+ return -ENODEV;
+ }
+
+ rc = of_property_read_u64(np, "tz_shmem_log_size", &g_log_mem_size);
+ if (rc != 0 || g_log_mem_size < PAGES_LOG_MEM_LEN) {
+ tloge("read tz_shmem_log_size failed or size too short\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+mailbox_page_t *mailbox_alloc_pages(int order)
+{
+ uint32_t i;
+ uint32_t page_num = 1 << (unsigned int)order;
+ uint32_t page_size = page_num * sizeof(mailbox_page_t);
+
+ g_page_offset = MAILBOX_POOL_SIZE / page_num;
+ g_mailbox_page = kmalloc(page_size, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)g_mailbox_page)) {
+ tloge("Failed to allocate mailbox page\n");
+ return NULL;
+ }
+
+ g_shmem_start_virt = (uintptr_t)ioremap_cache(g_mailbox_paddr, g_mailbox_size);
+ if (g_shmem_start_virt == 0) {
+ tloge("io remap for mailbox page failed\n");
+ kfree(g_mailbox_page);
+ g_mailbox_page = NULL;
+ return NULL;
+ }
+ (void)memset_s((void *)g_shmem_start_virt, g_mailbox_size, 0, g_mailbox_size);
+ g_mailbox_page[0] = (mailbox_page_t)g_shmem_start_virt;
+ for (i = 1; i < page_num; i++)
+ g_mailbox_page[i] = g_mailbox_page[i - 1] + g_page_offset;
+
+ return g_mailbox_page;
+}
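+
+/*
+ * Example under the reserved-memory scheme: for order 8 (256 "pages")
+ * the pool is cut into 256 equal slices of MAILBOX_POOL_SIZE / 256
+ * bytes, and g_mailbox_page[i] simply records the virtual start of
+ * slice i inside the ioremapped region.
+ */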
+
+void mailbox_free_pages(mailbox_page_t *pages, int order)
+{
+ if (!pages || pages != g_mailbox_page)
+ return;
+
+ (void)order;
+ kfree(pages);
+ g_mailbox_page = NULL;
+}
+
+uintptr_t mailbox_page_address(mailbox_page_t *page)
+{
+ if (!page)
+ return 0;
+
+ return *page;
+}
+
+uintptr_t mailbox_virt_to_phys(uintptr_t addr)
+{
+ if (addr < g_shmem_start_virt || addr >= g_shmem_start_virt + g_mailbox_size)
+ return 0;
+
+ return g_mailbox_paddr + (addr - g_shmem_start_virt);
+}
+
+mailbox_page_t *mailbox_virt_to_page(uint64_t ptr)
+{
+ if (ptr < g_shmem_start_virt || ptr >= g_shmem_start_virt + g_mailbox_size)
+ return NULL;
+
+ return &g_mailbox_page[(ptr - g_shmem_start_virt) / g_page_offset];
+}
+
+uint64_t get_operation_vaddr(void)
+{
+ return g_shmem_start_virt + MAILBOX_POOL_SIZE;
+}
+
+void free_operation(uint64_t op_vaddr)
+{
+ (void)op_vaddr;
+}
+
+/*
+ * This function is only for the wireless platform; the CONFIG_LOG_POOL
+ * macro controls log retention across the soft reset feature. With
+ * CONFIG_LOG_POOL enabled, this function does not memset the log pool
+ * memory, so logs written before the reset are retained.
+ */
+uint64_t get_log_mem_vaddr(void)
+{
+ uint64_t log_vaddr = (uint64_t)(uintptr_t)ioremap_cache(g_log_mem_paddr, g_log_mem_size);
+ if (log_vaddr == 0) {
+ tloge("ioremap for log buffer failed\n");
+ return 0;
+ }
+#ifndef CONFIG_LOG_POOL
+ (void)memset_s((void *)(uintptr_t)log_vaddr, g_log_mem_size, 0, g_log_mem_size);
+#endif
+
+ return log_vaddr;
+}
+
+uint64_t get_log_mem_paddr(uint64_t log_vaddr)
+{
+ (void)log_vaddr;
+ return g_log_mem_paddr;
+}
+
+uint64_t get_log_mem_size(void)
+{
+ return g_log_mem_size;
+}
+
+void free_log_mem(uint64_t log_vaddr)
+{
+ iounmap((void __iomem*)(uintptr_t)log_vaddr);
+}
+
+uint64_t get_cmd_mem_vaddr(void)
+{
+ return get_reserved_cmd_vaddr_of(g_cmd_mem_paddr, g_cmd_mem_size);
+}
+
+uint64_t get_cmd_mem_paddr(uint64_t cmd_vaddr)
+{
+ (void)cmd_vaddr;
+ return g_cmd_mem_paddr;
+}
+
+void free_cmd_mem(uint64_t cmd_vaddr)
+{
+ iounmap((void __iomem*)(uintptr_t)cmd_vaddr);
+}
+
+uint64_t get_spi_mem_vaddr(void)
+{
+ uint64_t spi_vaddr = (uint64_t)(uintptr_t)ioremap_cache(g_spi_mem_paddr, g_spi_mem_size);
+ if (spi_vaddr == 0) {
+ tloge("io remap for spi buffer failed\n");
+ return 0;
+ }
+ (void)memset_s((void *)(uintptr_t)spi_vaddr, g_spi_mem_size, 0, g_spi_mem_size);
+ return spi_vaddr;
+}
+
+uint64_t get_spi_mem_paddr(uintptr_t spi_vaddr)
+{
+ (void)spi_vaddr;
+ return g_spi_mem_paddr;
+}
+
+void free_spi_mem(uint64_t spi_vaddr)
+{
+ iounmap((void __iomem*)(uintptr_t)spi_vaddr);
+}
+
+#else
+
+int load_tz_shared_mem(struct device_node *np)
+{
+ (void)np;
+ return 0;
+}
+
+mailbox_page_t *mailbox_alloc_pages(int order)
+{
+ return koadpt_alloc_pages(GFP_KERNEL, order);
+}
+
+void mailbox_free_pages(mailbox_page_t *pages, int order)
+{
+ if (!pages)
+ return;
+
+ __free_pages(pages, order);
+}
+
+uintptr_t mailbox_page_address(mailbox_page_t *page)
+{
+ if (!page)
+ return 0;
+
+ return (uintptr_t)page_address(page);
+}
+
+uintptr_t mailbox_virt_to_phys(uintptr_t addr)
+{
+ if (!addr)
+ return 0;
+
+ return virt_to_phys((void *)addr);
+}
+
+mailbox_page_t *mailbox_virt_to_page(uint64_t ptr)
+{
+ if (!ptr)
+ return NULL;
+
+ return virt_to_page((void *)(uintptr_t)ptr);
+}
+
+uint64_t get_operation_vaddr(void)
+{
+ return (uint64_t)(uintptr_t)kzalloc(sizeof(struct tc_ns_operation), GFP_KERNEL);
+}
+
+void free_operation(uint64_t op_vaddr)
+{
+ if (!op_vaddr)
+ return;
+
+ kfree((void *)(uintptr_t)op_vaddr);
+}
+
+uint64_t get_log_mem_vaddr(void)
+{
+ return __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(PAGES_LOG_MEM_LEN));
+}
+
+uint64_t get_log_mem_paddr(uint64_t log_vaddr)
+{
+ if (!log_vaddr)
+ return 0;
+
+ return virt_to_phys((void *)(uintptr_t)log_vaddr);
+}
+
+uint64_t get_log_mem_size(void)
+{
+ return 0;
+}
+
+void free_log_mem(uint64_t log_vaddr)
+{
+ if (!log_vaddr)
+ return;
+
+ free_pages(log_vaddr, get_order(PAGES_LOG_MEM_LEN));
+}
+
+#define PAGES_BIG_SESSION_CMD_LEN 6
+uint64_t get_cmd_mem_vaddr(void)
+{
+#ifdef CONFIG_BIG_SESSION
+ /* we should map at least 40 pages for 1000 sessions; 2^6 = 64 > 40 */
+ return (uint64_t)__get_free_pages(GFP_KERNEL | __GFP_ZERO, PAGES_BIG_SESSION_CMD_LEN);
+#else
+ return (uint64_t)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+#endif
+}
+
+uint64_t get_cmd_mem_paddr(uint64_t cmd_vaddr)
+{
+ if (!cmd_vaddr)
+ return 0;
+
+ return virt_to_phys((void *)(uintptr_t)cmd_vaddr);
+}
+
+void free_cmd_mem(uint64_t cmd_vaddr)
+{
+ if (!cmd_vaddr)
+ return;
+
+#ifdef CONFIG_BIG_SESSION
+ free_pages(cmd_vaddr, PAGES_BIG_SESSION_CMD_LEN);
+#else
+ free_page(cmd_vaddr);
+#endif
+}
+
+uint64_t get_spi_mem_vaddr(void)
+{
+#ifdef CONFIG_BIG_SESSION
+ /* we should map at least 3 pages for 100 sessions; 2^2 = 4 > 3 */
+ return (uint64_t)__get_free_pages(GFP_KERNEL | __GFP_ZERO, CONFIG_NOTIFY_PAGE_ORDER);
+#else
+ return (uint64_t)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+#endif
+}
+
+uint64_t get_spi_mem_paddr(uintptr_t spi_vaddr)
+{
+ if (spi_vaddr == 0)
+ return 0;
+
+ return virt_to_phys((void *)spi_vaddr);
+}
+
+void free_spi_mem(uint64_t spi_vaddr)
+{
+ if (!spi_vaddr)
+ return;
+
+#ifdef CONFIG_BIG_SESSION
+ free_pages(spi_vaddr, CONFIG_NOTIFY_PAGE_ORDER);
+#else
+ free_page(spi_vaddr);
+#endif
+}
+#endif
diff --git a/tzdriver/core/shared_mem.h b/tzdriver/core/shared_mem.h
new file mode 100644
index 0000000000000000000000000000000000000000..4b6afb766000b99fbbd4a81cf5c5df236e2c9384
--- /dev/null
+++ b/tzdriver/core/shared_mem.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef SHARED_MEM_H
+#define SHARED_MEM_H
+
+#include
+#include
+
+#ifdef CONFIG_512K_LOG_PAGES_MEM
+#define PAGES_LOG_MEM_LEN (512 * SZ_1K) /* mem size: 512 k */
+#else
+#define PAGES_LOG_MEM_LEN (256 * SZ_1K) /* mem size: 256 k */
+#endif
+
+#ifndef CONFIG_SHARED_MEM_RESERVED
+typedef struct page mailbox_page_t;
+#else
+typedef uintptr_t mailbox_page_t;
+#endif
+
+uint64_t get_reserved_cmd_vaddr_of(phys_addr_t cmd_phys, uint64_t cmd_size);
+int load_tz_shared_mem(struct device_node *np);
+
+mailbox_page_t *mailbox_alloc_pages(int order);
+void mailbox_free_pages(mailbox_page_t *pages, int order);
+uintptr_t mailbox_page_address(mailbox_page_t *page);
+mailbox_page_t *mailbox_virt_to_page(uint64_t ptr);
+uint64_t get_operation_vaddr(void);
+void free_operation(uint64_t op_vaddr);
+
+uint64_t get_log_mem_vaddr(void);
+uint64_t get_log_mem_paddr(uint64_t log_vaddr);
+uint64_t get_log_mem_size(void);
+void free_log_mem(uint64_t log_vaddr);
+
+uint64_t get_cmd_mem_vaddr(void);
+uint64_t get_cmd_mem_paddr(uint64_t cmd_vaddr);
+void free_cmd_mem(uint64_t cmd_vaddr);
+
+uint64_t get_spi_mem_vaddr(void);
+uint64_t get_spi_mem_paddr(uintptr_t spi_vaddr);
+void free_spi_mem(uint64_t spi_vaddr);
+#endif
diff --git a/tzdriver/core/smc_abi.c b/tzdriver/core/smc_abi.c
new file mode 100644
index 0000000000000000000000000000000000000000..2fab1bebd58161015331db5072e13f584bed3761
--- /dev/null
+++ b/tzdriver/core/smc_abi.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include
+#include "smc_call.h"
+#include "smc_smp.h"
+#include "teek_ns_client.h"
+#include "smc_abi.h"
+
+#ifndef CONFIG_ARCH32
+void do_smc_transport(struct smc_in_params *in, struct smc_out_params *out, uint8_t wait)
+{
+ isb();
+ wmb();
+ do {
+ asm volatile(
+ "mov x0, %[fid]\n"
+ "mov x1, %[a1]\n"
+ "mov x2, %[a2]\n"
+ "mov x3, %[a3]\n"
+ "mov x4, %[a4]\n"
+ "mov x5, %[a5]\n"
+ "mov x6, %[a6]\n"
+ "mov x7, %[a7]\n"
+ SMCCC_SMC_INST"\n"
+ "str x0, [%[re0]]\n"
+ "str x1, [%[re1]]\n"
+ "str x2, [%[re2]]\n"
+ "str x3, [%[re3]]\n" :
+ [fid] "+r"(in->x0),
+ [a1] "+r"(in->x1),
+ [a2] "+r"(in->x2),
+ [a3] "+r"(in->x3),
+ [a4] "+r"(in->x4),
+ [a5] "+r"(in->x5),
+ [a6] "+r"(in->x6),
+ [a7] "+r"(in->x7):
+ [re0] "r"(&out->ret),
+ [re1] "r"(&out->exit_reason),
+ [re2] "r"(&out->ta),
+ [re3] "r"(&out->target) :
+ "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7");
+ } while (out->ret == TSP_REQUEST && wait != 0);
+ isb();
+ wmb();
+}
+#else
+void do_smc_transport(struct smc_in_params *in, struct smc_out_params *out, uint8_t wait)
+{
+ isb();
+ wmb();
+ do {
+ asm volatile(
+ "mov r0, %[fid]\n"
+ "mov r1, %[a1]\n"
+ "mov r2, %[a2]\n"
+ "mov r3, %[a3]\n"
+ ".arch_extension sec\n"
+ SMCCC_SMC_INST"\n"
+ "str r0, [%[re0]]\n"
+ "str r1, [%[re1]]\n"
+ "str r2, [%[re2]]\n"
+ "str r3, [%[re3]]\n" :
+ [fid] "+r"(in->x0),
+ [a1] "+r"(in->x1),
+ [a2] "+r"(in->x2),
+ [a3] "+r"(in->x3):
+ [re0] "r"(&out->ret),
+ [re1] "r"(&out->exit_reason),
+ [re2] "r"(&out->ta),
+ [re3] "r"(&out->target) :
+ "r0", "r1", "r2", "r3");
+ } while (out->ret == TSP_REQUEST && wait != 0);
+ isb();
+ wmb();
+}
+#endif
+
+#ifdef CONFIG_THIRDPARTY_COMPATIBLE
+static void fix_params_offset(struct smc_out_params *out_param)
+{
+ out_param->target = out_param->ta;
+ out_param->ta = out_param->exit_reason;
+ out_param->exit_reason = out_param->ret;
+ out_param->ret = TSP_RESPONSE;
+ if (out_param->exit_reason == TEE_EXIT_REASON_CRASH) {
+ union crash_inf temp_info;
+ temp_info.crash_reg[0] = out_param->ta;
+ temp_info.crash_reg[1] = 0;
+ temp_info.crash_reg[2] = out_param->target;
+ temp_info.crash_msg.far = temp_info.crash_msg.elr;
+ temp_info.crash_msg.elr = 0;
+ out_param->ret = TSP_CRASH;
+ out_param->exit_reason = temp_info.crash_reg[0];
+ out_param->ta = temp_info.crash_reg[1];
+ out_param->target = temp_info.crash_reg[2];
+ }
+}
+#endif
+
+void smc_req(struct smc_in_params *in, struct smc_out_params *out, uint8_t wait)
+{
+ do_smc_transport(in, out, wait);
+#ifdef CONFIG_THIRDPARTY_COMPATIBLE
+ fix_params_offset(out);
+#endif
+}
diff --git a/tzdriver/core/smc_abi.h b/tzdriver/core/smc_abi.h
new file mode 100644
index 0000000000000000000000000000000000000000..bf0bb2841ed01fdf693827d7915825d2eddb7eaa
--- /dev/null
+++ b/tzdriver/core/smc_abi.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef SMC_ABI_H
+#define SMC_ABI_H
+
+#include "smc_call.h"
+#define TEE_EXIT_REASON_CRASH 0x4
+void do_smc_transport(struct smc_in_params *in, struct smc_out_params *out, uint8_t wait);
+#endif
diff --git a/tzdriver/core/smc_call.h b/tzdriver/core/smc_call.h
new file mode 100644
index 0000000000000000000000000000000000000000..9401a29a53e1ce6b71b8e49d99e9474165d1a235
--- /dev/null
+++ b/tzdriver/core/smc_call.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef SMC_CALL_H
+#define SMC_CALL_H
+
+#include <linux/types.h>
+
+struct smc_in_params {
+ unsigned long x0;
+ unsigned long x1;
+ unsigned long x2;
+ unsigned long x3;
+ unsigned long x4;
+ unsigned long x5;
+ unsigned long x6;
+ unsigned long x7;
+};
+
+struct smc_out_params {
+ unsigned long ret;
+ unsigned long exit_reason;
+ unsigned long ta;
+ unsigned long target;
+};
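+/*
+ * Register mapping used by smc_req (see smc_abi.c): the input params are
+ * loaded into x0-x7 (r0-r3 on 32-bit ARM) per the SMC Calling Convention,
+ * and on return the first four registers are read back as
+ * ret/exit_reason/ta/target.
+ */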
+
+void smc_req(struct smc_in_params *in, struct smc_out_params *out, uint8_t wait);
+
+#endif
diff --git a/tzdriver/core/smc_smp.c b/tzdriver/core/smc_smp.c
new file mode 100644
index 0000000000000000000000000000000000000000..31f7578495809de7451a7c3fcbc430cbbf556eed
--- /dev/null
+++ b/tzdriver/core/smc_smp.c
@@ -0,0 +1,2128 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: functions for sending smc cmds.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "smc_smp.h"
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/cpumask.h>
+#include <linux/cpu.h>
+#include <linux/atomic.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/version.h>
+#include <securec.h>
+#ifdef CONFIG_SCHED_SMT_EXPELLING
+#include <linux/sched/smt.h>
+#endif
+
+#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE)
+#include <linux/sched/signal.h>
+#include <linux/sched/task.h>
+#endif
+#include <linux/uaccess.h>
+#include <asm/barrier.h>
+
+#ifdef CONFIG_TEE_AUDIT
+#include <chipset_common/security/hw_kernel_stp_interface.h>
+#endif
+
+#ifdef CONFIG_TEE_LOG_EXCEPTION
+#include <huawei_platform/log/imonitor.h>
+#define IMONITOR_TA_CRASH_EVENT_ID 901002003
+#endif
+
+#include "tc_ns_log.h"
+#include "teek_client_constants.h"
+#include "tc_ns_client.h"
+#include "agent.h"
+#include "teek_ns_client.h"
+#include "mailbox_mempool.h"
+#include "cmdmonitor.h"
+#include "tlogger.h"
+#include "ko_adapt.h"
+#include "log_cfg_api.h"
+#include "tee_compat_check.h"
+#include "secs_power_ctrl.h"
+#include "shared_mem.h"
+#include "tui.h"
+#include "internal_functions.h"
+#ifdef CONFIG_SMC_HOOK
+#include "smc_hook.h"
+#endif
+#include "smc_call.h"
+
+#define PREEMPT_COUNT 10000
+#define HZ_COUNT 10
+#define IDLED_COUNT 100
+/*
+ * If no free smc entry can be found, sleep 1 ms and retry.
+ * The task will be killed if it does not return within 25 s,
+ * so the retry count is 25 s / 1 ms.
+ */
+#define FIND_SMC_ENTRY_SLEEP 1
+#define FIND_SMC_ENTRY_RETRY_MAX_COUNT (CMD_MAX_EXECUTE_TIME * S_TO_MS / FIND_SMC_ENTRY_SLEEP)
+
+#define CPU_ZERO 0
+#define CPU_ONE 1
+#define CPU_FOUR 4
+#define CPU_FIVE 5
+#define CPU_SIX 6
+#define CPU_SEVEN 7
+#define LOW_BYTE 0xF
+
+#define PENDING2_RETRY (-1)
+
+#define RETRY_WITH_PM 1
+#define CLEAN_WITHOUT_PM 2
+
+#define MAX_CHAR 0xff
+
+#define MAX_SIQ_NUM 4
+
+/* Current state of the system */
+static bool g_sys_crash;
+
+struct shadow_work {
+ struct kthread_work kthwork;
+ struct work_struct work;
+ uint64_t target;
+};
+
+unsigned long g_shadow_thread_id = 0;
+static struct task_struct *g_siq_thread;
+static struct task_struct *g_smc_svc_thread;
+static struct task_struct *g_ipi_helper_thread;
+static DEFINE_KTHREAD_WORKER(g_ipi_helper_worker);
+
+enum cmd_reuse {
+ CLEAR, /* clear this cmd index */
+ RESEND, /* use this cmd index resend */
+};
+
+struct cmd_reuse_info {
+ int cmd_index;
+ int saved_index;
+ enum cmd_reuse cmd_usage;
+};
+
+#if (CONFIG_CPU_AFF_NR != 0)
+static struct cpumask g_cpu_mask;
+static int g_mask_flag = 0;
+#endif
+
+#ifdef CONFIG_DRM_ADAPT
+static struct cpumask g_drm_cpu_mask;
+static int g_drm_mask_flag = 0;
+#endif
+
+struct tc_ns_smc_queue *g_cmd_data;
+phys_addr_t g_cmd_phys;
+
+static struct list_head g_pending_head;
+static spinlock_t g_pend_lock;
+
+static DECLARE_WAIT_QUEUE_HEAD(siq_th_wait);
+static DECLARE_WAIT_QUEUE_HEAD(ipi_th_wait);
+static atomic_t g_siq_th_run;
+static uint32_t g_siq_queue[MAX_SIQ_NUM];
+DEFINE_MUTEX(g_siq_lock);
+
+enum smc_ops_exit {
+ SMC_OPS_NORMAL = 0x0,
+ SMC_OPS_SCHEDTO = 0x1,
+ SMC_OPS_START_SHADOW = 0x2,
+ SMC_OPS_START_FIQSHD = 0x3,
+ SMC_OPS_PROBE_ALIVE = 0x4,
+ SMC_OPS_ABORT_TASK = 0x5,
+ SMC_EXIT_NORMAL = 0x0,
+ SMC_EXIT_PREEMPTED = 0x1,
+ SMC_EXIT_SHADOW = 0x2,
+ SMC_EXIT_ABORT = 0x3,
+#ifdef CONFIG_THIRDPARTY_COMPATIBLE
+ SMC_EXIT_CRASH = 0x4,
+ SMC_EXIT_MAX = 0x5,
+#else
+ SMC_EXIT_MAX = 0x4,
+#endif
+};
+
+#define SHADOW_EXIT_RUN 0x1234dead
+#define SMC_EXIT_TARGET_SHADOW_EXIT 0x1
+
+#define compile_time_assert(cond, msg) typedef char g_assert_##msg[(cond) ? 1 : -1]
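+/*
+ * The assert works by declaring an array type whose size is -1 when cond
+ * is false; e.g. compile_time_assert(sizeof(int) == 4, int_is_4_bytes)
+ * compiles only on targets where int really is 4 bytes wide.
+ */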
+
+#ifndef CONFIG_BIG_SESSION
+compile_time_assert(sizeof(struct tc_ns_smc_queue) <= PAGE_SIZE,
+ size_of_tc_ns_smc_queue_too_large);
+#endif
+
+static bool g_reserved_cmd_buffer = false;
+static u64 g_cmd_size = 0;
+static bool g_tz_uefi_enable = false;
+
+static int __init tz_check_uefi_enable_func(char *str)
+{
+ if (str != NULL && *str == '1')
+ g_tz_uefi_enable = true;
+
+ return 0;
+}
+early_param("tz_uefi_enable", tz_check_uefi_enable_func);
+
+#define MIN_CMDLINE_SIZE 0x1000
+static int reserved_cmdline(struct reserved_mem *rmem)
+{
+ if (g_tz_uefi_enable && rmem && rmem->size >= MIN_CMDLINE_SIZE) {
+ g_cmd_phys = rmem->base;
+ g_cmd_size = rmem->size;
+ g_reserved_cmd_buffer = true;
+ } else {
+ g_reserved_cmd_buffer = false;
+ }
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(g_teeos_cmdline, "teeos-cmdline", reserved_cmdline);
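+/*
+ * Illustrative (hypothetical) reserved-memory node that would trigger the
+ * callback above; the real node layout and address are product specific:
+ *
+ *	teeos_cmdline: teeos-cmdline@0 {
+ *		compatible = "teeos-cmdline";
+ *		reg = <0x0 0x0 0x0 0x1000>;
+ *	};
+ */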
+
+static void acquire_smc_buf_lock(smc_buf_lock_t *lock)
+{
+ int ret;
+
+ preempt_disable();
+ do
+ ret = (int)cmpxchg(lock, 0, 1);
+ while (ret != 0);
+}
+
+static inline void release_smc_buf_lock(smc_buf_lock_t *lock)
+{
+ (void)cmpxchg(lock, 1, 0);
+ preempt_enable();
+}
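+/*
+ * The smc buf lock is a minimal spinlock built on cmpxchg: acquire spins
+ * until the 0 -> 1 transition succeeds, and preemption stays disabled for
+ * the whole critical section. Callers therefore must not sleep while
+ * holding it, which is why occupy_free_smc_in_entry defers any
+ * mutex-taking work (see the note there).
+ */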
+
+static void occupy_setbit_smc_in_doing_entry(int32_t i, int32_t *idx)
+{
+ g_cmd_data->in[i].event_nr = (unsigned int)i;
+ isb();
+ wmb();
+ set_bit((unsigned int)i, (unsigned long *)g_cmd_data->in_bitmap);
+ set_bit((unsigned int)i, (unsigned long *)g_cmd_data->doing_bitmap);
+ *idx = i;
+}
+
+static int occupy_free_smc_in_entry(const struct tc_ns_smc_cmd *cmd)
+{
+ int idx = -1;
+ int i;
+ uint32_t retry_count = 0;
+
+ if (!cmd) {
+ tloge("bad parameters! cmd is NULL\n");
+ return -1;
+ }
+	/*
+	 * Note:
+	 * acquire_smc_buf_lock disables preemption, and the kernel forbids
+	 * calling mutex_lock while preemption is disabled.
+	 * To avoid that (update_timestamp and update_chksum both call
+	 * mutex_lock), only the cmd copy is done while preemption is
+	 * disabled; update_timestamp and update_chksum run afterwards.
+	 * As soon as this idx of in_bitmap is set, gtask can see this
+	 * cmd_in, even though it is not yet ready (update_xxx is missing).
+	 * So we use a trick: set both doing_bitmap and in_bitmap first,
+	 * and clear doing_bitmap only after update_xxx is done.
+	 */
+get_smc_retry:
+ acquire_smc_buf_lock(&g_cmd_data->smc_lock);
+ for (i = 0; i < MAX_SMC_CMD; i++) {
+ if (test_bit(i, (unsigned long *)g_cmd_data->in_bitmap) != 0)
+ continue;
+ if (memcpy_s(&g_cmd_data->in[i], sizeof(g_cmd_data->in[i]),
+ cmd, sizeof(*cmd)) != EOK) {
+ tloge("memcpy failed,%s line:%d", __func__, __LINE__);
+ break;
+ }
+ occupy_setbit_smc_in_doing_entry(i, &idx);
+ break;
+ }
+ release_smc_buf_lock(&g_cmd_data->smc_lock);
+ if (idx == -1) {
+ if (retry_count <= FIND_SMC_ENTRY_RETRY_MAX_COUNT) {
+ msleep(FIND_SMC_ENTRY_SLEEP);
+ retry_count++;
+ tlogd("can't get any free smc entry and retry:%u\n", retry_count);
+ goto get_smc_retry;
+ }
+ tloge("can't get any free smc entry after retry:%u\n", retry_count);
+ return -1;
+ }
+
+ acquire_smc_buf_lock(&g_cmd_data->smc_lock);
+ isb();
+ wmb();
+ clear_bit((uint32_t)idx, (unsigned long *)g_cmd_data->doing_bitmap);
+ release_smc_buf_lock(&g_cmd_data->smc_lock);
+ return idx;
+}
+
+static int reuse_smc_in_entry(uint32_t idx)
+{
+ int rc = 0;
+
+ acquire_smc_buf_lock(&g_cmd_data->smc_lock);
+ if (!(test_bit((int32_t)idx, (unsigned long *)g_cmd_data->in_bitmap) != 0 &&
+ test_bit((int32_t)idx, (unsigned long *)g_cmd_data->doing_bitmap) != 0)) {
+ tloge("invalid cmd to reuse\n");
+ rc = -1;
+ goto out;
+ }
+ if (memcpy_s(&g_cmd_data->in[idx], sizeof(g_cmd_data->in[idx]),
+ &g_cmd_data->out[idx], sizeof(g_cmd_data->out[idx])) != EOK) {
+ tloge("memcpy failed,%s line:%d", __func__, __LINE__);
+ rc = -1;
+ goto out;
+ }
+
+ isb();
+ wmb();
+ clear_bit(idx, (unsigned long *)g_cmd_data->doing_bitmap);
+out:
+ release_smc_buf_lock(&g_cmd_data->smc_lock);
+ return rc;
+}
+
+static int copy_smc_out_entry(uint32_t idx, struct tc_ns_smc_cmd *copy,
+ enum cmd_reuse *usage)
+{
+ acquire_smc_buf_lock(&g_cmd_data->smc_lock);
+ if (test_bit((int)idx, (unsigned long *)g_cmd_data->out_bitmap) == 0) {
+ tloge("cmd out %u is not ready\n", idx);
+ release_smc_buf_lock(&g_cmd_data->smc_lock);
+ show_cmd_bitmap();
+ return -ENOENT;
+ }
+ if (memcpy_s(copy, sizeof(*copy), &g_cmd_data->out[idx],
+ sizeof(g_cmd_data->out[idx])) != EOK) {
+ tloge("copy smc out failed\n");
+ release_smc_buf_lock(&g_cmd_data->smc_lock);
+ return -EFAULT;
+ }
+
+ isb();
+ wmb();
+ if (g_cmd_data->out[idx].ret_val == (int)TEEC_PENDING2 ||
+ g_cmd_data->out[idx].ret_val == (int)TEEC_PENDING) {
+ *usage = RESEND;
+ } else {
+ clear_bit(idx, (unsigned long *)g_cmd_data->in_bitmap);
+ clear_bit(idx, (unsigned long *)g_cmd_data->doing_bitmap);
+ *usage = CLEAR;
+ }
+ clear_bit(idx, (unsigned long *)g_cmd_data->out_bitmap);
+ release_smc_buf_lock(&g_cmd_data->smc_lock);
+
+ return 0;
+}
+
+static inline void clear_smc_in_entry(uint32_t idx)
+{
+ acquire_smc_buf_lock(&g_cmd_data->smc_lock);
+ clear_bit(idx, (unsigned long *)g_cmd_data->in_bitmap);
+ release_smc_buf_lock(&g_cmd_data->smc_lock);
+}
+
+static void release_smc_entry(uint32_t idx)
+{
+ acquire_smc_buf_lock(&g_cmd_data->smc_lock);
+ clear_bit(idx, (unsigned long *)g_cmd_data->in_bitmap);
+ clear_bit(idx, (unsigned long *)g_cmd_data->doing_bitmap);
+ clear_bit(idx, (unsigned long *)g_cmd_data->out_bitmap);
+ release_smc_buf_lock(&g_cmd_data->smc_lock);
+}
+
+static bool is_cmd_working_done(uint32_t idx)
+{
+ bool ret = false;
+
+ acquire_smc_buf_lock(&g_cmd_data->smc_lock);
+ if (test_bit((int)idx, (unsigned long *)g_cmd_data->out_bitmap) != 0)
+ ret = true;
+ release_smc_buf_lock(&g_cmd_data->smc_lock);
+ return ret;
+}
+
+void occupy_clean_cmd_buf(void)
+{
+ acquire_smc_buf_lock(&g_cmd_data->smc_lock);
+ memset_s(g_cmd_data, sizeof(struct tc_ns_smc_queue), 0, sizeof(struct tc_ns_smc_queue));
+ release_smc_buf_lock(&g_cmd_data->smc_lock);
+}
+
+static void show_in_bitmap(int *cmd_in, uint32_t len)
+{
+ uint32_t idx;
+ uint32_t in = 0;
+ char bitmap[MAX_SMC_CMD + 1];
+
+ if (len != MAX_SMC_CMD || !g_cmd_data)
+ return;
+
+ for (idx = 0; idx < MAX_SMC_CMD; idx++) {
+ if (test_bit((int32_t)idx, (unsigned long *)g_cmd_data->in_bitmap) != 0) {
+ bitmap[idx] = '1';
+ cmd_in[in++] = (int)idx;
+ } else {
+ bitmap[idx] = '0';
+ }
+ }
+ bitmap[MAX_SMC_CMD] = '\0';
+ tloge("in bitmap: %s\n", bitmap);
+}
+
+static void show_out_bitmap(int *cmd_out, uint32_t len)
+{
+ uint32_t idx;
+ uint32_t out = 0;
+ char bitmap[MAX_SMC_CMD + 1];
+
+ if (len != MAX_SMC_CMD || !g_cmd_data)
+ return;
+
+ for (idx = 0; idx < MAX_SMC_CMD; idx++) {
+ if (test_bit((int32_t)idx, (unsigned long *)g_cmd_data->out_bitmap) != 0) {
+ bitmap[idx] = '1';
+ cmd_out[out++] = (int)idx;
+ } else {
+ bitmap[idx] = '0';
+ }
+ }
+ bitmap[MAX_SMC_CMD] = '\0';
+ tloge("out bitmap: %s\n", bitmap);
+}
+
+static void show_doing_bitmap(void)
+{
+ uint32_t idx;
+ char bitmap[MAX_SMC_CMD + 1];
+
+ if (!g_cmd_data)
+ return;
+ for (idx = 0; idx < MAX_SMC_CMD; idx++) {
+ if (test_bit((int)idx, (unsigned long *)g_cmd_data->doing_bitmap) != 0)
+ bitmap[idx] = '1';
+ else
+ bitmap[idx] = '0';
+ }
+ bitmap[MAX_SMC_CMD] = '\0';
+ tloge("doing bitmap: %s\n", bitmap);
+}
+
+static void show_single_cmd_info(const int *cmd, uint32_t len)
+{
+ uint32_t idx;
+
+ if (len != MAX_SMC_CMD || !g_cmd_data)
+ return;
+
+ for (idx = 0; idx < MAX_SMC_CMD; idx++) {
+ if (cmd[idx] == -1)
+ break;
+ tloge("cmd[%d]: cmd_id=%u, ca_pid=%u, dev_id = 0x%x, "
+ "event_nr=%u, ret_val=0x%x\n",
+ cmd[idx],
+ g_cmd_data->in[cmd[idx]].cmd_id,
+ g_cmd_data->in[cmd[idx]].ca_pid,
+ g_cmd_data->in[cmd[idx]].dev_file_id,
+ g_cmd_data->in[cmd[idx]].event_nr,
+ g_cmd_data->in[cmd[idx]].ret_val);
+ }
+}
+
+void show_cmd_bitmap(void)
+{
+ int *cmd_in = NULL;
+ int *cmd_out = NULL;
+
+ cmd_in = kzalloc(sizeof(int) * MAX_SMC_CMD, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)cmd_in)) {
+ tloge("out of mem! cannot show in bitmap\n");
+ return;
+ }
+
+ cmd_out = kzalloc(sizeof(int) * MAX_SMC_CMD, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)cmd_out)) {
+ kfree(cmd_in);
+ tloge("out of mem! cannot show out bitmap\n");
+ return;
+ }
+
+ if (memset_s(cmd_in, sizeof(int)* MAX_SMC_CMD, MAX_CHAR, sizeof(int)* MAX_SMC_CMD) != 0 ||
+ memset_s(cmd_out, sizeof(int)* MAX_SMC_CMD, MAX_CHAR, sizeof(int)* MAX_SMC_CMD) != 0) {
+ tloge("memset failed\n");
+ goto error;
+ }
+
+ acquire_smc_buf_lock(&g_cmd_data->smc_lock);
+
+ show_in_bitmap(cmd_in, MAX_SMC_CMD);
+ show_doing_bitmap();
+ show_out_bitmap(cmd_out, MAX_SMC_CMD);
+
+ tloge("cmd in value:\n");
+ show_single_cmd_info(cmd_in, MAX_SMC_CMD);
+
+ tloge("cmd_out value:\n");
+ show_single_cmd_info(cmd_out, MAX_SMC_CMD);
+
+ release_smc_buf_lock(&g_cmd_data->smc_lock);
+
+error:
+ kfree(cmd_in);
+ kfree(cmd_out);
+}
+
+static struct pending_entry *init_pending_entry(void)
+{
+ struct pending_entry *pe = NULL;
+
+ pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)pe)) {
+ tloge("alloc pe failed\n");
+ return NULL;
+ }
+
+ atomic_set(&pe->users, 1);
+ get_task_struct(current);
+ pe->task = current;
+
+#ifdef CONFIG_TA_AFFINITY
+ cpumask_copy(&pe->ca_mask, CURRENT_CPUS_ALLOWED);
+ cpumask_copy(&pe->ta_mask, CURRENT_CPUS_ALLOWED);
+#endif
+
+ init_waitqueue_head(&pe->wq);
+ atomic_set(&pe->run, 0);
+ INIT_LIST_HEAD(&pe->list);
+ spin_lock(&g_pend_lock);
+ list_add_tail(&pe->list, &g_pending_head);
+ spin_unlock(&g_pend_lock);
+
+ return pe;
+}
+
+struct pending_entry *find_pending_entry(pid_t pid)
+{
+ struct pending_entry *pe = NULL;
+
+ spin_lock(&g_pend_lock);
+ list_for_each_entry(pe, &g_pending_head, list) {
+ if (pe->task->pid == pid) {
+ atomic_inc(&pe->users);
+ spin_unlock(&g_pend_lock);
+ return pe;
+ }
+ }
+ spin_unlock(&g_pend_lock);
+ return NULL;
+}
+
+void foreach_pending_entry(void (*func)(struct pending_entry *))
+{
+ struct pending_entry *pe = NULL;
+
+ if (!func)
+ return;
+
+ spin_lock(&g_pend_lock);
+ list_for_each_entry(pe, &g_pending_head, list) {
+ func(pe);
+ }
+ spin_unlock(&g_pend_lock);
+}
+
+void put_pending_entry(struct pending_entry *pe)
+{
+ if (!pe)
+ return;
+
+ if (!atomic_dec_and_test(&pe->users))
+ return;
+
+ put_task_struct(pe->task);
+ kfree(pe);
+}
+
+#ifdef CONFIG_TA_AFFINITY
+static void restore_cpu_mask(struct pending_entry *pe)
+{
+ if (cpumask_equal(&pe->ca_mask, &pe->ta_mask))
+ return;
+
+ set_cpus_allowed_ptr(current, &pe->ca_mask);
+}
+#endif
+
+static void release_pending_entry(struct pending_entry *pe)
+{
+#ifdef CONFIG_TA_AFFINITY
+ restore_cpu_mask(pe);
+#endif
+ spin_lock(&g_pend_lock);
+ list_del(&pe->list);
+ spin_unlock(&g_pend_lock);
+ put_pending_entry(pe);
+}
+
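+/* bit 0 of the target value presumably tags a shadow-thread exit request */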
+static inline bool is_shadow_exit(uint64_t target)
+{
+ return target & SMC_EXIT_TARGET_SHADOW_EXIT;
+}
+
+/*
+ * Check whether the CA's and TA's cpu affinities match, in two scenarios:
+ * 1. when the TA is blocked and control returns to the REE
+ * 2. when the CA is woken up by an SPI wakeup
+ * match_ta_affinity returns true if the affinity was changed.
+ */
+#ifdef CONFIG_TA_AFFINITY
+static bool match_ta_affinity(struct pending_entry *pe)
+{
+ if (!cpumask_equal(CURRENT_CPUS_ALLOWED, &pe->ta_mask)) {
+ if (set_cpus_allowed_ptr(current, &pe->ta_mask)) {
+ tlogw("set %s affinity failed\n", current->comm);
+ return false;
+ }
+ return true;
+ }
+
+ return false;
+}
+#else
+static inline bool match_ta_affinity(struct pending_entry *pe)
+{
+ (void)pe;
+ return false;
+}
+#endif
+
+struct smc_cmd_ret {
+ unsigned long exit;
+ unsigned long ta;
+ unsigned long target;
+};
+
+bool sigkill_pending(struct task_struct *tsk)
+{
+ bool flag = false;
+
+ if (!tsk) {
+ tloge("tsk is null!\n");
+ return false;
+ }
+
+ flag = (sigismember(&tsk->pending.signal, SIGKILL) != 0) ||
+ (sigismember(&tsk->pending.signal, SIGUSR1) != 0);
+
+ if (tsk->signal)
+ return flag || sigismember(&tsk->signal->shared_pending.signal,
+ SIGKILL);
+ return flag;
+}
+
+#if (CONFIG_CPU_AFF_NR != 0)
+static void set_cpu_strategy(struct cpumask *old_mask)
+{
+ unsigned int i;
+
+ if (g_mask_flag == 0) {
+ cpumask_clear(&g_cpu_mask);
+ for (i = 0; i < CONFIG_CPU_AFF_NR; i++)
+ cpumask_set_cpu(i, &g_cpu_mask);
+ g_mask_flag = 1;
+ }
+ cpumask_copy(old_mask, CURRENT_CPUS_ALLOWED);
+ set_cpus_allowed_ptr(current, &g_cpu_mask);
+}
+#endif
+
+#if (CONFIG_CPU_AFF_NR != 0)
+static void restore_cpu(struct cpumask *old_mask)
+{
+	/* if current equals old, cpu affinity was never set, so nothing to restore */
+ if (cpumask_equal(CURRENT_CPUS_ALLOWED, old_mask))
+ return;
+
+ set_cpus_allowed_ptr(current, old_mask);
+ schedule();
+}
+#endif
+
+static bool is_ready_to_kill(bool need_kill)
+{
+ return (need_kill && sigkill_pending(current) &&
+ is_thread_reported(current->pid));
+}
+
+static void set_smc_send_arg(struct smc_in_params *in_param,
+ const struct smc_cmd_ret *secret, unsigned long ops)
+{
+ if (secret->exit == SMC_EXIT_PREEMPTED) {
+ in_param->x1 = SMC_OPS_SCHEDTO;
+ in_param->x3 = secret->ta;
+ in_param->x4 = secret->target;
+ }
+
+ if (ops == SMC_OPS_SCHEDTO || ops == SMC_OPS_START_FIQSHD)
+ in_param->x4 = secret->target;
+
+ tlogd("[cpu %d]begin send x0=%lx x1=%lx x2=%lx x3=%lx x4=%lx\n",
+ raw_smp_processor_id(), in_param->x0, in_param->x1,
+ in_param->x2, in_param->x3, in_param->x4);
+}
+
+static void send_asm_smc_cmd(struct smc_in_params *in_param, struct smc_out_params *out_param)
+{
+ smc_req(in_param, out_param, 0);
+}
+
+#ifdef CONFIG_TEE_REBOOT
+int send_smc_cmd_rebooting(uint32_t cmd_id, phys_addr_t cmd_addr, uint32_t cmd_type, const struct tc_ns_smc_cmd *in_cmd)
+{
+ struct tc_ns_smc_cmd cmd = { {0}, 0 };
+ struct smc_in_params in_param = {cmd_id, cmd_addr, cmd_type, cmd_addr >> ADDR_TRANS_NUM, TEE_ERROR_IS_DEAD};
+ struct smc_out_params out_param = {0};
+
+ if (in_cmd != NULL) {
+ if (memcpy_s(&cmd, sizeof(cmd), in_cmd, sizeof(*in_cmd)) != EOK) {
+ tloge("memcpy in cmd failed\n");
+ return -EFAULT;
+ }
+ if (occupy_free_smc_in_entry(&cmd) == -1) {
+ tloge("there's no more smc entry\n");
+ return -ENOMEM;
+ }
+ }
+retry:
+ isb();
+ wmb();
+ send_asm_smc_cmd(&in_param, &out_param);
+ isb();
+ wmb();
+ if (out_param.exit_reason == SMC_EXIT_PREEMPTED)
+ goto retry;
+
+ return out_param.exit_reason;
+}
+#else
+int send_smc_cmd_rebooting(uint32_t cmd_id, phys_addr_t cmd_addr, uint32_t cmd_type, const struct tc_ns_smc_cmd *in_cmd)
+{
+ (void)cmd_id;
+ (void)cmd_addr;
+ (void)cmd_type;
+ (void)in_cmd;
+ return 0;
+}
+#endif
+
+static noinline int smp_smc_send(uint32_t cmd, unsigned long ops, unsigned long ca,
+ struct smc_cmd_ret *secret, bool need_kill)
+{
+ struct smc_in_params in_param = { cmd, ops, ca, 0, 0 };
+ struct smc_out_params out_param = {0};
+#if (CONFIG_CPU_AFF_NR != 0)
+ struct cpumask old_mask;
+#endif
+
+#if (CONFIG_CPU_AFF_NR != 0)
+ set_cpu_strategy(&old_mask);
+#endif
+retry:
+ set_smc_send_arg(&in_param, secret, ops);
+ tee_trace_add_event(SMC_SEND, 0);
+ send_asm_smc_cmd(&in_param, &out_param);
+ tee_trace_add_event(SMC_DONE, 0);
+ tlogd("[cpu %d] return val %lx exit_reason %lx ta %lx targ %lx\n",
+ raw_smp_processor_id(), out_param.ret, out_param.exit_reason,
+ out_param.ta, out_param.target);
+
+ secret->exit = out_param.exit_reason;
+ secret->ta = out_param.ta;
+ secret->target = out_param.target;
+
+ if (out_param.exit_reason == SMC_EXIT_PREEMPTED) {
+		/*
+		 * There are two ways to send a terminate cmd to kill a running
+		 * TA: in the current context or in another one. Sending it in
+		 * another context risks a race: the terminate cmd may be sent
+		 * but not yet processed while the original cmd has already
+		 * finished. So we send the terminate cmd in the current context.
+		 */
+ if (is_ready_to_kill(need_kill)) {
+ secret->exit = SMC_EXIT_ABORT;
+ tloge("receive kill signal\n");
+ } else {
+#if (!defined(CONFIG_PREEMPT)) || defined(CONFIG_RTOS_PREEMPT_OFF)
+ /* yield cpu to avoid soft lockup */
+ cond_resched();
+#endif
+ goto retry;
+ }
+ }
+#if (CONFIG_CPU_AFF_NR != 0)
+ restore_cpu(&old_mask);
+#endif
+ return (int)out_param.ret;
+}
+
+static uint64_t send_smc_cmd(uint32_t cmd, phys_addr_t cmd_addr, uint32_t cmd_type, uint8_t wait)
+{
+ uint64_t ret = 0;
+ struct smc_in_params in_param = { cmd, cmd_addr, cmd_type, cmd_addr >> ADDR_TRANS_NUM };
+ struct smc_out_params out_param = { ret };
+#ifdef CONFIG_THIRDPARTY_COMPATIBLE
+ if (g_sys_crash) {
+ out_param.ret = TSP_CRASH;
+ return out_param.ret;
+ }
+#endif
+ smc_req(&in_param, &out_param, wait);
+ ret = out_param.ret;
+ return ret;
+}
+
+unsigned long raw_smc_send(uint32_t cmd, phys_addr_t cmd_addr,
+ uint32_t cmd_type, uint8_t wait)
+{
+ unsigned long x0;
+
+#if (CONFIG_CPU_AFF_NR != 0)
+ struct cpumask old_mask;
+ set_cpu_strategy(&old_mask);
+#endif
+
+ x0 = send_smc_cmd(cmd, cmd_addr, cmd_type, wait);
+
+#if (CONFIG_CPU_AFF_NR != 0)
+ restore_cpu(&old_mask);
+#endif
+ return x0;
+}
+
+static void siq_dump(phys_addr_t mode, uint32_t siq_mode)
+{
+ int ret = raw_smc_send(TSP_REE_SIQ, mode, 0, false);
+ if (ret == TSP_CRASH) {
+ tloge("TEEOS has crashed!\n");
+ g_sys_crash = true;
+ cmd_monitor_ta_crash(TYPE_CRASH_TEE, NULL, 0);
+ }
+
+ if (siq_mode == SIQ_DUMP_TIMEOUT) {
+ tz_log_write();
+ } else if (siq_mode == SIQ_DUMP_SHELL) {
+#ifdef CONFIG_TEE_LOG_DUMP_PATH
+ (void)tlogger_store_msg(CONFIG_TEE_LOG_DUMP_PATH,
+ sizeof(CONFIG_TEE_LOG_DUMP_PATH));
+#else
+ tz_log_write();
+#endif
+ }
+ do_cmd_need_archivelog();
+}
+
+static uint32_t get_free_siq_index(void)
+{
+ uint32_t i;
+
+ for (i = 0; i < MAX_SIQ_NUM; i++) {
+ if (g_siq_queue[i] == 0)
+ return i;
+ }
+
+ return MAX_SIQ_NUM;
+}
+
+static uint32_t get_undo_siq_index(void)
+{
+ uint32_t i;
+
+ for (i = 0; i < MAX_SIQ_NUM; i++) {
+ if (g_siq_queue[i] != 0)
+ return i;
+ }
+
+ return MAX_SIQ_NUM;
+}
+
+#define RUN_SIQ_THREAD 1
+#define STOP_SIQ_THREAD 2
+static int siq_thread_fn(void *arg)
+{
+ int ret;
+ uint32_t i;
+ (void)arg;
+
+ while (true) {
+ ret = (int)wait_event_interruptible(siq_th_wait,
+ atomic_read(&g_siq_th_run));
+ if (ret != 0) {
+ tloge("wait event interruptible failed!\n");
+ return -EINTR;
+ }
+ if (atomic_read(&g_siq_th_run) == STOP_SIQ_THREAD)
+ return 0;
+
+ mutex_lock(&g_siq_lock);
+ do {
+ i = get_undo_siq_index();
+ if (i >= MAX_SIQ_NUM)
+ break;
+ siq_dump((phys_addr_t)(1), g_siq_queue[i]);
+ g_siq_queue[i] = 0;
+ } while (true);
+ atomic_set(&g_siq_th_run, 0);
+ mutex_unlock(&g_siq_lock);
+ }
+}
+
+#ifdef CONFIG_TEE_AUDIT
+#define MAX_UPLOAD_INFO_LEN 4
+#define INFO_HIGH_OFFSET 24U
+#define INFO_MID_OFFSET 16U
+#define INFO_LOW_OFFSET 8U
+
+static void upload_audit_event(unsigned int eventindex)
+{
+#ifdef CONFIG_HW_KERNEL_STP
+ struct stp_item item;
+ int ret;
+ char att_info[MAX_UPLOAD_INFO_LEN + 1] = {0};
+
+ att_info[0] = (unsigned char)(eventindex >> INFO_HIGH_OFFSET);
+ att_info[1] = (unsigned char)(eventindex >> INFO_MID_OFFSET);
+ att_info[2] = (unsigned char)(eventindex >> INFO_LOW_OFFSET);
+ att_info[3] = (unsigned char)eventindex;
+ att_info[MAX_UPLOAD_INFO_LEN] = '\0';
+ item.id = item_info[ITRUSTEE].id; /* 0x00000185 */
+ item.status = STP_RISK;
+ item.credible = STP_REFERENCE;
+ item.version = 0;
+ ret = strcpy_s(item.name, STP_ITEM_NAME_LEN, STP_NAME_ITRUSTEE);
+ if (ret) {
+		tloge("strcpy_s failed %x\n", ret);
+ return;
+ }
+ tlogd("stp get size %lx succ\n", sizeof(item_info[ITRUSTEE].name));
+ ret = kernel_stp_upload(item, att_info);
+ if (ret)
+ tloge("stp %x event upload failed\n", eventindex);
+ else
+ tloge("stp %x event upload succ\n", eventindex);
+#else
+ (void)eventindex;
+#endif
+}
+#endif
+
+static void cmd_result_check(const struct tc_ns_smc_cmd *cmd, int cmd_index)
+{
+ if (cmd->ret_val == (int)TEEC_PENDING || cmd->ret_val == (int)TEEC_PENDING2)
+ tlogd("wakeup command %u\n", cmd->event_nr);
+
+ if (cmd->ret_val == (int)TEE_ERROR_TAGET_DEAD) {
+ bool ta_killed = g_cmd_data->in[cmd_index].cmd_id == GLOBAL_CMD_ID_KILL_TASK;
+ tloge("error smc call: ret = %x and cmd.err_origin=%x, [ta is %s]\n",
+ cmd->ret_val, cmd->err_origin, (ta_killed == true) ? "killed" : "crash");
+ cmd_monitor_ta_crash((ta_killed == true) ? TYPE_KILLED_TA : TYPE_CRASH_TA,
+ cmd->uuid, sizeof(struct tc_uuid));
+ ta_crash_report_log();
+ } else if (cmd->ret_val == (int)TEEC_ERROR_TUI_NOT_AVAILABLE) {
+ do_ns_tui_release();
+ } else if (cmd->ret_val == (int)TEE_ERROR_AUDIT_FAIL) {
+ tloge("error smc call: ret = %x and err-origin=%x\n",
+ cmd->ret_val, cmd->err_origin);
+#ifdef CONFIG_TEE_AUDIT
+ tloge("error smc call: status = %x and err-origin=%x\n",
+ cmd->eventindex, cmd->err_origin);
+ upload_audit_event(cmd->eventindex);
+#endif
+ }
+}
+
+static void set_shadow_smc_param(struct smc_in_params *in_params,
+ const struct smc_out_params *out_params, int *n_idled)
+{
+ if (out_params->exit_reason == SMC_EXIT_PREEMPTED) {
+ in_params->x0 = TSP_REQUEST;
+ in_params->x1 = SMC_OPS_SCHEDTO;
+ in_params->x2 = (unsigned long)current->pid;
+ in_params->x3 = out_params->ta;
+ in_params->x4 = out_params->target;
+ } else if (out_params->exit_reason == SMC_EXIT_NORMAL) {
+ in_params->x0 = TSP_REQUEST;
+ in_params->x1 = SMC_OPS_SCHEDTO;
+ in_params->x2 = (unsigned long)current->pid;
+ in_params->x3 = 0;
+ in_params->x4 = 0;
+ if (*n_idled > IDLED_COUNT) {
+ *n_idled = 0;
+ in_params->x1 = SMC_OPS_PROBE_ALIVE;
+ }
+ }
+}
+
+static void shadow_wo_pm(const void *arg, struct smc_out_params *out_params,
+ int *n_idled)
+{
+ struct smc_in_params in_params = {
+ TSP_REQUEST, SMC_OPS_START_SHADOW, current->pid, 0, *(unsigned long *)arg
+ };
+
+ set_shadow_smc_param(&in_params, out_params, n_idled);
+ tlogd("%s: [cpu %d] x0=%lx x1=%lx x2=%lx x3=%lx x4=%lx\n",
+ __func__, raw_smp_processor_id(), in_params.x0, in_params.x1,
+ in_params.x2, in_params.x3, in_params.x4);
+
+#ifdef CONFIG_THIRDPARTY_COMPATIBLE
+ if (g_sys_crash) {
+ out_params->ret = TSP_CRASH;
+ return;
+ }
+#endif
+ smc_req(&in_params, out_params, 0);
+}
+
+static void set_preempted_counter(int *n_preempted, int *n_idled,
+ struct pending_entry *pe)
+{
+ *n_idled = 0;
+ (*n_preempted)++;
+
+ if (*n_preempted > PREEMPT_COUNT) {
+ tlogd("counter too large: retry 10K times on CPU%d\n", smp_processor_id());
+ *n_preempted = 0;
+ }
+#ifndef CONFIG_PREEMPT
+ /* yield cpu to avoid soft lockup */
+ cond_resched();
+#endif
+ if (match_ta_affinity(pe))
+ tloge("set shadow pid %d affinity after preempted\n",
+ pe->task->pid);
+}
+
+static int proc_shadow_thread_normal_exit(struct pending_entry *pe,
+ int *n_preempted, int *n_idled, int *ret_val)
+{
+ long long timeout;
+ int rc;
+
+ if (power_down_cc() != 0) {
+ tloge("power down cc failed\n");
+ *ret_val = -1;
+ return CLEAN_WITHOUT_PM;
+ }
+ *n_preempted = 0;
+
+ timeout = HZ * (long)(HZ_COUNT + ((uint8_t)current->pid & LOW_BYTE));
+ rc = (int)wait_event_freezable_timeout(pe->wq,
+ atomic_read(&pe->run), (long)timeout);
+ if (rc == 0)
+ (*n_idled)++;
+ if (atomic_read(&pe->run) == SHADOW_EXIT_RUN) {
+ tlogd("shadow thread work quit, be killed\n");
+ return CLEAN_WITHOUT_PM;
+	}
+
+	atomic_set(&pe->run, 0);
+	return RETRY_WITH_PM;
+}
+
+static bool check_shadow_crash(uint64_t crash_reason, int *ret_val)
+{
+ if (crash_reason != TSP_CRASH)
+ return false;
+
+ tloge("TEEOS shadow has crashed!\n");
+ if (power_down_cc() != 0)
+ tloge("power down cc failed\n");
+
+ g_sys_crash = true;
+ cmd_monitor_ta_crash(TYPE_CRASH_TEE, NULL, 0);
+ report_log_system_error();
+ *ret_val = -1;
+ return true;
+}
+
+static void show_other_exit_reason(const struct smc_out_params *params)
+{
+ if (params->exit_reason == SMC_EXIT_SHADOW) {
+ tlogd("probe shadow thread non exit, just quit\n");
+ return;
+ }
+
+ tloge("exit on unknown code %ld\n", (long)params->exit_reason);
+}
+
+static int shadow_thread_fn(void *arg)
+{
+ int n_preempted = 0;
+ int ret = 0;
+ struct smc_out_params params = { 0, SMC_EXIT_MAX, 0, 0 };
+ int n_idled = 0;
+ struct pending_entry *pe = NULL;
+
+ set_freezable();
+ pe = init_pending_entry();
+ if (!pe) {
+ kfree(arg);
+ tloge("init pending entry failed\n");
+ return -ENOMEM;
+ }
+ isb();
+ wmb();
+
+retry:
+ if (power_on_cc() != 0) {
+ ret = -EINVAL;
+ tloge("power on cc failed\n");
+ goto clean_wo_pm;
+ }
+
+retry_wo_pm:
+ shadow_wo_pm(arg, ¶ms, &n_idled);
+ if (check_shadow_crash(params.ret, &ret))
+ goto clean_wo_pm;
+
+ if (params.exit_reason == SMC_EXIT_PREEMPTED) {
+ set_preempted_counter(&n_preempted, &n_idled, pe);
+ goto retry_wo_pm;
+ } else if (params.exit_reason == SMC_EXIT_NORMAL) {
+ ret = proc_shadow_thread_normal_exit(pe, &n_preempted, &n_idled, &ret);
+ if (ret == CLEAN_WITHOUT_PM) {
+ goto clean_wo_pm;
+ } else if (ret == RETRY_WITH_PM) {
+ if (match_ta_affinity(pe))
+ tlogd("set shadow pid %d\n", pe->task->pid);
+ goto retry;
+ }
+ } else {
+ show_other_exit_reason(¶ms);
+ }
+
+ if (power_down_cc() != 0) {
+ tloge("power down cc failed\n");
+ ret = -1;
+ }
+clean_wo_pm:
+ kfree(arg);
+ release_pending_entry(pe);
+ return ret;
+}
+
+static void shadow_work_func(struct kthread_work *work)
+{
+ struct task_struct *shadow_thread = NULL;
+ struct shadow_work *s_work =
+ container_of(work, struct shadow_work, kthwork);
+ uint64_t *target_arg = kzalloc(sizeof(uint64_t), GFP_KERNEL);
+
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)target_arg)) {
+ tloge("%s: kmalloc failed\n", __func__);
+ return;
+ }
+
+ *target_arg = s_work->target;
+ shadow_thread = kthread_create(shadow_thread_fn,
+ (void *)(uintptr_t)target_arg, "shadow th/%lu",
+ g_shadow_thread_id++);
+ if (IS_ERR_OR_NULL(shadow_thread)) {
+ kfree(target_arg);
+ tloge("couldn't create shadow_thread %ld\n",
+ PTR_ERR(shadow_thread));
+ return;
+ }
+ tlogd("%s: create shadow thread %lu for target %llx\n",
+ __func__, g_shadow_thread_id, *target_arg);
+ tz_kthread_bind_mask(shadow_thread);
+#if CONFIG_CPU_AFF_NR
+ struct cpumask shadow_mask;
+ unsigned int i;
+
+ cpumask_clear(&shadow_mask);
+ for (i = 0; i < CONFIG_CPU_AFF_NR; i++)
+ cpumask_set_cpu(i, &shadow_mask);
+
+ koadpt_kthread_bind_mask(shadow_thread, &shadow_mask);
+#endif
+ wake_up_process(shadow_thread);
+}
+
+static int proc_smc_wakeup_ca(pid_t ca, int which)
+{
+ if (ca <= 0) {
+ tlogw("wakeup for ca <= 0\n");
+ } else {
+ struct pending_entry *pe = find_pending_entry(ca);
+
+ if (!pe) {
+ (void)raw_smc_send(TSP_REE_SIQ, (phys_addr_t)ca, 0, false);
+ tlogd("invalid ca pid=%d for pending entry\n",
+ (int)ca);
+ return -1;
+ }
+ atomic_set(&pe->run, which);
+ wake_up(&pe->wq);
+ tlogd("wakeup pending thread %ld\n", (long)ca);
+ put_pending_entry(pe);
+ }
+ return 0;
+}
+
+void wakeup_pe(struct pending_entry *pe)
+{
+ if (!pe)
+ return;
+
+ atomic_set(&pe->run, 1);
+ wake_up(&pe->wq);
+}
+
+int smc_wakeup_broadcast(void)
+{
+ foreach_pending_entry(wakeup_pe);
+ return 0;
+}
+
+int smc_wakeup_ca(pid_t ca)
+{
+ tee_trace_add_event(SPI_WAKEUP, (uint64_t)ca);
+ return proc_smc_wakeup_ca(ca, 1);
+}
+
+int smc_shadow_exit(pid_t ca)
+{
+ return proc_smc_wakeup_ca(ca, SHADOW_EXIT_RUN);
+}
+
+void fiq_shadow_work_func(uint64_t target)
+{
+ struct smc_cmd_ret secret = { SMC_EXIT_MAX, 0, target };
+ tee_trace_add_event(INTERRUPT_HANDLE_SPI_REE_SCHEDULED, target);
+ secs_suspend_status(target);
+ if (power_on_cc() != 0) {
+ tloge("power on cc failed\n");
+ return;
+ }
+
+ livepatch_down_read_sem();
+ smp_smc_send(TSP_REQUEST, (unsigned long)SMC_OPS_START_FIQSHD,
+ (unsigned long)(uint32_t)(current->pid), &secret, false);
+ livepatch_up_read_sem();
+
+ if (power_down_cc() != 0)
+ tloge("power down cc failed\n");
+
+ return;
+}
+
+int smc_queue_shadow_worker(uint64_t target)
+{
+ struct shadow_work work = {
+ KTHREAD_WORK_INIT(work.kthwork, shadow_work_func),
+ .target = target,
+ };
+
+#if (KERNEL_VERSION(4, 9, 0) > LINUX_VERSION_CODE)
+ if (!queue_kthread_work(&g_ipi_helper_worker, &work.kthwork)) {
+#else
+ if (!kthread_queue_work(&g_ipi_helper_worker, &work.kthwork)) {
+#endif
+		tloge("failed to queue ipi helper work, it was already pending\n");
+ return -1;
+ }
+
+#if (KERNEL_VERSION(4, 9, 0) > LINUX_VERSION_CODE)
+ flush_kthread_work(&work.kthwork);
+#else
+ kthread_flush_work(&work.kthwork);
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_DRM_ADAPT
+#define DRM_USR_PRIOR (-5)
+static void set_drm_strategy(void)
+{
+ if (!g_drm_mask_flag) {
+ cpumask_clear(&g_drm_cpu_mask);
+ cpumask_set_cpu(CPU_FOUR, &g_drm_cpu_mask);
+ cpumask_set_cpu(CPU_FIVE, &g_drm_cpu_mask);
+ cpumask_set_cpu(CPU_SIX, &g_drm_cpu_mask);
+ cpumask_set_cpu(CPU_SEVEN, &g_drm_cpu_mask);
+ g_drm_mask_flag = 1;
+ }
+
+ if (current->group_leader &&
+ strstr(current->group_leader->comm, "drm@1.")) {
+ set_cpus_allowed_ptr(current, &g_drm_cpu_mask);
+ set_user_nice(current, DRM_USR_PRIOR);
+ }
+}
+#endif
+
+static int smc_ops_normal(struct cmd_reuse_info *info,
+ const struct tc_ns_smc_cmd *cmd, u64 ops)
+{
+ if (ops != SMC_OPS_NORMAL)
+ return 0;
+
+ if (info->cmd_usage == RESEND) {
+ if (reuse_smc_in_entry((uint32_t)info->cmd_index) != 0) {
+ tloge("reuse smc entry failed\n");
+ release_smc_entry((uint32_t)info->cmd_index);
+ return -ENOMEM;
+ }
+ } else {
+ info->cmd_index = occupy_free_smc_in_entry(cmd);
+ if (info->cmd_index == -1) {
+ tloge("there's no more smc entry\n");
+ return -ENOMEM;
+ }
+ }
+
+ if (info->cmd_usage != CLEAR) {
+ info->cmd_index = info->saved_index;
+ info->cmd_usage = CLEAR;
+ } else {
+ info->saved_index = info->cmd_index;
+ }
+
+ tlogd("submit new cmd: cmd.ca=%u cmd-id=%x ev-nr=%u "
+ "cmd-index=%u saved-index=%d\n",
+ cmd->ca_pid, cmd->cmd_id,
+ g_cmd_data->in[info->cmd_index].event_nr, info->cmd_index,
+ info->saved_index);
+ return 0;
+}
+
+static int smp_smc_send_cmd_done(int cmd_index, struct tc_ns_smc_cmd *cmd,
+ struct tc_ns_smc_cmd *in)
+{
+ cmd_result_check(cmd, cmd_index);
+ switch (cmd->ret_val) {
+ case TEEC_PENDING2: {
+ unsigned int agent_id = cmd->agent_id;
+		/* if the agent does not exist, post
+		 * the answer right back to the TEE
+		 */
+ if (agent_process_work(cmd, agent_id) != 0)
+ tloge("agent process work failed\n");
+ return PENDING2_RETRY;
+ }
+ case TEE_ERROR_TAGET_DEAD:
+ case TEEC_PENDING:
+ /* just copy out, and let out to proceed */
+ default:
+ if (memcpy_s(in, sizeof(*in), cmd, sizeof(*cmd)) != EOK) {
+ tloge("memcpy failed,%s line:%d", __func__, __LINE__);
+ cmd->ret_val = -1;
+ }
+
+ break;
+ }
+
+ return 0;
+}
+
+#define KERNEL_INDEX 5
+static void print_crash_msg(union crash_inf *crash_info)
+{
+ static const char *tee_critical_app[] = {
+ "gtask",
+ "teesmcmgr",
+ "hmsysmgr",
+ "hmfilemgr",
+ "platdrv",
+ "kernel", /* index must be same with KERNEL_INDEX */
+ "vltmm_service",
+ "tee_drv_server"
+ };
+ int app_num = sizeof(tee_critical_app) / sizeof(tee_critical_app[0]);
+ const char *crash_app_name = "NULL";
+ uint16_t off = crash_info->crash_msg.off;
+ int app_index = crash_info->crash_msg.app & LOW_BYTE;
+ int halt_reason = crash_info->crash_msg.halt_reason;
+
+ crash_info->crash_msg.off = 0;
+
+ if (app_index >= 0 && app_index < app_num)
+ crash_app_name = tee_critical_app[app_index];
+ else
+ tloge("index error: %x\n", crash_info->crash_msg.app);
+
+ if (app_index == KERNEL_INDEX) {
+ tloge("====crash app:%s user sym:%s kernel crash off/size: "
+ "<0x%x/0x%x>\n", crash_app_name,
+ crash_info->crash_msg.sym_name,
+ off, crash_info->crash_msg.size);
+ tloge("====crash halt reason: 0x%x far:0x%x fault:0x%x "
+ "elr:0x%x (ret_ip: 0x%llx)\n",
+ halt_reason, crash_info->crash_msg.far,
+ crash_info->crash_msg.fault, crash_info->crash_msg.elr,
+ crash_info->crash_reg[2]);
+ } else {
+ char syms[SYM_NAME_LEN_MAX] = {0};
+
+ if (memcpy_s(syms, SYM_NAME_LEN_MAX,
+ crash_info->crash_msg.sym_name, SYM_NAME_LEN_1) != EOK)
+ tloge("memcpy sym name failed!\n");
+
+ if (memcpy_s(syms + SYM_NAME_LEN_1,
+ SYM_NAME_LEN_MAX - SYM_NAME_LEN_1,
+ crash_info->crash_msg.sym_name_append, SYM_NAME_LEN_2) != EOK)
+ tloge("memcpy sym_name_append failed!\n");
+ tloge("====crash app:%s user_sym:%s + <0x%x/0x%x>\n",
+ crash_app_name, syms, off, crash_info->crash_msg.size);
+ tloge("====crash far:0x%x fault:%x\n",
+ crash_info->crash_msg.far, crash_info->crash_msg.fault);
+ }
+}
+
+void clr_system_crash_flag(void)
+{
+ g_sys_crash = false;
+}
+
+static int smp_smc_send_process(struct tc_ns_smc_cmd *cmd, u64 ops,
+ struct smc_cmd_ret *cmd_ret, int cmd_index)
+{
+ int ret;
+ tlogd("smc send start cmd_id = %u, ca = %u\n",
+ cmd->cmd_id, cmd->ca_pid);
+
+ if (power_on_cc() != 0) {
+ tloge("power on cc failed\n");
+ cmd->ret_val = -1;
+ return -1;
+ }
+
+ ret = smp_smc_send(TSP_REQUEST, (unsigned long)ops,
+ (unsigned long)(uint32_t)(current->pid), cmd_ret, ops != SMC_OPS_ABORT_TASK);
+
+ if (power_down_cc() != 0) {
+ tloge("power down cc failed\n");
+ cmd->ret_val = -1;
+ return -1;
+ }
+
+ tlogd("smc send ret = %x, cmd ret.exit=%ld, cmd index=%d\n",
+ ret, (long)cmd_ret->exit, cmd_index);
+ isb();
+ wmb();
+ if (ret == (int)TSP_CRASH) {
+ union crash_inf crash_info;
+ crash_info.crash_reg[0] = cmd_ret->exit;
+ crash_info.crash_reg[1] = cmd_ret->ta;
+ crash_info.crash_reg[2] = cmd_ret->target;
+
+ tloge("TEEOS has crashed!\n");
+ print_crash_msg(&crash_info);
+
+ g_sys_crash = true;
+ cmd_monitor_ta_crash(TYPE_CRASH_TEE, NULL, 0);
+
+ tee_wake_up_reboot();
+#ifndef CONFIG_TEE_REBOOT
+ report_log_system_error();
+#endif
+ cmd->ret_val = TEE_ERROR_IS_DEAD;
+ return -1;
+ }
+
+ return 0;
+}
+
+static int init_for_smc_send(struct tc_ns_smc_cmd *in,
+ struct pending_entry **pe, struct tc_ns_smc_cmd *cmd,
+ bool reuse)
+{
+#ifdef CONFIG_DRM_ADAPT
+ set_drm_strategy();
+#endif
+ *pe = init_pending_entry();
+ if (!(*pe)) {
+ tloge("init pending entry failed\n");
+ return -ENOMEM;
+ }
+
+ in->ca_pid = (unsigned int)current->pid;
+ if (reuse)
+ return 0;
+
+ if (memcpy_s(cmd, sizeof(*cmd), in, sizeof(*in)) != EOK) {
+ tloge("memcpy in cmd failed\n");
+ release_pending_entry(*pe);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static bool is_ca_killed(int cmd_index)
+{
+ (void)cmd_index;
+	/* check whether the CA has a kill signal pending */
+ if (sigkill_pending(current)) {
+ /* signal pending, send abort cmd */
+ tloge("wait event timeout and find pending signal\n");
+ return true;
+ }
+ return false;
+}
+
+static void clean_smc_resrc(struct cmd_reuse_info info,
+ const struct tc_ns_smc_cmd *cmd,
+ struct pending_entry *pe)
+{
+ if (info.cmd_usage != CLEAR && cmd->ret_val != (int)TEEC_PENDING)
+ release_smc_entry((uint32_t)info.cmd_index);
+
+ release_pending_entry(pe);
+}
+
+static int set_abort_cmd(int index)
+{
+ acquire_smc_buf_lock(&g_cmd_data->smc_lock);
+ if (test_bit(index, (unsigned long *)g_cmd_data->doing_bitmap) == 0) {
+ release_smc_buf_lock(&g_cmd_data->smc_lock);
+		tloge("can't abort an unprocessed cmd\n");
+ return -1;
+ }
+
+ g_cmd_data->in[index].cmd_id = GLOBAL_CMD_ID_KILL_TASK;
+ g_cmd_data->in[index].cmd_type = CMD_TYPE_GLOBAL;
+	/* these phys addrs are not needed; clear them to avoid gtask check errors */
+ g_cmd_data->in[index].operation_phys = 0;
+ g_cmd_data->in[index].operation_h_phys = 0;
+ g_cmd_data->in[index].login_data_phy = 0;
+ g_cmd_data->in[index].login_data_h_addr = 0;
+
+ clear_bit((unsigned int)index, (unsigned long *)g_cmd_data->doing_bitmap);
+ release_smc_buf_lock(&g_cmd_data->smc_lock);
+ tloge("set abort cmd success\n");
+
+ return 0;
+}
+
+static enum smc_ops_exit process_abort_cmd(int index, const struct pending_entry *pe)
+{
+ (void)pe;
+ if (set_abort_cmd(index) == 0)
+ return SMC_OPS_ABORT_TASK;
+
+ return SMC_OPS_SCHEDTO;
+}
+
+#define TO_STEP_SIZE 5
+#define INVALID_STEP_SIZE 0xFFFFFFFFU
+
+struct timeout_step_t {
+ unsigned long steps[TO_STEP_SIZE];
+ uint32_t size;
+ uint32_t cur;
+ bool timeout_reset;
+};
+
+static void init_timeout_step(uint32_t timeout, struct timeout_step_t *step)
+{
+ uint32_t i = 0;
+
+ if (timeout == 0) {
+ step->steps[0] = RESLEEP_TIMEOUT * HZ;
+ step->size = 1;
+ } else {
+ uint32_t timeout_in_jiffies;
+
+ if (timeout > RESLEEP_TIMEOUT * MSEC_PER_SEC)
+ timeout = RESLEEP_TIMEOUT * MSEC_PER_SEC;
+ timeout_in_jiffies = (uint32_t)msecs_to_jiffies(timeout);
+
+		/*
+		 * Sleep in steps spanning [timeout_in_jiffies - 1, timeout_in_jiffies + 2] jiffies.
+		 * As REE and TEE ticks can deviate, stepping from
+		 * 'timeout_in_jiffies - 1' to 'timeout_in_jiffies + 2' makes sure
+		 * the last REE timeout fires after the TEE timeout.
+		 */
+ if (timeout_in_jiffies > 1) {
+ step->steps[i++] = timeout_in_jiffies - 1;
+ step->steps[i++] = 1;
+ } else {
+ step->steps[i++] = timeout_in_jiffies;
+ }
+ step->steps[i++] = 1;
+ step->steps[i++] = 1;
+
+ if (RESLEEP_TIMEOUT * HZ > (timeout_in_jiffies + 2))
+ step->steps[i++] = RESLEEP_TIMEOUT * HZ - 2 - timeout_in_jiffies;
+ step->size = i;
+ }
+ step->cur = 0;
+}
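+/*
+ * Worked example (assuming HZ = 1000): timeout = 10 ms gives
+ * timeout_in_jiffies = 10, so steps = { 9, 1, 1, 1, <resleep remainder> }.
+ * The first four sleeps add up to timeout_in_jiffies + 2 jiffies, which
+ * brackets the TEE-side timeout despite REE/TEE tick deviation.
+ */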
+
+enum pending_t {
+ PD_WAKEUP,
+ PD_TIMEOUT,
+ PD_DONE,
+ PD_RETRY,
+};
+
+enum smc_status_t {
+ ST_DONE,
+ ST_RETRY,
+};
+
+static long wait_event_internal(struct pending_entry *pe, struct timeout_step_t *step)
+{
+ if (!current->mm) {
+		/*
+		 * The smc svc thread must be freezable: when the system
+		 * hibernates, the TEE image may need to be backed up, and
+		 * no smc cmd is allowed to enter the TEE during that time.
+		 */
+ return wait_event_freezable_timeout(pe->wq, atomic_read(&pe->run),
+ step->steps[step->cur]);
+ } else {
+ return wait_event_timeout(pe->wq, atomic_read(&pe->run),
+ step->steps[step->cur]);
+ }
+}
+static enum pending_t proc_ta_pending(struct pending_entry *pe,
+ struct timeout_step_t *step, uint64_t pending_args, uint32_t cmd_index,
+ u64 *ops)
+{
+ bool kernel_call = false;
+ bool woke_up = false;
+ /*
+ * if ->mm is NULL, it's a kernel thread and a kthread will never
+ * receive a signal.
+ */
+ uint32_t timeout = (uint32_t)pending_args;
+ bool timer_no_irq = (pending_args >> 32) == 0 ? false : true;
+ uint32_t cur_timeout;
+ if (step->cur == INVALID_STEP_SIZE)
+ init_timeout_step(timeout, step);
+ if (!current->mm)
+ kernel_call = true;
+resleep:
+ cur_timeout = jiffies_to_msecs(step->steps[step->cur]);
+ tee_trace_add_event(SMC_SLEEP, 0);
+ if (wait_event_internal(pe, step) == 0) {
+ if (step->cur < (step->size - 1)) {
+ step->cur++;
+			/*
+			 * As there may be no timer irq in the TEE, give it a
+			 * chance to run its timer irq handler via SMC_OPS_SCHEDTO.
+			 */
+ if (timer_no_irq) {
+ *ops = SMC_OPS_SCHEDTO;
+ return PD_TIMEOUT;
+ } else {
+ goto resleep;
+ }
+ }
+ if (is_ca_killed(cmd_index)) {
+ *ops = (u64)process_abort_cmd(cmd_index, pe);
+ return PD_WAKEUP;
+ }
+ } else {
+ woke_up = true;
+ tlogd("%s woke up\n", __func__);
+ }
+ atomic_set(&pe->run, 0);
+ if (!is_cmd_working_done(cmd_index)) {
+ *ops = SMC_OPS_SCHEDTO;
+ return PD_WAKEUP;
+ } else if (!kernel_call && !woke_up) {
+ tloge("cmd done, may miss a spi!\n");
+ show_cmd_bitmap();
+ }
+ tlogd("cmd is done\n");
+ return PD_DONE;
+}
+
+static void set_timeout_step(struct timeout_step_t *timeout_step)
+{
+ if (!timeout_step->timeout_reset)
+ return;
+
+ timeout_step->cur = INVALID_STEP_SIZE;
+ timeout_step->timeout_reset = false;
+}
+
+static enum smc_status_t proc_normal_exit(struct pending_entry *pe, u64 *ops,
+ struct timeout_step_t *timeout_step, struct smc_cmd_ret *cmd_ret,
+ int cmd_index)
+{
+ enum pending_t pd_ret;
+
+ /* notify and set affinity came first, goto retry directly */
+ if (match_ta_affinity(pe)) {
+ *ops = SMC_OPS_SCHEDTO;
+ return ST_RETRY;
+ }
+
+ pd_ret = proc_ta_pending(pe, timeout_step,
+ cmd_ret->ta, (uint32_t)cmd_index, ops);
+ if (pd_ret == PD_DONE)
+ return ST_DONE;
+
+ if (pd_ret == PD_WAKEUP)
+ timeout_step->timeout_reset = true;
+ return ST_RETRY;
+}
+
+static enum smc_status_t handle_cmd_working_done(
+ struct tc_ns_smc_cmd *cmd, u64 *ops, struct tc_ns_smc_cmd *in,
+ struct cmd_reuse_info *info)
+{
+ if (copy_smc_out_entry((uint32_t)info->cmd_index, cmd, &info->cmd_usage) != 0) {
+ cmd->ret_val = TEEC_ERROR_GENERIC;
+ return ST_DONE;
+ }
+
+ if (smp_smc_send_cmd_done(info->cmd_index, cmd, in) != 0) {
+ *ops = SMC_OPS_NORMAL; /* cmd will be reused */
+ return ST_RETRY;
+ }
+
+ return ST_DONE;
+}
+
+static int smp_smc_send_func(struct tc_ns_smc_cmd *in, bool reuse)
+{
+ struct cmd_reuse_info info = { 0, 0, CLEAR };
+ struct smc_cmd_ret cmd_ret = {0};
+ struct tc_ns_smc_cmd cmd = { {0}, 0 };
+ struct pending_entry *pe = NULL;
+ u64 ops;
+ struct timeout_step_t timeout_step =
+ {{0, 0, 0, 0}, TO_STEP_SIZE, -1, false};
+
+ if (init_for_smc_send(in, &pe, &cmd, reuse) != 0)
+ return TEEC_ERROR_GENERIC;
+
+ if (reuse) {
+ info.saved_index = (int)in->event_nr;
+ info.cmd_index = (int)in->event_nr;
+ info.cmd_usage = RESEND;
+ }
+ ops = SMC_OPS_NORMAL;
+
+#ifdef CONFIG_SCHED_SMT_EXPELLING
+ force_smt_expeller_prepare();
+#endif
+
+retry:
+#ifdef CONFIG_TEE_REBOOT
+ if (is_tee_rebooting() && in->cmd_id == GLOBAL_CMD_ID_SET_SERVE_CMD) {
+ return TEE_ERROR_IS_DEAD;
+ }
+#endif
+
+ set_timeout_step(&timeout_step);
+
+ if (smc_ops_normal(&info, &cmd, ops) != 0) {
+ release_pending_entry(pe);
+ return TEEC_ERROR_GENERIC;
+ }
+
+ if (smp_smc_send_process(&cmd, ops, &cmd_ret, info.cmd_index) == -1)
+ goto clean;
+
+ if (!is_cmd_working_done((uint32_t)info.cmd_index)) {
+ if (cmd_ret.exit == SMC_EXIT_NORMAL) {
+ if (proc_normal_exit(pe, &ops, &timeout_step, &cmd_ret,
+ info.cmd_index) == ST_RETRY)
+ goto retry;
+ } else if (cmd_ret.exit == SMC_EXIT_ABORT) {
+ ops = (u64)process_abort_cmd(info.cmd_index, pe);
+ goto retry;
+ } else {
+ tloge("invalid cmd work state\n");
+ cmd.ret_val = TEEC_ERROR_GENERIC;
+ goto clean;
+ }
+ }
+
+ if (handle_cmd_working_done(&cmd, &ops, in, &info) == ST_RETRY)
+ goto retry;
+clean:
+ clean_smc_resrc(info, &cmd, pe);
+ return cmd.ret_val;
+}
+
+static int smc_svc_thread_fn(void *arg)
+{
+ (void)arg;
+ set_freezable();
+ while (!kthread_should_stop()) {
+ struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };
+ int ret;
+
+ smc_cmd.cmd_type = CMD_TYPE_GLOBAL;
+ smc_cmd.cmd_id = GLOBAL_CMD_ID_SET_SERVE_CMD;
+ ret = smp_smc_send_func(&smc_cmd, false);
+ tlogd("smc svc return 0x%x\n", ret);
+ }
+ tloge("smc svc thread stop\n");
+ return 0;
+}
+
+void wakeup_tc_siq(uint32_t siq_mode)
+{
+ uint32_t i;
+
+ if (siq_mode == 0)
+ return;
+
+ mutex_lock(&g_siq_lock);
+ i = get_free_siq_index();
+ if (i >= MAX_SIQ_NUM) {
+ tloge("dump is too frequent\n");
+ mutex_unlock(&g_siq_lock);
+ return;
+ }
+ g_siq_queue[i] = siq_mode;
+ atomic_set(&g_siq_th_run, RUN_SIQ_THREAD);
+ mutex_unlock(&g_siq_lock);
+ wake_up_interruptible(&siq_th_wait);
+}
+
+/*
+ * This function powers on the crypto cell, sends the smc cmd to trustedcore,
+ * and powers the crypto cell off when finished.
+ */
+static int proc_tc_ns_smc(struct tc_ns_smc_cmd *cmd, bool reuse)
+{
+ int ret;
+ struct cmd_monitor *item = NULL;
+
+ if (g_sys_crash) {
+ tloge("ERROR: sys crash happened!!!\n");
+ return TEE_ERROR_IS_DEAD;
+ }
+
+ if (!cmd) {
+ tloge("invalid cmd\n");
+ return TEEC_ERROR_GENERIC;
+ }
+ tlogd(KERN_INFO "***smc call start on cpu %d ***\n",
+ raw_smp_processor_id());
+
+ item = cmd_monitor_log(cmd);
+ ret = smp_smc_send_func(cmd, reuse);
+ cmd_monitor_logend(item);
+
+ return ret;
+}
+
+int tc_ns_smc(struct tc_ns_smc_cmd *cmd)
+{
+ return proc_tc_ns_smc(cmd, false);
+}
+
+int tc_ns_smc_with_no_nr(struct tc_ns_smc_cmd *cmd)
+{
+ return proc_tc_ns_smc(cmd, true);
+}
+
+static void smc_work_no_wait(uint32_t type)
+{
+ (void) raw_smc_send(TSP_REQUEST, g_cmd_phys, type, true);
+}
+
+void send_smc_reset_cmd_buffer(void)
+{
+ send_smc_cmd_rebooting(TSP_REQUEST, g_cmd_phys, TC_NS_CMD_TYPE_SECURE_CONFIG, NULL);
+}
+
+static void smc_work_set_cmd_buffer(struct work_struct *work)
+{
+ (void)work;
+ smc_work_no_wait(TC_NS_CMD_TYPE_SECURE_CONFIG);
+}
+
+void smc_set_cmd_buffer(void)
+{
+ struct work_struct work;
+	/*
+	 * If the TEE supports independent reset, a "TEE reset" clears the
+	 * cmd_buffer information in gtask. The tzdriver therefore has to
+	 * re-register the cmd_buffer, even if it was already registered in
+	 * the UEFI phase.
+	 */
+#ifndef CONFIG_TEE_RESET
+ if (g_reserved_cmd_buffer)
+ return;
+#endif
+
+ INIT_WORK_ONSTACK(&work, smc_work_set_cmd_buffer);
+ /* Run work on CPU 0 */
+ schedule_work_on(0, &work);
+ flush_work(&work);
+ tlogd("smc set cmd buffer done\n");
+}
+
+static int alloc_cmd_buffer(void)
+{
+ if (g_reserved_cmd_buffer) {
+ tlogi("use reserved cmd buffer");
+ g_cmd_data = (struct tc_ns_smc_queue *)get_reserved_cmd_vaddr_of(g_cmd_phys, (uint64_t)g_cmd_size);
+ if (!g_cmd_data)
+ return -ENOMEM;
+
+ return 0;
+ }
+ g_cmd_data = (struct tc_ns_smc_queue *)(uintptr_t)get_cmd_mem_vaddr();
+ if (!g_cmd_data)
+ return -ENOMEM;
+
+ g_cmd_phys = get_cmd_mem_paddr((uint64_t)(uintptr_t)g_cmd_data);
+ return 0;
+}
+
+static int init_smc_related_rsrc(const struct device *class_dev)
+{
+ struct cpumask new_mask;
+ int ret;
+
+	/*
+	 * A TEE dump disables IRQ/FIQ for about 500 ms, so it is not
+	 * a good choice to ask CPU0/CPU1 to do the dump.
+	 * Bind this kernel thread to other CPUs instead.
+	 */
+ cpumask_setall(&new_mask);
+ cpumask_clear_cpu(CPU_ZERO, &new_mask);
+ cpumask_clear_cpu(CPU_ONE, &new_mask);
+ koadpt_kthread_bind_mask(g_siq_thread, &new_mask);
+	/* some products specify the cpus that the kthread needs to bind to */
+ tz_kthread_bind_mask(g_siq_thread);
+ g_ipi_helper_thread = kthread_create(kthread_worker_fn,
+ &g_ipi_helper_worker, "ipihelper");
+ if (IS_ERR_OR_NULL(g_ipi_helper_thread)) {
+ dev_err(class_dev, "couldn't create ipi helper threads %ld\n",
+ PTR_ERR(g_ipi_helper_thread));
+ ret = (int)PTR_ERR(g_ipi_helper_thread);
+ return ret;
+ }
+
+ tz_kthread_bind_mask(g_ipi_helper_thread);
+ wake_up_process(g_ipi_helper_thread);
+ wake_up_process(g_siq_thread);
+ init_cmd_monitor();
+ INIT_LIST_HEAD(&g_pending_head);
+ spin_lock_init(&g_pend_lock);
+
+ return 0;
+}
+
+static int parse_params_from_tee(void)
+{
+ int ret;
+ void *buffer = NULL;
+
+	/* with UEFI and a reserved buffer enabled, skip the teeos compat level check */
+ if (g_reserved_cmd_buffer) {
+ tlogw("uefi mode, not check teeos compat level\n");
+ return 0;
+ }
+
+ buffer = (void *)(g_cmd_data->in);
+ ret = check_teeos_compat_level((uint32_t *)buffer,
+ COMPAT_LEVEL_BUF_LEN);
+ if (ret != 0) {
+ tloge("check teeos compatibility failed\n");
+ return ret;
+ }
+ if (memset_s(buffer, sizeof(g_cmd_data->in),
+ 0, sizeof(g_cmd_data->in)) != EOK) {
+ tloge("Clean the command buffer failed\n");
+ ret = -EFAULT;
+ return ret;
+ }
+ return 0;
+}
+
+int smc_context_init(const struct device *class_dev)
+{
+ int ret;
+
+ if (!class_dev || IS_ERR_OR_NULL(class_dev))
+ return -ENOMEM;
+
+ ret = alloc_cmd_buffer();
+ if (ret != 0)
+ return ret;
+
+ /* Send the allocated buffer to TrustedCore for init */
+ smc_set_cmd_buffer();
+
+ ret = parse_params_from_tee();
+ if (ret != 0) {
+ tloge("parse params from tee failed\n");
+ goto free_mem;
+ }
+
+ g_siq_thread = kthread_create(siq_thread_fn, NULL, "siqthread/%d", 0);
+ if (unlikely(IS_ERR_OR_NULL(g_siq_thread))) {
+ dev_err(class_dev, "couldn't create siqthread %ld\n",
+ PTR_ERR(g_siq_thread));
+ ret = (int)PTR_ERR(g_siq_thread);
+ goto free_mem;
+ }
+
+ ret = init_smc_related_rsrc(class_dev);
+ if (ret != 0)
+ goto free_siq_worker;
+
+ return 0;
+
+free_siq_worker:
+ kthread_stop(g_siq_thread);
+ g_siq_thread = NULL;
+free_mem:
+ free_cmd_mem((uint64_t)(uintptr_t)g_cmd_data);
+ g_cmd_data = NULL;
+ return ret;
+}
+
+int init_smc_svc_thread(void)
+{
+ g_smc_svc_thread = kthread_create(smc_svc_thread_fn, NULL,
+ "smc_svc_thread");
+ if (unlikely(IS_ERR_OR_NULL(g_smc_svc_thread))) {
+ tloge("couldn't create smc_svc_thread %ld\n",
+ PTR_ERR(g_smc_svc_thread));
+ return (int)PTR_ERR(g_smc_svc_thread);
+ }
+#ifdef CONFIG_SCHED_SMT_EXPELLING
+ set_task_expeller(g_smc_svc_thread, SMT_EXPELLER_FORCE_LONG);
+#endif
+ tz_kthread_bind_mask(g_smc_svc_thread);
+ wake_up_process(g_smc_svc_thread);
+ return 0;
+}
+
+int teeos_log_exception_archive(unsigned int eventid,
+ const char *exceptioninfo)
+{
+#ifdef CONFIG_TEE_LOG_EXCEPTION
+ int ret;
+ struct imonitor_eventobj *teeos_obj = NULL;
+
+ teeos_obj = imonitor_create_eventobj(eventid);
+ if (exceptioninfo) {
+ tlogi("upload exception info: [%s]\n", exceptioninfo);
+ ret = imonitor_set_param(teeos_obj, 0, (long)(uintptr_t)exceptioninfo);
+ } else {
+ ret = imonitor_set_param(teeos_obj, 0, (long)(uintptr_t)"teeos something crash");
+ }
+ if (ret) {
+ tloge("imonitor_set_param failed\n");
+ imonitor_destroy_eventobj(teeos_obj);
+ return ret;
+ }
+ ret = imonitor_add_dynamic_path(teeos_obj, "/data/vendor/log/hisi_logs/tee");
+ if (ret) {
+ tloge("add path failed\n");
+ imonitor_destroy_eventobj(teeos_obj);
+ return ret;
+ }
+ ret = imonitor_add_dynamic_path(teeos_obj, "/data/log/tee");
+ if (ret) {
+ tloge("add path failed\n");
+ imonitor_destroy_eventobj(teeos_obj);
+ return ret;
+ }
+ ret = imonitor_send_event(teeos_obj);
+ imonitor_destroy_eventobj(teeos_obj);
+ return ret;
+#else
+ (void)eventid;
+ (void)exceptioninfo;
+ return 0;
+#endif
+}
+
+void svc_thread_release(void)
+{
+ if (!IS_ERR_OR_NULL(g_smc_svc_thread)) {
+ kthread_stop(g_smc_svc_thread);
+ g_smc_svc_thread = NULL;
+ }
+}
+
+void free_smc_data(void)
+{
+ struct pending_entry *pe = NULL, *temp = NULL;
+ if (g_reserved_cmd_buffer)
+ iounmap((void __iomem *)g_cmd_data);
+ else
+ free_cmd_mem((uint64_t)(uintptr_t)g_cmd_data);
+ smc_wakeup_broadcast();
+ svc_thread_release();
+ if (!IS_ERR_OR_NULL(g_siq_thread)) {
+ atomic_set(&g_siq_th_run, STOP_SIQ_THREAD);
+ wake_up_interruptible(&siq_th_wait);
+ kthread_stop(g_siq_thread);
+ g_siq_thread = NULL;
+ }
+
+#if (KERNEL_VERSION(4, 9, 0) > LINUX_VERSION_CODE)
+ flush_kthread_worker(&g_ipi_helper_worker);
+#else
+ kthread_flush_worker(&g_ipi_helper_worker);
+#endif
+ if (!IS_ERR_OR_NULL(g_ipi_helper_thread)) {
+ kthread_stop(g_ipi_helper_thread);
+ g_ipi_helper_thread = NULL;
+ }
+ free_cmd_monitor();
+
+ spin_lock(&g_pend_lock);
+ list_for_each_entry_safe(pe, temp, &g_pending_head, list) {
+ list_del(&pe->list);
+ put_task_struct(pe->task);
+ kfree(pe);
+ }
+ spin_unlock(&g_pend_lock);
+}
diff --git a/tzdriver/core/smc_smp.h b/tzdriver/core/smc_smp.h
new file mode 100644
index 0000000000000000000000000000000000000000..c4e24790fd35fd6952caf0c7a1f35aaef06a7b2f
--- /dev/null
+++ b/tzdriver/core/smc_smp.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: function declarations for sending smc cmd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef SMC_SMP_H
+#define SMC_SMP_H
+
+#include <linux/version.h>
+#include "teek_client_constants.h"
+#include "teek_ns_client.h"
+
+#if (KERNEL_VERSION(5, 4, 0) <= LINUX_VERSION_CODE)
+#define CURRENT_CPUS_ALLOWED (&current->cpus_mask)
+#else
+#define CURRENT_CPUS_ALLOWED (&current->cpus_allowed)
+#endif
+
+enum tc_ns_cmd_type {
+ TC_NS_CMD_TYPE_INVALID = 0,
+ TC_NS_CMD_TYPE_NS_TO_SECURE,
+ TC_NS_CMD_TYPE_SECURE_TO_NS,
+ TC_NS_CMD_TYPE_SECURE_TO_SECURE,
+ TC_NS_CMD_TYPE_SECURE_CONFIG = 0xf,
+ TC_NS_CMD_TYPE_MAX
+};
+
+struct pending_entry {
+ atomic_t users;
+ struct task_struct *task;
+#ifdef CONFIG_TA_AFFINITY
+ struct cpumask ca_mask;
+ struct cpumask ta_mask;
+#endif
+ pid_t pid;
+ wait_queue_head_t wq;
+ atomic_t run;
+ struct list_head list;
+};
+
+#ifdef CONFIG_BIG_SESSION
+#define MAX_SMC_CMD CONFIG_BIG_SESSION
+#else
+#define MAX_SMC_CMD 18
+#endif
+
+#ifdef DIV_ROUND_UP
+#undef DIV_ROUND_UP
+#endif
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
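+/*
+ * Worked example: DIV_ROUND_UP(18, 64) == (18 + 63) / 64 == 1, where plain
+ * integer division would truncate 18 / 64 to 0.
+ */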
+
+#define BITS_PER_BYTE 8
+
+#ifdef BITS_TO_LONGS
+#undef BITS_TO_LONGS
+#endif
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(uint64_t))
+
+#ifdef BIT_MASK
+#undef BIT_MASK
+#endif
+#define BIT_MASK(nr) (1UL << (((uint64_t)(nr)) % sizeof(uint64_t)))
+
+#ifdef BIT_WORD
+#undef BIT_WORD
+#endif
+#define BIT_WORD(nr) ((nr) / sizeof(uint64_t))
+
+#ifdef DECLARE_BITMAP
+#undef DECLARE_BITMAP
+#endif
+#define DECLARE_BITMAP(name, bits) uint64_t name[BITS_TO_LONGS(bits)]
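+/*
+ * With the defaults above, DECLARE_BITMAP(in_bitmap, MAX_SMC_CMD) expands to
+ * uint64_t in_bitmap[BITS_TO_LONGS(18)], i.e. uint64_t in_bitmap[1]: a single
+ * 64-bit word carrying one flag bit per command slot.
+ */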
+
+#define SIQ_DUMP_TIMEOUT 1U
+#define SIQ_DUMP_SHELL 2U
+
+typedef uint32_t smc_buf_lock_t;
+
+struct tc_ns_smc_queue {
+ /* set when CA send cmd_in, clear after cmd_out return */
+ DECLARE_BITMAP(in_bitmap, MAX_SMC_CMD);
+ /* set when gtask get cmd_in, clear after cmd_out return */
+ DECLARE_BITMAP(doing_bitmap, MAX_SMC_CMD);
+ /* set when gtask get cmd_out, clear after cmd_out return */
+ DECLARE_BITMAP(out_bitmap, MAX_SMC_CMD);
+ smc_buf_lock_t smc_lock;
+ volatile uint32_t last_in;
+ struct tc_ns_smc_cmd in[MAX_SMC_CMD];
+ volatile uint32_t last_out;
+ struct tc_ns_smc_cmd out[MAX_SMC_CMD];
+};
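+
+/*
+ * Minimal sketch (an assumption, not the driver's actual implementation) of
+ * claiming a free "in" slot while smc_lock is held; it assumes
+ * MAX_SMC_CMD <= 64 so that word 0 of in_bitmap covers every slot:
+ *
+ *	uint32_t i;
+ *	for (i = 0; i < MAX_SMC_CMD; i++) {
+ *		if (((q->in_bitmap[0] >> i) & 1ULL) == 0) {
+ *			q->in_bitmap[0] |= 1ULL << i;
+ *			return (int)i;
+ *		}
+ *	}
+ *	return -1;
+ */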
+
+#define SYM_NAME_LEN_MAX 16
+#define SYM_NAME_LEN_1 7
+#define SYM_NAME_LEN_2 4
+#define CRASH_REG_NUM 3
+#define LOW_FOUR_BITE 4
+
+union crash_inf {
+ uint64_t crash_reg[CRASH_REG_NUM];
+ struct {
+ uint8_t halt_reason : LOW_FOUR_BITE;
+ uint8_t app : LOW_FOUR_BITE;
+ char sym_name[SYM_NAME_LEN_1];
+ uint16_t off;
+ uint16_t size;
+ uint32_t far;
+ uint32_t fault;
+ union {
+ char sym_name_append[SYM_NAME_LEN_2];
+ uint32_t elr;
+ };
+ } crash_msg;
+};
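+
+/*
+ * Layout note: with natural alignment crash_msg packs into exactly
+ * CRASH_REG_NUM (3) 64-bit registers:
+ *	crash_reg[0]: halt_reason/app (1 byte) + sym_name (7 bytes)
+ *	crash_reg[1]: off (2) + size (2) + far (4)
+ *	crash_reg[2]: fault (4) + sym_name_append or elr (4)
+ */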
+
+#define RESLEEP_TIMEOUT 15
+
+bool sigkill_pending(struct task_struct *tsk);
+int smc_context_init(const struct device *class_dev);
+void free_smc_data(void);
+int tc_ns_smc(struct tc_ns_smc_cmd *cmd);
+int tc_ns_smc_with_no_nr(struct tc_ns_smc_cmd *cmd);
+int teeos_log_exception_archive(unsigned int eventid, const char *exceptioninfo);
+void set_cmd_send_state(void);
+int init_smc_svc_thread(void);
+int smc_wakeup_ca(pid_t ca);
+int smc_wakeup_broadcast(void);
+int smc_shadow_exit(pid_t ca);
+int smc_queue_shadow_worker(uint64_t target);
+void fiq_shadow_work_func(uint64_t target);
+struct pending_entry *find_pending_entry(pid_t pid);
+void foreach_pending_entry(void (*func)(struct pending_entry *));
+void put_pending_entry(struct pending_entry *pe);
+void show_cmd_bitmap(void);
+void wakeup_tc_siq(uint32_t siq_mode);
+void smc_set_cmd_buffer(void);
+unsigned long raw_smc_send(uint32_t cmd, phys_addr_t cmd_addr, uint32_t cmd_type, uint8_t wait);
+void occupy_clean_cmd_buf(void);
+void clr_system_crash_flag(void);
+void svc_thread_release(void);
+int send_smc_cmd_rebooting(uint32_t cmd_id, phys_addr_t cmd_addr, uint32_t cmd_type,
+ const struct tc_ns_smc_cmd *in_cmd);
+void send_smc_reset_cmd_buffer(void);
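+
+/*
+ * Typical client-side flow (an illustrative sketch; tc_ns_smc_cmd fields are
+ * declared in teek_ns_client.h and omitted here):
+ *
+ *	struct tc_ns_smc_cmd cmd = { 0 };
+ *	... fill cmd with session and operation parameters ...
+ *	ret = tc_ns_smc(&cmd);
+ */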
+
+#endif
diff --git a/tzdriver/core/tc_client_driver.c b/tzdriver/core/tc_client_driver.c
new file mode 100644
index 0000000000000000000000000000000000000000..5d3c2b851ccdd7eef836d2d5eda2b727cc3bbeed
--- /dev/null
+++ b/tzdriver/core/tc_client_driver.c
@@ -0,0 +1,1500 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: functions for proc open, close session and invoke.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "tc_client_driver.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include