diff --git a/LICENSE b/LICENSE index c1d45370dd1c1665b7665b06168af41e716854e3..5be3b9026a3251184059781c680a1cef38449984 100644 --- a/LICENSE +++ b/LICENSE @@ -1,5 +1,6 @@ (1) The directories below are licensed under GPL-2.0-or-later. ./newip/ + ./tzdriver/ ./xpm/ ./qos_auth/ ./ucollection/ diff --git a/OAT.xml b/OAT.xml index c024cf3707521158847114f2773a341c988a4214..ed4c568bab84406b687f1644d68cef71d871f5d6 100644 --- a/OAT.xml +++ b/OAT.xml @@ -64,6 +64,7 @@ Note:If the text contains special characters, please escape them according to th + @@ -73,6 +74,8 @@ Note:If the text contains special characters, please escape them according to th + + @@ -93,6 +96,7 @@ Note:If the text contains special characters, please escape them according to th + diff --git a/tzdriver/CMakeLists.txt b/tzdriver/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..8f454c86ae63b4a3ce5fc524d2ab329cfed29ad1 --- /dev/null +++ b/tzdriver/CMakeLists.txt @@ -0,0 +1,2 @@ +add_device_ko(LOCAL_MODULE tzdriver + KO_SRC_FOLDER ${CMAKE_CURRENT_SOURCE_DIR}) \ No newline at end of file diff --git a/tzdriver/Kconfig b/tzdriver/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..3e1d3356398559be87f79333f2be00e279765121 --- /dev/null +++ b/tzdriver/Kconfig @@ -0,0 +1,37 @@ +menu "TEE OS" + +config TZDRIVER + tristate "Secure Execution Communicator driver" + default n + help + Provides a communication interface between userspace and + TrustZone Operating Environment. 
+ +config SECBOOT_IMG + bool "tzdriver split secboot img into modem and ap" + default n + depends on KERNEL_CLIENT + help + Macro defined for splitting modem and ap img + +config SECBOOT_IMG_V2 + bool "tzdriver split modem and ap for v2" + default n + depends on KERNEL_CLIENT + help + Macro defined for splitting modem and ap img v2 + +config ASAN_DEBUG + bool "ASAN debug version" + default n + help + Macro defined for ASAN debug version + +source "drivers/tzdriver/auth/Kconfig" +source "drivers/tzdriver/core/Kconfig" +source "drivers/tzdriver/tlogger/Kconfig" +source "drivers/tzdriver/agent_rpmb/Kconfig" +source "drivers/tzdriver/ion/Kconfig" +source "drivers/tzdriver/tui/Kconfig" +endmenu + diff --git a/tzdriver/Makefile b/tzdriver/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..3f9ef62cdb7a14394dc9b427e38928c9e4e38d00 --- /dev/null +++ b/tzdriver/Makefile @@ -0,0 +1,14 @@ +ifeq ($(CONFIG_TZDRIVER),y) +KERNEL_DIR := $(srctree) + +EXTRA_CFLAGS += -I$(KERNEL_DIR)/../../../../third_party/bounds_checking_function/include/ + +obj-$(CONFIG_TZDRIVER) += agent_rpmb/ +obj-$(CONFIG_TZDRIVER) += auth/ +obj-$(CONFIG_TZDRIVER) += core/ +obj-$(CONFIG_TZDRIVER) += tlogger/ +obj-$(CONFIG_TZDRIVER) += ion/ +obj-$(CONFIG_TZDRIVER) += tui/ +obj-$(CONFIG_TZDRIVER) += whitelist/ + +endif diff --git a/tzdriver/README.md b/tzdriver/README.md new file mode 100644 index 0000000000000000000000000000000000000000..116db16c31b24881dd6e7724cdab65eda84499ba --- /dev/null +++ b/tzdriver/README.md @@ -0,0 +1,78 @@ +# Tzdriver driver + +## Introduction + +Tzdriver is a kernel driver deployed on the REE side, supporting communication between REE and TEE. Tzdriver processes commands from Tee Client and sends instructions to switch from REE to TEE. Tzdriver supports data sharing between REE and TEE by managing shared memory. + +Tzdriver includes the following main modules: + +smc: Send smc instructions to switch the CPU from the REE side to the TEE side for operation. 
+ +session_manager: Manage communication sessions between REE and TEE. + +mailbox:Data is shared between REE and TEE through the mailbox. + +cmd_monitor:Monitor the operation of SMC instructions and provides a timeout detection mechanism. + +tzdebug:Create debugfs debugging nodes to facilitate developers in debugging TEE functionality + +tlogger:TEE log driver module, supporting TEE log recording and printing. + +Figure 1: Tzdriver architecture diagram + +![](figures/tzdriver.drawio_en.png) + +## Directory + +``` +/kernel/linux/common_modules/tzdriver +├── core +│ ├── cmdmonitor.c # SMC instruction execution monitoring + ├── gp_ops.c # GP TEE specification processing logic + ├── mailbox_mempool.c # REE and TEE shared memory management + ├── session_manager.c # Session management for CA access to TA + ├── smc_smp.c # Send SMC command to switch to TEE + ├── tzdebug.c # Debugging module +├── tlogger # TEE log driver +``` + +## Configuration Option + +If you want to enable the Tzdriver driver, you need to modify the defconfig file of the device in the Linux kernel code repository and add configuration options for Tzdriver: + +``` +# +# TEEOS +# +CONFIG_TZDRIVER=y +CONFIG_CPU_AFF_NR=1 +CONFIG_KERNEL_CLIENT=y +CONFIG_TEELOG=y +CONFIG_PAGES_MEM=y +CONFIG_THIRDPARTY_COMPATIBLE=y +``` + +The meanings of each option are shown in the table below: + +**Table 1** Configuration Options Description + +| Parameters | Description | +| ---------------------------- | ------------------------------------------------------------ | +| CONFIG_TZDRIVER | Tzdriver module switch. | +| CONFIG_CPU_AFF_NR | CA binding core function, non-zero values represent restrictions on CPUID less than CONFIG_ CPU_ AFF_ NR's CPU can enter TEE, where 0 represents unlimited. Currently, Tzdriver only supports running on 0 cores, so the value is 1. | +| CONFIG_KERNEL_CLIENT | Support the kernel CA option. | +| CONFIG_TEELOG | TEE log switch, it is recommended to enable. 
| +| CONFIG_PAGES_MEM | TEE log memory management, it is recommended to enable. | +| CONFIG_THIRDPARTY_COMPATIBLE | Used for compatibility with third-party optee, such as the RK3568 chip, which requires this option to be enabled. | + +## Compile Command + +Tzdriver is compiled together with the kernel. Taking the rk3568 chip as an example, the "boot_linux.img" can be compiled separately. The compilation command is as follows + +``` +./build.sh --product-name rk3568 --ccache --build-target kernel --gn-args linux_kernel_version=\"linux-5.10\" +``` + +## Related code repository + +[tee_client](https://gitee.com/openharmony/tee_tee_client) diff --git a/tzdriver/README_zh.md b/tzdriver/README_zh.md new file mode 100644 index 0000000000000000000000000000000000000000..957bd2a18c4b2d56835ee25c213bae344309c641 --- /dev/null +++ b/tzdriver/README_zh.md @@ -0,0 +1,78 @@ +# Tzdriver驱动 + +## 简介 + +Tzdriver是部署在REE侧的内核驱动,支持REE和TEE之间通信。Tzdriver处理来自于Tee Client的命令,发送指令从REE切换到TEE。Tzdriver通过管理共享内存,支持REE和TEE之间共享数据。 + +Tzdriver驱动包含如下主要模块: + +smc:发送smc指令,将CPU从REE侧切换到TEE侧运行。 + +session_manager:管理REE与TEE之间的通信会话。 + +mailbox:REE和TEE之间通过mailbox共享数据。 + +cmd_monitor:监控smc指令的运行,提供超时检测机制。 + +tzdebug:创建debugfs调试节点,方便开发人员调试TEE功能。 + +tlogger:TEE日志驱动模块,支持TEE日志记录和打印。 + +图1 Tzdriver驱动架构图 + +![](figures/tzdriver.drawio.png) + +## 目录 + +``` +/kernel/linux/common_modules/tzdriver +├── core +│ ├── cmdmonitor.c # smc指令执行监控 + ├── gp_ops.c # GP TEE规范处理逻辑 + ├── mailbox_mempool.c # REE和TEE共享内存管理 + ├── session_manager.c # CA访问TA的session管理 + ├── smc_smp.c # 发送smc指令切换到TEE + ├── tzdebug.c # 调试模块 +├── tlogger # TEE日志驱动 +``` + +## 配置选项 + +如果要使能Tzdriver驱动,需要修改linux内核代码仓中设备的defconfig文件,增加Tzdriver的配置选项: + +``` +# +# TEEOS +# +CONFIG_TZDRIVER=y +CONFIG_CPU_AFF_NR=1 +CONFIG_KERNEL_CLIENT=y +CONFIG_TEELOG=y +CONFIG_PAGES_MEM=y +CONFIG_THIRDPARTY_COMPATIBLE=y +``` + +各选项其含义如下表所示: + +**表 1** 配置选项说明 + +| 参数 | 说明 | +| ---------------------------- | ------------------------------------------------------------ | +| 
CONFIG_TZDRIVER | Tzdriver模块开关。 | +| CONFIG_CPU_AFF_NR | CA绑核功能,非零值代表限制仅cpuid小于CONFIG_CPU_AFF_NR的CPU可以进入TEE,0代表无限制,当前只支持在0核运行,所以值为1。 | +| CONFIG_KERNEL_CLIENT | 支持内核CA选项。 | +| CONFIG_TEELOG | TEE日志开关,建议开启。 | +| CONFIG_PAGES_MEM | TEE日志内存管理,建议开启。 | +| CONFIG_THIRDPARTY_COMPATIBLE | 兼容第三方opteed的适配,例如适配RK3568芯片需要开启此选项。 | + +## 编译命令 + +Tzdriver驱动跟随kernel一起编译,以rk3568为例,可以单独编译boot_linux.img,编译命令如下 + +``` +./build.sh --product-name rk3568 --ccache --build-target kernel --gn-args linux_kernel_version=\"linux-5.10\" +``` + +## 相关仓 + +[tee_client](https://gitee.com/openharmony/tee_tee_client) diff --git a/tzdriver/agent.h b/tzdriver/agent.h new file mode 120000 index 0000000000000000000000000000000000000000..295b146dea8443325a41bc30398c33cfba293d59 --- /dev/null +++ b/tzdriver/agent.h @@ -0,0 +1 @@ +core/agent.h \ No newline at end of file diff --git a/tzdriver/agent_rpmb/Kconfig b/tzdriver/agent_rpmb/Kconfig new file mode 100755 index 0000000000000000000000000000000000000000..710dd61325b02752cd73e78e1628993856380a7a --- /dev/null +++ b/tzdriver/agent_rpmb/Kconfig @@ -0,0 +1,6 @@ +config RPMB_AGENT + bool "Tzdriver Rpmb Agent" + default n + depends on TZDRIVER + help + Tzdriver Rpmb Agent \ No newline at end of file diff --git a/tzdriver/agent_rpmb/Makefile b/tzdriver/agent_rpmb/Makefile new file mode 100755 index 0000000000000000000000000000000000000000..3cbd7b78e61efec18414f28d7c0b998aaccb48f0 --- /dev/null +++ b/tzdriver/agent_rpmb/Makefile @@ -0,0 +1,35 @@ +KERNEL_DIR := $(srctree) + +ifneq ($(TARGET_BUILD_VARIANT),user) + ccflags-y += -DDEF_ENG +endif + +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/platform_drivers/tzdriver +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/platform_drivers/tzdriver/core +EXTRA_CFLAGS += -I$(KERNEL_DIR)/../../../../third_party/bounds_checking_function/include + +ifeq ($(CONFIG_MEDIATEK_SOLUTION),y) + MTK_PLATFORM := $(subst ",,$(CONFIG_MTK_PLATFORM)) + EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/mmc/core + EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/mmc/card 
+ EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/misc/mediatek/include + EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/misc/mediatek/include/mt-plat/$(MTK_PLATFORM)/include + EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/misc/mediatek/include/mt-plat + EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/misc/mediatek/base/power/$(MTK_PLATFORM) + EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/misc/mediatek/base/power/include + EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/devfreq + EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/scsi/ufs + EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/mmc/host/mediatek/ComboA + ifeq ($(CONFIG_MTK_PLATFORM), "mt6761") + EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/mmc/host/mediatek/ComboA/mt6765 + else + EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/mmc/host/mediatek/ComboA/$(MTK_PLATFORM) + endif + EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/mmc/host/mediatek/$(MTK_PLATFORM) + EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/mmc/host/mediatek/$(MTK_PLATFORM)/$(MTK_PLATFORM) + EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/platform_drivers/tzdriver/agent_rpmb/mplat + obj-$(CONFIG_RPMB_AGENT) += core/agent_rpmb.o mplat/rpmb_driver.o +else + EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/platform_drivers/tzdriver/agent_rpmb/generic + obj-$(CONFIG_RPMB_AGENT) += core/agent_rpmb.o generic/rpmb_driver.o +endif \ No newline at end of file diff --git a/tzdriver/agent_rpmb/core/agent_rpmb.c b/tzdriver/agent_rpmb/core/agent_rpmb.c new file mode 100755 index 0000000000000000000000000000000000000000..faba0eca48acb1eb5db6dfd6f4f9ceaed7594882 --- /dev/null +++ b/tzdriver/agent_rpmb/core/agent_rpmb.c @@ -0,0 +1,331 @@ +/* + * agent_rpmb.c + * + * rpmb agent manager function, such as register + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include "agent_rpmb.h" +#include +#include /* for struct mmc_ioc_rpmb */ +#include /* for struct mmc_card */ +#include +#include +#include +#include +#include +#include + +#include "teek_client_constants.h" +#include "teek_ns_client.h" +#include "agent.h" +#include "tc_ns_log.h" +#include "smc_smp.h" +#include "rpmb_driver.h" + +enum rpmb_cmd { + SEC_GET_DEVINFO, + SEC_SEND_IOCMD, + SEC_RPMB_LOCK, + SEC_RPMB_UNLOCK, + SEC_RPMB_ABNORMAL, +}; + +#define RPMB_EMMC_CID_SIZE 32 + +struct rpmb_devinfo { + uint8_t cid[RPMB_EMMC_CID_SIZE]; /* eMMC card ID */ + + uint8_t rpmb_size_mult; /* EXT CSD-slice 168 "RPMB Size" */ + uint8_t rel_wr_sec_cnt; /* EXT CSD-slice 222 "Reliable Write Sector Count" */ + uint8_t tmp[2]; + uint32_t blk_size; /* RPMB blocksize */ + + uint32_t max_blk_idx; /* The highest block index supported by current device */ + uint32_t access_start_blk_idx; /* The start block index SecureOS can access */ + + uint32_t access_total_blk; /* The total blocks SecureOS can access */ + uint32_t tmp2; + + uint32_t mdt; /* 1: EMMC 2: UFS */ + + /* the device's support bit map, for example, if it support 1,2,32, then the value is 0x80000003 */ + uint32_t support_bit_map; + + uint32_t version; + uint32_t tmp3; +}; +struct rpmb_ioc { + struct storage_blk_ioc_rpmb_data ioc_rpmb; /* sizeof() = 72 */ + + uint32_t buf_offset[STORAGE_IOC_MAX_RPMB_CMD]; + uint32_t tmp; +}; + +#define RPMB_CTRL_MAGIC 0x5A5A5A5A +struct rpmb_ctrl_t { + uint32_t magic; + uint32_t cmd_sn; + uint8_t lock_flag; + uint8_t tmp[3]; + enum rpmb_op_type op_type; + union __args { + struct rpmb_devinfo get_devinfo; + struct rpmb_ioc send_ioccmd; + } args; + enum rpmb_cmd cmd; + uint32_t reserved; + uint32_t buf_len; + int32_t ret; + uint32_t 
reserved2; + uint32_t buf_start[0]; +}; /* sizeof() = 8 * 16 = 128 */ + +static struct rpmb_ctrl_t *m_rpmb_ctrl = NULL; +/* + * the data_ptr from SecureOS is physical address, + * so, we MUST update to the virtual address, + * otherwise, segment default + */ +static void update_dataptr(struct rpmb_ctrl_t *trans_ctrl) +{ + uint32_t i; + uint32_t offset = 0; + uint8_t *dst = NULL; + + if (trans_ctrl == NULL) + return; + + for (i = 0; i < STORAGE_IOC_MAX_RPMB_CMD; i++) { + offset = trans_ctrl->args.send_ioccmd.buf_offset[i]; + if (offset > trans_ctrl->buf_len) + continue; + if (trans_ctrl->args.send_ioccmd.ioc_rpmb.data[i].buf != NULL) { + dst = (uint8_t *)trans_ctrl->buf_start + offset; + /* update the data_prt */ + trans_ctrl->args.send_ioccmd.ioc_rpmb.data[i].buf = dst; + } + } +} + +struct rpmb_agent_lock_info { + unsigned int dev_id; + bool lock_need_free; +}; +static struct rpmb_agent_lock_info lock_info = { 0 }; + +static u64 g_tee_rpmb_lock_done = 0; +static u64 g_tee_rpmb_lock_release = 0; +#define RPMB_TIMEOUT_TIME_TEE 800000000 + + +static void process_rpmb_lock(const struct tee_agent_kernel_ops *agent_instance) +{ + struct smc_event_data *event_data = NULL; + + rpmb_driver_counter_lock(); + g_tee_rpmb_lock_done = dfx_getcurtime(); + + tlogd("obtain rpmb device lock\n"); + + event_data = find_event_control(agent_instance->agent_id); + if (event_data != NULL) { + lock_info.dev_id = event_data->cmd.dev_file_id; + lock_info.lock_need_free = true; + tlogd("rpmb counter lock context: dev_id=%d\n", + lock_info.dev_id); + } + put_agent_event(event_data); +} + +static void process_rpmb_unlock(int operation) +{ + u64 temp_cost_time; + + /* clear the lock info */ + lock_info.dev_id = 0; + lock_info.lock_need_free = false; + rpmb_driver_counter_unlock(); + + g_tee_rpmb_lock_release = dfx_getcurtime(); + temp_cost_time = g_tee_rpmb_lock_release - g_tee_rpmb_lock_done; + if (temp_cost_time > RPMB_TIMEOUT_TIME_TEE) { + tloge("rpmb tee cost time is more than 800ms, 
start[%llu], unlock[%llu], cost[%llu], operation[%d]\n", + g_tee_rpmb_lock_done, g_tee_rpmb_lock_release, + temp_cost_time, operation); + tee_report_rpmb(); + } + tlogd("free rpmb device lock\n"); +} + +#define GET_RPMB_LOCK_MASK 0x01 +#define FREE_RPMB_LOCK_MASK 0x02 +static void send_ioccmd(const struct tee_agent_kernel_ops *agent_instance) +{ + uint8_t lock_flag; + int32_t ret; + + if (agent_instance == NULL || m_rpmb_ctrl == NULL) { + tloge("bad parameters\n"); + return; + } + + lock_flag = m_rpmb_ctrl->lock_flag; + + if (lock_flag & GET_RPMB_LOCK_MASK) + process_rpmb_lock(agent_instance); + + ret = rpmb_ioctl_cmd(RPMB_FUNC_ID_SECURE_OS, m_rpmb_ctrl->op_type, + &m_rpmb_ctrl->args.send_ioccmd.ioc_rpmb); + if (ret) + tloge("rpmb ioctl failed: %d\n", ret); + + if (lock_flag & FREE_RPMB_LOCK_MASK) + process_rpmb_unlock(m_rpmb_ctrl->op_type); + m_rpmb_ctrl->ret = ret; +} + +static int rpmb_check_data(struct rpmb_ctrl_t *trans_ctrl) +{ + if (trans_ctrl == NULL) + return 0; + + if (trans_ctrl->magic != RPMB_CTRL_MAGIC) { + tloge("rpmb check magic error, now is 0x%x\n", + trans_ctrl->magic); + return -1; + } + + return 0; +} + +static void rpmb_handle_cmd(struct tee_agent_kernel_ops *agent_instance) +{ + switch (m_rpmb_ctrl->cmd) { + case SEC_SEND_IOCMD: + tlogd("rpmb agent cmd is send ioc\n"); + send_ioccmd(agent_instance); + break; + case SEC_RPMB_LOCK: + tlogd("rpmb agent cmd is lock\n"); + process_rpmb_lock(agent_instance); + m_rpmb_ctrl->ret = 0; + break; + case SEC_RPMB_UNLOCK: + tlogd("rpmb agent cmd is unlock\n"); + process_rpmb_unlock(SEC_RPMB_UNLOCK); + m_rpmb_ctrl->ret = 0; + break; + default: + tloge("rpmb agent cmd not supported 0x%x\n", m_rpmb_ctrl->cmd); + break; + } +} + +static int rpmb_agent_work(struct tee_agent_kernel_ops *agent_instance) +{ + struct rpmb_ctrl_t *trans_ctrl = NULL; + errno_t rc = EOK; + uint32_t copy_len; + + if (agent_instance == NULL || agent_instance->agent_buff == NULL) { + tloge("agent buff invalid\n"); + return -1; + } + + 
trans_ctrl = (struct rpmb_ctrl_t *)agent_instance->agent_buff; + if (rpmb_check_data(trans_ctrl) != 0) { + trans_ctrl->ret = TEEC_ERROR_BAD_FORMAT; + return -1; + } + + if (m_rpmb_ctrl == NULL) { + m_rpmb_ctrl = kzalloc(agent_instance->agent_buff_size, + GFP_KERNEL); + if (m_rpmb_ctrl == NULL) { + tloge("memory alloc failed\n"); + trans_ctrl->ret = TEEC_ERROR_OUT_OF_MEMORY; + return -1; + } + } + rc = memcpy_s((void *)m_rpmb_ctrl, + agent_instance->agent_buff_size, (void *)trans_ctrl, + sizeof(*m_rpmb_ctrl) + trans_ctrl->buf_len); + if (rc != EOK) { + tloge("memcpy_s failed: 0x%x\n", rc); + trans_ctrl->ret = TEEC_ERROR_SECURITY; + goto clean; + } + update_dataptr(m_rpmb_ctrl); + rpmb_handle_cmd(agent_instance); + copy_len = agent_instance->agent_buff_size - + offsetof(struct rpmb_ctrl_t, buf_start); + rc = memcpy_s((void *)trans_ctrl->buf_start, copy_len, + (void *)m_rpmb_ctrl->buf_start, copy_len); + if (rc != EOK) { + tloge("memcpy_s 2 failed: 0x%x\n", rc); + trans_ctrl->ret = TEEC_ERROR_SECURITY; + goto clean; + } + trans_ctrl->ret = m_rpmb_ctrl->ret; + + return 0; +clean: + trans_ctrl->ret = TEEC_ERROR_SECURITY; + kfree(m_rpmb_ctrl); + m_rpmb_ctrl = NULL; + return -1; +} + +static int rpmb_agent_exit(struct tee_agent_kernel_ops *agent_instance) +{ + tloge("rpmb agent is exit is being invoked\n"); + + if (m_rpmb_ctrl != NULL) { + kfree(m_rpmb_ctrl); + m_rpmb_ctrl = NULL; + } + + return 0; +} + +static int rpmb_agent_crash_work( + struct tee_agent_kernel_ops *agent_instance, + struct tc_ns_client_context *context, unsigned int dev_file_id) +{ + (void)agent_instance; + (void)context; + tlogd("check free lock or not, dev_id=%d\n", dev_file_id); + if (lock_info.lock_need_free && (lock_info.dev_id == dev_file_id)) { + tloge("CA crash, need to free lock\n"); + process_rpmb_unlock(SEC_RPMB_ABNORMAL); + } + return 0; +} + +static struct tee_agent_kernel_ops rpmb_agent_ops = { + .agent_name = "rpmb", + .agent_id = TEE_RPMB_AGENT_ID, + .tee_agent_init = NULL, + 
.tee_agent_work = rpmb_agent_work, + .tee_agent_exit = rpmb_agent_exit, + .tee_agent_crash_work = rpmb_agent_crash_work, + .agent_buff_size = 8 * PAGE_SIZE, + .list = LIST_HEAD_INIT(rpmb_agent_ops.list) +}; + +int rpmb_agent_register(void) +{ + tee_agent_kernel_register(&rpmb_agent_ops); + return 0; +} +EXPORT_SYMBOL(rpmb_agent_register); \ No newline at end of file diff --git a/tzdriver/agent_rpmb/core/agent_rpmb.h b/tzdriver/agent_rpmb/core/agent_rpmb.h new file mode 100755 index 0000000000000000000000000000000000000000..2cf0ce7f7d362ed609f3a820fb37af5a9aefc776 --- /dev/null +++ b/tzdriver/agent_rpmb/core/agent_rpmb.h @@ -0,0 +1,29 @@ +/* + * agent_rpmb.h + * + * rpmb agent manager function, such as register + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef AGENT_RPMB_H +#define AGENT_RPMB_H + +#ifdef CONFIG_RPMB_AGENT +int rpmb_agent_register(void); +#else +static inline int rpmb_agent_register(void) +{ + return 0; +} +#endif + +#endif \ No newline at end of file diff --git a/tzdriver/agent_rpmb/generic/rpmb_driver.c b/tzdriver/agent_rpmb/generic/rpmb_driver.c new file mode 100755 index 0000000000000000000000000000000000000000..08617f2f53e105edc50f297ea88b13caf86e9265 --- /dev/null +++ b/tzdriver/agent_rpmb/generic/rpmb_driver.c @@ -0,0 +1,26 @@ +/* + * rpmb_driver.c + * + * rpmb driver function, such as ioctl + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include "rpmb_driver.h" + +int rpmb_ioctl_cmd(enum func_id id, enum rpmb_op_type operation, + struct storage_blk_ioc_rpmb_data *storage_data) +{ + if (storage_data == NULL) + return -1; + + return vendor_rpmb_ioctl_cmd(id, operation, storage_data); +} \ No newline at end of file diff --git a/tzdriver/agent_rpmb/generic/rpmb_driver.h b/tzdriver/agent_rpmb/generic/rpmb_driver.h new file mode 100755 index 0000000000000000000000000000000000000000..dff0bac4d3050604371ebdd5b453e1c44a2878e2 --- /dev/null +++ b/tzdriver/agent_rpmb/generic/rpmb_driver.h @@ -0,0 +1,65 @@ +/* + * rpmb_driver.h + * + * rpmb driver function, such as ioctl + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __RPMB_DRIVER_H +#define __RPMB_DRIVER_H + +#include + +#ifdef CONFIG_VENDOR_RPMB +int vendor_rpmb_ioctl_cmd(enum func_id id, enum rpmb_op_type operation, + struct storage_blk_ioc_rpmb_data *storage_data); + +static inline void tee_report_rpmb(void) +{ + rpmb_dump_io_latency(); +} +#else +static inline int vendor_rpmb_ioctl_cmd(enum func_id id, enum rpmb_op_type operation, + struct storage_blk_ioc_rpmb_data *storage_data) +{ + return 0xFF08; +} + +static inline void tee_report_rpmb(void) +{ +} +#endif + +#if defined(CONFIG_VENDOR_RPMB) && !defined(CONFIG_RPMB_REQ_LOCK_DISABLE) +static inline void rpmb_driver_counter_lock(void) +{ + mutex_lock(&rpmb_counter_lock); +} + +static inline void rpmb_driver_counter_unlock(void) +{ + mutex_unlock(&rpmb_counter_lock); +} +#else +static inline void rpmb_driver_counter_lock(void) +{ +} + +static inline void rpmb_driver_counter_unlock(void) +{ +} +#endif + +int rpmb_ioctl_cmd(enum func_id id, enum rpmb_op_type operation, + struct storage_blk_ioc_rpmb_data *storage_data); +#endif \ No newline at end of file diff --git a/tzdriver/agent_rpmb/mdc/rpmb.h b/tzdriver/agent_rpmb/mdc/rpmb.h new file mode 100755 index 0000000000000000000000000000000000000000..481705de04f185653d2d924bd55a4ae5550b8dd6 --- /dev/null +++ b/tzdriver/agent_rpmb/mdc/rpmb.h @@ -0,0 +1,76 @@ +/* + * rpmb.h + * + * rpmb base data and structs defination + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __RPMB_H__ +#define __RPMB_H__ + +#include +#include + +#define MAX_CDB_CMD_LENGTH 16 +#define UFS_IOC_MAX_RPMB_CMD 3 +#define STORAGE_IOC_MAX_RPMB_CMD 3 +#define MAX_IOC_RPMB_BYTES (4 * 1024) + +enum rpmb_op_type { + RPMB_OP_RD = 0, + RPMB_OP_WR_DATA = 1, + RPMB_OP_WR_CNT = 2 +}; + +enum func_id { + RPMB_FUNC_ID_RESERVED, + RPMB_FUNC_ID_SE, + RPMB_FUNC_ID_SECURE_OS, + RPMB_FUNC_ID_MAX, +}; + +enum rpmb_version { + RPMB_VER_INVALID = 0, + RPMB_VER_UFS_21 = 21, + RPMB_VER_UFS_30 = 30, + RPMB_VER_MAX = 999 +}; + +struct storage_blk_ioc_data { + unsigned char *buf; + u64 buf_bytes; + u32 blocks; +}; + +struct ufs_blk_ioc_data { + struct sg_io_v4 siv; + unsigned char *buf; + u64 buf_bytes; +}; + +struct storage_blk_ioc_rpmb_data { + struct storage_blk_ioc_data data[STORAGE_IOC_MAX_RPMB_CMD]; +}; + +struct ufs_blk_ioc_rpmb_data { + struct ufs_blk_ioc_data data[UFS_IOC_MAX_RPMB_CMD]; + u8 sdb_command[UFS_IOC_MAX_RPMB_CMD][MAX_CDB_CMD_LENGTH]; +}; + +extern struct mutex rpmb_counter_lock; + +extern int vendor_rpmb_ioctl_cmd( + enum func_id id, + enum rpmb_op_type operation, + struct storage_blk_ioc_rpmb_data *storage_data); + +#endif /* __RPMB_H__ */ \ No newline at end of file diff --git a/tzdriver/agent_rpmb/mdc/rpmb_driver.c b/tzdriver/agent_rpmb/mdc/rpmb_driver.c new file mode 100755 index 0000000000000000000000000000000000000000..198d290fdd2964166010ff75a93f2e524c1b15d8 --- /dev/null +++ b/tzdriver/agent_rpmb/mdc/rpmb_driver.c @@ -0,0 +1,46 @@ +/* + * rpmb_driver.c + * + * rpmb driver function, such as ioctl + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include "rpmb_driver.h" +#include +#include "tc_ns_log.h" + +typedef int *(rpmb_ioctl_func)(enum func_id id, enum rpmb_op_type operation, + struct storage_blk_ioc_rpmb_data *storage_data); + +int rpmb_ioctl_cmd(enum func_id id, enum rpmb_op_type operation, + struct storage_blk_ioc_rpmb_data *storage_data) +{ + static rpmb_ioctl_func *rpmb_ioctl = NULL; + + if (storage_data == NULL) + return NULL; + + if (rpmb_ioctl == NULL) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) + rpmb_ioctl = + (rpmb_ioctl_func *)(uintptr_t)__symbol_get("vendor_rpmb_ioctl_cmd"); +#else + rpmb_ioctl = + (rpmb_ioctl_func *)(uintptr_t)kallsyms_lookup_name("vendor_rpmb_ioctl_cmd"); +#endif + if (rpmb_ioctl == NULL) { + tloge("fail to find symbol vendor_rpmb_ioctl_cmd\n"); + return NULL; + } + } + return rpmb_ioctl(id, operation, storage_data); +} diff --git a/tzdriver/agent_rpmb/mdc/rpmb_driver.h b/tzdriver/agent_rpmb/mdc/rpmb_driver.h new file mode 100755 index 0000000000000000000000000000000000000000..44a223165ca5ed7983943bba6b12cad77a6c2926 --- /dev/null +++ b/tzdriver/agent_rpmb/mdc/rpmb_driver.h @@ -0,0 +1,34 @@ +/* + * rpmb_driver.h + * + * rpmb driver function, such as ioctl + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __RPMB_DRIVER_H +#define __RPMB_DRIVER_H + +#include "rpmb.h" + +static inline void rpmb_driver_counter_lock(void) +{ +} + +static inline void rpmb_driver_counter_unlock(void) +{ +} + +int rpmb_ioctl_cmd(enum func_id id, enum rpmb_op_type operation, + struct storage_blk_ioc_rpmb_data *storage_data); + +#endif \ No newline at end of file diff --git a/tzdriver/agent_rpmb/mplat/rpmb_driver.c b/tzdriver/agent_rpmb/mplat/rpmb_driver.c new file mode 100755 index 0000000000000000000000000000000000000000..bba8deba3200dfa6402940a26ab1cfd7e71add2d --- /dev/null +++ b/tzdriver/agent_rpmb/mplat/rpmb_driver.c @@ -0,0 +1,511 @@ +/* + * rpmb_driver.c + * + * rpmb driver function, such as ioctl + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include "rpmb_driver.h" +#include + +#include /* for struct mmc_card */ +#include +#include + +#ifdef CONFIG_MTK_UFS_SUPPORT +#include "ufs-mtk.h" +#endif +#include +#include "core.h" +#include "card.h" +#include "mmc_ops.h" +#include "mtk_sd.h" +#include "tc_ns_log.h" +#include "queue.h" + +#define IOC_CMD_0 0 +#define IOC_CMD_1 1 +#define IOC_CMD_2 2 +#define STORAGE_IOC_MAX_RPMB_CMD 3 +#define RPMB_EMMC_CID_SIZE 32 +#define RPMB_CTRL_MAGIC 0x5A5A5A5A +#define RPMB_REQ 1 /* RPMB request mark */ +#define RPMB_RESP (1 << 1) /* RPMB response mark*/ +#define RPMB_PROGRAM_KEY 0x1 /* Program RPMB Authentication Key */ +#define RPMB_GET_WRITE_COUNTER 0x2 /* Read RPMB write counter */ +#define RPMB_WRITE_DATA 0x3 /* Write data to RPMB partition */ +#define RPMB_READ_DATA 0x4 /* Read data from RPMB partition */ +#define RPMB_RESULT_READ 0x5 /* Read result request (Internal) */ + +struct emmc_rpmb_blk_data { + spinlock_t lock; + struct device *parent; + struct gendisk *disk; + struct mmc_queue queue; + struct list_head part; + uint32_t flags; + uint32_t usage; + uint32_t read_only; + uint32_t part_type; + uint32_t reset_done; + uint32_t part_curr; // keep curr partition + struct device_attribute force_ro; + struct device_attribute power_ro_lock; + int32_t area_type; +}; + +static int32_t emmc_rpmb_switch(struct mmc_card *card, + struct emmc_rpmb_blk_data *md) +{ + int32_t ret; + struct emmc_rpmb_blk_data *main_md = NULL; + + if (card == NULL) + return -1; + + main_md = dev_get_drvdata(&card->dev); + if (main_md == NULL) + return -1; + + if (main_md->part_curr == md->part_type) + return 0; + +#if defined(CONFIG_MTK_EMMC_CQ_SUPPORT) || defined(CONFIG_MTK_EMMC_HW_CQ) + if (mmc_card_cmdq(card)) { + ret = mmc_cmdq_disable(card); + if (ret) { + tloge("CQ disabled failed!!! 
ret: 0x%x\n", ret); + return ret; + } + } +#endif + + if (mmc_card_mmc(card) != 0) { + uint8_t cfg = card->ext_csd.part_config; + + cfg &= ~EXT_CSD_PART_CONFIG_ACC_MASK; + cfg |= md->part_type; + + ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_PART_CONFIG, + cfg, card->ext_csd.part_time); + if (ret) + return ret; + + card->ext_csd.part_config = cfg; + } + +#if defined(CONFIG_MTK_EMMC_CQ_SUPPORT) || defined(CONFIG_MTK_EMMC_HW_CQ) + /* enable cmdq at user partition */ + if (!mmc_card_cmdq(card) && (md->part_type <= 0)) { + ret = mmc_cmdq_enable(card); + if (ret) + tloge("%s enable CMDQ error %d, so just work without\n", + mmc_hostname(card->host), ret); + } +#endif + +#if defined(CONFIG_MTK_EMMC_HW_CQ) + card->part_curr = md->part_type; +#endif + main_md->part_curr = md->part_type; + return 0; +} + +#define RPMB_BLOCK_SIZE 512 +static void set_sbc(__u16 blks, __u16 type, u8 req_type, + struct mmc_command *sbc) +{ + sbc->opcode = MMC_SET_BLOCK_COUNT; + sbc->arg = blks; + if ((req_type == RPMB_REQ && type == RPMB_WRITE_DATA) || + type == RPMB_PROGRAM_KEY) + sbc->arg |= 1 << 31; + sbc->flags = MMC_RSP_R1 | MMC_CMD_AC; +} + +static void rpmb_send_req_cmd(struct mmc_card *card, + struct storage_blk_ioc_rpmb_data *storage_data, + __u16 blks, __u16 type, struct mmc_request *request) +{ + request->cmd->opcode = MMC_WRITE_MULTIPLE_BLOCK; + request->data->flags |= MMC_DATA_WRITE; + if (type == RPMB_RESULT_READ) { + /* this is the step2 for write data cmd and write key cmd */ + sg_copy_from_buffer(request->data->sg, 1, + storage_data->data[IOC_CMD_1].buf, RPMB_BLOCK_SIZE * blks); + } else { + /* this is step 1 for read data and read counter */ + sg_copy_from_buffer(request->data->sg, 1, + storage_data->data[IOC_CMD_0].buf, RPMB_BLOCK_SIZE * blks); + } + mmc_set_data_timeout(request->data, card); + mmc_wait_for_req(card->host, request); +} + +static void resp_get_sg(struct storage_blk_ioc_rpmb_data *storage_data, + __u16 blks, __u16 type, struct scatterlist *sg) +{ + 
bool read_type = (type == RPMB_READ_DATA) ||
+ (type == RPMB_GET_WRITE_COUNTER);
+ bool write_type = (type == RPMB_WRITE_DATA) ||
+ (type == RPMB_PROGRAM_KEY);
+ if (read_type) {
+ if (storage_data->data[IOC_CMD_1].buf != NULL)
+ sg_copy_to_buffer(sg, 1, storage_data->data[IOC_CMD_1].buf,
+ RPMB_BLOCK_SIZE * blks);
+ else
+ tloge("invalid data1buff, is null\n");
+ } else if (write_type) {
+ if (storage_data->data[IOC_CMD_2].buf != NULL)
+ sg_copy_to_buffer(sg, 1, storage_data->data[IOC_CMD_2].buf,
+ RPMB_BLOCK_SIZE * blks);
+ else
+ /* NOTE(review): this branch checks data[IOC_CMD_2]; the
+ * message should say "data2buff", not "data1buff". */
+ tloge("invalid data1buff, is null\n");
+ } else {
+ /* do nothing */
+ tloge("invalid reqtype %d\n", type);
+ }
+}
+
+/*
+ * Read RPMB response frames from the device (multi-block read), then
+ * hand them to resp_get_sg() for copy-out to the caller's buffers.
+ */
+static void rpmb_send_resp_cmd(struct mmc_card *card,
+ struct storage_blk_ioc_rpmb_data *storage_data,
+ __u16 blks, __u16 type, struct mmc_request *request)
+{
+ request->cmd->opcode = MMC_READ_MULTIPLE_BLOCK;
+ request->data->flags |= MMC_DATA_READ;
+ mmc_set_data_timeout(request->data, card);
+ mmc_wait_for_req(card->host, request);
+ resp_get_sg(storage_data, blks, type, request->data->sg);
+}
+
+/*
+ * Issue one RPMB transfer of "blks" 512-byte frames. Assembles the
+ * sbc(CMD23)/cmd/data triple around a zeroed bounce buffer and
+ * dispatches via rpmb_send_req_cmd() or rpmb_send_resp_cmd().
+ * Returns 0 on success or a negative errno / MMC layer error code.
+ */
+static int emmc_rpmb_send_command(struct mmc_card *card,
+ struct storage_blk_ioc_rpmb_data *storage_data,
+ __u16 blks, __u16 type, u8 req_type)
+{
+ struct mmc_command cmd = {0};
+ struct mmc_command sbc = {0};
+ struct mmc_data data = {0};
+ struct mmc_request request = {NULL};
+ struct scatterlist sg;
+ u8 *transfer_buf = NULL;
+
+ if (blks == 0) {
+ tloge("Invalid blks: 0\n");
+ return -EINVAL;
+ }
+
+ set_sbc(blks, type, req_type, &sbc);
+ request.sbc = &sbc;
+
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+ request.cmd = &cmd;
+
+ data.blksz = RPMB_BLOCK_SIZE;
+ data.blocks = blks;
+ data.sg = &sg;
+ data.sg_len = 1;
+ request.data = &data;
+
+ request.stop = NULL;
+
+ /* Single bounce buffer backing the one-entry scatterlist. */
+ transfer_buf = kzalloc(RPMB_BLOCK_SIZE * blks, GFP_KERNEL);
+ if (transfer_buf == NULL)
+ return -ENOMEM;
+
+ sg_init_one(&sg, transfer_buf, RPMB_BLOCK_SIZE * blks);
+
+ if (req_type == RPMB_REQ)
+ rpmb_send_req_cmd(card, storage_data, blks,
type, &request);
+ else
+ rpmb_send_resp_cmd(card, storage_data, blks, type, &request);
+
+ kfree(transfer_buf);
+
+ /* Surface command-phase errors first, then data-phase errors. */
+ if (cmd.error)
+ return cmd.error;
+ else if (data.error)
+ return data.error;
+ else
+ return 0;
+}
+
+/*
+ * Drive the RPMB protocol for one operation:
+ * step 1 - write the request frames to the RPMB partition;
+ * step 2 - for writes / key programming, request the result frame;
+ * step 3 - read the response frames back.
+ */
+static int emmc_rpmb_cmd_proc(struct mmc_card *card, unsigned short type,
+ struct storage_blk_ioc_rpmb_data *storage_data)
+{
+ int err = 0;
+
+ /* STEP 1: send request to RPMB partition */
+ if (type == RPMB_WRITE_DATA) {
+ err = emmc_rpmb_send_command(card, storage_data,
+ storage_data->data[IOC_CMD_0].blocks, type, RPMB_REQ);
+ } else {
+ /* assemble the frame */
+ /* NOTE(review): clobbers data[IOC_CMD_0].blocks with the
+ * response block count; callers must not rely on it afterwards. */
+ storage_data->data[IOC_CMD_0].blocks = storage_data->data[IOC_CMD_1].blocks;
+ err = emmc_rpmb_send_command(card, storage_data,
+ 1, type, RPMB_REQ);
+ }
+ if (err != 0) {
+ tloge("step 1, request failed err-%d\n", err);
+ goto out;
+ }
+
+ /* STEP 2: check write result. Only for WRITE_DATA or Program key */
+ if (type == RPMB_WRITE_DATA || type == RPMB_PROGRAM_KEY) {
+ err = emmc_rpmb_send_command(card, storage_data,
+ 1, RPMB_RESULT_READ, RPMB_REQ);
+ if (err != 0) {
+ tloge("step 2, request result failed err-%d\n", err);
+ goto out;
+ }
+ }
+
+ /* STEP 3: get response from RPMB partition */
+ if (type == RPMB_READ_DATA)
+ err = emmc_rpmb_send_command(card, storage_data,
+ storage_data->data[IOC_CMD_0].blocks, type, RPMB_RESP);
+ else
+ err = emmc_rpmb_send_command(card, storage_data, 1,
+ type, RPMB_RESP);
+ if (err != 0)
+ tloge("step 3, response failed err-%d\n", err);
+
+out:
+ return err;
+}
+
+/*
+ * Switch the card to the RPMB hardware partition, run the requested
+ * operation, then always attempt to switch back to the main partition.
+ */
+static int rpmb_operation_emmc(enum rpmb_op_type operation,
+ struct storage_blk_ioc_rpmb_data *storage_data)
+{
+ struct emmc_rpmb_blk_data *part_md = NULL;
+ int ret;
+ struct emmc_rpmb_blk_data *md = NULL;
+
+ struct mmc_card *card = get_card_from_mtk_msdc_host();
+ if (card == NULL)
+ return -1;
+
+ md = dev_get_drvdata(&card->dev);
+ if (md == NULL)
+ return -1;
+
+ /* NOTE(review): if md->part is empty, part_md is left pointing at the
+ * list-head container and the part_type check below reads bogus memory. */
+ list_for_each_entry(part_md, &md->part, part) {
+ if (part_md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
+ break;
+ 
}
+
+ if (part_md->part_type != EXT_CSD_PART_CONFIG_ACC_RPMB)
+ return -1;
+
+ mmc_get_card(card);
+ ret = emmc_rpmb_switch(card, part_md);
+ if (ret != 0) {
+ tloge("emmc switch to rpmb failed ret-%x\n", ret);
+ goto error;
+ }
+
+ switch (operation) {
+ case RPMB_OP_RD:
+ ret = emmc_rpmb_cmd_proc(card, RPMB_READ_DATA, storage_data);
+ break;
+ case RPMB_OP_WR_CNT:
+ ret = emmc_rpmb_cmd_proc(card, RPMB_GET_WRITE_COUNTER,
+ storage_data);
+ break;
+ case RPMB_OP_WR_DATA:
+ ret = emmc_rpmb_cmd_proc(card, RPMB_WRITE_DATA, storage_data);
+ break;
+ default:
+ /* NOTE(review): falls through to the switch-back below, so an
+ * unknown operation can still return 0 if that switch succeeds. */
+ tloge("receive an unknown operation %d\n", operation);
+ goto error;
+ }
+ if (ret != 0)
+ tloge("emmc rpmb cmd proc failed ret-%x\n", ret);
+
+error:
+ /* NOTE(review): the switch-back status overwrites ret here, so a
+ * failed cmd_proc can be masked and reported as success. */
+ ret = emmc_rpmb_switch(card, dev_get_drvdata(&card->dev));
+ if (ret != 0)
+ tloge("emmc switch to main failed ret-%x\n", ret);
+
+ mmc_put_card(card);
+
+ return ret;
+}
+
+/*
+ * UFS read path: forward a read-data request to the MTK RPMB raw device.
+ * Input frame comes from data[IOC_CMD_0]; output frames land in
+ * data[IOC_CMD_1].
+ */
+static int rpmb_req_read_data_ufs(
+ struct storage_blk_ioc_rpmb_data *storage_data)
+{
+ struct rpmb_data data;
+ struct rpmb_dev *rawdev_ufs_rpmb = NULL;
+ int ret;
+ uint16_t blk_cnt;
+
+ rawdev_ufs_rpmb = ufs_mtk_rpmb_get_raw_dev();
+
+ /* NOTE(review): magic index 1 — elsewhere this is spelt IOC_CMD_1. */
+ blk_cnt = storage_data->data[1].blocks;
+ tlogd("rpmb read data ufs, blk_cnt: %u\n", blk_cnt);
+
+ data.req_type = RPMB_READ_DATA;
+ data.icmd.nframes = 1;
+ data.icmd.frames = (struct rpmb_frame *)storage_data->data[IOC_CMD_0].buf;
+
+ /*
+ * We need to fill-in block_count by ourselves for UFS case. 
+ */
+ data.icmd.frames->block_count = cpu_to_be16(blk_cnt);
+
+ data.ocmd.nframes = blk_cnt;
+ data.ocmd.frames = (struct rpmb_frame *)storage_data->data[IOC_CMD_1].buf;
+
+ ret = rpmb_cmd_req(rawdev_ufs_rpmb, &data);
+ if (ret != 0)
+ tloge("rpmb req ufs error, ret:0x%x\n", ret);
+
+ tlogd("result 0x%x\n", cpu_to_be16(data.ocmd.frames->result));
+
+ return ret;
+}
+
+/*
+ * UFS write path: send write-data frames from data[IOC_CMD_0] and copy
+ * the device's single response frame back into data[IOC_CMD_2].
+ */
+static int rpmb_req_write_data_ufs(
+ struct storage_blk_ioc_rpmb_data *storage_data)
+{
+ struct rpmb_data data;
+ struct rpmb_dev *rawdev_ufs_rpmb = NULL;
+ int ret;
+ uint16_t blk_cnt;
+
+ rawdev_ufs_rpmb = ufs_mtk_rpmb_get_raw_dev();
+
+ blk_cnt = storage_data->data[IOC_CMD_0].blocks;
+
+ tlogd("blk_cnt: %d\n", blk_cnt);
+
+ /*
+ * Alloc output frame to avoid overwriting input frame
+ * buffer provided by TEE
+ */
+ /* NOTE(review): gfp flags are 0 here; GFP_KERNEL looks intended. */
+ data.ocmd.frames = kzalloc(sizeof(struct rpmb_frame), 0);
+ if (data.ocmd.frames == NULL)
+ return RPMB_ALLOC_ERROR;
+
+ data.ocmd.nframes = 1;
+
+ data.req_type = RPMB_WRITE_DATA;
+ data.icmd.nframes = blk_cnt;
+ data.icmd.frames = (struct rpmb_frame *)storage_data->data[IOC_CMD_0].buf;
+
+ ret = rpmb_cmd_req(rawdev_ufs_rpmb, &data);
+ if (ret != 0)
+ tloge("rpmb_req write_data_ufs error, ret:0x%x\n", ret);
+
+ /*
+ * Microtrust TEE will check write counter in the first frame,
+ * thus we copy response frame to the first frame. 
+ */
+ if (storage_data->data[IOC_CMD_2].buf == NULL) {
+ ret = -1;
+ goto free;
+ }
+
+ /* NOTE(review): overwrites the rpmb_cmd_req() status with the
+ * memcpy_s errno_t, mixing two error domains in the return value. */
+ ret = memcpy_s(storage_data->data[IOC_CMD_2].buf,
+ storage_data->data[IOC_CMD_2].buf_bytes,
+ data.ocmd.frames, sizeof(*(data.ocmd.frames)));
+ if (ret != EOK)
+ tloge("frames copy fail, ret:0x%x\n", ret);
+
+ tlogd("result 0x%x\n", cpu_to_be16(data.ocmd.frames->result));
+
+free:
+ kfree(data.ocmd.frames);
+
+ return ret;
+}
+
+/*
+ * UFS write-counter query. "key" and "wc" are unused — the caller passes
+ * NULL for both; the counter travels inside the RPMB frames
+ * (data[IOC_CMD_0] in, data[IOC_CMD_1] out).
+ */
+static int rpmb_req_get_wc_ufs(u8 *key, u32 *wc,
+ struct storage_blk_ioc_rpmb_data *storage_data)
+{
+ struct rpmb_data data;
+ struct rpmb_dev *rawdev_ufs_rpmb = NULL;
+ int ret;
+
+ tlogd("rpmb_req_get_wc_ufs start!!!\n");
+
+ rawdev_ufs_rpmb = ufs_mtk_rpmb_get_raw_dev();
+
+ /*
+ * Initial frame buffers
+ */
+ data.icmd.frames = (struct rpmb_frame *)storage_data->data[IOC_CMD_0].buf;
+ data.ocmd.frames = (struct rpmb_frame *)storage_data->data[IOC_CMD_1].buf;
+
+ /*
+ * Prepare frame contents.
+ * Input frame (in view of device) only needs nonce
+ */
+ data.req_type = RPMB_GET_WRITE_COUNTER;
+ data.icmd.nframes = 1;
+
+ /* Output frame (in view of device) */
+ data.ocmd.nframes = 1;
+ ret = rpmb_cmd_req(rawdev_ufs_rpmb, &data);
+ if (ret != 0)
+ tloge("rpmb_req_get_wc_ufs error!!! ret:0x%x\n", ret);
+
+ tlogd("end\n");
+
+ return ret;
+}
+
+#ifdef CONFIG_MTK_UFS_SUPPORT
+/* Dispatch one RPMB operation to the matching UFS request helper. */
+static int rpmb_operation_ufs(enum rpmb_op_type operation,
+ struct storage_blk_ioc_rpmb_data *storage_data)
+{
+ int ret = -1;
+
+ switch (operation) {
+ case RPMB_OP_RD:
+ ret = rpmb_req_read_data_ufs(storage_data);
+ break;
+ case RPMB_OP_WR_CNT:
+ ret = rpmb_req_get_wc_ufs(NULL, NULL, storage_data);
+ break;
+ case RPMB_OP_WR_DATA:
+ ret = rpmb_req_write_data_ufs(storage_data);
+ break;
+ default:
+ tloge("receive an unknown command id %d.\n", operation);
+ break;
+ }
+
+ return ret;
+}
+#endif
+
+/*
+ * Entry point for RPMB ioctls: route to the eMMC or UFS backend based on
+ * the boot device type. "id" is currently unused. Returns 0 on success,
+ * -1 on bad input, otherwise the backend status.
+ */
+int rpmb_ioctl_cmd(enum func_id id, enum rpmb_op_type operation,
+ struct storage_blk_ioc_rpmb_data *storage_data)
+{
+ int ret = 0;
+ int boot_type;
+
+ if (storage_data == NULL)
+ return -1;
+
+ boot_type = get_boot_type();
+ if (boot_type == BOOTDEV_SDMMC)
+ ret = rpmb_operation_emmc(operation, storage_data);
+#ifdef CONFIG_MTK_UFS_SUPPORT
+ else if (boot_type == BOOTDEV_UFS)
+ ret = rpmb_operation_ufs(operation, storage_data);
+#endif
+ return ret;
+}
\ No newline at end of file
diff --git a/tzdriver/agent_rpmb/mplat/rpmb_driver.h b/tzdriver/agent_rpmb/mplat/rpmb_driver.h
new file mode 100755
index 0000000000000000000000000000000000000000..3d86f17839f12923e85bd2ea04fa72b8a792833c
--- /dev/null
+++ b/tzdriver/agent_rpmb/mplat/rpmb_driver.h
@@ -0,0 +1,34 @@
+/*
+ * rpmb_driver.h
+ *
+ * rpmb driver function, such as ioctl
+ *
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details. 
+ */ + +#ifndef __RPMB_DRIVER_H +#define __RPMB_DRIVER_H + +#include + +static inline void rpmb_driver_counter_lock(void) +{ +} + +static inline void rpmb_driver_counter_unlock(void) +{ +} + +int rpmb_ioctl_cmd(enum func_id id, enum rpmb_op_type operation, + struct storage_blk_ioc_rpmb_data *storage_data); + +#endif \ No newline at end of file diff --git a/tzdriver/apply_tzdriver.sh b/tzdriver/apply_tzdriver.sh new file mode 100755 index 0000000000000000000000000000000000000000..bf5ec7a35f8ac5e24c0abe79deff4b2c2d5f40dc --- /dev/null +++ b/tzdriver/apply_tzdriver.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2022 Huawei Device Co., Ltd. +# + +set -e + +OHOS_SOURCE_ROOT=$1 +KERNEL_BUILD_ROOT=$2 +PRODUCT_NAME=$3 +KERNEL_VERSION=$4 +TZDRIVER_SOURCE_ROOT=$OHOS_SOURCE_ROOT/kernel/linux/common_modules/tzdriver + +function main() +{ + pushd . + + if [ ! -d "$KERNEL_BUILD_ROOT/drivers/tzdriver" ]; then + mkdir $KERNEL_BUILD_ROOT/drivers/tzdriver + fi + + cd $KERNEL_BUILD_ROOT/drivers/tzdriver + ln -s -f $(realpath --relative-to=$KERNEL_BUILD_ROOT/drivers/tzdriver/ $TZDRIVER_SOURCE_ROOT)/* ./ + + popd +} + +main diff --git a/tzdriver/auth/Kconfig b/tzdriver/auth/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..6ab72c04c832fd0b9abe54ec38f4f8616f8e721c --- /dev/null +++ b/tzdriver/auth/Kconfig @@ -0,0 +1,28 @@ +# Auth Configuration +config CLIENT_AUTH + bool "Client Application Hash Auth" + default n + depends on TZDRIVER + help + TEEOS CA code hash auth + +config ANDROID_HIDL + bool "Android Hidl Adapt" + default n + depends on CLIENT_AUTH + help + TEEOS hidl proc auth + +config CADAEMON_AUTH + bool "Teec Daemon Path Hash Auth" + default n + depends on TZDRIVER + help + TEEOS TEECD path hash auth + +config TZDRIVER_OHOS + bool "Is in OH" + default n + depends on TZDRIVER + help + OH Cadaemon uid diff --git a/tzdriver/auth/Makefile b/tzdriver/auth/Makefile new file mode 100644 index 
0000000000000000000000000000000000000000..0e68fe89777f932e4607f2d4f86379c428fdb696 --- /dev/null +++ b/tzdriver/auth/Makefile @@ -0,0 +1,30 @@ +KERNEL_DIR :=$(srctree) + +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/core +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/tlogger +EXTRA_CFLAGS += -I$(KERNEL_DIR)/../../../../third_party/bounds_checking_function/include +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/include +EXTRA_CFLAGS += -I$(KERNEL_DIR)/../../../../base/security/selinux/include +EXTRA_CFLAGS += -I$(KERNEL_DIR)/../../../../base/security/selinux + +EXTRA_CFLAGS += -DSELINUX_CA_HIDL_LABEL=\"u:r:hal_libteec_default:s0\" +EXTRA_CFLAGS += -DSELINUX_TEECD_LABEL=\"u:r:tee:s0\" +ifneq ($(CONFIG_TZDRIVER_OHOS),y) +EXTRA_CFLAGS += -DCONFIG_SELINUX_AUTH_ENABLE +endif +ifeq ($(CONFIG_CADAEMON_AUTH),y) +EXTRA_CFLAGS += -DCADAEMON_PATH_UID_AUTH_CTX=\"/system/bin/sa_main:6668\" +EXTRA_CFLAGS += -DSELINUX_CADAEMON_LABEL=NULL +endif +ifeq ($(CONFIG_TZDRIVER_OHOS),y) +EXTRA_CFLAGS += -DTEECD_PATH_UID_AUTH_CTX=\"/vendor/bin/teecd:6668\" +else +EXTRA_CFLAGS += -DTEECD_PATH_UID_AUTH_CTX=\"/vendor/bin/teecd:0\" +endif + +obj-$(CONFIG_CLIENT_AUTH) += client_hash_auth.o + +ifeq ($(findstring y, $(CONFIG_TEECD_AUTH) $(CONFIG_CLIENT_AUTH)), y) + obj-y += auth_base_impl.o +endif diff --git a/tzdriver/auth/auth_base_impl.c b/tzdriver/auth/auth_base_impl.c new file mode 100644 index 0000000000000000000000000000000000000000..f70a857c904ace44078c0042627f713255c484da --- /dev/null +++ b/tzdriver/auth/auth_base_impl.c @@ -0,0 +1,426 @@ +/* + * auth_base_impl.c + * + * function for base hash operation + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include "auth_base_impl.h" +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE) +#include +#endif +#if defined (CONFIG_SELINUX_AUTH_ENABLE) && defined (CONFIG_SECURITY_SELINUX) +#include +#endif +#include +#include "tc_ns_log.h" +#include "tc_ns_client.h" +#include "agent.h" /* for get_proc_dpath */ +#include "ko_adapt.h" + +/* for crypto */ +struct crypto_shash *g_shash_handle; +bool g_shash_handle_state = false; +struct mutex g_shash_handle_lock; + +void init_crypto_hash_lock(void) +{ + mutex_init(&g_shash_handle_lock); +} + +void mutex_crypto_hash_lock(void) +{ + mutex_lock(&g_shash_handle_lock); +} + +void mutex_crypto_hash_unlock(void) +{ + mutex_unlock(&g_shash_handle_lock); +} + +/* begin: prepare crypto context */ +struct crypto_shash *get_shash_handle(void) +{ + return g_shash_handle; +} + +void free_shash_handle(void) +{ + if (g_shash_handle) { + crypto_free_shash(g_shash_handle); + g_shash_handle_state = false; + g_shash_handle = NULL; + } +} + +int tee_init_shash_handle(char *hash_type) +{ + long rc; + + if (!hash_type) { + tloge("tee init crypto: error input parameter\n"); + return -EFAULT; + } + + mutex_crypto_hash_lock(); + if (g_shash_handle_state) { + mutex_crypto_hash_unlock(); + return 0; + } + + g_shash_handle = crypto_alloc_shash(hash_type, 0, 0); + if (IS_ERR_OR_NULL(g_shash_handle)) { + rc = PTR_ERR(g_shash_handle); + tloge("Can not allocate %s reason: %ld\n", hash_type, rc); + mutex_crypto_hash_unlock(); + return rc; + } + g_shash_handle_state = true; + + mutex_crypto_hash_unlock(); + return 0; +} +/* end: prepare crypto context */ + +/* begin: Calculate 
the SHA256 file digest */
+/*
+ * Allocate a shash descriptor sized for g_shash_handle.
+ * The two comparisons below guard against size_t overflow in the sum.
+ */
+static int prepare_desc(struct sdesc **desc)
+{
+ size_t size;
+ size_t shash_size;
+
+ shash_size = crypto_shash_descsize(g_shash_handle);
+ size = sizeof((*desc)->shash) + shash_size;
+ if (size < sizeof((*desc)->shash) || size < shash_size) {
+ tloge("size flow\n");
+ return -ENOMEM;
+ }
+
+ *desc = kzalloc(size, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)(*desc))) {
+ tloge("alloc desc failed\n");
+ return -ENOMEM;
+ }
+
+ return EOK;
+}
+
+#define PINED_PAGE_NUMBER 1
+/*
+ * Pin one user page of the target process; wraps the
+ * get_user_pages_remote()/get_user_pages_locked() signature changes
+ * across kernel versions.
+ */
+static int get_proc_user_pages(struct mm_struct *mm, unsigned long start_code,
+ struct page **ptr_page, struct task_struct *cur_struct)
+{
+#if (KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE)
+ (void)cur_struct;
+ return get_user_pages_remote(mm, start_code,
+ (unsigned long)PINED_PAGE_NUMBER, FOLL_FORCE, ptr_page, NULL, NULL);
+#elif (KERNEL_VERSION(4, 9, 0) <= LINUX_VERSION_CODE)
+ return get_user_pages_remote(cur_struct, mm, start_code,
+ (unsigned long)PINED_PAGE_NUMBER, FOLL_FORCE, ptr_page, NULL, NULL);
+#elif (KERNEL_VERSION(4, 4, 197) == LINUX_VERSION_CODE)
+ return get_user_pages_locked(cur_struct, mm, start_code,
+ (unsigned long)PINED_PAGE_NUMBER, FOLL_FORCE, ptr_page, NULL);
+#else
+ return get_user_pages_locked(cur_struct, mm, start_code,
+ (unsigned long)PINED_PAGE_NUMBER, 0, 1, ptr_page, NULL);
+#endif
+}
+
+/*
+ * Feed the process's code segment (mm->start_code .. mm->end_code) into
+ * the hash, one pinned page at a time. Returns 0 on success (the last
+ * crypto_shash_update() status), negative errno on failure.
+ */
+static int update_task_hash(struct mm_struct *mm,
+ struct task_struct *cur_struct, struct shash_desc *shash)
+{
+ int rc = -1;
+ unsigned long in_size;
+ struct page *ptr_page = NULL;
+ void *ptr_base = NULL;
+
+ unsigned long start_code = mm->start_code;
+ unsigned long end_code = mm->end_code;
+ unsigned long code_size = end_code - start_code;
+ if (code_size == 0) {
+ tloge("bad code size\n");
+ return -EINVAL;
+ }
+
+ while (start_code < end_code) {
+ /* Get a handle of the page we want to read */
+ rc = get_proc_user_pages(mm, start_code, &ptr_page, cur_struct);
+ if (rc != PINED_PAGE_NUMBER) {
+ tloge("get user pages error[0x%x]\n", rc);
+ rc = -EFAULT;
+ break;
+ }
+
+ ptr_base = kmap_atomic(ptr_page);
+ if (!ptr_base) {
+ rc = -EFAULT;
+ put_page(ptr_page);
+ break;
+ }
+
+ in_size = (code_size > PAGE_SIZE) ? PAGE_SIZE : code_size;
+ rc = crypto_shash_update(shash, ptr_base, in_size);
+ if (rc) {
+ kunmap_atomic(ptr_base);
+ put_page(ptr_page);
+ break;
+ }
+
+ kunmap_atomic(ptr_base);
+ put_page(ptr_page);
+ start_code += in_size;
+ code_size = end_code - start_code;
+ }
+ return rc;
+}
+
+/*
+ * Compute the SHA256 of the calling task's code segment into "digest".
+ * Kernel tasks (no mm) get a zeroed digest and EOK; tasks whose
+ * pub_key_len != 4 ("apk") are skipped as well.
+ * NOTE(review): the memset_s below uses count MAX_SHA_256_SZ against a
+ * dest of size dig_len (== 32) — confirm MAX_SHA_256_SZ <= dig_len.
+ */
+int calc_task_hash(unsigned char *digest, uint32_t dig_len,
+ struct task_struct *cur_struct, uint32_t pub_key_len)
+{
+ struct mm_struct *mm = NULL;
+ struct sdesc *desc = NULL;
+ bool check_value = false;
+ int rc;
+
+ check_value = (!cur_struct || !digest ||
+ dig_len != SHA256_DIGEST_LENTH);
+ if (check_value) {
+ tloge("tee hash: input param is error\n");
+ return -EFAULT;
+ }
+
+ mm = get_task_mm(cur_struct);
+ if (!mm) {
+ if (memset_s(digest, dig_len, 0, MAX_SHA_256_SZ))
+ return -EFAULT;
+ tloge("kernel proc need not check\n");
+ return EOK;
+ }
+
+ if (pub_key_len != sizeof(uint32_t)) {
+ tloge("apk need not check\n");
+ mmput(mm);
+ return EOK;
+ }
+
+ if (prepare_desc(&desc) != EOK) {
+ mmput(mm);
+ tloge("prepare desc failed\n");
+ return -ENOMEM;
+ }
+
+ desc->shash.tfm = g_shash_handle;
+ if (crypto_shash_init(&desc->shash)) {
+ tloge("shash init failed\n");
+ rc = -ENOMEM;
+ goto free_res;
+ }
+
+ down_read(&mm_sem_lock(mm));
+ if (update_task_hash(mm, cur_struct, &desc->shash)) {
+ up_read(&mm_sem_lock(mm));
+ rc = -ENOMEM;
+ goto free_res;
+ }
+ up_read(&mm_sem_lock(mm));
+
+ rc = crypto_shash_final(&desc->shash, digest);
+free_res:
+ mmput(mm);
+ kfree(desc);
+ return rc;
+}
+/* end: Calculate the SHA256 file digest */
+
+#if defined(CONFIG_SELINUX_AUTH_ENABLE) && defined (CONFIG_SECURITY_SELINUX)
+/*
+ * Compare the current task's SELinux SID against the SID of the expected
+ * security context string s_ctx.
+ */
+static int check_proc_selinux_access(const char * s_ctx)
+{
+ if (s_ctx == NULL) {
+ tloge("bad params\n");
+ return CHECK_ACCESS_FAIL;
+ }
+
+ int rc;
+ u32 sid;
+ u32 tid;
+ 
u32 s_ctx_len = strnlen(s_ctx, MAX_SCTX_LEN);
+ if (s_ctx_len == 0 || s_ctx_len >= MAX_SCTX_LEN) {
+ tloge("invalid selinux ctx\n");
+ return CHECK_ACCESS_FAIL;
+ }
+
+ security_task_getsecid(current, &sid);
+ rc = security_secctx_to_secid(s_ctx, s_ctx_len, &tid);
+ if (rc != 0) {
+ tloge("secctx to sid failed, rc %d", rc);
+ return CHECK_ACCESS_FAIL;
+ }
+ if (sid != tid) {
+ tloge("check selinux label failed\n");
+ return CHECK_ACCESS_FAIL;
+ }
+
+ return EOK;
+}
+#else
+/* SELinux auth disabled: always pass. */
+static int check_proc_selinux_access(const char * s_ctx)
+{
+ (void)s_ctx;
+ return 0;
+}
+#endif
+
+/* Fetch the real UID of the current task. */
+static int get_proc_uid(uid_t *proc_uid)
+{
+#ifdef CONFIG_LIBLINUX
+ if (current->cred == NULL) {
+ tloge("cred is NULL\n");
+ return CHECK_ACCESS_FAIL;
+ }
+ *proc_uid = current->cred->uid.val;
+#else
+ const struct cred *cred = NULL;
+ get_task_struct(current);
+ cred = koadpt_get_task_cred(current);
+ if (cred == NULL) {
+ tloge("cred is NULL\n");
+ put_task_struct(current);
+ return CHECK_ACCESS_FAIL;
+ }
+
+ *proc_uid = cred->uid.val;
+ put_cred(cred);
+ put_task_struct(current);
+#endif
+ return CHECK_ACCESS_SUCC;
+}
+
+/*
+ * Verify the current task against an expected "<exe path>:<uid>" string.
+ * Returns CHECK_ACCESS_SUCC on match, ENTER_BYPASS_CHANNEL on mismatch,
+ * CHECK_ACCESS_FAIL on internal errors.
+ */
+static int check_proc_uid_path(const char *auth_ctx)
+{
+ int ret = 0;
+ char str_path_uid[MAX_PATH_SIZE] = { 0 };
+ char *pro_dpath = NULL;
+ char *k_path = NULL;
+ u32 auth_ctx_len;
+ uid_t proc_uid;
+
+ if (auth_ctx == NULL) {
+ tloge("bad params\n");
+ return CHECK_ACCESS_FAIL;
+ }
+
+ auth_ctx_len = (u32)strnlen(auth_ctx, MAX_PATH_SIZE);
+ if (auth_ctx_len == 0 || auth_ctx_len >= MAX_PATH_SIZE) {
+ tloge("invalid uid path\n");
+ return CHECK_ACCESS_FAIL;
+ }
+
+ k_path = kmalloc(MAX_PATH_SIZE, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)k_path)) {
+ tloge("path kmalloc fail\n");
+ return CHECK_ACCESS_FAIL;
+ }
+
+ pro_dpath = get_proc_dpath(k_path, MAX_PATH_SIZE);
+ if (IS_ERR_OR_NULL(pro_dpath)) {
+ kfree(k_path);
+ return CHECK_ACCESS_FAIL;
+ }
+
+ ret = get_proc_uid(&proc_uid);
+ if (ret != CHECK_ACCESS_SUCC) {
+ tloge("get proc uid failed\n");
+ goto clean;
+ }
+
+ /* NOTE(review): on snprintf_s failure the log prints "ret", which is
+ * still CHECK_ACCESS_SUCC here — the snprintf_s status is discarded. */
+ if (snprintf_s(str_path_uid, MAX_PATH_SIZE, MAX_PATH_SIZE - 1, "%s:%u",
+ pro_dpath, (unsigned int)proc_uid) < 0) {
+ tloge("snprintf_s path uid failed, ret %d\n", ret);
+ ret = CHECK_ACCESS_FAIL;
+ goto clean;
+ }
+
+ if (strnlen(str_path_uid, MAX_PATH_SIZE) != auth_ctx_len || strncmp(str_path_uid, auth_ctx, auth_ctx_len) != 0)
+ ret = ENTER_BYPASS_CHANNEL;
+ else
+ ret = CHECK_ACCESS_SUCC;
+
+clean:
+ kfree(k_path);
+ return ret;
+}
+
+#ifdef CONFIG_CADAEMON_AUTH
+/* Authenticate the OH cadaemon by exe path/uid, then SELinux label. */
+int check_cadaemon_auth(void)
+{
+ int ret = check_proc_uid_path(CADAEMON_PATH_UID_AUTH_CTX);
+ if (ret != 0) {
+ tloge("check cadaemon path failed, ret %d\n", ret);
+ return ret;
+ }
+ ret = check_proc_selinux_access(SELINUX_CADAEMON_LABEL);
+ if (ret != 0) {
+ tloge("check cadaemon selinux label failed!, ret %d\n", ret);
+ return -EACCES;
+ }
+ return 0;
+}
+#endif
+
+/* Authenticate the CA HIDL service by exe path/uid and SELinux label. */
+int check_hidl_auth(void)
+{
+ int ret = check_proc_uid_path(CA_HIDL_PATH_UID_AUTH_CTX);
+ if (ret != CHECK_ACCESS_SUCC)
+ return ret;
+
+#if defined(CONFIG_SELINUX_AUTH_ENABLE) && defined (CONFIG_SECURITY_SELINUX)
+ ret = check_proc_selinux_access(SELINUX_CA_HIDL_LABEL);
+ if (ret != EOK) {
+ tloge("check hidl selinux label failed, ret %d\n", ret);
+ return CHECK_SECLABEL_FAIL;
+ }
+#endif
+
+ return CHECK_ACCESS_SUCC;
+}
+
+#ifdef CONFIG_TEECD_AUTH
+/* Authenticate teecd by exe path/uid and (optionally) SELinux label. */
+int check_teecd_auth(void)
+{
+ int ret = check_proc_uid_path(TEECD_PATH_UID_AUTH_CTX);
+ if (ret != 0) {
+ tloge("check teecd path failed, ret %d\n", ret);
+ return ret;
+ }
+
+#if defined(CONFIG_SELINUX_AUTH_ENABLE) && defined (CONFIG_SECURITY_SELINUX)
+ ret = check_proc_selinux_access(SELINUX_TEECD_LABEL);
+ if (ret != 0) {
+ tloge("check teecd selinux label failed!, ret %d\n", ret);
+ return -EACCES;
+ }
+#endif
+ return CHECK_ACCESS_SUCC;
+}
+#endif
\ No newline at end of file
diff --git a/tzdriver/auth/auth_base_impl.h b/tzdriver/auth/auth_base_impl.h
new file mode 100644
index 0000000000000000000000000000000000000000..8fbd7f44bf76a634ee66ee411997d9101f0140e2
---
/dev/null +++ b/tzdriver/auth/auth_base_impl.h @@ -0,0 +1,102 @@ +/* + * auth_base_impl.h + * + * function definition for base hash operation + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef AUTH_BASE_IMPL_H +#define AUTH_BASE_IMPL_H + +#ifndef SELINUX_CA_HIDL_LABEL +#define SELINUX_CA_HIDL_LABEL "" +#endif + +#ifndef SELINUX_TEECD_LABEL +#define SELINUX_TEECD_LABEL "" +#endif + +#ifndef CA_HIDL_PATH_UID_AUTH_CTX +#define CA_HIDL_PATH_UID_AUTH_CTX "" +#endif + +#ifndef TEECD_PATH_UID_AUTH_CTX +#define TEECD_PATH_UID_AUTH_CTX "" +#endif + +#ifndef CADAEMON_PATH_UID_AUTH_CTX +#define CADAEMON_PATH_UID_AUTH_CTX "" +#endif + +#if ((defined CONFIG_CLIENT_AUTH) || (defined CONFIG_TEECD_AUTH)) +#include +#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE) +#include +#endif +#include +#include + +#define CHECK_ACCESS_SUCC 0 +#define CHECK_ACCESS_FAIL 0xffff +#define CHECK_PATH_HASH_FAIL 0xff01 +#define CHECK_SECLABEL_FAIL 0xff02 +#define CHECK_CODE_HASH_FAIL 0xff03 +#define ENTER_BYPASS_CHANNEL 0xff04 + +#define BUF_MAX_SIZE 1024 +#define MAX_PATH_SIZE 512 +#define SHA256_DIGEST_LENTH 32 +#define MAX_SCTX_LEN 128 + +struct sdesc { + struct shash_desc shash; + char ctx[]; +}; + +int calc_path_hash(bool is_hidl_srvc, unsigned char *digest, unsigned int dig_len); +int calc_task_hash(unsigned char *digest, uint32_t dig_len, + struct task_struct *cur_struct, uint32_t pub_key_len); + +int tee_init_shash_handle(char *hash_type); +void free_shash_handle(void); +struct crypto_shash 
*get_shash_handle(void); + +void init_crypto_hash_lock(void); +void mutex_crypto_hash_lock(void); +void mutex_crypto_hash_unlock(void); +int check_hidl_auth(void); +int check_teecd_auth(void); +#else + +static inline void free_shash_handle(void) +{ + return; +} + +static void init_crypto_hash_lock(void) +{ + return; +} + +static inline int check_teecd_auth(void) +{ + return 0; +} + +#endif /* CLIENT_AUTH || TEECD_AUTH */ + +#ifdef CONFIG_CADAEMON_AUTH +int check_cadaemon_auth(void); +#endif + +#endif + diff --git a/tzdriver/auth/client_hash_auth.c b/tzdriver/auth/client_hash_auth.c new file mode 100644 index 0000000000000000000000000000000000000000..819a3892e5b422062eed059c62ce9d0c7ed07c66 --- /dev/null +++ b/tzdriver/auth/client_hash_auth.c @@ -0,0 +1,595 @@ +/* + * client_hash_auth.c + * + * function for CA code hash auth + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include "client_hash_auth.h" +#include +#include +#include +#include +#ifdef CONFIG_AUTH_SUPPORT_UNAME +#include +#endif +#ifdef CONFIG_CLIENT_AUTH +#include +#include +#include +#include +#include +#include +#include +#endif +#ifdef CONFIG_AUTH_HASH +#include +#endif +#include + +#include "tc_ns_log.h" +#include "auth_base_impl.h" + +#ifdef CONFIG_AUTH_HASH +#define SHA256_DIGEST_LENGTH 32 +#define FIXED_PKG_NAME_LENGTH 256 +struct sdesc_hash { + struct shash_desc shash; + char ctx[]; +}; +#endif + +#if defined (CONFIG_ANDROID_HIDL) || defined (CONFIG_MDC_HAL_AUTH) + +static int check_proc_state(bool is_hidl, struct task_struct **hidl_struct, + const struct tc_ns_client_context *context) +{ + bool check_value = false; + + if (is_hidl) { + rcu_read_lock(); + *hidl_struct = pid_task(find_vpid(context->calling_pid), + PIDTYPE_PID); + check_value = !*hidl_struct || + (*hidl_struct)->state == TASK_DEAD; + if (check_value) { + tloge("task is dead\n"); + rcu_read_unlock(); + return -EFAULT; + } + + get_task_struct(*hidl_struct); + rcu_read_unlock(); + return EOK; + } + + return EOK; +} + +static int get_hidl_client_task(bool is_hidl_task, struct tc_ns_client_context *context, + struct task_struct **cur_struct) +{ + int ret; + struct task_struct *hidl_struct = NULL; + + ret = check_proc_state(is_hidl_task, &hidl_struct, context); + if (ret) + return ret; + + if (hidl_struct) + *cur_struct = hidl_struct; + else + *cur_struct = current; + + return EOK; +} + +#endif + +#ifdef CONFIG_CLIENT_AUTH +#define LIBTEEC_CODE_PAGE_SIZE 8 +#define DEFAULT_TEXT_OFF 0 +#define LIBTEEC_NAME_MAX_LEN 50 + +const char g_libso[KIND_OF_SO][LIBTEEC_NAME_MAX_LEN] = { + "libteec_vendor.so", +#ifndef CONFIG_CMS_CAHASH_AUTH +#ifndef CONFIG_CADAEMON_AUTH + "libteec.huawei.so", +#else + "libteec.so", +#endif +#endif +}; + +static int find_lib_code_area(struct mm_struct *mm, + struct vm_area_struct **lib_code_area, int so_index) +{ + struct vm_area_struct *vma = NULL; + bool is_valid_vma = 
false; + bool is_so_exists = false; + bool param_check = (!mm || !mm->mmap || + !lib_code_area || so_index >= KIND_OF_SO); + + if (param_check) { + tloge("illegal input params\n"); + return -EFAULT; + } + for (vma = mm->mmap; vma; vma = vma->vm_next) { + is_valid_vma = (vma->vm_file && + vma->vm_file->f_path.dentry && + vma->vm_file->f_path.dentry->d_name.name); + if (is_valid_vma) { + is_so_exists = !strcmp(g_libso[so_index], + vma->vm_file->f_path.dentry->d_name.name); + if (is_so_exists && (vma->vm_flags & VM_EXEC)) { + *lib_code_area = vma; + tlogd("so name is %s\n", + vma->vm_file->f_path.dentry->d_name.name); + return EOK; + } + } + } + return -EFAULT; +} + +struct get_code_info { + unsigned long code_start; + unsigned long code_end; + unsigned long code_size; +}; +static int update_so_hash(struct mm_struct *mm, + struct task_struct *cur_struct, struct shash_desc *shash, int so_index) +{ + struct vm_area_struct *vma = NULL; + int rc = -EFAULT; + struct get_code_info code_info; + unsigned long in_size; + struct page *ptr_page = NULL; + void *ptr_base = NULL; + + if (find_lib_code_area(mm, &vma, so_index)) { + tlogd("get lib code vma area failed\n"); + return -EFAULT; + } + + code_info.code_start = vma->vm_start; + code_info.code_end = vma->vm_end; + code_info.code_size = code_info.code_end - code_info.code_start; + + while (code_info.code_start < code_info.code_end) { + // Get a handle of the page we want to read +#if (KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE) + rc = get_user_pages_remote(mm, code_info.code_start, + 1, FOLL_FORCE, &ptr_page, NULL, NULL); +#else + rc = get_user_pages_remote(cur_struct, mm, code_info.code_start, + 1, FOLL_FORCE, &ptr_page, NULL, NULL); +#endif + if (rc != 1) { + tloge("get user pages locked error[0x%x]\n", rc); + rc = -EFAULT; + break; + } + + ptr_base = kmap_atomic(ptr_page); + if (!ptr_base) { + rc = -EFAULT; + put_page(ptr_page); + break; + } + in_size = (code_info.code_size > PAGE_SIZE) ? 
PAGE_SIZE : code_info.code_size; + + rc = crypto_shash_update(shash, ptr_base, in_size); + if (rc) { + kunmap_atomic(ptr_base); + put_page(ptr_page); + break; + } + kunmap_atomic(ptr_base); + put_page(ptr_page); + code_info.code_start += in_size; + code_info.code_size = code_info.code_end - code_info.code_start; + } + return rc; +} + +/* Calculate the SHA256 library digest */ +static int calc_task_so_hash(unsigned char *digest, uint32_t dig_len, + struct task_struct *cur_struct, int so_index) +{ + struct mm_struct *mm = NULL; + int rc; + size_t size; + size_t shash_size; + struct sdesc *desc = NULL; + + if (!digest || dig_len != SHA256_DIGEST_LENTH) { + tloge("tee hash: digest is NULL\n"); + return -EFAULT; + } + + shash_size = crypto_shash_descsize(get_shash_handle()); + size = sizeof(desc->shash) + shash_size; + if (size < sizeof(desc->shash) || size < shash_size) { + tloge("size overflow\n"); + return -ENOMEM; + } + + desc = kzalloc(size, GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)desc)) { + tloge("alloc desc failed\n"); + return -ENOMEM; + } + + desc->shash.tfm = get_shash_handle(); + if (crypto_shash_init(&desc->shash)) { + kfree(desc); + return -EFAULT; + } + + mm = get_task_mm(cur_struct); + if (!mm) { + tloge("so does not have mm struct\n"); + if (memset_s(digest, MAX_SHA_256_SZ, 0, dig_len)) + tloge("memset digest failed\n"); + kfree(desc); + return -EFAULT; + } + + down_read(&mm_sem_lock(mm)); + rc = update_so_hash(mm, cur_struct, &desc->shash, so_index); + up_read(&mm_sem_lock(mm)); + mmput(mm); + if (!rc) + rc = crypto_shash_final(&desc->shash, digest); + kfree(desc); + return rc; +} + +static int proc_calc_hash(uint8_t kernel_api, struct tc_ns_session *session, + struct task_struct *cur_struct, uint32_t pub_key_len) +{ + int rc, i; + int so_found = 0; + + mutex_crypto_hash_lock(); + if (kernel_api == TEE_REQ_FROM_USER_MODE) { + for (i = 0; so_found < NUM_OF_SO && i < KIND_OF_SO; i++) { + rc = calc_task_so_hash(session->auth_hash_buf 
+ MAX_SHA_256_SZ * so_found, + (uint32_t)SHA256_DIGEST_LENTH, cur_struct, i); + if (!rc) + so_found++; + } + if (so_found != NUM_OF_SO) + tlogd("so library found: %d\n", so_found); + } else { + tlogd("request from kernel\n"); + } + +#ifdef CONFIG_ASAN_DEBUG + tloge("so auth disabled for ASAN debug\n"); + uint32_t so_hash_len = MAX_SHA_256_SZ * NUM_OF_SO; + errno_t sret = memset_s(session->auth_hash_buf, so_hash_len, 0, so_hash_len); + if (sret) { + mutex_crypto_hash_unlock(); + tloge("memset so hash failed\n"); + return -EFAULT; + } +#endif + + rc = calc_task_hash(session->auth_hash_buf + MAX_SHA_256_SZ * NUM_OF_SO, + (uint32_t)SHA256_DIGEST_LENTH, cur_struct, pub_key_len); + if (rc) { + mutex_crypto_hash_unlock(); + tloge("tee calc ca hash failed\n"); + return -EFAULT; + } + mutex_crypto_hash_unlock(); + return EOK; +} + +int calc_client_auth_hash(struct tc_ns_dev_file *dev_file, + struct tc_ns_client_context *context, struct tc_ns_session *session) +{ + int ret; + struct task_struct *cur_struct = NULL; + bool check = false; +#if defined(CONFIG_ANDROID_HIDL) || defined(CONFIG_MDC_HAL_AUTH) + bool is_hidl_srvc = false; +#endif + check = (!dev_file || !context || !session); + if (check) { + tloge("bad params\n"); + return -EFAULT; + } + + if (tee_init_shash_handle("sha256")) { + tloge("init code hash error\n"); + return -EFAULT; + } + +#if defined(CONFIG_ANDROID_HIDL) || defined(CONFIG_MDC_HAL_AUTH) + if(!current->mm) { + tlogd("kernel thread need not check\n"); + ret = ENTER_BYPASS_CHANNEL; + } else { +#ifdef CONFIG_CADAEMON_AUTH + /* for OH */ + ret = check_cadaemon_auth(); +#else + /* for HO and MDC/DC */ + ret = check_hidl_auth(); +#endif + } + if (ret != CHECK_ACCESS_SUCC) { + if (ret != ENTER_BYPASS_CHANNEL) { + tloge("hidl service may be exploited ret 0x%x\n", ret); + return -EACCES; + } + /* native\kernel ca task this branch */ + } else { + /* android hidl\mdc secmgr(libteec\kms) task this branch */ + is_hidl_srvc = true; + } + ret = 
get_hidl_client_task(is_hidl_srvc, context, &cur_struct); + if (ret) + return -EFAULT; +#else + cur_struct = current; +#endif + + ret = proc_calc_hash(dev_file->kernel_api, session, cur_struct, dev_file->pub_key_len); +#if defined(CONFIG_ANDROID_HIDL) || defined(CONFIG_MDC_HAL_AUTH) + if (is_hidl_srvc) + put_task_struct(cur_struct); +#endif + return ret; +} +#endif + +#ifdef CONFIG_AUTH_HASH +#define UID_LEN 16 +static int construct_hashdata(struct tc_ns_dev_file *dev_file, + uint8_t *buf, uint32_t buf_len) +{ + int ret; + ret = memcpy_s(buf, buf_len, dev_file->pkg_name, dev_file->pkg_name_len); + if (ret) { + tloge("memcpy_s failed\n"); + goto error; + } + buf += dev_file->pkg_name_len; + buf_len -= dev_file->pkg_name_len; + ret = memcpy_s(buf, buf_len, dev_file->pub_key, dev_file->pub_key_len); + if (ret) { + tloge("memcpy_s failed\n"); + goto error; + } + return 0; +error: + return -EFAULT; +} + +static struct sdesc_hash *init_sdesc(struct crypto_shash *alg) +{ + struct sdesc_hash *sdesc; + size_t size; + + size = sizeof(struct shash_desc) + crypto_shash_descsize(alg); + sdesc = kmalloc(size, GFP_KERNEL); + if (sdesc == NULL) + return ERR_PTR(-ENOMEM); + sdesc->shash.tfm = alg; + return sdesc; +} + +static int calc_hash(struct crypto_shash *alg, + const unsigned char *data, unsigned int datalen, unsigned char *digest) +{ + struct sdesc_hash *sdesc; + int ret; + + sdesc = init_sdesc(alg); + if (IS_ERR(sdesc)) { + pr_info("can't alloc sdesc\n"); + return PTR_ERR(sdesc); + } + + ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest); + kfree(sdesc); + return ret; +} + +static int do_sha256(const unsigned char *data, uint32_t datalen, + unsigned char *out_digest, uint8_t digest_len) +{ + int ret; + struct crypto_shash *alg; + const char *hash_alg_name = "sha256"; + if (digest_len != SHA256_DIGEST_LENGTH) { + tloge("error digest_len\n"); + return -1; + } + + alg = crypto_alloc_shash(hash_alg_name, 0, 0); + if(IS_ERR_OR_NULL(alg)) { + tloge("can't alloc alg 
%s, PTR_ERR alg is %ld\n", hash_alg_name, PTR_ERR(alg)); + return PTR_ERR(alg); + } + ret = calc_hash(alg, data, datalen, out_digest); + if (ret != 0) { + tloge("calc hash failed\n"); + crypto_free_shash(alg); + alg = NULL; + return -1; + } + crypto_free_shash(alg); + alg = NULL; + return 0; +} + +int set_login_information_hash(struct tc_ns_dev_file *hash_dev_file) +{ + int ret = 0; + uint8_t *indata = NULL; + if (hash_dev_file == NULL) { + tloge("wrong caller info, cal hash stopped\n"); + return -1; + } + mutex_lock(&hash_dev_file->cainfo_hash_setup_lock); + + if (!(hash_dev_file->cainfo_hash_setup)) { + unsigned char digest[SHA256_DIGEST_LENGTH] = {0}; + uint8_t digest_len = sizeof(digest); + + uint32_t indata_len; +#ifdef CONFIG_AUTH_SUPPORT_UNAME + /* username using fixed length to cal hash */ + if (hash_dev_file->pub_key_len >= FIXED_PKG_NAME_LENGTH) { + tloge("username is too loog\n"); + ret = -1; + goto error; + } + indata_len = hash_dev_file->pkg_name_len + FIXED_PKG_NAME_LENGTH; +#else + indata_len = hash_dev_file->pkg_name_len + hash_dev_file->pub_key_len; +#endif + indata = kzalloc(indata_len, GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)indata)) { + tloge("indata kmalloc fail\n"); + ret = -1; + goto error; + } + + ret = construct_hashdata(hash_dev_file, indata, indata_len); + if (ret != 0) { + tloge("construct hashdata failed\n"); + goto error; + } + + ret = do_sha256((unsigned char *)indata, indata_len, digest, digest_len); + if (ret != 0) { + tloge("do sha256 failed\n"); + goto error; + } + + ret = memcpy_s(hash_dev_file->pkg_name, MAX_PACKAGE_NAME_LEN, digest, digest_len); + if (ret != 0) { + tloge("memcpy_s failed\n"); + goto error; + } + hash_dev_file->pkg_name_len = SHA256_DIGEST_LENGTH; + hash_dev_file->cainfo_hash_setup = true; + } + +error: + if (!ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)indata)) + kfree(indata); + + mutex_unlock(&hash_dev_file->cainfo_hash_setup_lock); + return ret; +} +#endif + +#ifdef 
CONFIG_AUTH_SUPPORT_UNAME +#define PASSWD_FILE "/etc/passwd" +#define UID_POS 2U +#define DECIMAL 10 +static int uid_compare(uint32_t uid, const char* uid_str, uint32_t uid_len) +{ + uint32_t uid_num = 0; + for (uint32_t i = 0; i < uid_len; i++) { + bool is_number = uid_str[i] >= '0' && uid_str[i] <= '9'; + if (!is_number) { + tloge("passwd info wrong format: uid missing\n"); + return -1; + } + uid_num = DECIMAL * uid_num + (uid_str[i] - '0'); + } + return (uid_num == uid) ? 0 : -1; +} + +/* "username:[encrypted password]:uid:gid:[comments]:home directory:login shell" */ +static uint32_t parse_uname(uint32_t uid, char *username, int buffer_len) +{ + char *str = username; + char *token = strsep(&str, ":"); + char *temp_name = token; // first tokon is username, need to check uid + int index = 0; + while(token != NULL && index < UID_POS) { + token = strsep(&str, ":"); + index++; + } + if (token == NULL) + return -1; + if (uid_compare(uid, token, strlen(token)) != 0) + return -1; + if (strcpy_s(username, buffer_len, temp_name) != EOK) + return -1; + return strlen(temp_name); +} +static int read_line(char *buf, int buf_len, struct file *fp, loff_t *offset) +{ + if (offset == NULL) { + tloge("offset is null while read file\n"); + return -1; + } + ssize_t ret = kernel_read(fp, buf, buf_len, offset); + if (ret < 0) + return -1; + ssize_t i = 0; + /* read buf_len, need to find first '\n' */ + while (i < ret) { + if (i >= buf_len) + break; + if (buf[i] == '\n') + break; + i++; + } + if (i < ret) + *offset -= (loff_t)(ret - i); + if (i < buf_len) + buf[i] = '\0'; + return 0; +} + +/* get username by uid, +* on linux, user info is stored in system file "/etc/passwd", +* each line represents a user, fields are separated by ':', +* formatted as such: "username:[encrypted password]:uid:gid:[comments]:home directory:login shell" +*/ +int tc_ns_get_uname(uint32_t uid, char *username, int buffer_len, uint32_t *out_len) +{ + if (username == NULL || out_len == NULL || buffer_len != 
FIXED_PKG_NAME_LENGTH) { + tloge("params is null\n"); + return -1; + } + struct file *f = NULL; + loff_t offset = 0; + f = filp_open(PASSWD_FILE, O_RDONLY, 0); + if (IS_ERR(f)) { + tloge("kernel open passwd file failed\n"); + return -1; + } + while (read_line(username, buffer_len, f, &offset) == 0) { + uint32_t ret = parse_uname(uid, username, buffer_len); + if (ret >= 0) { + *out_len = ret; + filp_close(f, NULL); + return 0; + } + } + filp_close(f, NULL); + return -1; +} +#endif \ No newline at end of file diff --git a/tzdriver/auth/client_hash_auth.h b/tzdriver/auth/client_hash_auth.h new file mode 100644 index 0000000000000000000000000000000000000000..f8b0d724c6dd6f4b86825bf54b0c3a9108c5a95a --- /dev/null +++ b/tzdriver/auth/client_hash_auth.h @@ -0,0 +1,53 @@ +/* + * client_hash_auth.h + * + * function definition for CA code hash auth + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef CLIENT_HASH_CALC_H +#define CLIENT_HASH_CALC_H + +#include "tc_ns_client.h" +#include "teek_ns_client.h" + +#ifdef CONFIG_CLIENT_AUTH +#include "auth_base_impl.h" + +int calc_client_auth_hash(struct tc_ns_dev_file *dev_file, + struct tc_ns_client_context *context, struct tc_ns_session *session); + +#else + +static inline int calc_client_auth_hash(struct tc_ns_dev_file *dev_file, + struct tc_ns_client_context *context, struct tc_ns_session *session) +{ + (void)dev_file; + (void)context; + (void)session; + return 0; +} + +#endif + +#ifdef CONFIG_AUTH_SUPPORT_UNAME +#define MAX_NAME_LENGTH 256 + +int tc_ns_get_uname(uint32_t uid, char *username, int buffer_len, uint32_t *out_len); +#endif + +#ifdef CONFIG_AUTH_HASH +int set_login_information_hash(struct tc_ns_dev_file *hash_dev_file); +#endif + +#endif diff --git a/tzdriver/core/Kconfig b/tzdriver/core/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..1903c3e2dc8ee7f0d83637d2117614efef831a63 --- /dev/null +++ b/tzdriver/core/Kconfig @@ -0,0 +1,70 @@ +# Framework Configuration +config CPU_AFF_NR + int "Default Cpu Affinity" + default 0 + depends on TZDRIVER + help + Default Cpu Affinity + +config DRM_ADAPT + bool "Drm Feature Adapt" + default n + depends on TZDRIVER + help + Drm Feature Adapt + +config TA_AFFINITY + bool "TA affinity" + default n + depends on TZDRIVER + help + TA Cpu Affinity bind range, consistent with CONFIG_MAX_NUM_NODES in TEE + +config TA_AFFINITY_CPU_NUMS + int "TA affinity max support cpus" + default 8 + depends on TA_AFFINITY + help + consistent with CONFIG_MAX_NUM_NODES in TEE + +config TEECD_AUTH + bool "Teec Daemon Path Hash Auth" + default n + depends on TZDRIVER + help + TEEOS TEECD path hash auth + +config TEE_AUDIT + bool "Audit TA" + default n + depends on AUTH_ENHANCE + help + Audit TA in case of evil TA + +config KERNEL_CLIENT + bool "Kernel Client Interface" + default n + depends on TZDRIVER + help + Kernel Client Interface + +config 
BIG_SESSION + bool "open more sessions" + default n + depends on TZDRIVER + help + TEEOS open more sessions + +config FFA_SUPPORT + bool "FFA Support Enable" + default n + depends on TZDRIVER + help + FFA Support Enable + +config THIRDPARTY_COMPATIBLE + bool "Compatible with OPTEE" + default n + depends on TZDRIVER + help + Compatible with OPTEE diff --git a/tzdriver/core/Makefile b/tzdriver/core/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..6edab430d76f0c20e37d74373fa1cf19e988f134 --- /dev/null +++ b/tzdriver/core/Makefile @@ -0,0 +1,32 @@ +KERNEL_DIR := $(srctree) + +ifneq ($(TARGET_BUILD_VARIANT), user) + ccflags-y += -DDEF_ENG +endif + +EXTRA_CFLAGS += -I$(KERNEL_DIR)/../../../../third_party/bounds_checking_function/include +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/agent_rpmb/core +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/auth +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/tlogger +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/tui +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/ion +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/core +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/include + +ifeq ($(CONFIG_TZDRIVER_INTERNAL), y) + include $(KERNEL_DIR)/drivers/tzdriver/tzdriver_internal/internal.mk +endif + +obj-y += teek_client_api.o +obj-y += smc_smp.o tc_client_driver.o session_manager.o mailbox_mempool.o teek_app_load.o +obj-y += agent.o gp_ops.o mem.o cmdmonitor.o tzdebug.o tz_spi_notify.o tz_pm.o tee_compat_check.o +obj-y += reserved_mempool.o +obj-y += teek_client_ext.o +obj-y += shared_mem.o + +ifdef CONFIG_FFA_SUPPORT +obj-y += ffa_abi.o +else +obj-y += smc_abi.o +endif diff --git a/tzdriver/core/agent.c b/tzdriver/core/agent.c new file mode 100644 index 0000000000000000000000000000000000000000..57fa5d070e25954b3399505556e77d808aa0fbde --- /dev/null +++ b/tzdriver/core/agent.c @@ -0,0 +1,1368 @@ +/* + * Copyright (C) 2022 Huawei Technologies 
Co., Ltd. + * Decription: agent manager function, such as register and send cmd + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include "agent.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE) +#include +#include +#endif +#if (KERNEL_VERSION(5, 4, 0) <= LINUX_VERSION_CODE) +#include +#else +#include +#endif +#include +#include +#ifdef CONFIG_MM_VLTMM +#include +#endif +#ifdef CONFIG_MEMORY_VLTMM +#include +#endif +#include "teek_client_constants.h" +#include "teek_ns_client.h" +#include "smc_smp.h" +#include "mem.h" +#include "tui.h" +#include "tc_ns_log.h" +#include "mailbox_mempool.h" +#include "tc_client_driver.h" +#include "cmdmonitor.h" +#include "agent_rpmb.h" +#include "ko_adapt.h" +#include "internal_functions.h" +#include "auth_base_impl.h" + +#ifdef CONFIG_CMS_CAHASH_AUTH +#define HASH_FILE_MAX_SIZE CONFIG_HASH_FILE_SIZE +#else +#define HASH_FILE_MAX_SIZE (16 * 1024) +#endif +#define AGENT_BUFF_SIZE (4 * 1024) +#define AGENT_MAX 32 +#define PAGE_ORDER_RATIO 2 + +static struct list_head g_tee_agent_list; + +struct agent_control { + spinlock_t lock; + struct list_head agent_list; +}; + +static struct agent_control g_agent_control; + +int __attribute__((weak)) is_allowed_agent_ca(const struct ca_info *ca, + bool check_agent_id) +{ + (void)ca; + (void)check_agent_id; + + return -EFAULT; +} + +static int check_mm_struct(struct mm_struct *mm) +{ + if (!mm) + return -EINVAL; + + if (!mm->exe_file) { + 
mmput(mm); + return -EINVAL; + } + + return 0; +} + +#ifdef CONFIG_LIBLINUX +char *get_proc_dpath(char *path, int path_len) +{ + int rc; + char cmdstring[MAX_PATH_SIZE] = { 0 }; + + if (!path || path_len != MAX_PATH_SIZE) { + tloge("bad params\n"); + return NULL; + } + + if (memset_s(path, path_len, '\0', MAX_PATH_SIZE) != 0) { + tloge("memset error\n"); + return NULL; + } + + rc = sprintf_s(cmdstring, MAX_PATH_SIZE, "/proc/%d/exe", current->tgid); + if (rc < 0) { + tloge("set path in get proc dpath failed\n"); + return NULL; + } + + if (liblinux_pal_vfs_readlink(cmdstring, path, MAX_PATH_SIZE) == 0) { + tloge("get CA realpath in get proc dpath failed\n"); + return NULL; + } + + return path; +} +#else +char *get_proc_dpath(char *path, int path_len) +{ + char *dpath = NULL; + struct path base_path = { + .mnt = NULL, + .dentry = NULL + }; + struct mm_struct *mm = NULL; + struct file *exe_file = NULL; + + if (!path || path_len != MAX_PATH_SIZE) { + tloge("bad params\n"); + return NULL; + } + + if (memset_s(path, path_len, '\0', MAX_PATH_SIZE) != 0) { + tloge("memset error\n"); + return NULL; + } + + mm = get_task_mm(current); + if (check_mm_struct(mm) != 0) { + tloge("check mm_struct failed\n"); + return NULL; + } +#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE) + exe_file = mm->exe_file; +#else + exe_file = get_mm_exe_file(mm); +#endif + if (!exe_file) { + mmput(mm); + return NULL; + } + + base_path = exe_file->f_path; + path_get(&base_path); + dpath = d_path(&base_path, path, MAX_PATH_SIZE); + path_put(&base_path); +#if (KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE) + fput(exe_file); +#endif + mmput(mm); + + return dpath; +} +#endif + +static int get_ca_path_and_uid(struct ca_info *ca) +{ + char *path = NULL; + const struct cred *cred = NULL; + int message_size; + char *tpath = NULL; + + tpath = kmalloc(MAX_PATH_SIZE, GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)tpath)) { + tloge("tpath kmalloc fail\n"); + return -ENOMEM; + } + + path = 
get_proc_dpath(tpath, MAX_PATH_SIZE); + if (IS_ERR_OR_NULL(path)) { + tloge("get process path failed\n"); + kfree(tpath); + return -ENOMEM; + } + + message_size = snprintf_s(ca->path, MAX_PATH_SIZE, + MAX_PATH_SIZE - 1, "%s", path); + if (message_size <= 0) { + tloge("pack path failed\n"); + kfree(tpath); + return -EFAULT; + } + + get_task_struct(current); + cred = koadpt_get_task_cred(current); + if (!cred) { + tloge("cred is NULL\n"); + kfree(tpath); + put_task_struct(current); + return -EACCES; + } + + ca->uid = cred->uid.val; + tlogd("ca_task->comm is %s, path is %s, ca uid is %u\n", + current->comm, path, cred->uid.val); + + put_cred(cred); + put_task_struct(current); + kfree(tpath); + return 0; +} + +int check_ext_agent_access(uint32_t agent_id) +{ + int ret; + struct ca_info agent_ca = { {0}, 0, 0 }; + + ret = get_ca_path_and_uid(&agent_ca); + if (ret != 0) { + tloge("get cp path or uid failed\n"); + return ret; + } + agent_ca.agent_id = agent_id; + + return is_allowed_agent_ca(&agent_ca, true); +} + +static int get_buf_len(const uint8_t *inbuf, uint32_t *buf_len) +{ + if (copy_from_user(buf_len, inbuf, sizeof(*buf_len))) { + tloge("copy from user failed\n"); + return -EFAULT; + } + + if (*buf_len > HASH_FILE_MAX_SIZE) { + tloge("ERROR: file size[0x%x] too big\n", *buf_len); + return -EFAULT; + } + + return 0; +} + +static int send_set_smc_cmd(struct mb_cmd_pack *mb_pack, + struct tc_ns_smc_cmd *smc_cmd, unsigned int cmd_id, + const uint8_t *buf_to_tee, uint32_t buf_len) +{ + int ret = 0; + + mb_pack->operation.paramtypes = TEE_PARAM_TYPE_VALUE_INPUT | + (TEE_PARAM_TYPE_VALUE_INPUT << TEE_PARAM_NUM); + mb_pack->operation.params[0].value.a = + (unsigned int)mailbox_virt_to_phys((uintptr_t)buf_to_tee); + mb_pack->operation.params[0].value.b = + (uint64_t)mailbox_virt_to_phys((uintptr_t)buf_to_tee) >> ADDR_TRANS_NUM; + mb_pack->operation.params[1].value.a = buf_len; + smc_cmd->cmd_type = CMD_TYPE_GLOBAL; + smc_cmd->cmd_id = cmd_id; + smc_cmd->operation_phys = 
mailbox_virt_to_phys((uintptr_t)&mb_pack->operation); + smc_cmd->operation_h_phys = + (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM; + if (tc_ns_smc(smc_cmd) != 0) { + ret = -EPERM; + tloge("set native hash failed\n"); + } + + return ret; +} + +int tc_ns_set_native_hash(unsigned long arg, unsigned int cmd_id) +{ + int ret; + struct tc_ns_smc_cmd smc_cmd = { {0}, 0 }; + uint8_t *inbuf = (uint8_t *)(uintptr_t)arg; + uint32_t buf_len = 0; + uint8_t *buf_to_tee = NULL; + struct mb_cmd_pack *mb_pack = NULL; + + ret = check_teecd_auth(); +#ifdef CONFIG_CADAEMON_AUTH + if (ret != 0) + ret = check_cadaemon_auth(); +#endif + if (ret != 0) { + tloge("teecd or cadaemon auth failed, ret %d\n", ret); + return -EACCES; + } + + if (!inbuf) + return -EINVAL; + + if (get_buf_len(inbuf, &buf_len) != 0) + return -EFAULT; + + buf_to_tee = mailbox_alloc(buf_len, 0); + if (!buf_to_tee) { + tloge("failed to alloc memory!\n"); + return -ENOMEM; + } + + if (copy_from_user(buf_to_tee, inbuf, buf_len)) { + tloge("copy from user failed\n"); + mailbox_free(buf_to_tee); + return -EFAULT; + } + + mb_pack = mailbox_alloc_cmd_pack(); + if (!mb_pack) { + tloge("alloc cmd pack failed\n"); + mailbox_free(buf_to_tee); + return -ENOMEM; + } + + ret = send_set_smc_cmd(mb_pack, &smc_cmd, cmd_id, buf_to_tee, buf_len); + mailbox_free(buf_to_tee); + mailbox_free(mb_pack); + + return ret; +} + +int tc_ns_late_init(unsigned long arg) +{ + int ret = 0; + struct tc_ns_smc_cmd smc_cmd = { {0}, 0 }; + uint32_t index = (uint32_t)arg; /* index is uint32, no truncate risk */ + struct mb_cmd_pack *mb_pack = NULL; + + mb_pack = mailbox_alloc_cmd_pack(); + if (!mb_pack) { + tloge("alloc cmd pack failed\n"); + return -ENOMEM; + } + + mb_pack->operation.paramtypes = TEE_PARAM_TYPE_VALUE_INPUT; + mb_pack->operation.params[0].value.a = index; + + smc_cmd.cmd_type = CMD_TYPE_GLOBAL; + smc_cmd.cmd_id = GLOBAL_CMD_ID_LATE_INIT; + smc_cmd.operation_phys = 
mailbox_virt_to_phys((uintptr_t)&mb_pack->operation); + smc_cmd.operation_h_phys = + (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM; + + if (tc_ns_smc(&smc_cmd)) { + ret = -EPERM; + tloge("late int failed\n"); + } + mailbox_free(mb_pack); + + return ret; +} + +void send_event_response_single(const struct tc_ns_dev_file *dev_file) +{ + struct smc_event_data *event_data = NULL; + struct smc_event_data *tmp = NULL; + unsigned long flags; + unsigned int agent_id = 0; + + if (!dev_file) + return; + + spin_lock_irqsave(&g_agent_control.lock, flags); + list_for_each_entry_safe(event_data, tmp, &g_agent_control.agent_list, + head) { + if (event_data->owner == dev_file) { + agent_id = event_data->agent_id; + break; + } + } + spin_unlock_irqrestore(&g_agent_control.lock, flags); + send_event_response(agent_id); + return; +} + +struct smc_event_data *find_event_control(unsigned int agent_id) +{ + struct smc_event_data *event_data = NULL; + struct smc_event_data *tmp_data = NULL; + unsigned long flags; + + spin_lock_irqsave(&g_agent_control.lock, flags); + list_for_each_entry(event_data, &g_agent_control.agent_list, head) { + if (event_data->agent_id == agent_id) { + tmp_data = event_data; + get_agent_event(event_data); + break; + } + } + spin_unlock_irqrestore(&g_agent_control.lock, flags); + + return tmp_data; +} + +static void unmap_agent_buffer(struct smc_event_data *event_data) +{ + if (!event_data) { + tloge("event data is NULL\n"); + return; + } + + if (IS_ERR_OR_NULL(event_data->agent_buff_user)) + return; + + if (vm_munmap((unsigned long)(uintptr_t)event_data->agent_buff_user, + event_data->agent_buff_size) != 0) + tloge("unmap failed\n"); + + event_data->agent_buff_user = NULL; +} + +static void free_event_control(unsigned int agent_id) +{ + struct smc_event_data *event_data = NULL; + struct smc_event_data *tmp_event = NULL; + unsigned long flags; + bool find = false; + + spin_lock_irqsave(&g_agent_control.lock, flags); + 
list_for_each_entry_safe(event_data, tmp_event, &g_agent_control.agent_list, head) { + if (event_data->agent_id == agent_id) { + list_del(&event_data->head); + find = true; + break; + } + } + spin_unlock_irqrestore(&g_agent_control.lock, flags); + + if (!find) + return; + + unmap_agent_buffer(event_data); + mailbox_free(event_data->agent_buff_kernel); + event_data->agent_buff_kernel = NULL; + put_agent_event(event_data); +} + +static int init_agent_context(unsigned int agent_id, + const struct tc_ns_smc_cmd *smc_cmd, + struct smc_event_data **event_data) +{ + *event_data = find_event_control(agent_id); + if (!(*event_data)) { + tloge("agent %u not exist\n", agent_id); + return -EINVAL; + } + tlogd("agent-0x%x: returning client command", agent_id); + + /* store tui working device for terminate tui when device is closed. */ + if (is_tui_agent(agent_id)) { + tloge("TEE_TUI_AGENT_ID: pid-%d", current->pid); + set_tui_caller_info(smc_cmd->dev_file_id, current->pid); + } + + isb(); + wmb(); + + return 0; +} + +static int wait_agent_response(struct smc_event_data *event_data) +{ + int ret = 0; + /* only userspace CA need freeze */ + bool need_freeze = !(current->flags & PF_KTHREAD); + bool sig_pending = !sigisemptyset(¤t->pending.signal); + bool answered = true; + int rc; + + do { + answered = true; + /* + * wait_event_freezable will be interrupted by signal and + * freezer which is called to free a userspace task in suspend. + * Freezing a task means wakeup a task by fake_signal_wake_up + * and let it have an opportunity to enter into 'refrigerator' + * by try_to_freeze used in wait_event_freezable. + * + * What scenes can be freezed ? + * 1. CA is waiting agent -> suspend -- OK + * 2. suspend -> CA start agent request -- OK + * 3. 
CA is waiting agent -> CA is killed -> suspend -- NOK + */ + if (need_freeze && !sig_pending) { + rc = wait_event_freezable(event_data->ca_pending_wq, + atomic_read(&event_data->ca_run)); + if (rc != -ERESTARTSYS) + continue; + if (!sigisemptyset(¤t->pending.signal)) + sig_pending = true; + tloge("agent wait event is interrupted by %s\n", + sig_pending ? "signal" : "freezer"); + /* + * When freezing a userspace task, fake_signal_wake_up + * only set TIF_SIGPENDING but not set a real signal. + * After task thawed, CA need wait agent response again + * so TIF_SIGPENDING need to be cleared. + */ + if (!sig_pending) + clear_thread_flag(TIF_SIGPENDING); + answered = false; + } else { + rc = wait_event_timeout(event_data->ca_pending_wq, + atomic_read(&event_data->ca_run), + (long)(RESLEEP_TIMEOUT * HZ)); + if (rc) + continue; + tloge("agent wait event is timeout\n"); + /* if no kill signal, just resleep before agent wake */ + if (!sigkill_pending(current)) { + answered = false; + } else { + tloge("CA is killed, no need to \ +wait agent response\n"); + event_data->ret_flag = 0; + ret = -EFAULT; + } + } + } while (!answered); + + return ret; +} + +int agent_process_work(const struct tc_ns_smc_cmd *smc_cmd, + unsigned int agent_id) +{ + struct smc_event_data *event_data = NULL; + int ret; + + if (!smc_cmd) { + tloge("smc_cmd is null\n"); + return -EINVAL; + } + + if (init_agent_context(agent_id, smc_cmd, &event_data)) + return -EINVAL; + + if (atomic_read(&event_data->agent_ready) == AGENT_CRASHED) { + tloge("agent 0x%x is killed and restarting\n", agent_id); + put_agent_event(event_data); + return -EFAULT; + } + + event_data->ret_flag = 1; + /* Wake up the agent that will process the command */ + tlogd("agent process work: wakeup the agent"); + wake_up(&event_data->wait_event_wq); + tlogd("agent 0x%x request, goto sleep, pe->run=%d\n", + agent_id, atomic_read(&event_data->ca_run)); + + ret = wait_agent_response(event_data); + atomic_set(&event_data->ca_run, 0); + 
put_agent_event(event_data); + + /* + * when agent work is done, reset cmd monitor time + * add agent call count, cause it's a new smc cmd. + */ + cmd_monitor_reset_context(); + return ret; +} + +int is_agent_alive(unsigned int agent_id) +{ + struct smc_event_data *event_data = NULL; + + event_data = find_event_control(agent_id); + if (event_data) { + put_agent_event(event_data); + return AGENT_ALIVE; + } + + return AGENT_DEAD; +} + +int tc_ns_wait_event(unsigned int agent_id) +{ + int ret = -EINVAL; + struct smc_event_data *event_data = NULL; + + tlogd("agent %u waits for command\n", agent_id); + + event_data = find_event_control(agent_id); + if (event_data) { + /* only when agent wait event, it's in ready state to work */ + atomic_set(&(event_data->agent_ready), AGENT_READY); + ret = wait_event_interruptible(event_data->wait_event_wq, event_data->ret_flag); + put_agent_event(event_data); + } + + return ret; +} + +int tc_ns_sync_sys_time(const struct tc_ns_client_time *tc_ns_time) +{ + struct tc_ns_smc_cmd smc_cmd = { {0}, 0 }; + int ret = 0; + struct mb_cmd_pack *mb_pack = NULL; + + if (!tc_ns_time) { + tloge("tc_ns_time is NULL input buffer\n"); + return -EINVAL; + } + + mb_pack = mailbox_alloc_cmd_pack(); + if (!mb_pack) { + tloge("alloc mb pack failed\n"); + return -ENOMEM; + } + + mb_pack->operation.paramtypes = TEE_PARAM_TYPE_VALUE_INPUT; + mb_pack->operation.params[0].value.a = tc_ns_time->seconds; + mb_pack->operation.params[0].value.b = tc_ns_time->millis; + + smc_cmd.cmd_type = CMD_TYPE_GLOBAL; + smc_cmd.cmd_id = GLOBAL_CMD_ID_ADJUST_TIME; + smc_cmd.operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation); + smc_cmd.operation_h_phys = + (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM; + if (tc_ns_smc(&smc_cmd)) { + tloge("tee adjust time failed, return error\n"); + ret = -EPERM; + } + mailbox_free(mb_pack); + + return ret; +} + +int sync_system_time_from_user(const struct tc_ns_client_time *user_time) +{ + 
	int ret = 0;
	struct tc_ns_client_time time = { 0 };

	if (!user_time) {
		tloge("user time is NULL input buffer\n");
		return -EINVAL;
	}

	if (copy_from_user(&time, user_time, sizeof(time))) {
		tloge("copy from user failed\n");
		return -EFAULT;
	}

	ret = tc_ns_sync_sys_time(&time);
	if (ret != 0)
		tloge("sync system time from user failed, ret = 0x%x\n", ret);

	return ret;
}

/* Push the current kernel wall-clock time to TEE (no userspace input). */
void sync_system_time_from_kernel(void)
{
	int ret = 0;
	struct tc_ns_client_time time = { 0 };

	struct time_spec kernel_time = {0};
	get_time_spec(&kernel_time);

	/* tv_sec -> whole seconds, tv_nsec -> milliseconds */
	time.seconds = (uint32_t)kernel_time.ts.tv_sec;
	time.millis = (uint32_t)(kernel_time.ts.tv_nsec / MS_TO_NS);

	ret = tc_ns_sync_sys_time(&time);
	if (ret != 0)
		tloge("sync system time from kernel failed, ret = 0x%x\n", ret);

	return;
}

/* Look up the event node for agent_id; returns NULL when the agent is unknown. */
static struct smc_event_data *check_response_access(unsigned int agent_id)
{
	struct smc_event_data *event_data = find_event_control(agent_id);

	if (!event_data) {
		tloge("Can't get event_data\n");
		return NULL;
	}
	return event_data;
}

/*
 * Wake the CA blocked in wait_agent_response() once the agent has
 * answered. No-op when no command is outstanding (ret_flag == 0).
 */
static void process_send_event_response(struct smc_event_data *event_data)
{
	if (event_data->ret_flag == 0)
		return;

	event_data->ret_flag = 0;
	/* Send the command back to the TA session waiting for it */
	tlogd("agent wakeup ca\n");
	atomic_set(&event_data->ca_run, 1);
	/* make sure reset working_ca before wakeup CA */
	wake_up(&event_data->ca_pending_wq);
}

/* Normal completion path: agent signals its work for a command is done. */
int tc_ns_send_event_response(unsigned int agent_id)
{
	struct smc_event_data *event_data = NULL;

	event_data = check_response_access(agent_id);
	if (!event_data) {
		tlogd("agent %u pre-check failed\n", agent_id);
		return -EINVAL;
	}

	tlogd("agent %u sends answer back\n", agent_id);
	process_send_event_response(event_data);
	put_agent_event(event_data);

	return 0;
}

/*
 * Crash path: mark the agent AGENT_CRASHED and release any blocked CA
 * (called from send_crashed_event_response_all on dev-file teardown).
 */
void send_event_response(unsigned int agent_id)
{
	struct smc_event_data *event_data = find_event_control(agent_id);

	if (!event_data) {
		tloge("Can't 
get event_data\n"); + return; + } + + tlogi("agent 0x%x sends answer back\n", agent_id); + atomic_set(&event_data->agent_ready, AGENT_CRASHED); + process_send_event_response(event_data); + put_agent_event(event_data); +} + +static void init_restart_agent_node(struct tc_ns_dev_file *dev_file, + struct smc_event_data *event_data) +{ + tlogi("agent: 0x%x restarting\n", event_data->agent_id); + event_data->ret_flag = 0; + event_data->owner = dev_file; + atomic_set(&event_data->agent_ready, AGENT_REGISTERED); + init_waitqueue_head(&(event_data->wait_event_wq)); + init_waitqueue_head(&(event_data->send_response_wq)); + init_waitqueue_head(&(event_data->ca_pending_wq)); + atomic_set(&(event_data->ca_run), 0); +} + +static int create_new_agent_node(struct tc_ns_dev_file *dev_file, + struct smc_event_data **event_data, unsigned int agent_id, + void **agent_buff, uint32_t agent_buff_size) +{ + *agent_buff = mailbox_alloc(agent_buff_size, MB_FLAG_ZERO); + if (!(*agent_buff)) { + tloge("alloc agent buff failed\n"); + return -ENOMEM; + } + *event_data = kzalloc(sizeof(**event_data), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)(*event_data))) { + mailbox_free(*agent_buff); + *agent_buff = NULL; + *event_data = NULL; + tloge("alloc event data failed\n"); + return -ENOMEM; + } + (*event_data)->agent_id = agent_id; + (*event_data)->ret_flag = 0; + (*event_data)->agent_buff_kernel = *agent_buff; + (*event_data)->agent_buff_size = agent_buff_size; + (*event_data)->owner = dev_file; + atomic_set(&(*event_data)->agent_ready, AGENT_REGISTERED); + init_waitqueue_head(&(*event_data)->wait_event_wq); + init_waitqueue_head(&(*event_data)->send_response_wq); + INIT_LIST_HEAD(&(*event_data)->head); + init_waitqueue_head(&(*event_data)->ca_pending_wq); + atomic_set(&(*event_data)->ca_run, 0); + + return 0; +} + +#ifdef CONFIG_LIBLINUX +static unsigned long agent_buffer_map(unsigned long buffer, uint32_t size) +{ + struct vm_area_struct *vma = NULL; + unsigned long user_addr; 
+ int ret; + + void *priv = NULL; + pgprot_t pro; + pro.pgprot = VM_READ | VM_WRITE; + + size = PAGE_ALIGN(size); + if (!size) + return -ENOMEM; + + user_addr = liblinux_pal_usermap_prepare(user_addr, size, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, &priv); + if (IS_ERR_OR_NULL((const void *)user_addr)) { + tloge("agent usermap prepare failed\n"); + return user_addr; + } + liblinux_pal_usermap_finish((const void *)priv, !IS_ERR_VALUE(ret)); + + ret = remap_pfn_range(NULL, user_addr, buffer >> PAGE_SHIFT, size, pro); + if (ret) { + tloge("remap agent buffer failed, err=%d", ret); + goto err_out; + } + + return user_addr; +err_out: + if (vm_munmap(user_addr, size)) + tloge("munmap failed\n"); + return -EFAULT; +} +#else +static unsigned long agent_buffer_map(unsigned long buffer, uint32_t size) +{ + struct vm_area_struct *vma = NULL; + unsigned long user_addr; + int ret; + + user_addr = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, 0); + if (IS_ERR_VALUE((uintptr_t)user_addr)) { + tloge("vm mmap failed\n"); + return user_addr; + } + + down_read(&mm_sem_lock(current->mm)); + vma = find_vma(current->mm, user_addr); + if (!vma) { + tloge("user_addr is not valid in vma"); + goto err_out; + } + + ret = remap_pfn_range(vma, user_addr, buffer >> PAGE_SHIFT, size, + vma->vm_page_prot); + if (ret != 0) { + tloge("remap agent buffer failed, err=%d", ret); + goto err_out; + } + + up_read(&mm_sem_lock(current->mm)); + return user_addr; +err_out: + up_read(&mm_sem_lock(current->mm)); + if (vm_munmap(user_addr, size)) + tloge("munmap failed\n"); + return -EFAULT; +} +#endif + +static bool is_valid_agent(unsigned int agent_id, + unsigned int buffer_size, bool user_agent) +{ + (void)agent_id; + if (user_agent && (buffer_size > SZ_4K)) { + tloge("size: %u of user agent's shared mem is invalid\n", + buffer_size); + return false; + } + + return true; +} + +static int is_agent_already_exist(unsigned int agent_id, + struct smc_event_data 
**event_data, struct tc_ns_dev_file *dev_file, bool *find_flag)
{
	unsigned long flags;
	bool flag = false;
	struct smc_event_data *agent_node = NULL;

	spin_lock_irqsave(&g_agent_control.lock, flags);
	list_for_each_entry(agent_node, &g_agent_control.agent_list, head) {
		if (agent_node->agent_id == agent_id) {
			/* a live (non-crashed) agent may not register twice */
			if (atomic_read(&agent_node->agent_ready) != AGENT_CRASHED) {
				tloge("no allow agent proc to reg twice\n");
				spin_unlock_irqrestore(&g_agent_control.lock, flags);
				return -EINVAL;
			}
			flag = true;
			get_agent_event(agent_node);
			/*
			 * We find the agent event_data aready in agent_list, it indicate agent
			 * didn't unregister normally, so the event_data will be reused.
			 */
			init_restart_agent_node(dev_file, agent_node);
			break;
		}
	}
	spin_unlock_irqrestore(&g_agent_control.lock, flags);
	*find_flag = flag;
	if (flag)
		*event_data = agent_node;
	return 0;
}

/* Publish a freshly created event node on the global agent list (usage = 1). */
static void add_event_node_to_list(struct smc_event_data *event_data)
{
	unsigned long flags;

	spin_lock_irqsave(&g_agent_control.lock, flags);
	list_add_tail(&event_data->head, &g_agent_control.agent_list);
	atomic_set(&event_data->usage, 1);
	spin_unlock_irqrestore(&g_agent_control.lock, flags);
}

/*
 * Announce the agent's shared buffer to TEE: physical address split into
 * low/high words plus the buffer size, sent as a global SMC command.
 * Returns 0 on success, -ENOMEM/-EPERM on failure.
 */
static int register_agent_to_tee(unsigned int agent_id, const void *agent_buff, uint32_t agent_buff_size)
{
	int ret = 0;
	struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };
	struct mb_cmd_pack *mb_pack = NULL;

	mb_pack = mailbox_alloc_cmd_pack();
	if (!mb_pack) {
		tloge("alloc mailbox failed\n");
		return -ENOMEM;
	}

	mb_pack->operation.paramtypes = TEE_PARAM_TYPE_VALUE_INPUT |
		(TEE_PARAM_TYPE_VALUE_INPUT << TEE_PARAM_NUM);
	mb_pack->operation.params[0].value.a =
		mailbox_virt_to_phys((uintptr_t)agent_buff);
	mb_pack->operation.params[0].value.b =
		(uint64_t)mailbox_virt_to_phys((uintptr_t)agent_buff) >> ADDR_TRANS_NUM;
	mb_pack->operation.params[1].value.a = agent_buff_size;
	smc_cmd.cmd_type = CMD_TYPE_GLOBAL;
	smc_cmd.cmd_id = 
GLOBAL_CMD_ID_REGISTER_AGENT; + smc_cmd.operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation); + smc_cmd.operation_h_phys = + (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM; + smc_cmd.agent_id = agent_id; + + if (tc_ns_smc(&smc_cmd)) { + ret = -EPERM; + tloge("register agent to tee failed\n"); + } + mailbox_free(mb_pack); + + return ret; +} + +static int get_agent_buffer(struct smc_event_data *event_data, + bool user_agent, void **buffer) +{ + /* agent first start or restart, both need a remap */ + if (user_agent) { + event_data->agent_buff_user = + (void *)(uintptr_t)agent_buffer_map( + mailbox_virt_to_phys((uintptr_t)event_data->agent_buff_kernel), + event_data->agent_buff_size); + if (IS_ERR(event_data->agent_buff_user)) { + tloge("vm map agent buffer failed\n"); + return -EFAULT; + } + *buffer = event_data->agent_buff_user; + } else { + *buffer = event_data->agent_buff_kernel; + } + + return 0; +} + +int tc_ns_register_agent(struct tc_ns_dev_file *dev_file, + unsigned int agent_id, unsigned int buffer_size, + void **buffer, bool user_agent) +{ + struct smc_event_data *event_data = NULL; + int ret = -EINVAL; + bool find_flag = false; + void *agent_buff = NULL; + uint32_t size_align; + + /* dev can be null */ + if (!buffer) + return ret; + + if (!is_valid_agent(agent_id, buffer_size, user_agent)) + return ret; + + size_align = ALIGN(buffer_size, SZ_4K); + + if (is_agent_already_exist(agent_id, &event_data, dev_file, &find_flag)) + return ret; + if (!find_flag) { + ret = create_new_agent_node(dev_file, &event_data, + agent_id, &agent_buff, size_align); + if (ret != 0) + return ret; + } + + if (get_agent_buffer(event_data, user_agent, buffer)) + goto release_rsrc; + + /* find_flag is false means it's a new agent register */ + if (!find_flag) { + /* + * Obtain share memory which is released + * in tc_ns_unregister_agent + */ + ret = register_agent_to_tee(agent_id, agent_buff, size_align); + if (ret != 0) { + 
unmap_agent_buffer(event_data); + goto release_rsrc; + } + add_event_node_to_list(event_data); + } + if (find_flag) + put_agent_event(event_data); /* match get action */ + return 0; + +release_rsrc: + if (find_flag) + put_agent_event(event_data); /* match get action */ + else + kfree(event_data); /* here event_data can never be NULL */ + + if (agent_buff) + mailbox_free(agent_buff); + return ret; +} + +int tc_ns_unregister_agent(unsigned int agent_id) +{ + struct smc_event_data *event_data = NULL; + int ret = 0; + struct tc_ns_smc_cmd smc_cmd = { {0}, 0 }; + struct mb_cmd_pack *mb_pack = NULL; + + event_data = find_event_control(agent_id); + if (!event_data || !event_data->agent_buff_kernel) { + tloge("agent is not found or kaddr is not allocated\n"); + return -EINVAL; + } + + mb_pack = mailbox_alloc_cmd_pack(); + if (!mb_pack) { + tloge("alloc mailbox failed\n"); + put_agent_event(event_data); + return -ENOMEM; + } + mb_pack->operation.paramtypes = TEE_PARAM_TYPE_VALUE_INPUT | + (TEE_PARAM_TYPE_VALUE_INPUT << TEE_PARAM_NUM); + mb_pack->operation.params[0].value.a = + mailbox_virt_to_phys((uintptr_t)event_data->agent_buff_kernel); + mb_pack->operation.params[0].value.b = + (uint64_t)mailbox_virt_to_phys((uintptr_t)event_data->agent_buff_kernel) >> ADDR_TRANS_NUM; + mb_pack->operation.params[1].value.a = SZ_4K; + smc_cmd.cmd_type = CMD_TYPE_GLOBAL; + smc_cmd.cmd_id = GLOBAL_CMD_ID_UNREGISTER_AGENT; + smc_cmd.operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation); + smc_cmd.operation_h_phys = + (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM; + smc_cmd.agent_id = agent_id; + tlogd("unregistering agent 0x%x\n", agent_id); + + if (tc_ns_smc(&smc_cmd) == 0) { + free_event_control(agent_id); + } else { + ret = -EPERM; + tloge("unregister agent failed\n"); + } + put_agent_event(event_data); + mailbox_free(mb_pack); + return ret; +} + +bool is_system_agent(const struct tc_ns_dev_file *dev_file) +{ + struct smc_event_data 
*event_data = NULL; + struct smc_event_data *tmp = NULL; + bool system_agent = false; + unsigned long flags; + + if (!dev_file) + return system_agent; + + spin_lock_irqsave(&g_agent_control.lock, flags); + list_for_each_entry_safe(event_data, tmp, &g_agent_control.agent_list, + head) { + if (event_data->owner == dev_file) { + system_agent = true; + break; + } + } + spin_unlock_irqrestore(&g_agent_control.lock, flags); + + return system_agent; +} + +void send_crashed_event_response_all(const struct tc_ns_dev_file *dev_file) +{ + struct smc_event_data *event_data = NULL; + struct smc_event_data *tmp = NULL; + unsigned int agent_id[AGENT_MAX] = {0}; + unsigned int i = 0; + unsigned long flags; + + if (!dev_file) + return; + + spin_lock_irqsave(&g_agent_control.lock, flags); + list_for_each_entry_safe(event_data, tmp, &g_agent_control.agent_list, + head) { + if (event_data->owner == dev_file && i < AGENT_MAX) + agent_id[i++] = event_data->agent_id; + } + spin_unlock_irqrestore(&g_agent_control.lock, flags); + + for (i = 0; i < AGENT_MAX; i++) { + if (agent_id[i] != 0) + send_event_response(agent_id[i]); + } + + return; +} + +void tee_agent_clear_dev_owner(const struct tc_ns_dev_file *dev_file) +{ + struct smc_event_data *event_data = NULL; + struct smc_event_data *tmp = NULL; + unsigned long flags; + + spin_lock_irqsave(&g_agent_control.lock, flags); + list_for_each_entry_safe(event_data, tmp, &g_agent_control.agent_list, + head) { + if (event_data->owner == dev_file) { + event_data->owner = NULL; + break; + } + } + spin_unlock_irqrestore(&g_agent_control.lock, flags); +} + + +static int def_tee_agent_work(void *instance) +{ + int ret = 0; + struct tee_agent_kernel_ops *agent_instance = NULL; + + agent_instance = instance; + while (!kthread_should_stop()) { + tlogd("%s agent loop++++\n", agent_instance->agent_name); + ret = tc_ns_wait_event(agent_instance->agent_id); + if (ret != 0) { + tloge("%s wait event fail\n", + agent_instance->agent_name); + break; + } + if 
(agent_instance->tee_agent_work) { + ret = agent_instance->tee_agent_work(agent_instance); + if (ret != 0) + tloge("%s agent work fail\n", + agent_instance->agent_name); + } + ret = tc_ns_send_event_response(agent_instance->agent_id); + if (ret != 0) { + tloge("%s send event response fail\n", + agent_instance->agent_name); + break; + } + tlogd("%s agent loop----\n", agent_instance->agent_name); + } + + return ret; +} + +static int def_tee_agent_run(struct tee_agent_kernel_ops *agent_instance) +{ + struct tc_ns_dev_file dev = {0}; + int ret; + + /* 1. Register agent buffer to TEE */ + ret = tc_ns_register_agent(&dev, agent_instance->agent_id, + agent_instance->agent_buff_size, &agent_instance->agent_buff, + false); + if (ret != 0) { + tloge("register agent buffer fail,ret =0x%x\n", ret); + ret = -EINVAL; + goto out; + } + + /* 2. Creat thread to run agent */ + agent_instance->agent_thread = + kthread_create(def_tee_agent_work, agent_instance, + "agent_%s", agent_instance->agent_name); + if (IS_ERR_OR_NULL(agent_instance->agent_thread)) { + tloge("kthread create fail\n"); + ret = PTR_ERR(agent_instance->agent_thread); + agent_instance->agent_thread = NULL; + goto out; + } + tz_kthread_bind_mask(agent_instance->agent_thread); + wake_up_process(agent_instance->agent_thread); + return 0; + +out: + return ret; +} + +static int def_tee_agent_stop(struct tee_agent_kernel_ops *agent_instance) +{ + int ret; + + if (tc_ns_send_event_response(agent_instance->agent_id) != 0) + tloge("failed to send response for agent %u\n", + agent_instance->agent_id); + ret = tc_ns_unregister_agent(agent_instance->agent_id); + if (ret != 0) + tloge("failed to unregister agent %u\n", + agent_instance->agent_id); + if (!IS_ERR_OR_NULL(agent_instance->agent_thread)) + kthread_stop(agent_instance->agent_thread); + + return 0; +} + +static struct tee_agent_kernel_ops g_def_tee_agent_ops = { + .agent_name = "default", + .agent_id = 0, + .tee_agent_init = NULL, + .tee_agent_run = def_tee_agent_run, + 
.tee_agent_work = NULL, + .tee_agent_exit = NULL, + .tee_agent_stop = def_tee_agent_stop, + .tee_agent_crash_work = NULL, + .agent_buff_size = PAGE_SIZE, + .list = LIST_HEAD_INIT(g_def_tee_agent_ops.list) +}; + +static int tee_agent_kernel_init(void) +{ + struct tee_agent_kernel_ops *agent_ops = NULL; + int ret = 0; + + list_for_each_entry(agent_ops, &g_tee_agent_list, list) { + /* Check the agent validity */ + if (!agent_ops->agent_id || + !agent_ops->agent_name || + !agent_ops->tee_agent_work) { + tloge("agent is invalid\n"); + continue; + } + tlogd("ready to init %s agent, id=0x%x\n", + agent_ops->agent_name, agent_ops->agent_id); + + /* Set agent buff size */ + if (!agent_ops->agent_buff_size) + agent_ops->agent_buff_size = + g_def_tee_agent_ops.agent_buff_size; + + /* Initialize the agent */ + if (agent_ops->tee_agent_init) + ret = agent_ops->tee_agent_init(agent_ops); + else if (g_def_tee_agent_ops.tee_agent_init) + ret = g_def_tee_agent_ops.tee_agent_init(agent_ops); + else + tlogw("agent id %u has no init function\n", + agent_ops->agent_id); + if (ret != 0) { + tloge("tee_agent_init %s failed\n", + agent_ops->agent_name); + continue; + } + + /* Run the agent */ + if (agent_ops->tee_agent_run) + ret = agent_ops->tee_agent_run(agent_ops); + else if (g_def_tee_agent_ops.tee_agent_run) + ret = g_def_tee_agent_ops.tee_agent_run(agent_ops); + else + tlogw("agent id %u has no run function\n", + agent_ops->agent_id); + + if (ret != 0) { + tloge("tee_agent_run %s failed\n", + agent_ops->agent_name); + if (agent_ops->tee_agent_exit) + agent_ops->tee_agent_exit(agent_ops); + continue; + } + } + + return 0; +} + +static void tee_agent_kernel_exit(void) +{ + struct tee_agent_kernel_ops *agent_ops = NULL; + + list_for_each_entry(agent_ops, &g_tee_agent_list, list) { + /* Stop the agent */ + if (agent_ops->tee_agent_stop) + agent_ops->tee_agent_stop(agent_ops); + else if (g_def_tee_agent_ops.tee_agent_stop) + g_def_tee_agent_ops.tee_agent_stop(agent_ops); + else + 
tlogw("agent id %u has no stop function\n", + agent_ops->agent_id); + + /* Uninitialize the agent */ + if (agent_ops->tee_agent_exit) + agent_ops->tee_agent_exit(agent_ops); + else if (g_def_tee_agent_ops.tee_agent_exit) + g_def_tee_agent_ops.tee_agent_exit(agent_ops); + else + tlogw("agent id %u has no exit function\n", + agent_ops->agent_id); + } +} + +int tee_agent_clear_work(struct tc_ns_client_context *context, + unsigned int dev_file_id) +{ + struct tee_agent_kernel_ops *agent_ops = NULL; + + list_for_each_entry(agent_ops, &g_tee_agent_list, list) { + if (agent_ops->tee_agent_crash_work) + agent_ops->tee_agent_crash_work(agent_ops, + context, dev_file_id); + } + return 0; +} + +int tee_agent_kernel_register(struct tee_agent_kernel_ops *new_agent) +{ + if (!new_agent) + return -EINVAL; + + INIT_LIST_HEAD(&new_agent->list); + list_add_tail(&new_agent->list, &g_tee_agent_list); + + return 0; +} + +void agent_init(void) +{ + spin_lock_init(&g_agent_control.lock); + INIT_LIST_HEAD(&g_agent_control.agent_list); + INIT_LIST_HEAD(&g_tee_agent_list); + + rpmb_agent_register(); +#if defined(CONFIG_MM_VLTMM) || defined(CONFIG_MEMORY_VLTMM) + (void)vltmm_agent_register(); +#endif + if (tee_agent_kernel_init()) + tloge("tee agent kernel init failed\n"); + return; +} + +void free_agent(void) +{ + struct smc_event_data *event_data = NULL; + struct smc_event_data *temp = NULL; + unsigned long flags; + + tee_agent_kernel_exit(); + + spin_lock_irqsave(&g_agent_control.lock, flags); + list_for_each_entry_safe(event_data, temp, &g_agent_control.agent_list, head) { + list_del(&event_data->head); + unmap_agent_buffer(event_data); + mailbox_free(event_data->agent_buff_kernel); + event_data->agent_buff_kernel = NULL; + kfree(event_data); + } + spin_unlock_irqrestore(&g_agent_control.lock, flags); +} diff --git a/tzdriver/core/agent.h b/tzdriver/core/agent.h new file mode 100644 index 0000000000000000000000000000000000000000..1b0b8c253c715a75b7962c7aa6b4b08109414e28 --- /dev/null +++ 
b/tzdriver/core/agent.h @@ -0,0 +1,132 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: agent manager function definition, such as register and send cmd + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef AGENT_H +#define AGENT_H +#include +#include "teek_ns_client.h" + +#define MAX_PATH_SIZE 512 +#define AGENT_FS_ID 0x46536673 /* FSfs */ +#define AGENT_MISC_ID 0x4d495343 /* MISC */ + +#ifdef CONFIG_RPMB_AGENT +#define TEE_RPMB_AGENT_ID 0x4abe6198 /* RPMB */ +#endif + +#define AGENT_SOCKET_ID 0x69e85664 /* socket */ +#define SECFILE_LOAD_AGENT_ID 0x4c4f4144 /* SECFILE-LOAD-AGENT */ +#define TEE_SECE_AGENT_ID 0x53656345 /* npu agent id */ +#define TEE_FACE_AGENT1_ID 0x46616365 /* face agent id */ +#define TEE_FACE_AGENT2_ID 0x46616345 /* face agent id */ +#define TEE_VLTMM_AGENT_ID 0x564c544d /* vltmm agent id */ +#define SYSTEM_UID 1000 +#define MS_TO_NS 1000000 + +enum agent_state_type { + AGENT_CRASHED = 0, + AGENT_REGISTERED, + AGENT_READY, +}; + +enum agent_status { + AGENT_ALIVE = 1, + AGENT_DEAD = 0, +}; + +/* for secure agent */ +struct smc_event_data { + unsigned int agent_id; + atomic_t agent_ready; + wait_queue_head_t wait_event_wq; + int ret_flag; /* indicate whether agent is returned from TEE */ + wait_queue_head_t send_response_wq; + struct list_head head; + struct tc_ns_smc_cmd cmd; + struct tc_ns_dev_file *owner; + void *agent_buff_kernel; + void *agent_buff_user; /* used for unmap */ + unsigned int agent_buff_size; + atomic_t usage; + wait_queue_head_t ca_pending_wq; + /* indicate whether agent is 
allowed to return to TEE */ + atomic_t ca_run; +}; + +struct tee_agent_kernel_ops { + const char *agent_name; + unsigned int agent_id; + int (*tee_agent_init)(struct tee_agent_kernel_ops *agent_instance); + int (*tee_agent_run)(struct tee_agent_kernel_ops *agent_instance); + int (*tee_agent_work)(struct tee_agent_kernel_ops *agent_instance); + int (*tee_agent_stop)(struct tee_agent_kernel_ops *agent_instance); + int (*tee_agent_exit)(struct tee_agent_kernel_ops *agent_instance); + int (*tee_agent_crash_work)( + struct tee_agent_kernel_ops *agent_instance, + struct tc_ns_client_context *context, + unsigned int dev_file_id); + struct task_struct *agent_thread; + void *agent_data; + void *agent_buff; + unsigned int agent_buff_size; + struct list_head list; +}; + +struct ca_info { + char path[MAX_PATH_SIZE]; + uint32_t uid; + uint32_t agent_id; +}; + +static inline void get_agent_event(struct smc_event_data *event_data) +{ + if (event_data) + atomic_inc(&event_data->usage); +} + +static inline void put_agent_event(struct smc_event_data *event_data) +{ + if (event_data) { + if (atomic_dec_and_test(&event_data->usage)) + kfree(event_data); + } +} + +int is_allowed_agent_ca(const struct ca_info *ca, + bool check_agent_id); +void agent_init(void); +void free_agent(void); +struct smc_event_data *find_event_control(unsigned int agent_id); +void send_event_response(unsigned int agent_id); +int agent_process_work(const struct tc_ns_smc_cmd *smc_cmd, unsigned int agent_id); +int is_agent_alive(unsigned int agent_id); +int tc_ns_set_native_hash(unsigned long arg, unsigned int cmd_id); +int tc_ns_late_init(unsigned long arg); +int tc_ns_register_agent(struct tc_ns_dev_file *dev_file, unsigned int agent_id, + unsigned int buffer_size, void **buffer, bool user_agent); +int tc_ns_unregister_agent(unsigned int agent_id); +void send_crashed_event_response_all(const struct tc_ns_dev_file *dev_file); +int tc_ns_wait_event(unsigned int agent_id); +int tc_ns_send_event_response(unsigned 
int agent_id); +void send_event_response_single(const struct tc_ns_dev_file *dev_file); +int sync_system_time_from_user(const struct tc_ns_client_time *user_time); +void sync_system_time_from_kernel(void); +int tee_agent_clear_work(struct tc_ns_client_context *context, + unsigned int dev_file_id); +int tee_agent_kernel_register(struct tee_agent_kernel_ops *new_agent); +bool is_system_agent(const struct tc_ns_dev_file *dev_file); +void tee_agent_clear_dev_owner(const struct tc_ns_dev_file *dev_file); +char *get_proc_dpath(char *path, int path_len); +int check_ext_agent_access(uint32_t agent_id); + +#endif diff --git a/tzdriver/core/cmdmonitor.c b/tzdriver/core/cmdmonitor.c new file mode 100644 index 0000000000000000000000000000000000000000..b7d7b2897442b4a4541f7c4c1afb6f7091c5c6a2 --- /dev/null +++ b/tzdriver/core/cmdmonitor.c @@ -0,0 +1,613 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: cmdmonitor function, monitor every cmd which is sent to TEE. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include "cmdmonitor.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE) +#include +#endif + +#ifdef CONFIG_TEE_LOG_EXCEPTION +#include +#define IMONITOR_TA_CRASH_EVENT_ID 901002003 +#define IMONITOR_MEMSTAT_EVENT_ID 940007001 +#define IMONITOR_TAMEMSTAT_EVENT_ID 940007002 +#endif + +#include "tc_ns_log.h" +#include "smc_smp.h" +#include "internal_functions.h" +#include "mailbox_mempool.h" +#include "tlogger.h" +#include "log_cfg_api.h" +#include "tui.h" + +static int g_cmd_need_archivelog; +static LIST_HEAD(g_cmd_monitor_list); +static int g_cmd_monitor_list_size; +/* report 2 hours */ +static const long long g_memstat_report_freq = 2 * 60 * 60 * 1000; +#define MAX_CMD_MONITOR_LIST 200 +#define MAX_AGENT_CALL_COUNT 5000 +static DEFINE_MUTEX(g_cmd_monitor_lock); + +/* independent wq to avoid block system_wq */ +static struct workqueue_struct *g_cmd_monitor_wq; +static struct delayed_work g_cmd_monitor_work; +static struct delayed_work g_cmd_monitor_work_archive; +static struct delayed_work g_mem_stat; +static int g_tee_detect_ta_crash; +static struct tc_uuid g_crashed_ta_uuid; + +void get_time_spec(struct time_spec *time) +{ + if (!time) + return; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) + time->ts = current_kernel_time(); +#else + ktime_get_coarse_ts64(&time->ts); +#endif +} + +static void schedule_memstat_work(struct delayed_work *work, + unsigned long delay) +{ + schedule_delayed_work(work, delay); +} + +static void schedule_cmd_monitor_work(struct delayed_work *work, + unsigned long delay) +{ + if (g_cmd_monitor_wq) + queue_delayed_work(g_cmd_monitor_wq, work, delay); + else + schedule_delayed_work(work, delay); +} + +void tzdebug_memstat(void) +{ + schedule_memstat_work(&g_mem_stat, usecs_to_jiffies(S_TO_MS)); +} + +void tzdebug_archivelog(void) +{ + schedule_cmd_monitor_work(&g_cmd_monitor_work_archive, + usecs_to_jiffies(0)); 
+} + +void cmd_monitor_ta_crash(int32_t type, const uint8_t *ta_uuid, uint32_t uuid_len) +{ + g_tee_detect_ta_crash = type; + if (g_tee_detect_ta_crash != TYPE_CRASH_TEE && + ta_uuid != NULL && uuid_len == sizeof(struct tc_uuid)) + (void)memcpy_s(&g_crashed_ta_uuid, sizeof(g_crashed_ta_uuid), + ta_uuid, uuid_len); + tzdebug_archivelog(); + fault_monitor_start(type); +} + +static int get_pid_name(pid_t pid, char *comm, size_t size) +{ + struct task_struct *task = NULL; + int sret; + + if (size <= TASK_COMM_LEN - 1 || !comm) + return -1; + + rcu_read_lock(); + +#ifndef CONFIG_TZDRIVER_MODULE + task = find_task_by_vpid(pid); +#else + task = pid_task(find_vpid(pid), PIDTYPE_PID); +#endif + if (task) + get_task_struct(task); + rcu_read_unlock(); + if (!task) { + tloge("get task failed\n"); + return -1; + } + + sret = strncpy_s(comm, size, task->comm, strlen(task->comm)); + if (sret != 0) + tloge("strncpy failed: errno = %d\n", sret); + put_task_struct(task); + + return sret; +} + +bool is_thread_reported(pid_t tid) +{ + bool ret = false; + struct cmd_monitor *monitor = NULL; + + mutex_lock(&g_cmd_monitor_lock); + list_for_each_entry(monitor, &g_cmd_monitor_list, list) { + if (monitor->tid == tid && !is_tui_in_use(monitor->tid)) { + ret = (monitor->is_reported || + monitor->agent_call_count > + MAX_AGENT_CALL_COUNT); + break; + } + } + mutex_unlock(&g_cmd_monitor_lock); + return ret; +} + +#ifdef CONFIG_TEE_LOG_EXCEPTION +#define FAIL_RET (-1) +#define SUCC_RET 0 + +static int send_memstat_packet(const struct tee_mem *meminfo) +{ + struct imonitor_eventobj *memstat = NULL; + uint32_t result = 0; + struct time_spec nowtime; + int ret; + get_time_spec(&nowtime); + + memstat = imonitor_create_eventobj(IMONITOR_MEMSTAT_EVENT_ID); + if (!memstat) { + tloge("create eventobj failed\n"); + return FAIL_RET; + } + + result |= (uint32_t)imonitor_set_param_integer_v2(memstat, "totalmem", meminfo->total_mem); + result |= (uint32_t)imonitor_set_param_integer_v2(memstat, "mem", 
meminfo->pmem); + result |= (uint32_t)imonitor_set_param_integer_v2(memstat, "freemem", meminfo->free_mem); + result |= (uint32_t)imonitor_set_param_integer_v2(memstat, "freememmin", meminfo->free_mem_min); + result |= (uint32_t)imonitor_set_param_integer_v2(memstat, "tanum", meminfo->ta_num); + result |= (uint32_t)imonitor_set_time(memstat, nowtime.ts.tv_sec); + if (result) { + tloge("set param integer1 failed ret=%u\n", result); + imonitor_destroy_eventobj(memstat); + return FAIL_RET; + } + + ret = imonitor_send_event(memstat); + imonitor_destroy_eventobj(memstat); + if (ret <= 0) { + tloge("imonitor send memstat packet failed\n"); + return FAIL_RET; + } + return SUCC_RET; +} + +void report_imonitor(const struct tee_mem *meminfo) +{ + int ret; + uint32_t result = 0; + uint32_t i; + struct imonitor_eventobj *pamemobj = NULL; + struct time_spec nowtime; + get_time_spec(&nowtime); + + if (!meminfo) + return; + + if (meminfo->ta_num > MEMINFO_TA_MAX) + return; + + if (send_memstat_packet(meminfo)) + return; + + for (i = 0; i < meminfo->ta_num; i++) { + pamemobj = imonitor_create_eventobj(IMONITOR_TAMEMSTAT_EVENT_ID); + if (!pamemobj) { + tloge("create obj failed\n"); + break; + } + + result |= (uint32_t)imonitor_set_param_string_v2(pamemobj, "NAME", meminfo->ta_mem_info[i].ta_name); + result |= (uint32_t)imonitor_set_param_integer_v2(pamemobj, "MEM", meminfo->ta_mem_info[i].pmem); + result |= (uint32_t)imonitor_set_param_integer_v2(pamemobj, "MEMMAX", meminfo->ta_mem_info[i].pmem_max); + result |= (uint32_t)imonitor_set_param_integer_v2(pamemobj, "MEMLIMIT", meminfo->ta_mem_info[i].pmem_limit); + result |= (uint32_t)imonitor_set_time(pamemobj, nowtime.ts.tv_sec); + if (result) { + tloge("set param integer2 failed ret=%d\n", result); + imonitor_destroy_eventobj(pamemobj); + return; + } + ret = imonitor_send_event(pamemobj); + imonitor_destroy_eventobj(pamemobj); + if (ret <= 0) { + tloge("imonitor send pamem packet failed\n"); + break; + } + } +} +#endif + +static 
void memstat_report(void)
+{
+	/* Query TEE memory usage into a mailbox buffer and forward it to imonitor */
+	int ret;
+	struct tee_mem *meminfo = NULL;
+
+	meminfo = mailbox_alloc(sizeof(*meminfo), MB_FLAG_ZERO);
+	if (!meminfo) {
+		tloge("mailbox alloc failed\n");
+		return;
+	}
+
+	ret = get_tee_meminfo(meminfo);
+#ifdef CONFIG_TEE_LOG_EXCEPTION
+	if (ret == 0) {
+		tlogd("report imonitor\n");
+		report_imonitor(meminfo);
+	}
+#endif
+	if (ret != 0)
+		tlogd("get meminfo failed\n");
+
+	mailbox_free(meminfo);
+}
+
+/* Workqueue entry point for the deferred memstat report (g_mem_stat) */
+static void memstat_work(struct work_struct *work)
+{
+	(void)(work);
+	memstat_report();
+}
+
+/*
+ * Restart the timeout clock for the calling thread's monitor entry and
+ * count one more agent call, guarding the counter against int overflow.
+ */
+void cmd_monitor_reset_context(void)
+{
+	struct cmd_monitor *monitor = NULL;
+	pid_t pid = current->tgid;
+	pid_t tid = current->pid;
+
+	mutex_lock(&g_cmd_monitor_lock);
+	list_for_each_entry(monitor, &g_cmd_monitor_list, list) {
+		if (monitor->pid == pid && monitor->tid == tid) {
+			get_time_spec(&monitor->sendtime);
+			if (monitor->agent_call_count + 1 < 0)
+				tloge("agent call count add overflow\n");
+			else
+				monitor->agent_call_count++;
+			break;
+		}
+	}
+	mutex_unlock(&g_cmd_monitor_lock);
+}
+
+#ifdef CONFIG_TEE_LOG_EXCEPTION
+static struct time_spec g_memstat_check_time;
+static bool g_after_loader = false;
+
+/*
+ * Rate-limited automatic memstat report: sends at most one report per
+ * g_memstat_report_freq milliseconds. The very first call only records
+ * the baseline timestamp (g_after_loader latch) without reporting.
+ */
+static void auto_report_memstat(void)
+{
+	long long timedif;
+	struct time_spec nowtime;
+	get_time_spec(&nowtime);
+
+	/*
+	 * get time value D (timedif=nowtime-sendtime),
+	 * we do not care about overflow
+	 * 1 year means 1000 * (60*60*24*365) = 0x757B12C00
+	 * only 5bytes, will not overflow
+	 */
+	timedif = S_TO_MS * (nowtime.ts.tv_sec - g_memstat_check_time.ts.tv_sec) +
+		(nowtime.ts.tv_nsec - g_memstat_check_time.ts.tv_nsec) / S_TO_US;
+	if (timedif > g_memstat_report_freq && g_after_loader) {
+		tlogi("cmdmonitor auto report memstat\n");
+		memstat_report();
+		g_memstat_check_time = nowtime;
+	}
+
+	if (!g_after_loader) {
+		g_memstat_check_time = nowtime;
+		g_after_loader = true;
+	}
+}
+#endif
+
+/*
+ * if one session timeout, monitor will print timedifs every step[n] in seconds,
+ * if lasted more than 360s, monitor will 
print timedifs every 360s. + */ +const int32_t g_timer_step[] = {1, 1, 1, 2, 5, 10, 40, 120, 180, 360}; +const int32_t g_timer_nums = sizeof(g_timer_step) / sizeof(int32_t); +static void show_timeout_cmd_info(struct cmd_monitor *monitor) +{ + long long timedif, timedif2; + struct time_spec nowtime; + int32_t time_in_sec; + get_time_spec(&nowtime); + + /* + * 1 year means 1000 * (60*60*24*365) = 0x757B12C00 + * only 5bytes, so timedif (timedif=nowtime-sendtime) will not overflow + */ + timedif = S_TO_MS * (nowtime.ts.tv_sec - monitor->sendtime.ts.tv_sec) + + (nowtime.ts.tv_nsec - monitor->sendtime.ts.tv_nsec) / S_TO_US; + + /* timeout to 10s, we log the teeos log, and report */ + if ((timedif > CMD_MAX_EXECUTE_TIME * S_TO_MS) && (!monitor->is_reported)) { + monitor->is_reported = true; + tloge("[cmd_monitor_tick] pid=%d,pname=%s,tid=%d, " + "tname=%s, lastcmdid=%u, agent call count:%d, " + "running with timedif=%lld ms and report\n", + monitor->pid, monitor->pname, monitor->tid, + monitor->tname, monitor->lastcmdid, + monitor->agent_call_count, timedif); + /* threads out of white table need info dump */ + tloge("monitor: pid-%d", monitor->pid); + if (!is_tui_in_use(monitor->tid)) { + show_cmd_bitmap(); + g_cmd_need_archivelog = 1; + wakeup_tc_siq(SIQ_DUMP_TIMEOUT); + } + } + + timedif2 = S_TO_MS * (nowtime.ts.tv_sec - monitor->lasttime.ts.tv_sec) + + (nowtime.ts.tv_nsec - monitor->lasttime.ts.tv_nsec) / S_TO_US; + time_in_sec = monitor->timer_index >= g_timer_nums ? + g_timer_step[g_timer_nums - 1] : g_timer_step[monitor->timer_index]; + if (timedif2 > time_in_sec * S_TO_MS) { + monitor->lasttime = nowtime; + monitor->timer_index = monitor->timer_index >= (int32_t)sizeof(g_timer_step) ? 
+ (int32_t)sizeof(g_timer_step) : (monitor->timer_index + 1); + tlogi("[cmd_monitor_tick] pid=%d,pname=%s,tid=%d, " + "lastcmdid=%u,agent call count:%d,timedif=%lld ms\n", + monitor->pid, monitor->pname, monitor->tid, + monitor->lastcmdid, monitor->agent_call_count, + timedif); + } +} + +static void cmd_monitor_tick(void) +{ + struct cmd_monitor *monitor = NULL; + struct cmd_monitor *tmp = NULL; + + mutex_lock(&g_cmd_monitor_lock); + list_for_each_entry_safe(monitor, tmp, &g_cmd_monitor_list, list) { + if (monitor->returned) { + g_cmd_monitor_list_size--; + tlogd("[cmd_monitor_tick] pid=%d,pname=%s,tid=%d, " + "tname=%s,lastcmdid=%u,count=%d,agent call count=%d, " + "timetotal=%lld us returned, remained command(s)=%d\n", + monitor->pid, monitor->pname, monitor->tid, monitor->tname, + monitor->lastcmdid, monitor->count, monitor->agent_call_count, + monitor->timetotal, g_cmd_monitor_list_size); + list_del(&monitor->list); + kfree(monitor); + continue; + } + show_timeout_cmd_info(monitor); + } + + /* if have cmd in monitor list, we need tick */ + if (g_cmd_monitor_list_size > 0) + schedule_cmd_monitor_work(&g_cmd_monitor_work, usecs_to_jiffies(S_TO_US)); + mutex_unlock(&g_cmd_monitor_lock); +#ifdef CONFIG_TEE_LOG_EXCEPTION + auto_report_memstat(); +#endif +} + +static void cmd_monitor_tickfn(struct work_struct *work) +{ + (void)(work); + cmd_monitor_tick(); + /* check tlogcat if have new log */ + tz_log_write(); +} + +#define MAX_CRASH_INFO_LEN 100 +static void cmd_monitor_archivefn(struct work_struct *work) +{ + (void)(work); + + if (tlogger_store_msg(CONFIG_TEE_LOG_ACHIVE_PATH, + sizeof(CONFIG_TEE_LOG_ACHIVE_PATH)) < 0) + tloge("[cmd_monitor_tick]tlogger store lastmsg failed\n"); + + if (g_tee_detect_ta_crash == TYPE_CRASH_TEE) { + tloge("detect teeos crash, panic\n"); + report_log_system_panic(); + } else if (g_tee_detect_ta_crash == TYPE_CRASH_TA || + g_tee_detect_ta_crash == TYPE_KILLED_TA) { +#ifdef CONFIG_TEE_LOG_EXCEPTION + const char crash_prefix[] = "ta 
crash: "; + const char killed_prefix[] = "ta timeout and killed: "; + const char crash_info_get_failed[] = "ta crash: get uuid failed"; + char buffer[MAX_CRASH_INFO_LEN] = {0}; + const char *crash_info = buffer; + int ret = snprintf_s(buffer, sizeof(buffer), sizeof(buffer) - 1, + "%s%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", + (g_tee_detect_ta_crash == TYPE_CRASH_TA) ? crash_prefix : killed_prefix, + g_crashed_ta_uuid.time_low, g_crashed_ta_uuid.time_mid, + g_crashed_ta_uuid.timehi_and_version, + g_crashed_ta_uuid.clockseq_and_node[0], g_crashed_ta_uuid.clockseq_and_node[1], + g_crashed_ta_uuid.clockseq_and_node[2], g_crashed_ta_uuid.clockseq_and_node[3], + g_crashed_ta_uuid.clockseq_and_node[4], g_crashed_ta_uuid.clockseq_and_node[5], + g_crashed_ta_uuid.clockseq_and_node[6], g_crashed_ta_uuid.clockseq_and_node[7]); + if (ret <= 0) { + tlogw("append crash info failed\n"); + crash_info = crash_info_get_failed; + } + if (teeos_log_exception_archive(IMONITOR_TA_CRASH_EVENT_ID, crash_info) < 0) + tloge("log exception archive failed\n"); + (void)memset_s(&g_crashed_ta_uuid, sizeof(g_crashed_ta_uuid), 0, sizeof(g_crashed_ta_uuid)); +#endif + } + + g_tee_detect_ta_crash = 0; +} + +static struct cmd_monitor *init_monitor_locked(void) +{ + struct cmd_monitor *newitem = NULL; + + newitem = kzalloc(sizeof(*newitem), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)newitem)) { + tloge("[cmd_monitor_tick]kzalloc failed\n"); + return NULL; + } + + get_time_spec(&newitem->sendtime); + newitem->lasttime = newitem->sendtime; + newitem->timer_index = 0; + newitem->count = 1; + newitem->agent_call_count = 0; + newitem->returned = false; + newitem->is_reported = false; + newitem->pid = current->tgid; + newitem->tid = current->pid; + if (get_pid_name(newitem->pid, newitem->pname, + sizeof(newitem->pname)) != 0) + newitem->pname[0] = '\0'; + if (get_pid_name(newitem->tid, newitem->tname, + sizeof(newitem->tname)) != 0) + newitem->tname[0] = '\0'; + 
INIT_LIST_HEAD(&newitem->list); + list_add_tail(&newitem->list, &g_cmd_monitor_list); + g_cmd_monitor_list_size++; + return newitem; +} + +struct cmd_monitor *cmd_monitor_log(const struct tc_ns_smc_cmd *cmd) +{ + bool found_flag = false; + pid_t pid; + pid_t tid; + struct cmd_monitor *monitor = NULL; + + if (!cmd) + return NULL; + + pid = current->tgid; + tid = current->pid; + mutex_lock(&g_cmd_monitor_lock); + do { + list_for_each_entry(monitor, &g_cmd_monitor_list, list) { + if (monitor->pid == pid && monitor->tid == tid) { + found_flag = true; + /* restart */ + get_time_spec(&monitor->sendtime); + monitor->lasttime = monitor->sendtime; + monitor->timer_index = 0; + monitor->count++; + monitor->returned = false; + monitor->is_reported = false; + monitor->lastcmdid = cmd->cmd_id; + monitor->agent_call_count = 0; + monitor->timetotal = 0; + break; + } + } + + if (!found_flag) { +#ifndef CONFIG_BIG_SESSION + if (g_cmd_monitor_list_size > MAX_CMD_MONITOR_LIST - 1) { + tloge("monitor reach max node num\n"); + monitor = NULL; + break; + } +#endif + monitor = init_monitor_locked(); + if (!monitor) { + tloge("init monitor failed\n"); + break; + } + monitor->lastcmdid = cmd->cmd_id; + /* the first cmd will cause timer */ + if (g_cmd_monitor_list_size == 1) + schedule_cmd_monitor_work(&g_cmd_monitor_work, + usecs_to_jiffies(S_TO_US)); + } + } while (0); + mutex_unlock(&g_cmd_monitor_lock); + + return monitor; +} + +void cmd_monitor_logend(struct cmd_monitor *item) +{ + struct time_spec nowtime; + long long timedif; + + if (!item) + return; + + get_time_spec(&nowtime); + /* + * get time value D (timedif=nowtime-sendtime), + * we do not care about overflow + * 1 year means 1000000 * (60*60*24*365) = 0x1CAE8C13E000 + * only 6bytes, will not overflow + */ + timedif = S_TO_US * (nowtime.ts.tv_sec - item->sendtime.ts.tv_sec) + + (nowtime.ts.tv_nsec - item->sendtime.ts.tv_nsec) / S_TO_MS; + item->timetotal += timedif; + item->returned = true; +} + +void 
do_cmd_need_archivelog(void)
+{
+	/* Schedule the archive work once when a timeout flagged g_cmd_need_archivelog */
+	if (g_cmd_need_archivelog == 1) {
+		g_cmd_need_archivelog = 0;
+		schedule_cmd_monitor_work(&g_cmd_monitor_work_archive,
+			usecs_to_jiffies(S_TO_US));
+	}
+}
+
+/*
+ * Allocate the unbound cmd-monitor workqueue and initialize the three
+ * deferrable works (tick, archive, memstat). A failed workqueue alloc is
+ * only logged; scheduling then falls back to the system workqueue path.
+ */
+void init_cmd_monitor(void)
+{
+	g_cmd_monitor_wq = alloc_workqueue("tz_cmd_monitor_wq",
+		WQ_UNBOUND, TZ_WQ_MAX_ACTIVE);
+	if (!g_cmd_monitor_wq)
+		tloge("alloc cmd monitor wq failed\n");
+	else
+		tz_workqueue_bind_mask(g_cmd_monitor_wq, 0);
+
+	INIT_DEFERRABLE_WORK((struct delayed_work *)
+		(uintptr_t)&g_cmd_monitor_work, cmd_monitor_tickfn);
+	INIT_DEFERRABLE_WORK((struct delayed_work *)
+		(uintptr_t)&g_cmd_monitor_work_archive, cmd_monitor_archivefn);
+	INIT_DEFERRABLE_WORK((struct delayed_work *)
+		(uintptr_t)&g_mem_stat, memstat_work);
+}
+
+/*
+ * Tear-down: free all monitor entries under the lock, then drain the
+ * three delayed works and destroy the workqueue.
+ */
+void free_cmd_monitor(void)
+{
+	struct cmd_monitor *monitor = NULL;
+	struct cmd_monitor *tmp = NULL;
+
+	mutex_lock(&g_cmd_monitor_lock);
+	list_for_each_entry_safe(monitor, tmp, &g_cmd_monitor_list, list) {
+		list_del(&monitor->list);
+		kfree(monitor);
+	}
+	mutex_unlock(&g_cmd_monitor_lock);
+
+	flush_delayed_work(&g_cmd_monitor_work);
+	flush_delayed_work(&g_cmd_monitor_work_archive);
+	flush_delayed_work(&g_mem_stat);
+	if (g_cmd_monitor_wq) {
+		flush_workqueue(g_cmd_monitor_wq);
+		destroy_workqueue(g_cmd_monitor_wq);
+		g_cmd_monitor_wq = NULL;
+	}
+}
diff --git a/tzdriver/core/cmdmonitor.h b/tzdriver/core/cmdmonitor.h
new file mode 100644
index 0000000000000000000000000000000000000000..cab0bfc631ce1c291e17ab4bcb308a2106cd072c
--- /dev/null
+++ b/tzdriver/core/cmdmonitor.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: cmdmonitor function declaration
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef CMD_MONITOR_H +#define CMD_MONITOR_H + +#include "tzdebug.h" +#include "teek_ns_client.h" +#include "smc_smp.h" +#include + +#if (KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE) +#define TASK_COMM_LEN 16 +#endif + +enum { + TYPE_CRASH_TEE = 1, + TYPE_CRASH_TA = 2, + TYPE_KILLED_TA = 3, +}; + +/* + * when cmd execute more than 25s in tee, + * it will be terminated when CA is killed + */ +#define CMD_MAX_EXECUTE_TIME 10U +#define S_TO_MS 1000 +#define S_TO_US 1000000 + +struct time_spec { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) + struct timespec ts; +#else + struct timespec64 ts; +#endif +}; + +struct cmd_monitor { + struct list_head list; + struct time_spec sendtime; + struct time_spec lasttime; + int32_t timer_index; + int count; + bool returned; + bool is_reported; + pid_t pid; + pid_t tid; + char pname[TASK_COMM_LEN]; + char tname[TASK_COMM_LEN]; + unsigned int lastcmdid; + long long timetotal; + int agent_call_count; +}; + +struct cmd_monitor *cmd_monitor_log(const struct tc_ns_smc_cmd *cmd); +void cmd_monitor_reset_context(void); +void cmd_monitor_logend(struct cmd_monitor *item); +void init_cmd_monitor(void); +void free_cmd_monitor(void); +void do_cmd_need_archivelog(void); +bool is_thread_reported(pid_t tid); +void tzdebug_archivelog(void); +void cmd_monitor_ta_crash(int32_t type, const uint8_t *ta_uuid, uint32_t uuid_len); +void tzdebug_memstat(void); +void get_time_spec(struct time_spec *time); +#endif diff --git a/tzdriver/core/ffa_abi.c b/tzdriver/core/ffa_abi.c new file mode 100644 index 0000000000000000000000000000000000000000..ebe6c631be877915f14bd5acc6edc664ae736711 --- /dev/null +++ b/tzdriver/core/ffa_abi.c @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., 
Ltd. + * Decription: functions for ffa settings + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include "ffa_abi.h" +#include "teek_ns_client.h" +#include "tz_pm.h" +#include "smc_call.h" + +const struct ffa_ops *g_ffa_ops = NULL; +struct ffa_device *g_ffa_dev = NULL; + +static void ffa_remove(struct ffa_device *ffa_dev) +{ + tlogd("stub remove ffa driver!\n"); +} + +static int ffa_probe(struct ffa_device *ffa_dev) +{ + g_ffa_ops = ffa_dev->ops; + g_ffa_dev = ffa_dev; + if (!g_ffa_ops) { + tloge("failed to get ffa_ops!\n"); + return -ENOENT; + } + + g_ffa_ops->mode_32bit_set(ffa_dev); + return 0; +} + +/* two sp uuid can be the same */ +const struct ffa_device_id tz_ffa_device_id[] = { + /* uuid = <0xe0786148 0xe311f8e7 0x02005ebc 0x1bc5d5a5> */ + {0x48, 0x61, 0x78, 0xe0, 0xe7, 0xf8, 0x11, 0xe3, 0xbc, 0x5e, 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}, + {} +}; + +static struct ffa_driver tz_ffa_driver = { + .name = "iTrustee", + .probe = ffa_probe, + .remove = ffa_remove, + .id_table = tz_ffa_device_id, +}; + +int ffa_abi_register(void) +{ + return ffa_register(&tz_ffa_driver); +} + +void ffa_abi_unregister(void) +{ + ffa_unregister(&tz_ffa_driver); +} + +void smc_req(struct smc_in_params *in, struct smc_out_params *out, uint8_t wait) +{ + ffa_forward_call(in, out, wait); +} + +static void convert_smc_param_to_ffa_param(struct smc_in_params *in_param, struct ffa_send_direct_data *ffa_param) +{ + ffa_param->data0 = in_param->x1; + ffa_param->data1 = in_param->x2; + ffa_param->data2 = in_param->x3; + ffa_param->data3 = in_param->x4; + /* x0(smc id) 
need to be transported for tee dealing it directly */ + ffa_param->data4 = in_param->x0; +} + +static void convert_ffa_param_to_smc_param(struct ffa_send_direct_data *ffa_param, struct smc_out_params *out_param) +{ + out_param->ret = ffa_param->data4; + out_param->exit_reason = ffa_param->data0; + out_param->ta = ffa_param->data1; + out_param->target = ffa_param->data2; +} + +int ffa_forward_call(struct smc_in_params *in_param, struct smc_out_params *out_param, uint8_t wait) +{ + if (in_param == NULL || out_param == NULL) { + tloge("invalid parameter ffa forward!\n"); + return -1; + } + + int ret; + struct ffa_send_direct_data ffa_param = {}; + convert_smc_param_to_ffa_param(in_param, &ffa_param); + + do { + ret = g_ffa_ops->sync_send_receive(g_ffa_dev, &ffa_param); + convert_ffa_param_to_smc_param(&ffa_param, out_param); + } while (out_param->ret == TSP_REQUEST && wait != 0); + + if (ret != 0) + tloge("failed to call! ret is %d\n", ret); + return ret; +} \ No newline at end of file diff --git a/tzdriver/core/ffa_abi.h b/tzdriver/core/ffa_abi.h new file mode 100644 index 0000000000000000000000000000000000000000..05047f661fa1e6d4d59c970e7c0d865f583d5da7 --- /dev/null +++ b/tzdriver/core/ffa_abi.h @@ -0,0 +1,161 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: declarations for ffa functions and useful macros + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef FFA_ABI_H +#define FFA_ABI_H + +#include +#include "smc_smp.h" +#include "smc_call.h" +/* + * Normal world sends requests with FFA_MSG_SEND_DIRECT_REQ and + * responses are returned with FFA_MSG_SEND_DIRECT_RESP for normal + * messages. + * + * All requests with FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP + * are using the AArch32 SMC calling convention with register usage as + * defined in FF-A specification: + * w0: Function ID (0x8400006F or 0x84000070) + * w1: Source/Destination IDs + * w2: Reserved (MBZ) + * w3-w7: Implementation defined, free to be used below + */ + +#define TZ_FFA_VERSION_MAJOR 1 +#define TZ_FFA_VERSION_MINOR 0 + +#define TZ_FFA_BLOCKING_CALL(id) (id) +#define TZ_FFA_YIELDING_CALL_BIT 31 +#define TZ_FFA_YIELDING_CALL(id) ((id) | BIT(TZ_FFA_YIELDING_CALL_BIT)) + +/* + * Returns the API version implemented, currently follows the FF-A version. + * Call register usage: + * w3: Service ID, TZ_FFA_GET_API_VERSION + * w4-w7: Not used (MBZ) + * + * Return register usage: + * w3: TZ_FFA_VERSION_MAJOR + * w4: TZ_FFA_VERSION_MINOR + * w5-w7: Not used (MBZ) + */ +#define TZ_FFA_GET_API_VERSION TZ_FFA_BLOCKING_CALL(0) + +/* + * Returns the revision of iTrustee + * + * Used by non-secure world to figure out which version of the Trusted OS + * is installed. Note that the returned revision is the revision of the + * Trusted OS, not of the API. + * + * Call register usage: + * w3: Service ID, TZ_FFA_GET_OS_VERSION + * w4-w7: Unused (MBZ) + * + * Return register usage: + * w3: CFG_TZ_REVISION_MAJOR + * w4: CFG_TZ_REVISION_MINOR + * w5: TEE_IMPL_GIT_SHA1 (or zero if not supported) + */ +#define TZ_FFA_GET_OS_VERSION TZ_FFA_BLOCKING_CALL(1) + +/* + * Exchange capabilities between normal world and secure world. + * + * Currently, there are no defined capabilities. When features are added new + * capabilities may be added. 
+ * + * Call register usage: + * w3: Service ID, TZ_FFA_EXCHANGE_CAPABILITIES + * w4-w7: Not used (MBZ) + * + * Return register usage: + * w3: Error code, 0 on success + * w4: Bit[7:0]: Number of parameters needed for RPC to be supplied + * as the second MSG arg struct for + * TZ_FFA_YIELDING_CALL_WITH_ARG. + * Bit[31:8]: Reserved (MBZ) + * w5-w7: Not used (MBZ) + */ +#define TZ_FFA_EXCHANGE_CAPABILITIES TZ_FFA_BLOCKING_CALL(2) + +/* + * Unregister shared memory + * + * Call register usage: + * w3: Service ID, TZ_FFA_YIELDING_CALL_UNREGISTER_SHM + * w4: Shared memory handle, lower bits + * w5: Shared memory handle, higher bits + * w6-w7: Not used (MBZ) + * + * Return register usage: + * w3: Error code, 0 on success + * w4-w7: Not used (MBZ) + */ +#define TZ_FFA_UNREGISTER_SHM TZ_FFA_BLOCKING_CALL(3) + +/* + * Call with struct TZ_msg_arg as argument in the supplied shared memory + * with a zero internal offset and normal cached memory attributes + * Register usage: + * w3: Service ID, TZ_FFA_YIELDING_CALL_WITH_ARG + * w4: Lower 32 bits of a 64-bit Shared memory handle + * w5: Upper 32 bits of a 64-bit Shared memory handle + * w6: Offset into shared memory pointing to a struct TZ_msg_arg + * right after the parameters of this struct (at offset + * TZ_MSG_GET_ARG_SIZE(num_params) follows a struct TZ_msg_arg + * for RPC, this struct has reserved space for the number of RPC + * parameters as returned by TZ_FFA_EXCHANGE_CAPABILITIES. + * w7: Not used (MBZ) + * Resume from RPC. Register usage: + * w3: Service ID, TZ_FFA_YIELDING_CALL_RESUME + * w4-w6: Not used (MBZ) + * w7: Resume info + * + * Normal return (yielding call is completed). Register usage: + * w3: Error code, 0 on success + * w4: TZ_FFA_YIELDING_CALL_RETURN_DONE + * w5-w7: Not used (MBZ) + * + * RPC interrupt return (RPC from secure world). 
Register usage: + * w3: Error code == 0 + * w4: Any defined RPC code but TZ_FFA_YIELDING_CALL_RETURN_DONE + * w5-w6: Not used (MBZ) + * w7: Resume info + * + * Possible error codes in register w3: + * 0: Success + * FFA_DENIED: w4 isn't one of TZ_FFA_YIELDING_CALL_START + * TZ_FFA_YIELDING_CALL_RESUME + * + * Possible error codes for TZ_FFA_YIELDING_CALL_START + * FFA_BUSY: Number of OP-TEE OS threads exceeded, + * try again later + * FFA_DENIED: RPC shared memory object not found + * FFA_INVALID_PARAMETER: Bad shared memory handle or offset into the memory + * + * Possible error codes for TZ_FFA_YIELDING_CALL_RESUME + * FFA_INVALID_PARAMETER: Bad resume info + */ +#define TZ_FFA_YIELDING_CALL_WITH_ARG TZ_FFA_YIELDING_CALL(0) +#define TZ_FFA_YIELDING_CALL_RESUME TZ_FFA_YIELDING_CALL(1) + +#define TZ_FFA_YIELDING_CALL_RETURN_DONE 0 +#define TZ_FFA_YIELDING_CALL_RETURN_RPC_CMD 1 +#define TZ_FFA_YIELDING_CALL_RETURN_INTERRUPT 2 + +int ffa_abi_register(void); +void ffa_abi_unregister(void); +int ffa_forward_call(struct smc_in_params *in_param, struct smc_out_params *out_param, uint8_t wait); + +#endif \ No newline at end of file diff --git a/tzdriver/core/gp_ops.c b/tzdriver/core/gp_ops.c new file mode 100644 index 0000000000000000000000000000000000000000..54b0d60ef08a436a64a58bc67d67d3cc82a3bf62 --- /dev/null +++ b/tzdriver/core/gp_ops.c @@ -0,0 +1,1303 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: alloc global operation and pass params to TEE. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include "gp_ops.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "teek_client_constants.h" +#include "tc_ns_client.h" +#include "agent.h" +#include "tc_ns_log.h" +#include "smc_smp.h" +#include "mem.h" +#include "mailbox_mempool.h" +#include "tc_client_driver.h" +#include "internal_functions.h" +#include "reserved_mempool.h" +#include "tlogger.h" +#include "dynamic_ion_mem.h" + +#define MAX_SHARED_SIZE 0x100000 /* 1 MiB */ + +static void free_operation(const struct tc_call_params *call_params, + struct tc_op_params *op_params); + +/* dir: 0-inclue input, 1-include output, 2-both */ +#define INPUT 0 +#define OUTPUT 1 +#define INOUT 2 + +static inline bool is_input_type(int dir) +{ + if (dir == INPUT || dir == INOUT) + return true; + + return false; +} + +static inline bool is_output_type(int dir) +{ + if (dir == OUTPUT || dir == INOUT) + return true; + + return false; +} + +static inline bool teec_value_type(unsigned int type, int dir) +{ + return ((is_input_type(dir) && type == TEEC_VALUE_INPUT) || + (is_output_type(dir) && type == TEEC_VALUE_OUTPUT) || + type == TEEC_VALUE_INOUT) ? true : false; +} + +static inline bool teec_tmpmem_type(unsigned int type, int dir) +{ + return ((is_input_type(dir) && type == TEEC_MEMREF_TEMP_INPUT) || + (is_output_type(dir) && type == TEEC_MEMREF_TEMP_OUTPUT) || + type == TEEC_MEMREF_TEMP_INOUT) ? true : false; +} + +static inline bool teec_memref_type(unsigned int type, int dir) +{ + return ((is_input_type(dir) && type == TEEC_MEMREF_PARTIAL_INPUT) || + (is_output_type(dir) && type == TEEC_MEMREF_PARTIAL_OUTPUT) || + type == TEEC_MEMREF_PARTIAL_INOUT) ? 
true : false; +} + +static int check_user_param(const struct tc_ns_client_context *client_context, + unsigned int index) +{ + if (!client_context) { + tloge("client_context is null\n"); + return -EINVAL; + } + + if (index >= PARAM_NUM) { + tloge("index is invalid, index:%x\n", index); + return -EINVAL; + } + return 0; +} + +bool is_tmp_mem(uint32_t param_type) +{ + if (param_type == TEEC_MEMREF_TEMP_INPUT || + param_type == TEEC_MEMREF_TEMP_OUTPUT || + param_type == TEEC_MEMREF_TEMP_INOUT) + return true; + + return false; +} + +bool is_ref_mem(uint32_t param_type) +{ + if (param_type == TEEC_MEMREF_PARTIAL_INPUT || + param_type == TEEC_MEMREF_PARTIAL_OUTPUT || + param_type == TEEC_MEMREF_PARTIAL_INOUT) + return true; + + return false; +} + +bool is_val_param(uint32_t param_type) +{ + if (param_type == TEEC_VALUE_INPUT || + param_type == TEEC_VALUE_OUTPUT || + param_type == TEEC_VALUE_INOUT || + param_type == TEEC_ION_INPUT || + param_type == TEEC_ION_SGLIST_INPUT) + return true; + + return false; +} + +static bool is_mem_param(uint32_t param_type) +{ + if (is_tmp_mem(param_type) || is_ref_mem(param_type)) + return true; + + return false; +} + +/* Check the size and buffer addresses have valid userspace addresses */ +static bool is_usr_refmem_valid(const union tc_ns_client_param *client_param) +{ + uint32_t size = 0; + uint64_t size_addr = client_param->memref.size_addr | + ((uint64_t)client_param->memref.size_h_addr << ADDR_TRANS_NUM); + uint64_t buffer_addr = client_param->memref.buffer | + ((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 18) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4, 19, 71)) + if (access_ok(VERIFY_READ, (void *)(uintptr_t)size_addr, sizeof(uint32_t)) == 0) +#else + if (access_ok((void *)(uintptr_t)size_addr, sizeof(uint32_t)) == 0) +#endif + return false; + + get_user(size, (uint32_t *)(uintptr_t)size_addr); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 18) || \ + 
LINUX_VERSION_CODE == KERNEL_VERSION(4, 19, 71)) + if (access_ok(VERIFY_READ, (void *)(uintptr_t)buffer_addr, size) == 0) +#else + if (access_ok((void *)(uintptr_t)buffer_addr, size) == 0) +#endif + return false; + + return true; +} + +static bool is_usr_valmem_valid(const union tc_ns_client_param *client_param) +{ + uint64_t a_addr = client_param->value.a_addr | + ((uint64_t)client_param->value.a_h_addr << ADDR_TRANS_NUM); + uint64_t b_addr = client_param->value.b_addr | + ((uint64_t)client_param->value.b_h_addr << ADDR_TRANS_NUM); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 18) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4, 19, 71)) + if (access_ok(VERIFY_READ, (void *)(uintptr_t)a_addr, sizeof(uint32_t)) == 0) +#else + if (access_ok((void *)(uintptr_t)a_addr, sizeof(uint32_t)) == 0) +#endif + return false; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 18) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4, 19, 71)) + if (access_ok(VERIFY_READ, (void *)(uintptr_t)b_addr, sizeof(uint32_t)) == 0) +#else + if (access_ok((void *)(uintptr_t)b_addr, sizeof(uint32_t)) == 0) +#endif + return false; + + return true; +} + +bool tc_user_param_valid(struct tc_ns_client_context *client_context, + unsigned int index) +{ + union tc_ns_client_param *client_param = NULL; + unsigned int param_type; + + if (check_user_param(client_context, index) != 0) + return false; + + client_param = &(client_context->params[index]); + param_type = teec_param_type_get(client_context->param_types, index); + tlogd("param %u type is %x\n", index, param_type); + if (param_type == TEEC_NONE) { + tlogd("param type is TEEC_NONE\n"); + return true; + } + + if (is_mem_param(param_type)) { + if (!is_usr_refmem_valid(client_param)) + return false; + } else if (is_val_param(param_type)) { + if (!is_usr_valmem_valid(client_param)) + return false; + } else { + tloge("param types is not supported\n"); + return false; + } + + return true; +} + +/* + * These function handle read from client. 
Because client here can be
+ * kernel client or user space client, we must use the proper function
+ */
+/*
+ * Copy @size bytes from a client buffer @src into kernel buffer @dest.
+ * @kernel_api != 0 means @src is a kernel pointer (memcpy_s path);
+ * otherwise it is a userspace pointer (copy_from_user path).
+ * Returns 0 on success (including size == 0), -EINVAL/-EFAULT or the
+ * memcpy_s error code on failure.
+ */
+int read_from_client(void *dest, size_t dest_size,
+	const void __user *src, size_t size, uint8_t kernel_api)
+{
+	int ret;
+
+	if (!dest || !src) {
+		tloge("src or dest is NULL input buffer\n");
+		return -EINVAL;
+	}
+
+	if (size > dest_size) {
+		/* fixed message: size == 0 is not an error here, it is accepted below */
+		tloge("size is larger than dest_size\n");
+		return -EINVAL;
+	}
+	if (size == 0)
+		return 0;
+
+	if (kernel_api != 0) {
+		ret = memcpy_s(dest, dest_size, src, size);
+		if (ret != EOK) {
+			tloge("memcpy fail. line=%d, s_ret=%d\n",
+				__LINE__, ret);
+			return ret;
+		}
+		return ret;
+	}
+	/* buffer is in user space(CA call TEE API) */
+	if (copy_from_user(dest, src, size) != 0) {
+		tloge("copy from user failed\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * Mirror of read_from_client for the opposite direction: copy @size bytes
+ * from kernel buffer @src out to the client buffer @dest (kernel or user
+ * space, selected by @kernel_api).
+ */
+int write_to_client(void __user *dest, size_t dest_size,
+	const void *src, size_t size, uint8_t kernel_api)
+{
+	int ret;
+
+	if (!dest || !src) {
+		tloge("src or dest is NULL input buffer\n");
+		return -EINVAL;
+	}
+
+	if (size > dest_size) {
+		tloge("size is larger than dest_size\n");
+		return -EINVAL;
+	}
+
+	if (size == 0)
+		return 0;
+
+	if (kernel_api != 0) {
+		ret = memcpy_s(dest, dest_size, src, size);
+		if (ret != EOK) {
+			tloge("write to client fail. 
line=%d, ret=%d\n", + __LINE__, ret); + return ret; + } + return ret; + } + + /* buffer is in user space(CA call TEE API) */ + if (copy_to_user(dest, src, size) != 0) { + tloge("copy to user failed\n"); + return -EFAULT; + } + return 0; +} + +static bool is_input_tempmem(unsigned int param_type) +{ + if (param_type == TEEC_MEMREF_TEMP_INPUT || + param_type == TEEC_MEMREF_TEMP_INOUT) + return true; + + return false; +} + +static int update_input_data(const union tc_ns_client_param *client_param, + uint32_t buffer_size, void *temp_buf, + unsigned int param_type, uint8_t kernel_params) +{ + uint64_t buffer_addr; + if (!is_input_tempmem(param_type)) + return 0; + + buffer_addr = client_param->memref.buffer | + ((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM); + if (read_from_client(temp_buf, buffer_size, + (void *)(uintptr_t)buffer_addr, + buffer_size, kernel_params) != 0) { + tloge("copy memref buffer failed\n"); + return -EFAULT; + } + return 0; +} + +/* + * temp buffers we need to allocate/deallocate + * for every operation + */ +static int alloc_for_tmp_mem(const struct tc_call_params *call_params, + struct tc_op_params *op_params, uint8_t kernel_params, + uint32_t param_type, unsigned int index) +{ + union tc_ns_client_param *client_param = NULL; + void *temp_buf = NULL; + uint32_t buffer_size = 0; + uint64_t size_addr; + + /* this never happens */ + if (index >= TEE_PARAM_NUM) + return -EINVAL; + + /* For compatibility sake we assume buffer size to be 32bits */ + client_param = &(call_params->context->params[index]); + size_addr = client_param->memref.size_addr | + ((uint64_t)client_param->memref.size_h_addr << ADDR_TRANS_NUM); + if (read_from_client(&buffer_size, sizeof(buffer_size), + (uint32_t __user *)(uintptr_t)size_addr, + sizeof(uint32_t), kernel_params) != 0) { + tloge("copy memref.size_addr failed\n"); + return -EFAULT; + } + + if (buffer_size > MAX_SHARED_SIZE) { + tloge("buffer size %u from user is too large\n", buffer_size); + return 
-EFAULT; + } + + op_params->mb_pack->operation.params[index].memref.size = buffer_size; + /* TEEC_MEMREF_TEMP_INPUT equal to TEE_PARAM_TYPE_MEMREF_INPUT */ + op_params->trans_paramtype[index] = param_type; + + if (buffer_size == 0) { + op_params->local_tmpbuf[index].temp_buffer = NULL; + op_params->local_tmpbuf[index].size = 0; + op_params->mb_pack->operation.params[index].memref.buffer = 0; + op_params->mb_pack->operation.buffer_h_addr[index] = 0; + return 0; + } + + temp_buf = mailbox_alloc(buffer_size, MB_FLAG_ZERO); + if (!temp_buf) { + tloge("temp buf malloc failed, i = %u\n", index); + return -ENOMEM; + } + op_params->local_tmpbuf[index].temp_buffer = temp_buf; + op_params->local_tmpbuf[index].size = buffer_size; + + if (update_input_data(client_param, buffer_size, temp_buf, + param_type, kernel_params) != 0) + return -EFAULT; + + op_params->mb_pack->operation.params[index].memref.buffer = + mailbox_virt_to_phys((uintptr_t)temp_buf); + op_params->mb_pack->operation.buffer_h_addr[index] = + (unsigned int)(mailbox_virt_to_phys((uintptr_t)temp_buf) >> ADDR_TRANS_NUM); + + return 0; +} + +static int check_buffer_for_ref(uint32_t *buffer_size, + const union tc_ns_client_param *client_param, uint8_t kernel_params) +{ + uint64_t size_addr = client_param->memref.size_addr | + ((uint64_t)client_param->memref.size_h_addr << ADDR_TRANS_NUM); + if (read_from_client(buffer_size, sizeof(*buffer_size), + (uint32_t __user *)(uintptr_t)size_addr, + sizeof(uint32_t), kernel_params) != 0) { + tloge("copy memref.size_addr failed\n"); + return -EFAULT; + } + if (*buffer_size == 0) { + tloge("buffer_size from user is 0\n"); + return -ENOMEM; + } + return 0; +} + +static bool is_refmem_offset_valid(const struct tc_ns_shared_mem *shared_mem, + const union tc_ns_client_param *client_param, uint32_t buffer_size) +{ + /* + * arbitrary CA can control offset by ioctl, so in here + * offset must be checked, and avoid integer overflow. 
+ */ + if (((shared_mem->len - client_param->memref.offset) >= buffer_size) && + (shared_mem->len > client_param->memref.offset)) + return true; + tloge("Unexpected size %u vs %u", shared_mem->len, buffer_size); + return false; +} + +static bool is_phyaddr_valid(const struct tc_ns_operation *operation, int index) +{ + /* + * for 8G physical memory device, there is a chance that + * operation->params[i].memref.buffer could be all 0, + * buffer_h_addr cannot be 0 in the same time. + */ + if ((operation->params[index].memref.buffer == 0) && + (operation->buffer_h_addr[index]) == 0) { + tloge("can not find shared buffer, exit\n"); + return false; + } + + return true; +} + +static int set_operation_buffer(const struct tc_ns_shared_mem *shared_mem, void *buffer_addr, + uint32_t buffer_size, unsigned int index, struct tc_op_params *op_params) +{ + if (shared_mem->mem_type == RESERVED_TYPE) { + /* no copy to mailbox */ + op_params->mb_pack->operation.mb_buffer[index] = buffer_addr; + op_params->mb_pack->operation.params[index].memref.buffer = + res_mem_virt_to_phys((uintptr_t)buffer_addr); + op_params->mb_pack->operation.buffer_h_addr[index] = + res_mem_virt_to_phys((uintptr_t)buffer_addr) >> ADDR_TRANS_NUM; + } else { + void *tmp_buffer_addr = mailbox_copy_alloc(buffer_addr, buffer_size); + if (tmp_buffer_addr == NULL) + return -ENOMEM; + + op_params->mb_pack->operation.mb_buffer[index] = tmp_buffer_addr; + op_params->mb_pack->operation.params[index].memref.buffer = + (unsigned int)mailbox_virt_to_phys((uintptr_t)tmp_buffer_addr); + op_params->mb_pack->operation.buffer_h_addr[index] = + (unsigned int)((uint64_t)mailbox_virt_to_phys((uintptr_t)tmp_buffer_addr) >> ADDR_TRANS_NUM); + } + return 0; +} + +/* + * MEMREF_PARTIAL buffers are already allocated so we just + * need to search for the shared_mem ref; + * For interface compatibility we assume buffer size to be 32bits + */ +static int alloc_for_ref_mem(const struct tc_call_params *call_params, + struct tc_op_params 
*op_params, uint8_t kernel_params, + uint32_t param_type, unsigned int index) +{ + union tc_ns_client_param *client_param = NULL; + struct tc_ns_shared_mem *shared_mem = NULL; + uint32_t buffer_size = 0; + void *buffer_addr = NULL; + int ret = 0; + + /* this never happens */ + if (index >= TEE_PARAM_NUM) + return -EINVAL; + + client_param = &(call_params->context->params[index]); + if (check_buffer_for_ref(&buffer_size, client_param, kernel_params) != 0) + return -EINVAL; + + op_params->mb_pack->operation.params[index].memref.buffer = 0; + + mutex_lock(&call_params->dev->shared_mem_lock); + list_for_each_entry(shared_mem, + &call_params->dev->shared_mem_list, head) { + buffer_addr = (void *)(uintptr_t)(client_param->memref.buffer | + ((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM)); + if (shared_mem->user_addr != buffer_addr) + continue; + if (!is_refmem_offset_valid(shared_mem, client_param, + buffer_size)) { + break; + } + buffer_addr = (void *)(uintptr_t)( + (uintptr_t)shared_mem->kernel_addr + + client_param->memref.offset); + + ret = set_operation_buffer(shared_mem, buffer_addr, buffer_size, index, op_params); + if (ret != 0) { + tloge("set operation buffer failed\n"); + break; + } + op_params->mb_pack->operation.sharemem[index] = shared_mem; + get_sharemem_struct(shared_mem); + break; + } + mutex_unlock(&call_params->dev->shared_mem_lock); + if (ret != 0) + return ret; + + if (!is_phyaddr_valid(&op_params->mb_pack->operation, index)) + return -EINVAL; + + op_params->mb_pack->operation.params[index].memref.size = buffer_size; + /* Change TEEC_MEMREF_PARTIAL_XXXXX to TEE_PARAM_TYPE_MEMREF_XXXXX */ + op_params->trans_paramtype[index] = param_type - + (TEEC_MEMREF_PARTIAL_INPUT - TEE_PARAM_TYPE_MEMREF_INPUT); + + if (shared_mem->mem_type == RESERVED_TYPE) + op_params->trans_paramtype[index] += + (TEE_PARAM_TYPE_RESMEM_INPUT - TEE_PARAM_TYPE_MEMREF_INPUT); + return ret; +} + +#ifdef CONFIG_NOCOPY_SHAREDMEM +static int fill_shared_mem_info(void 
*start_vaddr, uint32_t pages_no, uint32_t offset, uint32_t buffer_size, void *buff) +{ + struct pagelist_info *page_info = NULL; + struct page **pages = NULL; + uint64_t *phys_addr = NULL; + uint32_t page_num; + uint32_t i; + if (pages_no == 0) + return -EFAULT; + pages = (struct page **)vmalloc(pages_no * sizeof(uint64_t)); + if (pages == NULL) + return -EFAULT; + down_read(&mm_sem_lock(current->mm)); + page_num = get_user_pages((uintptr_t)start_vaddr, pages_no, FOLL_WRITE, pages, NULL); + up_read(&mm_sem_lock(current->mm)); + if (page_num != pages_no) { + tloge("get page phy addr failed\n"); + if (page_num > 0) + release_pages(pages, page_num); + vfree(pages); + return -EFAULT; + } + page_info = buff; + page_info->page_num = pages_no; + page_info->page_size = PAGE_SIZE; + page_info->sharedmem_offset = offset; + page_info->sharedmem_size = buffer_size; + phys_addr = (uint64_t *)buff + (sizeof(*page_info) / sizeof(uint64_t)); + for (i = 0; i < pages_no; i++) { + struct page *page = pages[i]; + if (page == NULL) { + release_pages(pages, page_num); + vfree(pages); + return -EFAULT; + } + phys_addr[i] = (uintptr_t)page_to_phys(page); + } + vfree(pages); + return 0; +} + +static int check_buffer_for_sharedmem(uint32_t *buffer_size, + const union tc_ns_client_param *client_param, uint8_t kernel_params) +{ + uint64_t size_addr = client_param->memref.size_addr | + ((uint64_t)client_param->memref.size_h_addr << ADDR_TRANS_NUM); + uint64_t buffer_addr = client_param->memref.buffer | + ((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM); + if (read_from_client(buffer_size, sizeof(*buffer_size), + (uint32_t __user *)(uintptr_t)size_addr, + sizeof(uint32_t), kernel_params)) { + tloge("copy size_addr failed\n"); + return -EFAULT; + } + + if (*buffer_size == 0 || *buffer_size > SZ_256M) { + tloge("invalid buffer size\n"); + return -ENOMEM; + } + + if ((client_param->memref.offset >= SZ_256M) || + (UINT64_MAX - buffer_addr <= client_param->memref.offset)) { + 
tloge("invalid buff or offset\n"); + return -EFAULT; + } + return 0; +} + +static int transfer_shared_mem(const struct tc_call_params *call_params, + struct tc_op_params *op_params, uint8_t kernel_params, + uint32_t param_type, unsigned int index) +{ + void *buff = NULL; + void *start_vaddr = NULL; + union tc_ns_client_param *client_param = NULL; + uint32_t buffer_size; + uint32_t pages_no; + uint32_t offset; + uint32_t buff_len; + uint64_t buffer_addr; + + if (index >= TEE_PARAM_NUM) + return -EINVAL; + + client_param = &(call_params->context->params[index]); + if (check_buffer_for_sharedmem(&buffer_size, client_param, kernel_params)) + return -EINVAL; + + buffer_addr = client_param->memref.buffer | + ((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM); + buff = (void *)(uint64_t)(buffer_addr + client_param->memref.offset); + start_vaddr = (void *)(((uint64_t)buff) & PAGE_MASK); + offset = ((uint32_t)(uintptr_t)buff) & (~PAGE_MASK); + pages_no = PAGE_ALIGN(offset + buffer_size) / PAGE_SIZE; + + buff_len = sizeof(struct pagelist_info) + (sizeof(uint64_t) * pages_no); + buff = mailbox_alloc(buff_len, MB_FLAG_ZERO); + if (buff == NULL) + return -EFAULT; + + if (fill_shared_mem_info(start_vaddr, pages_no, offset, buffer_size, buff)) { + mailbox_free(buff); + return -EFAULT; + } + + op_params->local_tmpbuf[index].temp_buffer = buff; + op_params->local_tmpbuf[index].size = buff_len; + + op_params->mb_pack->operation.params[index].memref.buffer = mailbox_virt_to_phys((uintptr_t)buff); + op_params->mb_pack->operation.buffer_h_addr[index] = (uint64_t)mailbox_virt_to_phys((uintptr_t)buff) >> ADDR_TRANS_NUM; + op_params->mb_pack->operation.params[index].memref.size = buff_len; + op_params->trans_paramtype[index] = param_type; + return 0; +} +#else +static int transfer_shared_mem(const struct tc_call_params *call_params, + const struct tc_op_params *op_params, uint8_t kernel_params, + uint32_t param_type, unsigned int index) +{ + (void)call_params; + 
(void)op_params; + (void)kernel_params; + (void)param_type; + (void)index; + tloge("invalid shared mem type\n"); + return -1; +} +#endif + +static int transfer_client_value(const struct tc_call_params *call_params, + struct tc_op_params *op_params, uint8_t kernel_params, + uint32_t param_type, unsigned int index) +{ + struct tc_ns_operation *operation = &op_params->mb_pack->operation; + union tc_ns_client_param *client_param = NULL; + uint64_t a_addr, b_addr; + + /* this never happens */ + if (index >= TEE_PARAM_NUM) + return -EINVAL; + + client_param = &(call_params->context->params[index]); + a_addr = client_param->value.a_addr | + ((uint64_t)client_param->value.a_h_addr << ADDR_TRANS_NUM); + b_addr = client_param->value.b_addr | + ((uint64_t)client_param->value.b_h_addr << ADDR_TRANS_NUM); + + if (read_from_client(&operation->params[index].value.a, + sizeof(operation->params[index].value.a), + (void *)(uintptr_t)a_addr, + sizeof(operation->params[index].value.a), + kernel_params) != 0) { + tloge("copy valuea failed\n"); + return -EFAULT; + } + if (read_from_client(&operation->params[index].value.b, + sizeof(operation->params[index].value.b), + (void *)(uintptr_t)b_addr, + sizeof(operation->params[index].value.b), + kernel_params) != 0) { + tloge("copy valueb failed\n"); + return -EFAULT; + } + + /* TEEC_VALUE_INPUT equal to TEE_PARAM_TYPE_VALUE_INPUT */ + op_params->trans_paramtype[index] = param_type; + return 0; +} + +static int alloc_operation(const struct tc_call_params *call_params, + struct tc_op_params *op_params) +{ + int ret = 0; + uint32_t index; + uint8_t kernel_params; + uint32_t param_type; + + kernel_params = call_params->dev->kernel_api; + for (index = 0; index < TEE_PARAM_NUM; index++) { + /* + * Normally kernel_params = kernel_api + * But when TC_CALL_LOGIN, params 2/3 will + * be filled by kernel. so under this circumstance, + * params 2/3 has to be set to kernel mode; and + * param 0/1 will keep the same with kernel_api. 
+ */ + if ((call_params->flags & TC_CALL_LOGIN) && (index >= 2)) + kernel_params = TEE_REQ_FROM_KERNEL_MODE; + param_type = teec_param_type_get( + call_params->context->param_types, index); + + tlogd("param %u type is %x\n", index, param_type); + if (teec_tmpmem_type(param_type, INOUT)) + ret = alloc_for_tmp_mem(call_params, op_params, + kernel_params, param_type, index); + else if (teec_memref_type(param_type, INOUT)) + ret = alloc_for_ref_mem(call_params, op_params, + kernel_params, param_type, index); + else if (teec_value_type(param_type, INOUT)) + ret = transfer_client_value(call_params, op_params, + kernel_params, param_type, index); + else if (param_type == TEEC_ION_INPUT) + ret = alloc_for_ion(call_params, op_params, + kernel_params, param_type, index); + else if (param_type == TEEC_ION_SGLIST_INPUT) + ret = alloc_for_ion_sglist(call_params, op_params, + kernel_params, param_type, index); + else if (param_type == TEEC_MEMREF_SHARED_INOUT) + ret = transfer_shared_mem(call_params, op_params, + kernel_params, param_type, index); + else + tlogd("param type = TEEC_NONE\n"); + + if (ret != 0) + break; + } + if (ret != 0) { + free_operation(call_params, op_params); + return ret; + } + op_params->mb_pack->operation.paramtypes = + teec_param_types(op_params->trans_paramtype[0], + op_params->trans_paramtype[1], + op_params->trans_paramtype[2], + op_params->trans_paramtype[3]); + op_params->op_inited = true; + + return ret; +} + +static int update_tmp_mem(const struct tc_call_params *call_params, + const struct tc_op_params *op_params, unsigned int index, bool is_complete) +{ + union tc_ns_client_param *client_param = NULL; + uint32_t buffer_size; + struct tc_ns_operation *operation = &op_params->mb_pack->operation; + uint64_t size_addr, buffer_addr; + + if (index >= TEE_PARAM_NUM) { + tloge("tmp buf size or index is invalid\n"); + return -EFAULT; + } + + buffer_size = operation->params[index].memref.size; + client_param = &(call_params->context->params[index]); + 
size_addr = client_param->memref.size_addr | + ((uint64_t)client_param->memref.size_h_addr << ADDR_TRANS_NUM); + buffer_addr = client_param->memref.buffer | + ((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM); + /* Size is updated all the time */ + if (write_to_client((void *)(uintptr_t)size_addr, + sizeof(buffer_size), + &buffer_size, sizeof(buffer_size), + call_params->dev->kernel_api) != 0) { + tloge("copy tempbuf size failed\n"); + return -EFAULT; + } + if (buffer_size > op_params->local_tmpbuf[index].size) { + /* incomplete case, when the buffer size is invalid see next param */ + if (!is_complete) + return 0; + /* + * complete case, operation is allocated from mailbox + * and share with gtask, so it's possible to be changed + */ + tloge("memref.size has been changed larger than the initial\n"); + return -EFAULT; + } + if (buffer_size == 0) + return 0; + /* Only update the buffer when the buffer size is valid in complete case */ + if (write_to_client((void *)(uintptr_t)buffer_addr, + operation->params[index].memref.size, + op_params->local_tmpbuf[index].temp_buffer, + operation->params[index].memref.size, + call_params->dev->kernel_api) != 0) { + tloge("copy tempbuf failed\n"); + return -ENOMEM; + } + return 0; +} + +static int update_for_ref_mem(const struct tc_call_params *call_params, + const struct tc_op_params *op_params, unsigned int index) +{ + union tc_ns_client_param *client_param = NULL; + uint32_t buffer_size; + unsigned int orig_size = 0; + struct tc_ns_operation *operation = &op_params->mb_pack->operation; + uint64_t size_addr; + + if (index >= TEE_PARAM_NUM) { + tloge("index is invalid\n"); + return -EFAULT; + } + + /* update size */ + buffer_size = operation->params[index].memref.size; + client_param = &(call_params->context->params[index]); + size_addr = client_param->memref.size_addr | + ((uint64_t)client_param->memref.size_h_addr << ADDR_TRANS_NUM); + + if (read_from_client(&orig_size, + sizeof(orig_size), + (uint32_t __user 
*)(uintptr_t)size_addr, + sizeof(orig_size), call_params->dev->kernel_api) != 0) { + tloge("copy orig memref.size_addr failed\n"); + return -EFAULT; + } + + if (write_to_client((void *)(uintptr_t)size_addr, + sizeof(buffer_size), + &buffer_size, sizeof(buffer_size), + call_params->dev->kernel_api) != 0) { + tloge("copy buf size failed\n"); + return -EFAULT; + } + + /* reserved memory no need to copy */ + if (operation->sharemem[index]->mem_type == RESERVED_TYPE) + return 0; + /* copy from mb_buffer to sharemem */ + if (operation->mb_buffer[index] && orig_size >= buffer_size) { + void *buffer_addr = + (void *)(uintptr_t)((uintptr_t) + operation->sharemem[index]->kernel_addr + + client_param->memref.offset); + if (memcpy_s(buffer_addr, + operation->sharemem[index]->len - + client_param->memref.offset, + operation->mb_buffer[index], buffer_size) != 0) { + tloge("copy to sharemem failed\n"); + return -EFAULT; + } + } + return 0; +} + +static int update_for_value(const struct tc_call_params *call_params, + const struct tc_op_params *op_params, unsigned int index) +{ + union tc_ns_client_param *client_param = NULL; + struct tc_ns_operation *operation = &op_params->mb_pack->operation; + uint64_t a_addr, b_addr; + + if (index >= TEE_PARAM_NUM) { + tloge("index is invalid\n"); + return -EFAULT; + } + client_param = &(call_params->context->params[index]); + a_addr = client_param->value.a_addr | + ((uint64_t)client_param->value.a_h_addr << ADDR_TRANS_NUM); + b_addr = client_param->value.b_addr | + ((uint64_t)client_param->value.b_h_addr << ADDR_TRANS_NUM); + + if (write_to_client((void *)(uintptr_t)a_addr, + sizeof(operation->params[index].value.a), + &operation->params[index].value.a, + sizeof(operation->params[index].value.a), + call_params->dev->kernel_api) != 0) { + tloge("inc copy value.a_addr failed\n"); + return -EFAULT; + } + if (write_to_client((void *)(uintptr_t)b_addr, + sizeof(operation->params[index].value.b), + &operation->params[index].value.b, + 
sizeof(operation->params[index].value.b), + call_params->dev->kernel_api) != 0) { + tloge("inc copy value.b_addr failed\n"); + return -EFAULT; + } + return 0; +} + +static int update_client_operation(const struct tc_call_params *call_params, + const struct tc_op_params *op_params, bool is_complete) +{ + int ret = 0; + uint32_t param_type; + uint32_t index; + + if (!op_params->op_inited) + return 0; + + /* if paramTypes is NULL, no need to update */ + if (call_params->context->param_types == 0) + return 0; + + for (index = 0; index < TEE_PARAM_NUM; index++) { + param_type = teec_param_type_get( + call_params->context->param_types, index); + if (teec_tmpmem_type(param_type, OUTPUT)) + ret = update_tmp_mem(call_params, op_params, + index, is_complete); + else if (teec_memref_type(param_type, OUTPUT)) + ret = update_for_ref_mem(call_params, + op_params, index); + else if (is_complete && teec_value_type(param_type, OUTPUT)) + ret = update_for_value(call_params, op_params, index); + else + tlogd("param_type:%u don't need to update\n", param_type); + if (ret != 0) + break; + } + return ret; +} + +#ifdef CONFIG_NOCOPY_SHAREDMEM +static void release_page(void *buf) +{ + uint32_t i; + uint64_t *phys_addr = NULL; + struct pagelist_info *page_info = NULL; + struct page *page = NULL; + + page_info = buf; + phys_addr = (uint64_t *)buf + (sizeof(*page_info) / sizeof(uint64_t)); + for (i = 0; i < page_info->page_num; i++) { + page = (struct page *)(uintptr_t)phys_to_page(phys_addr[i]); + if (page == NULL) + continue; + set_bit(PG_dirty, &page->flags); + put_page(page); + } +} +#endif +static void free_operation(const struct tc_call_params *call_params, struct tc_op_params *op_params) +{ + uint32_t param_type; + uint32_t index; + void *temp_buf = NULL; + struct tc_ns_temp_buf *local_tmpbuf = op_params->local_tmpbuf; + struct tc_ns_operation *operation = &op_params->mb_pack->operation; + + for (index = 0; index < TEE_PARAM_NUM; index++) { + param_type = 
teec_param_type_get(call_params->context->param_types, index); + if (is_tmp_mem(param_type)) { + /* free temp buffer */ + temp_buf = local_tmpbuf[index].temp_buffer; + tlogd("free temp buf, i = %u\n", index); +#if (!defined(CONFIG_LIBLINUX)) && (!defined(CONFIG_SHARED_MEM_RESERVED)) + /* if temp_buf from iomap instead of page_alloc, virt_addr_valid will return false */ + if (!virt_addr_valid((unsigned long)(uintptr_t)temp_buf)) + continue; +#endif + if (!ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)temp_buf)) { + mailbox_free(temp_buf); + temp_buf = NULL; + } + } else if (is_ref_mem(param_type)) { + struct tc_ns_shared_mem *shm = operation->sharemem[index]; + if (shm != NULL && shm->mem_type == RESERVED_TYPE) { + put_sharemem_struct(operation->sharemem[index]); + continue; + } + put_sharemem_struct(operation->sharemem[index]); + if (operation->mb_buffer[index]) + mailbox_free(operation->mb_buffer[index]); + } else if (param_type == TEEC_ION_SGLIST_INPUT) { + temp_buf = local_tmpbuf[index].temp_buffer; + tlogd("free ion sglist buf, i = %u\n", index); +#if (!defined(CONFIG_LIBLINUX)) && (!defined(CONFIG_SHARED_MEM_RESERVED)) + /* if temp_buf from iomap instead of page_alloc, virt_addr_valid will return false */ + if (!virt_addr_valid((uint64_t)(uintptr_t)temp_buf)) + continue; +#endif + if (!ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)temp_buf)) { + mailbox_free(temp_buf); + temp_buf = NULL; + } + } else if (param_type == TEEC_MEMREF_SHARED_INOUT) { +#ifdef CONFIG_NOCOPY_SHAREDMEM + temp_buf = local_tmpbuf[index].temp_buffer; + if (temp_buf != NULL) { + release_page(temp_buf); + mailbox_free(temp_buf); + } +#endif + } + } +} + +static bool is_clicall_params_vaild(const struct tc_call_params *call_params) +{ + if (!call_params) { + tloge("call param is null"); + return false; + } + + if (!call_params->dev) { + tloge("dev file is null"); + return false; + } + + if (!call_params->context) { + tloge("client context is null"); + return false; + } + + return true; +} + 
+static int alloc_for_client_call(struct tc_op_params *op_params) +{ + op_params->smc_cmd = kzalloc(sizeof(*(op_params->smc_cmd)), + GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)(op_params->smc_cmd))) { + tloge("smc cmd malloc failed\n"); + return -ENOMEM; + } + + op_params->mb_pack = mailbox_alloc_cmd_pack(); + if (!op_params->mb_pack) { + kfree(op_params->smc_cmd); + op_params->smc_cmd = NULL; + return -ENOMEM; + } + + return 0; +} + +static int init_smc_cmd(const struct tc_call_params *call_params, + struct tc_op_params *op_params) +{ + struct tc_ns_smc_cmd *smc_cmd = op_params->smc_cmd; + struct tc_ns_client_context *context = call_params->context; + struct tc_ns_operation *operation = &op_params->mb_pack->operation; + bool global = call_params->flags & TC_CALL_GLOBAL; + + smc_cmd->cmd_type = global ? CMD_TYPE_GLOBAL : CMD_TYPE_TA; + if (memcpy_s(smc_cmd->uuid, sizeof(smc_cmd->uuid), + context->uuid, UUID_LEN) != 0) { + tloge("memcpy uuid error\n"); + return -EFAULT; + } + smc_cmd->cmd_id = context->cmd_id; + smc_cmd->dev_file_id = call_params->dev->dev_file_id; + smc_cmd->context_id = context->session_id; + smc_cmd->err_origin = context->returns.origin; + smc_cmd->started = context->started; + smc_cmd->ca_pid = current->pid; + smc_cmd->pid = current->tgid; + + tlogv("current uid is %u\n", smc_cmd->uid); + if (context->param_types != 0) { + smc_cmd->operation_phys = + mailbox_virt_to_phys((uintptr_t)operation); + smc_cmd->operation_h_phys = + (uint64_t)mailbox_virt_to_phys((uintptr_t)operation) >> ADDR_TRANS_NUM; + } else { + smc_cmd->operation_phys = 0; + smc_cmd->operation_h_phys = 0; + } + smc_cmd->login_method = context->login.method; + + /* if smc from kernel CA, set login_method to TEEK_LOGIN_IDENTIFY */ + if (call_params->dev->kernel_api == TEE_REQ_FROM_KERNEL_MODE) + smc_cmd->login_method = TEEK_LOGIN_IDENTIFY; + + return 0; +} + +static bool need_check_login(const struct tc_call_params *call_params, + const struct tc_op_params 
*op_params) +{ + if (call_params->dev->pub_key_len == sizeof(uint32_t) && + op_params->smc_cmd->cmd_id == GLOBAL_CMD_ID_OPEN_SESSION && + current->mm && ((call_params->flags & TC_CALL_GLOBAL) != 0)) + return true; + + return false; +} + +static int check_login_for_encrypt(const struct tc_call_params *call_params, + struct tc_op_params *op_params) +{ + struct tc_ns_session *sess = call_params->sess; + struct tc_ns_smc_cmd *smc_cmd = op_params->smc_cmd; + struct mb_cmd_pack *mb_pack = op_params->mb_pack; + + if (need_check_login(call_params, op_params) && sess) { + if (memcpy_s(mb_pack->login_data, sizeof(mb_pack->login_data), + sess->auth_hash_buf, + sizeof(sess->auth_hash_buf)) != 0) { + tloge("copy login data failed\n"); + return -EFAULT; + } + smc_cmd->login_data_phy = mailbox_virt_to_phys((uintptr_t)mb_pack->login_data); + smc_cmd->login_data_h_addr = + (uint64_t)mailbox_virt_to_phys((uintptr_t)mb_pack->login_data) >> ADDR_TRANS_NUM; + smc_cmd->login_data_len = MAX_SHA_256_SZ * (NUM_OF_SO + 1); + } else { + smc_cmd->login_data_phy = 0; + smc_cmd->login_data_h_addr = 0; + smc_cmd->login_data_len = 0; + } + return 0; +} + +static uint32_t get_uid_for_cmd(void) +{ + kuid_t kuid; + + kuid = current_uid(); + return kuid.val; +} + +static void reset_session_id(const struct tc_call_params *call_params, + const struct tc_op_params *op_params, int tee_ret) +{ + bool need_reset = false; + + call_params->context->session_id = op_params->smc_cmd->context_id; + /* + * if tee_ret error except TEEC_PENDING, + * but context_id is seted,need to reset to 0 + */ + need_reset = ((call_params->flags & TC_CALL_GLOBAL) && + call_params->context->cmd_id == GLOBAL_CMD_ID_OPEN_SESSION && + tee_ret && tee_ret != (int)TEEC_PENDING); + if (need_reset) + call_params->context->session_id = 0; + return; +} + +static void pend_ca_thread(struct tc_ns_session *session, + const struct tc_ns_smc_cmd *smc_cmd) +{ + struct tc_wait_data *wq = NULL; + + if (session) + wq = &session->wait_data; + + if 
(wq) { + tlogv("before wait event\n"); + /* + * use wait_event instead of wait_event_interruptible so + * that ap suspend will not wake up the TEE wait call + */ + wait_event(wq->send_cmd_wq, wq->send_wait_flag != 0); + wq->send_wait_flag = 0; + } + tlogv("operation start is :%d\n", smc_cmd->started); + return; +} + + +static void release_tc_call_resource(const struct tc_call_params *call_params, + struct tc_op_params *op_params, int tee_ret) +{ + /* kfree(NULL) is safe and this check is probably not required */ + call_params->context->returns.code = tee_ret; + call_params->context->returns.origin = op_params->smc_cmd->err_origin; + + /* + * 1. when CA invoke command and crash, Gtask release service node + * then del ion won't be triggered, so here tzdriver need to kill ion; + * 2. when ta crash, tzdriver also need to kill ion; + */ + if (tee_ret == (int)TEE_ERROR_TAGET_DEAD || tee_ret == (int)TEEC_ERROR_GENERIC) + kill_ion_by_uuid((struct tc_uuid *)op_params->smc_cmd->uuid); + + if (op_params->op_inited) + free_operation(call_params, op_params); + + kfree(op_params->smc_cmd); + mailbox_free(op_params->mb_pack); +} + +static int config_smc_cmd_context(const struct tc_call_params *call_params, + struct tc_op_params *op_params) +{ + int ret; + + ret = init_smc_cmd(call_params, op_params); + if (ret != 0) + return ret; + + ret = check_login_for_encrypt(call_params, op_params); + + return ret; +} + +static int handle_ta_pending(const struct tc_call_params *call_params, + struct tc_op_params *op_params, int *tee_ret) +{ + if (*tee_ret != (int)TEEC_PENDING) + return 0; + + while (*tee_ret == (int)TEEC_PENDING) { + pend_ca_thread(call_params->sess, op_params->smc_cmd); + *tee_ret = tc_ns_smc_with_no_nr(op_params->smc_cmd); + } + + return 0; +} + +static int post_proc_smc_return(const struct tc_call_params *call_params, + struct tc_op_params *op_params, int tee_ret) +{ + int ret; + + if (tee_ret != 0) { + tloge("smc call ret 0x%x, cmd ret val 0x%x, origin %u\n", tee_ret, + 
op_params->smc_cmd->ret_val, op_params->smc_cmd->err_origin); + /* same as libteec_vendor, err from TEE, set ret positive */ + ret = EFAULT; + if (tee_ret == (int)TEEC_CLIENT_INTR) + ret = -ERESTARTSYS; + + if (tee_ret == (int)TEEC_ERROR_SHORT_BUFFER) + (void)update_client_operation(call_params, op_params, false); + } else { + tz_log_write(); + ret = update_client_operation(call_params, op_params, true); + } + + return ret; +} + +int tc_client_call(const struct tc_call_params *call_params) +{ + int ret; + int tee_ret = 0; + struct tc_op_params op_params = { NULL, NULL, {{0}}, {0}, false }; + + if (!is_clicall_params_vaild(call_params)) + return -EINVAL; + + if (alloc_for_client_call(&op_params) != 0) + return -ENOMEM; + + op_params.smc_cmd->err_origin = TEEC_ORIGIN_COMMS; + op_params.smc_cmd->uid = get_uid_for_cmd(); + if (call_params->context->param_types != 0) { + ret = alloc_operation(call_params, &op_params); + if (ret != 0) + goto free_src; + } + + ret = config_smc_cmd_context(call_params, &op_params); + if (ret != 0) + goto free_src; + + tee_ret = tc_ns_smc(op_params.smc_cmd); + + reset_session_id(call_params, &op_params, tee_ret); + + ret = handle_ta_pending(call_params, &op_params, &tee_ret); + if (ret != 0) + goto free_src; + + ret = post_proc_smc_return(call_params, &op_params, tee_ret); + +free_src: + if (ret < 0) /* if ret > 0, means err from TEE */ + op_params.smc_cmd->err_origin = TEEC_ORIGIN_COMMS; + release_tc_call_resource(call_params, &op_params, tee_ret); + return ret; +} diff --git a/tzdriver/core/gp_ops.h b/tzdriver/core/gp_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..32dab319b5a928b701d893316601cef37b24905f --- /dev/null +++ b/tzdriver/core/gp_ops.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: function declaration for alloc global operation and pass params to TEE. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef GP_OPS_H +#define GP_OPS_H +#include "tc_ns_client.h" +#include "teek_ns_client.h" + +struct pagelist_info { + uint64_t page_num; + uint64_t page_size; + uint64_t sharedmem_offset; + uint64_t sharedmem_size; +}; + +int write_to_client(void __user *dest, size_t dest_size, + const void *src, size_t size, uint8_t kernel_api); +int read_from_client(void *dest, size_t dest_size, + const void __user *src, size_t size, uint8_t kernel_api); +bool tc_user_param_valid(struct tc_ns_client_context *client_context, + unsigned int index); +int tc_client_call(const struct tc_call_params *call_params); +bool is_tmp_mem(uint32_t param_type); +bool is_ref_mem(uint32_t param_type); +bool is_val_param(uint32_t param_type); + +#endif diff --git a/tzdriver/core/mailbox_mempool.c b/tzdriver/core/mailbox_mempool.c new file mode 100644 index 0000000000000000000000000000000000000000..c01c04aae7c671248023426ad8625d0092fd92af --- /dev/null +++ b/tzdriver/core/mailbox_mempool.c @@ -0,0 +1,644 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: mailbox memory managing for sharing memory with TEE. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ +#include "mailbox_mempool.h" +#include "shared_mem.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE) +#include +#endif +#include "teek_client_constants.h" +#include "tc_ns_log.h" +#include "smc_smp.h" +#include "ko_adapt.h" +#include "internal_functions.h" + +#define MAILBOX_PAGE_MAX (MAILBOX_POOL_SIZE >> PAGE_SHIFT) +static int g_max_oder; + +#define OPT_MODE 0660U +#define STATE_MODE 0440U + +struct mb_page_t { + struct list_head node; + mailbox_page_t *page; + int order; + unsigned int count; /* whether be used */ +}; + +struct mb_free_area_t { + struct list_head page_list; + int order; +}; + +struct mb_zone_t { + mailbox_page_t *all_pages; + struct mb_page_t pages[MAILBOX_PAGE_MAX]; + struct mb_free_area_t free_areas[0]; +}; + +static struct mb_zone_t *g_m_zone; +static struct mutex g_mb_lock; + +static void mailbox_show_status(void) +{ + unsigned int i; + struct mb_page_t *pos = NULL; + struct list_head *head = NULL; + unsigned int used = 0; + + if (!g_m_zone) { + tloge("zone struct is NULL\n"); + return; + } + + tloge("########################################\n"); + mutex_lock(&g_mb_lock); + for (i = 0; i < MAILBOX_PAGE_MAX; i++) { + if (g_m_zone->pages[i].count != 0) { + tloge("page[%02d], order=%02d, count=%d\n", i, g_m_zone->pages[i].order, g_m_zone->pages[i].count); + used += (1 << (uint32_t)g_m_zone->pages[i].order); + } + } + tloge("total usage:%u/%u\n", used, MAILBOX_PAGE_MAX); + tloge("----------------------------------------\n"); + + for (i = 0; i < (unsigned int)g_max_oder; i++) { + head = &g_m_zone->free_areas[i].page_list; + if (list_empty(head) != 0) { + tloge("order[%02d] is empty\n", i); + } else { + list_for_each_entry(pos, head, node) + tloge("order[%02d]\n", i); + } + } + mutex_unlock(&g_mb_lock); + + tloge("########################################\n"); +} + 
+#define MB_SHOW_LINE 64 +#define BITS_OF_BYTE 8 +static void mailbox_show_details(void) +{ + unsigned int i; + unsigned int used = 0; + unsigned int left = 0; + unsigned int order = 0; + + if (!g_m_zone) { + tloge("zone struct is NULL\n"); + return; + } + tloge("----- show mailbox details -----"); + mutex_lock(&g_mb_lock); + for (i = 0; i < MAILBOX_PAGE_MAX; i++) { + if (i % MB_SHOW_LINE == 0) { + tloge("\n"); + tloge("%04d-%04d:", i, i + MB_SHOW_LINE); + } + if (g_m_zone->pages[i].count != 0) { + left = 1 << (uint32_t)g_m_zone->pages[i].order; + order = (uint32_t)g_m_zone->pages[i].order; + used += (1 << (uint32_t)g_m_zone->pages[i].order); + } + if (left != 0) { + left--; + tloge("%01d", order); + } else { + tloge("X"); + } + if (i > 1 && (i + 1) % (MB_SHOW_LINE / BITS_OF_BYTE) == 0) + tloge(" "); + } + tloge("total usage:%u/%u\n", used, MAILBOX_PAGE_MAX); + mutex_unlock(&g_mb_lock); +} + +void *mailbox_alloc(size_t size, unsigned int flag) +{ + unsigned int i; + struct mb_page_t *pos = (struct mb_page_t *)NULL; + struct list_head *head = NULL; + int order = get_order(ALIGN(size, SZ_4K)); + void *addr = NULL; + + if ((size == 0) || !g_m_zone) { + tlogw("alloc 0 size mailbox or zone struct is NULL\n"); + return NULL; + } + + if (order > g_max_oder || order < 0) { + tloge("invalid order %d\n", order); + return NULL; + } + mutex_lock(&g_mb_lock); + + for (i = (unsigned int)order; i <= (unsigned int)g_max_oder; i++) { + unsigned int j; + head = &g_m_zone->free_areas[i].page_list; + if (list_empty(head) != 0) + continue; + pos = list_first_entry(head, struct mb_page_t, node); + pos->count = 1; + pos->order = order; + /* split and add free list */ + for (j = (unsigned int)order; j < i; j++) { + struct mb_page_t *new_page = NULL; + new_page = pos + (1 << j); + new_page->count = 0; + new_page->order = (int)j; + list_add_tail(&new_page->node, &g_m_zone->free_areas[j].page_list); + } + list_del(&pos->node); + addr = (void *)mailbox_page_address(pos->page); + break; + } + 
+ mutex_unlock(&g_mb_lock); + if (addr && ((flag & MB_FLAG_ZERO) != 0)) { + if (memset_s(addr, ALIGN(size, SZ_4K), 0, ALIGN(size, SZ_4K)) != 0) { + tloge("clean mailbox failed\n"); + mailbox_free(addr); + return NULL; + } + } + return addr; +} + +static void add_max_order_block(unsigned int idex) +{ + struct mb_page_t *self = NULL; + + if (idex != (unsigned int)g_max_oder || !g_m_zone) + return; + + /* + * when idex equal max order, no one use mailbox mem, + * we need to hang all pages in the last free area page list + */ + self = &g_m_zone->pages[0]; + list_add_tail(&self->node, + &g_m_zone->free_areas[g_max_oder].page_list); +} + +static bool is_ptr_valid(const mailbox_page_t *page) +{ + if (!g_m_zone) + return false; + + if (page < g_m_zone->all_pages || + page >= (g_m_zone->all_pages + MAILBOX_PAGE_MAX)) { + tloge("invalid ptr to free in mailbox\n"); + return false; + } + return true; +} + +void mailbox_free(const void *ptr) +{ + unsigned int i; + mailbox_page_t *page = NULL; + struct mb_page_t *self = NULL; + struct mb_page_t *buddy = NULL; + unsigned int self_idx; + unsigned int buddy_idx; + + if (!ptr || !g_m_zone) { + tloge("invalid ptr or zone struct is NULL\n"); + return; + } + + page = mailbox_virt_to_page((uint64_t)(uintptr_t)ptr); + if (!is_ptr_valid(page)) + return; + mutex_lock(&g_mb_lock); + self_idx = page - g_m_zone->all_pages; + self = &g_m_zone->pages[self_idx]; + if (self->count == 0) { + tloge("already freed in mailbox\n"); + mutex_unlock(&g_mb_lock); + return; + } + + for (i = (unsigned int)self->order; i < + (unsigned int)g_max_oder; i++) { + self_idx = page - g_m_zone->all_pages; + buddy_idx = self_idx ^ (uint32_t)(1 << i); + self = &g_m_zone->pages[self_idx]; + buddy = &g_m_zone->pages[buddy_idx]; + self->count = 0; + /* is buddy free */ + if ((unsigned int)buddy->order == i && buddy->count == 0) { + /* release buddy */ + list_del(&buddy->node); + /* combine self and buddy */ + if (self_idx > buddy_idx) { + page = buddy->page; + 
buddy->order = (int)i + 1; + self->order = -1; + } else { + self->order = (int)i + 1; + buddy->order = -1; + } + } else { + /* release self */ + list_add_tail(&self->node, + &g_m_zone->free_areas[i].page_list); + mutex_unlock(&g_mb_lock); + return; + } + } + + add_max_order_block(i); + mutex_unlock(&g_mb_lock); +} + +struct mb_cmd_pack *mailbox_alloc_cmd_pack(void) +{ + void *pack = mailbox_alloc(SZ_4K, MB_FLAG_ZERO); + + if (!pack) + tloge("alloc mb cmd pack failed\n"); + + return (struct mb_cmd_pack *)pack; +} + +void *mailbox_copy_alloc(const void *src, size_t size) +{ + void *mb_ptr = NULL; + + if (!src || !size) { + tloge("invali src to alloc mailbox copy\n"); + return NULL; + } + + mb_ptr = mailbox_alloc(size, 0); + if (!mb_ptr) { + tloge("alloc size %zu mailbox failed\n", size); + return NULL; + } + + if (memcpy_s(mb_ptr, size, src, size) != 0) { + tloge("memcpy to mailbox failed\n"); + mailbox_free(mb_ptr); + return NULL; + } + + return mb_ptr; +} + +struct mb_dbg_entry { + struct list_head node; + unsigned int idx; + void *ptr; +}; + +static LIST_HEAD(mb_dbg_list); +static DEFINE_MUTEX(mb_dbg_lock); +static unsigned int g_mb_dbg_entry_count = 1; +static unsigned int g_mb_dbg_last_res; /* only cache 1 opt result */ +static struct dentry *g_mb_dbg_dentry; + +static unsigned int mb_dbg_add_entry(void *ptr) +{ + struct mb_dbg_entry *new_entry = NULL; + unsigned int index = 0; + + new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)new_entry)) { + tloge("alloc entry failed\n"); + return 0; + } + + INIT_LIST_HEAD(&new_entry->node); + new_entry->ptr = ptr; + mutex_lock(&mb_dbg_lock); + new_entry->idx = g_mb_dbg_entry_count; + + if ((g_mb_dbg_entry_count++) == 0) + g_mb_dbg_entry_count++; + list_add_tail(&new_entry->node, &mb_dbg_list); + index = new_entry->idx; + mutex_unlock(&mb_dbg_lock); + + return index; +} + +static void mb_dbg_remove_entry(unsigned int idx) +{ + struct mb_dbg_entry *pos = NULL; + struct 
mb_dbg_entry *temp = NULL; + + mutex_lock(&mb_dbg_lock); + list_for_each_entry_safe(pos, temp, &mb_dbg_list, node) { + if (pos->idx == idx) { + mailbox_free(pos->ptr); + list_del(&pos->node); + kfree(pos); + mutex_unlock(&mb_dbg_lock); + return; + } + } + mutex_unlock(&mb_dbg_lock); + + tloge("entry %u invalid\n", idx); +} + +static void mb_dbg_reset(void) +{ + struct mb_dbg_entry *pos = NULL; + struct mb_dbg_entry *tmp = NULL; + + mutex_lock(&mb_dbg_lock); + list_for_each_entry_safe(pos, tmp, &mb_dbg_list, node) { + mailbox_free(pos->ptr); + list_del(&pos->node); + kfree(pos); + } + g_mb_dbg_entry_count = 0; + mutex_unlock(&mb_dbg_lock); +} + +#define MB_WRITE_SIZE 64 + +static bool is_opt_write_param_valid(const struct file *filp, + const char __user *ubuf, size_t cnt, const loff_t *ppos) +{ + if (!filp || !ppos || !ubuf) + return false; + + if (cnt >= MB_WRITE_SIZE || cnt == 0) + return false; + + return true; +} + +static void alloc_dbg_entry(unsigned int alloc_size) +{ + unsigned int idx; + void *ptr = NULL; + + ptr = mailbox_alloc(alloc_size, 0); + if (!ptr) { + tloge("alloc order=%u in mailbox failed\n", alloc_size); + return; + } + + idx = mb_dbg_add_entry(ptr); + if (idx == 0) + mailbox_free(ptr); + g_mb_dbg_last_res = idx; +} + +static ssize_t mb_dbg_opt_write(struct file *filp, + const char __user *ubuf, size_t cnt, loff_t *ppos) +{ + char buf[MB_WRITE_SIZE] = {0}; + char *cmd = NULL; + char *value = NULL; + unsigned int alloc_size; + unsigned int free_idx; + + if (!is_opt_write_param_valid(filp, ubuf, cnt, ppos)) + return -EINVAL; + + if (copy_from_user(buf, ubuf, cnt) != 0) + return -EFAULT; + + buf[cnt] = 0; + value = buf; + if (strncmp(value, "reset", strlen("reset")) == 0) { + tlogi("mb dbg reset\n"); + mb_dbg_reset(); + return (ssize_t)cnt; + } + + cmd = strsep(&value, ":"); + if (!cmd || !value) { + tloge("no valid cmd or value for mb dbg\n"); + return -EFAULT; + } + + if (strncmp(cmd, "alloc", strlen("alloc")) == 0) { + if (kstrtou32(value, 10, 
&alloc_size) == 0) + alloc_dbg_entry(alloc_size); + else + tloge("invalid value format for mb dbg\n"); + } else if (strncmp(cmd, "free", strlen("free")) == 0) { + if (kstrtou32(value, 10, &free_idx) == 0) + mb_dbg_remove_entry(free_idx); + else + tloge("invalid value format for mb dbg\n"); + } else { + tloge("invalid format for mb dbg\n"); + } + + return (ssize_t)cnt; +} + +static ssize_t mb_dbg_opt_read(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char buf[16] = {0}; + ssize_t ret; + + (void)(filp); + + ret = snprintf_s(buf, sizeof(buf), 15, "%u\n", g_mb_dbg_last_res); + if (ret < 0) { + tloge("snprintf idx failed\n"); + return -EINVAL; + } + + return simple_read_from_buffer(ubuf, cnt, ppos, buf, ret); +} + +static const struct file_operations g_mb_dbg_opt_fops = { + .owner = THIS_MODULE, + .read = mb_dbg_opt_read, + .write = mb_dbg_opt_write, +}; + +static ssize_t mb_dbg_state_read(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + (void)cnt; + (void)(filp); + (void)(ubuf); + (void)(ppos); + mailbox_show_status(); + mailbox_show_details(); + return 0; +} + +static const struct file_operations g_mb_dbg_state_fops = { + .owner = THIS_MODULE, + .read = mb_dbg_state_read, +}; + +static int mailbox_register(const void *mb_pool, unsigned int size) +{ + struct tc_ns_operation *operation = NULL; + struct tc_ns_smc_cmd *smc_cmd = NULL; + int ret = 0; + + smc_cmd = kzalloc(sizeof(*smc_cmd), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)smc_cmd)) { + tloge("alloc smc_cmd failed\n"); + return -EIO; + } + + operation = (struct tc_ns_operation *)(uintptr_t)get_operation_vaddr(); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)operation)) { + tloge("alloc operation failed\n"); + ret = -EIO; + goto free_smc_cmd; + } + + operation->paramtypes = TEE_PARAM_TYPE_VALUE_INPUT | + (TEE_PARAM_TYPE_VALUE_INPUT << TEE_PARAM_NUM); + operation->params[0].value.a = mailbox_virt_to_phys((uintptr_t)mb_pool); + 
operation->params[0].value.b = + (uint64_t)mailbox_virt_to_phys((uintptr_t)mb_pool) >> ADDR_TRANS_NUM; + operation->params[1].value.a = size; + + smc_cmd->cmd_type = CMD_TYPE_GLOBAL; + smc_cmd->cmd_id = GLOBAL_CMD_ID_REGISTER_MAILBOX; + smc_cmd->operation_phys = mailbox_virt_to_phys((uintptr_t)operation); + smc_cmd->operation_h_phys = + (uint64_t)mailbox_virt_to_phys((uintptr_t)operation) >> ADDR_TRANS_NUM; + + if (is_tee_rebooting()) + ret = send_smc_cmd_rebooting(TSP_REQUEST, 0, 0, smc_cmd); + else + ret= tc_ns_smc(smc_cmd); + + if (ret != 0) { + tloge("resigter mailbox failed\n"); + ret = -EIO; + } + + free_operation((uint64_t)(uintptr_t)operation); + operation = NULL; +free_smc_cmd: + kfree(smc_cmd); + smc_cmd = NULL; + return ret; +} + +static void mailbox_debug_init(void) +{ +#ifdef DEF_ENG + g_mb_dbg_dentry = debugfs_create_dir("tz_mailbox", NULL); + debugfs_create_file("opt", OPT_MODE, g_mb_dbg_dentry, NULL, &g_mb_dbg_opt_fops); + debugfs_create_file("state", STATE_MODE, g_mb_dbg_dentry, NULL, &g_mb_dbg_state_fops); +#endif +} + +int re_register_mailbox(void) +{ + if (!g_m_zone) + return -EFAULT; + + if (g_m_zone->all_pages != NULL) { + if (memset_s((void *)mailbox_page_address(g_m_zone->all_pages), + MAILBOX_POOL_SIZE, 0, MAILBOX_POOL_SIZE) != EOK) { + tloge("memset mailbox failed\n"); + return -EFAULT; + } + if (mailbox_register((const void *) mailbox_page_address(g_m_zone->all_pages), MAILBOX_POOL_SIZE) != 0) { + tloge("register mailbox failed\n"); + return -EIO; + } + } + + return 0; +} + +int mailbox_mempool_init(void) +{ + int i; + struct mb_page_t *mb_page = NULL; + struct mb_free_area_t *area = NULL; + mailbox_page_t *all_pages = NULL; + size_t zone_len; + + g_max_oder = get_order(MAILBOX_POOL_SIZE); + tlogi("in this RE, mailbox max order is: %d\n", g_max_oder); + + /* zone len is fixed, will not overflow */ + zone_len = sizeof(*area) * (g_max_oder + 1) + sizeof(*g_m_zone); + g_m_zone = kzalloc(zone_len, GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned 
long)(uintptr_t)g_m_zone)) { + tloge("fail to alloc zone struct\n"); + return -ENOMEM; + } + all_pages = mailbox_alloc_pages(g_max_oder); + if (!all_pages) { + tloge("fail to alloc mailbox mempool\n"); + kfree(g_m_zone); + g_m_zone = NULL; + return -ENOMEM; + } + if (mailbox_register((const void *) mailbox_page_address(all_pages), MAILBOX_POOL_SIZE) != 0) { + tloge("register mailbox failed\n"); + mailbox_free_pages(all_pages, g_max_oder); + kfree(g_m_zone); + g_m_zone = NULL; + return -EIO; + } + for (i = 0; i < MAILBOX_PAGE_MAX; i++) { + g_m_zone->pages[i].order = -1; + g_m_zone->pages[i].count = 0; + g_m_zone->pages[i].page = &all_pages[i]; + } + + g_m_zone->pages[0].order = g_max_oder; + for (i = 0; i <= g_max_oder; i++) { + area = &g_m_zone->free_areas[i]; + INIT_LIST_HEAD(&area->page_list); + area->order = i; + } + + mb_page = &g_m_zone->pages[0]; + list_add_tail(&mb_page->node, &area->page_list); + g_m_zone->all_pages = all_pages; + mutex_init(&g_mb_lock); + mailbox_debug_init(); + + return 0; +} + +void free_mailbox_mempool(void) +{ + mailbox_free_pages(g_m_zone->all_pages, g_max_oder); + g_m_zone->all_pages = NULL; + kfree(g_m_zone); + g_m_zone = NULL; + + if (!g_mb_dbg_dentry) + return; + debugfs_remove_recursive(g_mb_dbg_dentry); + g_mb_dbg_dentry = NULL; +} diff --git a/tzdriver/core/mailbox_mempool.h b/tzdriver/core/mailbox_mempool.h new file mode 100644 index 0000000000000000000000000000000000000000..30df77d553c40e9cc726ebd333ae43219dc0d5ee --- /dev/null +++ b/tzdriver/core/mailbox_mempool.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: mailbox memory managing for sharing memory with TEE. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef MAILBOX_MEMPOOOL_H +#define MAILBOX_MEMPOOOL_H + +#include +#include +#include "teek_ns_client.h" + +#ifndef MAILBOX_POOL_SIZE +#define MAILBOX_POOL_SIZE SZ_4M +#endif + +/* alloc options */ +#define MB_FLAG_ZERO 0x1 /* set 0 after alloc page */ +#define GLOBAL_UUID_LEN 17 /* first char represent global cmd */ + +void *mailbox_alloc(size_t size, unsigned int flag); +void mailbox_free(const void *ptr); +int mailbox_mempool_init(void); +void free_mailbox_mempool(void); +struct mb_cmd_pack *mailbox_alloc_cmd_pack(void); +void *mailbox_copy_alloc(const void *src, size_t size); +int re_register_mailbox(void); +uintptr_t mailbox_virt_to_phys(uintptr_t addr); + +#endif diff --git a/tzdriver/core/mem.c b/tzdriver/core/mem.c new file mode 100644 index 0000000000000000000000000000000000000000..75f49977f9a1e978fee2d1302f2e379adb5070fd --- /dev/null +++ b/tzdriver/core/mem.c @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: memory operation for gp sharedmem. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include "mem.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "smc_smp.h" +#include "tc_ns_client.h" +#include "teek_ns_client.h" +#include "agent.h" +#include "tc_ns_log.h" +#include "mailbox_mempool.h" +#include "internal_functions.h" +#include "reserved_mempool.h" + +void tc_mem_free(struct tc_ns_shared_mem *shared_mem) +{ + if (!shared_mem) + return; + if (shared_mem->mem_type == RESERVED_TYPE) { + reserved_mem_free(shared_mem->kernel_addr); + kfree(shared_mem); + return; + } + + if (shared_mem->kernel_addr) { +#ifndef CONFIG_LIBLINUX + vfree(shared_mem->kernel_addr); +#else + kfree(shared_mem->kernel_addr); +#endif + shared_mem->kernel_addr = NULL; + } + kfree(shared_mem); +} + +static void init_shared_mem(struct tc_ns_shared_mem *sh, void *addr, size_t len) +{ + sh->kernel_addr = addr; + sh->len = (uint32_t)len; + sh->user_addr = INVALID_MAP_ADDR; + sh->user_addr_ca = INVALID_MAP_ADDR; + atomic_set(&sh->usage, 0); +} +struct tc_ns_shared_mem *tc_mem_allocate(size_t len) +{ + struct tc_ns_shared_mem *shared_mem = NULL; + void *addr = NULL; + + shared_mem = kmalloc(sizeof(*shared_mem), GFP_KERNEL | __GFP_ZERO); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)shared_mem)) { + tloge("shared_mem kmalloc failed\n"); + return ERR_PTR(-ENOMEM); + } + shared_mem->mem_type = VMALLOC_TYPE; + len = ALIGN(len, SZ_4K); + if (exist_res_mem()) { + if (len > get_res_mem_slice_size()) { + tloge("allocate reserved mem size too large\n"); + kfree(shared_mem); + return ERR_PTR(-EINVAL); + } + addr = reserved_mem_alloc(len); + if (addr) { + shared_mem->mem_type = RESERVED_TYPE; + init_shared_mem(shared_mem, addr, len); + return shared_mem; + } else { + tlogw("no more reserved memory to alloc so we use system vmalloc.\n"); + } + } + if (len > MAILBOX_POOL_SIZE) { + tloge("alloc sharemem size %zu is too large\n", len); + kfree(shared_mem); + return ERR_PTR(-EINVAL); + } +#ifndef CONFIG_LIBLINUX + addr 
= vmalloc_user(len); +#else + addr = kzalloc(len, GFP_KERNEL); +#endif + if (!addr) { + tloge("alloc mailbox failed\n"); + kfree(shared_mem); + return ERR_PTR(-ENOMEM); + } + + init_shared_mem(shared_mem, addr, len); + return shared_mem; +} diff --git a/tzdriver/core/mem.h b/tzdriver/core/mem.h new file mode 100644 index 0000000000000000000000000000000000000000..81a68b0f1bd176068a9715d59047b88325a73fbf --- /dev/null +++ b/tzdriver/core/mem.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: memory operation for gp sharedmem. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef MEM_H +#define MEM_H +#include +#include "teek_ns_client.h" + +#define PRE_ALLOCATE_SIZE (1024*1024) +#define MEM_POOL_ELEMENT_SIZE (64*1024) +#define MEM_POOL_ELEMENT_NR (8) +#define MEM_POOL_ELEMENT_ORDER (4) + +struct tc_ns_shared_mem *tc_mem_allocate(size_t len); +void tc_mem_free(struct tc_ns_shared_mem *shared_mem); + +static inline void get_sharemem_struct(struct tc_ns_shared_mem *sharemem) +{ + if (sharemem != NULL) + atomic_inc(&sharemem->usage); +} + +static inline void put_sharemem_struct(struct tc_ns_shared_mem *sharemem) +{ + if (sharemem != NULL) { + if (atomic_dec_and_test(&sharemem->usage)) + tc_mem_free(sharemem); + } +} + +#endif diff --git a/tzdriver/core/reserved_mempool.c b/tzdriver/core/reserved_mempool.c new file mode 100644 index 0000000000000000000000000000000000000000..2b9f74ca0cc5a3bb4231d13f32429b082d107a93 --- /dev/null +++ b/tzdriver/core/reserved_mempool.c @@ -0,0 +1,523 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: memory managering for reserved memory with TEE. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include "reserved_mempool.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "teek_client_constants.h" +#include "tc_ns_log.h" +#include "smc_smp.h" + +#define STATE_MODE 0440U +#define SLICE_RATE 4 +#define MAX_SLICE 0x400000 +#define MIN_RES_MEM_SIZE 0x400000 + +struct virt_page { + unsigned long start; +}; + +struct reserved_page_t { + struct list_head node; + struct virt_page *page; + int order; + unsigned int count; /* whether be used */ +}; + +struct reserved_free_area_t { + struct list_head page_list; + int order; +}; + +struct reserved_zone_t { + struct virt_page *all_pages; + struct reserved_page_t *pages; + struct reserved_free_area_t free_areas[0]; +}; + +static struct reserved_zone_t *g_res_zone; +static struct mutex g_res_lock; +static int g_res_max_order; +static unsigned long g_start_vaddr = 0; +static unsigned long g_start_paddr; +static struct dentry *g_res_mem_dbg_dentry; +static unsigned int g_res_mem_size = 0; + +static unsigned int get_res_page_size(void) +{ + return g_res_mem_size >> PAGE_SHIFT; +} + +static unsigned int calc_res_mem_size(unsigned int rsize) +{ + unsigned int size = rsize; + unsigned int idx = 0; + + if (size == 0 || (size & (size - 1)) == 0) + return size; + + while (size != 0) { + size = size >> 1; + idx++; + } + return (1 << (idx - 1)); +} + +unsigned int get_res_mem_slice_size(void) +{ + unsigned int size = (g_res_mem_size >> SLICE_RATE); + return (size > MAX_SLICE) ? 
MAX_SLICE : size; +} + +bool exist_res_mem(void) +{ + return (g_start_vaddr != 0) && (g_res_mem_size != 0); +} + +unsigned long res_mem_virt_to_phys(unsigned long vaddr) +{ + return vaddr - g_start_vaddr + g_start_paddr; +} + +int load_reserved_mem(void) +{ + struct device_node *np = NULL; + struct resource r; + unsigned int res_size; + int rc; + void *p = NULL; + + np = of_find_compatible_node(NULL, NULL, "tz_reserved"); + if (np == NULL) { + tlogd("can not find reserved memory.\n"); + return 0; + } + + rc = of_address_to_resource(np, 0, &r); + if (rc != 0) { + tloge("of_address_to_resource error\n"); + return -ENODEV; + } + + res_size = (unsigned int)resource_size(&r); + if (res_size < MIN_RES_MEM_SIZE) { + tloge("reserved memory size is too small\n"); + return -EINVAL; + } + + p = ioremap(r.start, res_size); + if (p == NULL) { + tloge("io remap for reserved memory failed\n"); + return -ENOMEM; + } + g_start_vaddr = (unsigned long)(uintptr_t)p; + g_start_paddr = (unsigned long)r.start; + g_res_mem_size = calc_res_mem_size(res_size); + + return 0; +} + +void unmap_res_mem(void) +{ + if (exist_res_mem()) { + iounmap((void __iomem *)g_start_vaddr); + g_start_vaddr = 0; + g_res_mem_size = 0; + } +} + +static int create_zone(void) +{ + size_t zone_len; + g_res_max_order = get_order(g_res_mem_size); + zone_len = sizeof(struct reserved_free_area_t) * (g_res_max_order + 1) + sizeof(*g_res_zone); + + g_res_zone = kzalloc(zone_len, GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)g_res_zone)) { + tloge("fail to create zone\n"); + return -ENOMEM; + } + + g_res_zone->pages = kzalloc(sizeof(struct reserved_page_t) * get_res_page_size(), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)g_res_zone->pages)) { + tloge("failed to alloc zone pages\n"); + kfree(g_res_zone); + g_res_zone = NULL; + return -ENOMEM; + } + return 0; +} + +static struct virt_page *create_virt_pages(void) +{ + unsigned int i = 0; + struct virt_page *pages = NULL; + + pages = 
kzalloc(get_res_page_size() * sizeof(struct virt_page), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)pages)) { + tloge("alloc pages failed\n"); + return NULL; + } + + for (i = 0; i < get_res_page_size(); i++) + pages[i].start = g_start_vaddr + i * PAGE_SIZE; + return pages; +} + +void free_reserved_mempool(void) +{ + if (!exist_res_mem()) + return; + + if (g_res_zone->all_pages != NULL) { + kfree(g_res_zone->all_pages); + g_res_zone->all_pages = NULL; + } + + if (g_res_zone->pages != NULL) { + kfree(g_res_zone->pages); + g_res_zone->pages = NULL; + } + + if (g_res_zone != NULL) { + kfree(g_res_zone); + g_res_zone = NULL; + } + + if (!g_res_mem_dbg_dentry) + return; + debugfs_remove_recursive(g_res_mem_dbg_dentry); + g_res_mem_dbg_dentry = NULL; +} + +static void show_res_mem_info(void) +{ + unsigned int i; + struct reserved_page_t *pos = NULL; + struct list_head *head = NULL; + unsigned int used = 0; + + if (g_res_zone == NULL) { + tloge("res zone is NULL\n"); + return; + } + + tloge("################## reserved memory info ######################\n"); + mutex_lock(&g_res_lock); + for (i = 0; i < get_res_page_size(); i++) { + if (g_res_zone->pages[i].count != 0) { + tloge("page[%02d], order=%02d, count=%d\n", + i, g_res_zone->pages[i].order, + g_res_zone->pages[i].count); + used += (1 << (uint32_t)g_res_zone->pages[i].order); + } + } + tloge("reserved memory total usage:%u/%u\n", used, get_res_page_size()); + tloge("--------------------------------------------------------------\n"); + + for (i = 0; i < (unsigned int)g_res_max_order; i++) { + head = &g_res_zone->free_areas[i].page_list; + if (list_empty(head) != 0) { + tloge("order[%02d] is empty\n", i); + } else { + list_for_each_entry(pos, head, node) + tloge("order[%02d]\n", i); + } + } + mutex_unlock(&g_res_lock); + + tloge("#############################################################\n"); +} + +static ssize_t mb_res_mem_state_read(struct file *filp, char __user *ubuf, + size_t cnt, loff_t 
*ppos) +{ + (void)(filp); + (void)(ubuf); + (void)cnt; + (void)(ppos); + show_res_mem_info(); + return 0; +} + +static const struct file_operations g_res_mem_dbg_state_fops = { + .owner = THIS_MODULE, + .read = mb_res_mem_state_read, +}; + +static void init_res_mem_dentry(void) +{ +#ifdef DEF_ENG + g_res_mem_dbg_dentry = debugfs_create_dir("tz_res_mem", NULL); + debugfs_create_file("state", STATE_MODE, g_res_mem_dbg_dentry, NULL, &g_res_mem_dbg_state_fops); +#endif +} + +static int res_mem_register(unsigned long paddr, unsigned int size) +{ + struct tc_ns_operation *operation = NULL; + struct tc_ns_smc_cmd *smc_cmd = NULL; + int ret = 0; + + smc_cmd = kzalloc(sizeof(*smc_cmd), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)smc_cmd)) { + tloge("alloc smc_cmd failed\n"); + return -ENOMEM; + } + + operation = kzalloc(sizeof(*operation), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)operation)) { + tloge("alloc operation failed\n"); + ret = -ENOMEM; + goto free_smc_cmd; + } + + operation->paramtypes = TEE_PARAM_TYPE_VALUE_INPUT | + (TEE_PARAM_TYPE_VALUE_INPUT << TEE_PARAM_NUM); + operation->params[0].value.a = paddr; + operation->params[0].value.b = paddr >> ADDR_TRANS_NUM; + operation->params[1].value.a = size; + + smc_cmd->cmd_type = CMD_TYPE_GLOBAL; + smc_cmd->cmd_id = GLOBAL_CMD_ID_REGISTER_RESMEM; + smc_cmd->operation_phys = virt_to_phys(operation); + smc_cmd->operation_h_phys = virt_to_phys(operation) >> ADDR_TRANS_NUM; + + if (tc_ns_smc(smc_cmd) != 0) { + tloge("resigter res mem failed\n"); + ret = -EIO; + } + + kfree(operation); + operation = NULL; +free_smc_cmd: + kfree(smc_cmd); + smc_cmd = NULL; + return ret; +} + +static void zone_init(struct virt_page *all_pages) +{ + int i; + struct reserved_free_area_t *area = NULL; + int max_order_cnt; + struct reserved_page_t *res_page = NULL; + + for (i = 0; i < (int)get_res_page_size(); i++) { + g_res_zone->pages[i].order = -1; + g_res_zone->pages[i].count = 0; + 
g_res_zone->pages[i].page = &all_pages[i]; + } + + for (i = 0; i <= g_res_max_order; i++) { + area = &g_res_zone->free_areas[i]; + INIT_LIST_HEAD(&area->page_list); + area->order = i; + } + + max_order_cnt = (int)(get_res_page_size() / (1 << (unsigned int)g_res_max_order)); + g_res_zone->all_pages = all_pages; + for (i = 0; i < max_order_cnt; i++) { + int idx = i * (1 << (unsigned int)g_res_max_order); + g_res_zone->pages[idx].order = g_res_max_order; + res_page = &g_res_zone->pages[idx]; + list_add_tail(&res_page->node, &area->page_list); + } +} + +int reserved_mempool_init(void) +{ + struct virt_page *all_pages = NULL; + int ret = 0; + unsigned long paddr; + + if (!exist_res_mem()) + return 0; + + ret = create_zone(); + if (ret != 0) + return ret; + + all_pages = create_virt_pages(); + if (all_pages == NULL) { + kfree(g_res_zone->pages); + g_res_zone->pages = NULL; + kfree(g_res_zone); + g_res_zone = NULL; + return -ENOMEM; + } + + paddr = g_start_paddr; + ret = res_mem_register(paddr, g_res_mem_size); + if (ret != 0) { + kfree(all_pages); + all_pages = NULL; + kfree(g_res_zone->pages); + g_res_zone->pages = NULL; + kfree(g_res_zone); + g_res_zone = NULL; + return -EIO; + } + + zone_init(all_pages); + + mutex_init(&g_res_lock); + init_res_mem_dentry(); + return 0; +} + +void *reserved_mem_alloc(size_t size) +{ + int i, j; + struct reserved_page_t *pos = NULL; + struct list_head *head = NULL; + int order = get_order(ALIGN(size, SZ_4K)); + unsigned long addr = 0; + + bool valid_param = (size > 0 && order <= g_res_max_order && order >= 0); + if (!valid_param) { + tloge("invalid alloc param, size %d, order %d, max %d\n",(int)size, order, g_res_max_order); + return NULL; + } + mutex_lock(&g_res_lock); + for (i = order; i <= g_res_max_order; i++) { + head = &g_res_zone->free_areas[i].page_list; + if (list_empty(head) != 0) + continue; + + pos = list_first_entry(head, struct reserved_page_t, node); + pos->count = 1; + pos->order = order; + + for (j = order; j < i; j++) 
{ + struct reserved_page_t *new_page = NULL; + new_page = pos + (1 << (unsigned int)j); + new_page->count = 0; + new_page->order = j; + list_add_tail(&new_page->node, &g_res_zone->free_areas[j].page_list); + } + list_del(&pos->node); + addr = pos->page->start; + break; + } + mutex_unlock(&g_res_lock); + return (void *)(uintptr_t)addr; +} + +static int get_virt_page_index(const void *ptr) +{ + unsigned long vaddr = (unsigned long)(uintptr_t)ptr; + unsigned long offset = vaddr - g_start_vaddr; + int pg_idx = offset / (1 << PAGE_SHIFT); + if ((unsigned int)pg_idx >= get_res_page_size() || pg_idx < 0) + return -1; + return pg_idx; +} + +static int buddy_merge(struct virt_page *vpage, int order, unsigned int *page_index) +{ + int i; + unsigned int cur_idx; + unsigned int buddy_idx; + struct reserved_page_t *self = NULL; + struct reserved_page_t *buddy = NULL; + + for (i = order; i < g_res_max_order; i++) { + cur_idx = vpage - g_res_zone->all_pages; + buddy_idx = cur_idx ^ (1 << (unsigned int)i); + self = &g_res_zone->pages[cur_idx]; + buddy = &g_res_zone->pages[buddy_idx]; + self->count = 0; + /* is buddy free */ + if (buddy->order == i && buddy->count == 0) { + /* release buddy */ + list_del(&buddy->node); + /* combine self and buddy */ + if (cur_idx > buddy_idx) { + vpage = buddy->page; + buddy->order = i + 1; + self->order = -1; + } else { + self->order = i + 1; + buddy->order = -1; + } + } else { + /* release self */ + list_add_tail(&self->node, + &g_res_zone->free_areas[i].page_list); + return -1; + } + } + + if (order == g_res_max_order) { + cur_idx = vpage - g_res_zone->all_pages; + tlogd("no need to find buddy, cur is %u\n", cur_idx); + *page_index = cur_idx; + return 0; + } + *page_index = (cur_idx > buddy_idx) ? 
buddy_idx : cur_idx; + return 0; +} + +void reserved_mem_free(const void *ptr) +{ + struct reserved_page_t *self = NULL; + int self_idx; + unsigned int page_index; + struct reserved_page_t *max_order_page = NULL; + + if (ptr == NULL) { + tloge("invalid ptr\n"); + return; + } + + mutex_lock(&g_res_lock); + self_idx = get_virt_page_index(ptr); + if (self_idx < 0) { + mutex_unlock(&g_res_lock); + tloge("invalid page\n"); + return; + } + self = &g_res_zone->pages[self_idx]; + if (self->count == 0) { + tloge("already free in reserved mempool\n"); + mutex_unlock(&g_res_lock); + return; + } + + if (buddy_merge(self->page, self->order, &page_index) < 0) { + mutex_unlock(&g_res_lock); + return; + } + + max_order_page = &g_res_zone->pages[page_index]; + list_add_tail(&max_order_page->node, + &g_res_zone->free_areas[g_res_max_order].page_list); + mutex_unlock(&g_res_lock); +} diff --git a/tzdriver/core/reserved_mempool.h b/tzdriver/core/reserved_mempool.h new file mode 100644 index 0000000000000000000000000000000000000000..1728bb0a482f9e62507d19fb558afb5ba9515472 --- /dev/null +++ b/tzdriver/core/reserved_mempool.h @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: reserved memory managing for sharing memory with TEE. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef RESERVED_MEMPOOOL_H +#define RESERVED_MEMPOOOL_H + +#include +#include + +int load_reserved_mem(void); +void unmap_res_mem(void); +void *reserved_mem_alloc(size_t size); +void free_reserved_mempool(void); +int reserved_mempool_init(void); +void reserved_mem_free(const void *ptr); +bool exist_res_mem(void); +unsigned long res_mem_virt_to_phys(unsigned long vaddr); +unsigned int get_res_mem_slice_size(void); +#endif diff --git a/tzdriver/core/secs_power_ctrl.h b/tzdriver/core/secs_power_ctrl.h new file mode 100644 index 0000000000000000000000000000000000000000..62b8a0f2168256397c666b2aa2dbfd582ca3970f --- /dev/null +++ b/tzdriver/core/secs_power_ctrl.h @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: function declaration for secs power ctrl. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef SECS_POWER_CTRL_H +#define SECS_POWER_CTRL_H + +#include + +#ifdef CONFIG_HISI_SECS_CTRL +#include + +#define SECS_SUSPEND_STATUS 0xA5A5 +unsigned long get_secs_suspend_status(void); + +static int power_on_cc(void) +{ + return hisi_secs_power_on(); +} + +static int power_down_cc(void) +{ + return hisi_secs_power_down(); +} + +static void secs_suspend_status(uint64_t target) +{ + if (get_secs_suspend_status() == SECS_SUSPEND_STATUS) + tloge("WARNING irq during suspend! 
No = %lld\n", target); +} +#else + +static int power_on_cc(void) +{ + return 0; +} + +static int power_down_cc(void) +{ + return 0; +} + +static void secs_suspend_status(uint64_t target) +{ + (void)target; + return; +} +#endif + +#endif diff --git a/tzdriver/core/session_manager.c b/tzdriver/core/session_manager.c new file mode 100644 index 0000000000000000000000000000000000000000..3063932a566cb5d9ac284f85a46027ad6356d1e2 --- /dev/null +++ b/tzdriver/core/session_manager.c @@ -0,0 +1,1459 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: function for session management. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
 */
#include "session_manager.h"
/* NOTE(review): the bare #include targets were lost during extraction — restore them. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE)
#include
#include
#include
#endif
#include
#include
#include "smc_smp.h"
#include "mem.h"
#include "gp_ops.h"
#include "tc_ns_log.h"
#include "teek_client_constants.h"
#include "client_hash_auth.h"
#include "mailbox_mempool.h"
#include "tc_client_driver.h"
#include "internal_functions.h"
#include "dynamic_ion_mem.h"
#include "ko_adapt.h"

#ifdef CONFIG_CRL_PATH
#include "tz_update_crl.h"
/* set to 1 once a CRL (main or backup) has been pushed to the TEE */
uint8_t g_update_crl_flag = 0;
#endif

/* serializes all TA image loading (see tc_ns_load_secfile / load_ta_image) */
static DEFINE_MUTEX(g_load_app_lock);
/* per-device cap on references to one service (dev->service_ref[i]) */
#define MAX_REF_COUNT (255)

/* record all service node and need mutex to avoid race */
struct list_head g_service_list;
DEFINE_MUTEX(g_service_list_lock);

/* Initialize the global service list; called once at driver start-up. */
void init_srvc_list(void)
{
	INIT_LIST_HEAD(&g_service_list);
}

/* Take a reference on a session; NULL-safe. */
void get_session_struct(struct tc_ns_session *session)
{
	if (!session)
		return;

	atomic_inc(&session->usage);
}

/*
 * Drop a reference on a session; on the last put the struct is
 * scrubbed (memset_s) and freed.  NULL-safe.
 */
void put_session_struct(struct tc_ns_session *session)
{
	if (!session || !atomic_dec_and_test(&session->usage))
		return;

	if (memset_s(session, sizeof(*session), 0, sizeof(*session)) != 0)
		tloge("Caution, memset failed!\n");
	kfree(session);
}

/* Take a reference on a service; NULL-safe. */
void get_service_struct(struct tc_ns_service *service)
{
	if (!service)
		return;

	atomic_inc(&service->usage);
	tlogd("service->usage = %d\n", atomic_read(&service->usage));
}

/*
 * Drop a reference on a service; on the last put the service is unlinked
 * from g_service_list and freed.  The list lock is held across the dec so
 * a concurrent tc_find_service_from_all cannot see a dying service.
 */
void put_service_struct(struct tc_ns_service *service)
{
	if (!service)
		return;

	tlogd("service->usage = %d\n", atomic_read(&service->usage));
	mutex_lock(&g_service_list_lock);
	if (atomic_dec_and_test(&service->usage)) {
		tlogd("del service [0x%x] from service list\n",
			*(uint32_t *)service->uuid);
		list_del(&service->head);
		kfree(service);
	}
	mutex_unlock(&g_service_list_lock);
}

/*
 * Attach a service to the first free slot of a device file and set its
 * per-device ref count to 1.  Returns 0, or -EFAULT when all
 * SERVICES_MAX_COUNT slots are taken.  Caller holds dev->service_lock.
 */
static int add_service_to_dev(struct tc_ns_dev_file *dev,
	struct tc_ns_service *service)
{
	uint32_t i;

	if (!dev || !service)
		return -EINVAL;

	for (i = 0; i < SERVICES_MAX_COUNT; i++) {
		if (!dev->services[i]) {
			tlogd("add service %u to %u\n", i, dev->dev_file_id);
			dev->services[i] = service;
			dev->service_ref[i] = 1;
			return 0;
		}
	}
	return -EFAULT;
}

/* Ask the TEE (via a global SMC) to dump its service/session state. */
static void tz_srv_sess_dump(const char *param)
{
	struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };

	(void)param;
	smc_cmd.cmd_id = GLOBAL_CMD_ID_DUMP_SRV_SESS;
	smc_cmd.cmd_type = CMD_TYPE_GLOBAL;

	livepatch_down_read_sem();
	if (tc_ns_smc(&smc_cmd))
		tloge("send dump service session failed\n");
	livepatch_up_read_sem();
}

/* Log every service on g_service_list, then trigger the TEE-side dump. */
void dump_services_status(const char *param)
{
	struct tc_ns_service *service = NULL;

	(void)param;
	mutex_lock(&g_service_list_lock);
	tlogi("show service list:\n");
	list_for_each_entry(service, &g_service_list, head) {
		tlogi("uuid-%x, usage=%d\n", *(uint32_t *)service->uuid,
			atomic_read(&service->usage));
	}
	mutex_unlock(&g_service_list_lock);

	tz_srv_sess_dump(param);
}

/*
 * Decrement a service's per-device ref count; when it reaches zero the
 * slot is cleared and the global service reference is dropped.
 * Caller holds dev->service_lock.
 */
static void del_service_from_dev(struct tc_ns_dev_file *dev,
	struct tc_ns_service *service)
{
	uint32_t i;

	for (i = 0; i < SERVICES_MAX_COUNT; i++) {
		if (dev->services[i] == service) {
			tlogd("dev service ref-%u = %u\n", i,
				dev->service_ref[i]);
			if (dev->service_ref[i] == 0) {
				tloge("Caution! No service to be deleted!\n");
				break;
			}
			dev->service_ref[i]--;
			if (dev->service_ref[i] == 0) {
				tlogd("del service %u from %u\n",
					i, dev->dev_file_id);
				dev->services[i] = NULL;
				put_service_struct(service);
			}
			break;
		}
	}
}

/*
 * Find a session by id that is owned by the given device file.
 * Caller must hold the service's session_lock.  Returns NULL if absent.
 */
struct tc_ns_session *tc_find_session_withowner(
	const struct list_head *session_list,
	unsigned int session_id, const struct tc_ns_dev_file *dev_file)
{
	struct tc_ns_session *session = NULL;

	if (!session_list || !dev_file) {
		tloge("session list or dev is null\n");
		return NULL;
	}

	list_for_each_entry(session, session_list, head) {
		if (session->session_id == session_id &&
			session->owner == dev_file)
			return session;
	}
	return NULL;
}

/* Look up a service registered on this device file by UUID; NULL if absent. */
struct tc_ns_service *tc_find_service_in_dev(const struct tc_ns_dev_file *dev,
	const unsigned char *uuid, int uuid_size)
{
	uint32_t i;

	if (!dev || !uuid || uuid_size != UUID_LEN)
		return NULL;

	for (i = 0; i < SERVICES_MAX_COUNT; i++) {
		if (dev->services[i] != NULL &&
			memcmp(dev->services[i]->uuid, uuid, UUID_LEN) == 0)
			return dev->services[i];
	}
	return NULL;
}

/*
 * Resolve (dev_file_id, cmd->uuid, cmd->context_id) to a session,
 * taking a reference on it (caller must put_session_struct).
 * The temporary service reference is dropped before returning.
 */
struct tc_ns_session *tc_find_session_by_uuid(unsigned int dev_file_id,
	const struct tc_ns_smc_cmd *cmd)
{
	struct tc_ns_dev_file *dev_file = NULL;
	struct tc_ns_service *service = NULL;
	struct tc_ns_session *session = NULL;

	if (!cmd) {
		tloge("parameter is null pointer!\n");
		return NULL;
	}

	dev_file = tc_find_dev_file(dev_file_id);
	if (!dev_file) {
		tloge("can't find dev file!\n");
		return NULL;
	}

	mutex_lock(&dev_file->service_lock);
	service = tc_find_service_in_dev(dev_file, cmd->uuid, UUID_LEN);
	get_service_struct(service);
	mutex_unlock(&dev_file->service_lock);
	if (!service) {
		tloge("can't find service!\n");
		return NULL;
	}

	mutex_lock(&service->session_lock);
	session = tc_find_session_withowner(&service->session_list,
		cmd->context_id, dev_file);
	get_session_struct(session);
	mutex_unlock(&service->session_lock);
	put_service_struct(service);
	if (!session) {
		tloge("can't find session-0x%x!\n", cmd->context_id);
		return NULL;
	}
	return session;
}

/*
 * Ask the TEE whether the TA image identified by uuid still needs loading.
 * Returns the TEE's answer read back from the mailbox (1 = load needed),
 * or a negative errno on mailbox/SMC failure.
 */
static int tc_ns_need_load_image(unsigned int file_id,
	const unsigned char *uuid, unsigned int uuid_len)
{
	int ret;
	int smc_ret;
	struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };
	struct mb_cmd_pack *mb_pack = NULL;
	char *mb_param = NULL;

	if (!uuid || uuid_len != UUID_LEN) {
		tloge("invalid uuid\n");
		return -ENOMEM;
	}
	mb_pack = mailbox_alloc_cmd_pack();
	if (!mb_pack) {
		tloge("alloc mb pack failed\n");
		return -ENOMEM;
	}
	mb_param = mailbox_copy_alloc(uuid, uuid_len);
	if (!mb_param) {
		tloge("alloc mb param failed\n");
		ret = -ENOMEM;
		goto clean;
	}
	mb_pack->operation.paramtypes = TEEC_MEMREF_TEMP_INOUT;
	mb_pack->operation.params[0].memref.buffer =
		mailbox_virt_to_phys((uintptr_t)mb_param);
	/* high 32 bits of the physical address travel separately */
	mb_pack->operation.buffer_h_addr[0] =
		(uint64_t)mailbox_virt_to_phys((uintptr_t)mb_param) >> ADDR_TRANS_NUM;
	mb_pack->operation.params[0].memref.size = SZ_4K;
	smc_cmd.cmd_id = GLOBAL_CMD_ID_NEED_LOAD_APP;
	smc_cmd.cmd_type = CMD_TYPE_GLOBAL;
	smc_cmd.dev_file_id = file_id;
	smc_cmd.context_id = 0;
	smc_cmd.operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation);
	smc_cmd.operation_h_phys =
		(uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM;

	smc_ret = tc_ns_smc(&smc_cmd);
	if (smc_ret != 0) {
		tloge("smc call returns error ret 0x%x\n", smc_ret);
		ret = -EFAULT;
		goto clean;
	} else {
		/* TEE writes its answer into the shared mailbox buffer */
		ret = *(int *)mb_param;
	}
clean:
	if (mb_param)
		mailbox_free(mb_param);
	mailbox_free(mb_pack);

	return ret;
}

/*
 * ioctl backend: load a secure file (TA, library, ...) supplied by userspace.
 * Client nodes may only load TA/LIB types; LOAD_PATCH is rejected here.
 * For a TA, the TEE is consulted first so already-loaded images are skipped.
 */
int tc_ns_load_secfile(struct tc_ns_dev_file *dev_file,
	void __user *argp, bool is_from_client_node)
{
	int ret;
	struct load_secfile_ioctl_struct ioctl_arg = { {0}, {0}, {NULL} };
	bool load = true;
	void *file_addr = NULL;

	if (!dev_file || !argp) {
		tloge("Invalid params !\n");
		return -EINVAL;
	}

	if (copy_from_user(&ioctl_arg, argp, sizeof(ioctl_arg)) != 0) {
		tloge("copy from user failed\n");
		/* NOTE(review): -EFAULT would be conventional for copy_from_user */
		ret = -ENOMEM;
		return ret;
	}

	if (ioctl_arg.sec_file_info.secfile_type >= LOAD_TYPE_MAX ||
		ioctl_arg.sec_file_info.secfile_type == LOAD_PATCH) {
		tloge("invalid secfile type: %d!", ioctl_arg.sec_file_info.secfile_type);
		return -EINVAL;
	}

	mutex_lock(&g_load_app_lock);
	if (is_from_client_node) {
		if (ioctl_arg.sec_file_info.secfile_type != LOAD_TA &&
			ioctl_arg.sec_file_info.secfile_type != LOAD_LIB) {
			tloge("this node does not allow this type of file to be loaded\n");
			mutex_unlock(&g_load_app_lock);
			return -EINVAL;
		}
	}

	if (ioctl_arg.sec_file_info.secfile_type == LOAD_TA) {
		ret = tc_ns_need_load_image(dev_file->dev_file_id, ioctl_arg.uuid, (unsigned int)UUID_LEN);
		if (ret != 1) /* 1 means we need to load image */
			load = false;
	}

	if (load) {
		/* userspace passes the file address split into low/high halves */
		file_addr = (void *)(uintptr_t)(ioctl_arg.memref.file_addr |
			(((uint64_t)ioctl_arg.memref.file_h_addr) << ADDR_TRANS_NUM));
		ret = tc_ns_load_image(dev_file, file_addr, &ioctl_arg.sec_file_info, NULL);
		if (ret != 0)
			tloge("load TA secfile: %d failed, ret = 0x%x\n",
				ioctl_arg.sec_file_info.secfile_type, ret);
	}
	mutex_unlock(&g_load_app_lock);
	/* best-effort write-back of updated sec_file_info to userspace */
	if (copy_to_user(argp, &ioctl_arg, sizeof(ioctl_arg)) != 0)
		tloge("copy to user failed\n");
	return ret;
}

/*
 * Return the uid of the calling task, or (uint32_t)-1 when the task's
 * credentials cannot be obtained.
 */
static uint32_t tc_ns_get_uid(void)
{
	struct task_struct *task = NULL;
	const struct cred *cred = NULL;
	uint32_t uid;

	rcu_read_lock();
	task = get_current();
	get_task_struct(task);
	rcu_read_unlock();
	cred = koadpt_get_task_cred(task);
	if (!cred) {
		tloge("failed to get uid of the task\n");
		put_task_struct(task);
		return (uint32_t)(-1);
	}

	uid = cred->uid.val;
	put_cred(cred);
	put_task_struct(task);
	tlogd("current uid is %u\n", uid);
	return uid;
}

#ifdef CONFIG_AUTH_SUPPORT_UNAME
/*
 * Resolve uid to a user name and stash it in dev_file->pub_key
 * (the pub_key buffer doubles as username storage in this mode).
 */
static int set_login_information_uname(struct tc_ns_dev_file *dev_file, uint32_t uid)
{
	char
uname[MAX_NAME_LENGTH] = { 0 };
	uint32_t username_len = 0;
	int ret = tc_ns_get_uname(uid, uname, sizeof(uname), &username_len);
	if (ret < 0 || username_len >= MAX_NAME_LENGTH) {
		/* NOTE(review): log message typo — "filed" should read "failed" */
		tloge("get user name filed\n");
		return -EFAULT;
	}
	if (memcpy_s(dev_file->pub_key, MAX_PUBKEY_LEN, uname, username_len)) {
		tloge("failed to copy username, pub key len=%u\n", dev_file->pub_key_len);
		return -EFAULT;
	}
	/* use pub_key to store username info */
	dev_file->pub_key_len = username_len;
	return 0;
}
#else
/* Store the caller's uid in dev_file->pub_key (default login mode). */
static int set_login_information_uid(struct tc_ns_dev_file *dev_file, uint32_t ca_uid)
{
	if (memcpy_s(dev_file->pub_key, MAX_PUBKEY_LEN, &ca_uid, sizeof(ca_uid)) != 0) {
		tloge("failed to copy pubkey, pub key len=%u\n",
			dev_file->pub_key_len);
		return -EFAULT;
	}
	dev_file->pub_key_len = sizeof(ca_uid);
	return 0;
}
#endif

/*
 * Modify the client context so params id 2 and 3 contain temp pointers to the
 * public key and package name for the open session. This is used for the
 * TEEC_LOGIN_IDENTIFY open session method
 */
static int set_login_information(struct tc_ns_dev_file *dev_file,
	struct tc_ns_client_context *context)
{
	uint64_t size_addr, buffer_addr;
	/* The daemon has failed to get login information or not supplied */
	if (dev_file->pkg_name_len == 0)
		return -EINVAL;
	/*
	 * The 3rd parameter buffer points to the pkg name buffer in the
	 * device file pointer
	 * get package name len and package name
	 */
	size_addr = (__u64)(uintptr_t)&dev_file->pkg_name_len;
	buffer_addr = (__u64)(uintptr_t)dev_file->pkg_name;
	context->params[3].memref.size_addr = (__u32)size_addr;
	context->params[3].memref.size_h_addr = (__u32)(size_addr >> ADDR_TRANS_NUM);
	context->params[3].memref.buffer = (__u32)buffer_addr;
	context->params[3].memref.buffer_h_addr = (__u32)(buffer_addr >> ADDR_TRANS_NUM);

	/* Set public key len and public key */
	if (dev_file->pub_key_len == 0) {
		/* If get public key failed, then get uid in kernel */
		uint32_t ca_uid = tc_ns_get_uid();
		if (ca_uid == (uint32_t)(-1)) {
			tloge("failed to get uid of the task\n");
			goto error;
		}
#ifdef CONFIG_AUTH_SUPPORT_UNAME
		if (set_login_information_uname(dev_file, ca_uid) != 0)
			goto error;
#else
		if (set_login_information_uid(dev_file, ca_uid) != 0)
			goto error;
#endif
#ifdef CONFIG_AUTH_HASH
		dev_file->pkg_name_len = strlen((unsigned char *)dev_file->pkg_name);
#endif
	}
	size_addr = (__u64)(uintptr_t)&dev_file->pub_key_len;
	buffer_addr = (__u64)(uintptr_t)dev_file->pub_key;
	context->params[2].memref.size_addr = (__u32)size_addr;
	context->params[2].memref.size_h_addr = (__u32)(size_addr >> ADDR_TRANS_NUM);
	context->params[2].memref.buffer = (__u32)buffer_addr;
	context->params[2].memref.buffer_h_addr = (__u32)(buffer_addr >> ADDR_TRANS_NUM);
	/* Now we mark the 2 parameters as input temp buffers */
	context->param_types = teec_param_types(
		teec_param_type_get(context->param_types, 0),
		teec_param_type_get(context->param_types, 1),
		TEEC_MEMREF_TEMP_INPUT, TEEC_MEMREF_TEMP_INPUT);
#ifdef CONFIG_AUTH_HASH
	if(set_login_information_hash(dev_file) != 0) {
		tloge("set login information hash failed\n");
		goto error;
	}
#endif
	return 0;
error:
	return -EFAULT;
}

/*
 * Validate the open-session login method (only TEEC_LOGIN_IDENTIFY is
 * accepted), check user-supplied params 0/1, and fill in the login info.
 * On success sets TC_CALL_LOGIN in *flags.  Returns positive EFAULT when
 * the TEE is rebooting so the caller reports context->returns.code.
 */
static int check_login_method(struct tc_ns_dev_file *dev_file,
	struct tc_ns_client_context *context, uint8_t *flags)
{
	int ret;

	if (!dev_file || !context || !flags)
		return -EFAULT;

	if (is_tee_rebooting()) {
		context->returns.code = TEE_ERROR_IS_DEAD;
		/* when ret > 0, use context return code */
		return EFAULT;
	}

	if (context->login.method != TEEC_LOGIN_IDENTIFY) {
		tloge("login method is not supported\n");
		return -EINVAL;
	}

	tlogd("login method is IDENTIFY\n");
	/* check if usr params 0 and 1 are valid */
	if (dev_file->kernel_api == TEE_REQ_FROM_USER_MODE &&
		(!tc_user_param_valid(context, (unsigned int)0) ||
		!tc_user_param_valid(context, (unsigned int)1)))
		return -EINVAL;

	ret = set_login_information(dev_file, context);
	if (ret != 0) {
		tloge("set login information failed ret =%d\n", ret);
		return ret;
	}
	*flags |= TC_CALL_LOGIN;

	return 0;
}

/*
 * If this device already references the service, bump its per-device ref
 * count and return it.  Sets *is_full (and returns NULL) when the ref
 * count is saturated at MAX_REF_COUNT.  Caller holds dev->service_lock.
 */
static struct tc_ns_service *tc_ref_service_in_dev(struct tc_ns_dev_file *dev,
	const unsigned char *uuid, int uuid_size, bool *is_full)
{
	uint32_t i;

	if (uuid_size != UUID_LEN)
		return NULL;

	for (i = 0; i < SERVICES_MAX_COUNT; i++) {
		if (dev->services[i] != NULL &&
			memcmp(dev->services[i]->uuid, uuid, UUID_LEN) == 0) {
			if (dev->service_ref[i] == MAX_REF_COUNT) {
				*is_full = true;
				return NULL;
			}
			dev->service_ref[i]++;
			return dev->services[i];
		}
	}
	return NULL;
}

/*
 * Allocate a new service for uuid, add it to g_service_list with usage 1,
 * and hand it back via *new_service.  Caller holds g_service_list_lock.
 */
static int tc_ns_service_init(const unsigned char *uuid, uint32_t uuid_len,
	struct tc_ns_service **new_service)
{
	int ret = 0;
	struct tc_ns_service *service = NULL;

	if (!uuid || !new_service || uuid_len != UUID_LEN)
		return -EINVAL;

	service = kzalloc(sizeof(*service), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)service)) {
		tloge("kzalloc failed\n");
		ret = -ENOMEM;
		return ret;
	}

	if (memcpy_s(service->uuid, sizeof(service->uuid), uuid, uuid_len) != 0) {
		kfree(service);
		return -EFAULT;
	}

	INIT_LIST_HEAD(&service->session_list);
	mutex_init(&service->session_lock);
	list_add_tail(&service->head, &g_service_list);
	tlogd("add service: 0x%x to service list\n", *(const uint32_t *)uuid);
	atomic_set(&service->usage, 1);
	mutex_init(&service->operation_lock);
	*new_service = service;

	return ret;
}

/*
 * Linear search of g_service_list by uuid.  Caller holds
 * g_service_list_lock.  Returns NULL when no match exists.
 */
static struct tc_ns_service *tc_find_service_from_all(
	const unsigned char *uuid, uint32_t uuid_len)
{
	struct tc_ns_service *service = NULL;

	if (!uuid || uuid_len != UUID_LEN)
		return NULL;

	list_for_each_entry(service, &g_service_list, head) {
		if (memcmp(service->uuid, uuid, sizeof(service->uuid)) == 0)
			return service;
	}

	return NULL;
}

/*
 * Get the service for context->uuid, creating it if needed, and bind it to
 * this device file.  Three cases: already referenced by this dev (bump
 * per-dev ref), known globally (take a usage ref), or brand new (init with
 * usage 1).  Returns NULL on failure or when the per-dev ref is saturated.
 */
static struct tc_ns_service *find_service(struct tc_ns_dev_file *dev_file,
	const struct tc_ns_client_context *context)
{
	int ret;
	struct tc_ns_service *service = NULL;
	bool is_full = false;

	mutex_lock(&dev_file->service_lock);
	service = tc_ref_service_in_dev(dev_file, context->uuid,
		UUID_LEN, &is_full);
	/* if service has been opened in this dev or ref cnt is full */
	if (service || is_full) {
		/*
		 * If service has been reference by this dev, find service in dev
		 * will incre ref count to declaim there's how many callers to
		 * this service from the dev, instead of incre service->usage.
		 * While close session, dev->service_ref[i] will decre and till
		 * it get to 0, put service struct will be called.
		 */
		mutex_unlock(&dev_file->service_lock);
		return service;
	}
	mutex_lock(&g_service_list_lock);
	service = tc_find_service_from_all(context->uuid, UUID_LEN);
	/* if service has been opened in other dev */
	if (service) {
		get_service_struct(service);
		mutex_unlock(&g_service_list_lock);
		goto add_service;
	}
	/* Create a new service if we couldn't find it in list */
	ret = tc_ns_service_init(context->uuid, UUID_LEN, &service);
	/* unlock after init to make sure find service from all is correct */
	mutex_unlock(&g_service_list_lock);
	if (ret != 0) {
		tloge("service init failed");
		mutex_unlock(&dev_file->service_lock);
		return NULL;
	}
add_service:
	ret = add_service_to_dev(dev_file, service);
	mutex_unlock(&dev_file->service_lock);
	if (ret != 0) {
		/*
		 * for new srvc, match init usage to 1;
		 * for srvc already exist, match get;
		 */
		put_service_struct(service);
		service = NULL;
		tloge("fail to add service to dev\n");
		return NULL;
	}
	return service;
}

/* Reject NULL/empty buffers and TA images larger than 8M. */
static bool is_valid_ta_size(const char *file_buffer, unsigned int file_size)
{
	if (!file_buffer || file_size == 0) {
		tloge("invalid load ta size\n");
		return false;
	}

	if (file_size > SZ_8M) {
		tloge("not support TA larger than 8M, size=%u\n", file_size);
		return false;
	}
	return true;
}

/*
 * Allocate the mailbox buffers needed to stream a TA image to the TEE:
 * the load buffer (shrinking by halves until an allocation succeeds),
 * the command pack, and the returned-uuid buffer.  All-or-nothing.
 */
static int alloc_for_load_image(struct load_img_params *params)
{
	/* we will try any possible to alloc mailbox mem to load TA */
	for (; params->mb_load_size > 0; params->mb_load_size >>= 1) {
		params->mb_load_mem = mailbox_alloc(params->mb_load_size, 0);
		if (params->mb_load_mem)
			break;
		tlogw("alloc mem size=%u for TA load mem fail\n",
			params->mb_load_size);
	}

	if (!params->mb_load_mem) {
		tloge("alloc TA load mem failed\n");
		return -ENOMEM;
	}

	params->mb_pack = mailbox_alloc_cmd_pack();
	if (!params->mb_pack) {
		mailbox_free(params->mb_load_mem);
		params->mb_load_mem = NULL;
		tloge("alloc mb pack failed\n");
		return -ENOMEM;
	}

	params->uuid_return = mailbox_alloc(sizeof(*(params->uuid_return)), 0);
	if (!params->uuid_return) {
		mailbox_free(params->mb_load_mem);
		params->mb_load_mem = NULL;
		mailbox_free(params->mb_pack);
		params->mb_pack = NULL;
		tloge("alloc uuid failed\n");
		return -ENOMEM;
	}
	return 0;
}

/*
 * Fill the SMC command and mailbox operation for one load-image frame:
 * param[0] carries the frame payload, param[2] receives the TA uuid.
 */
static void pack_load_frame_cmd(uint32_t load_size,
	const struct load_img_params *params, struct tc_ns_smc_cmd *smc_cmd)
{
	struct mb_cmd_pack *mb_pack = params->mb_pack;
	char *mb_load_mem = params->mb_load_mem;
	struct tc_uuid *uuid_return = params->uuid_return;

	mb_pack->operation.params[0].memref.buffer =
		mailbox_virt_to_phys((uintptr_t)mb_load_mem);
	mb_pack->operation.buffer_h_addr[0] =
		(uint64_t)mailbox_virt_to_phys((uintptr_t)mb_load_mem) >> ADDR_TRANS_NUM;
	/* payload is prefixed by the int-sized last-block flag */
	mb_pack->operation.params[0].memref.size = load_size + sizeof(int);
	mb_pack->operation.params[2].memref.buffer =
		mailbox_virt_to_phys((uintptr_t)uuid_return);
	mb_pack->operation.buffer_h_addr[2] =
		(uint64_t)mailbox_virt_to_phys((uintptr_t)uuid_return) >> ADDR_TRANS_NUM;
	mb_pack->operation.params[2].memref.size = sizeof(*uuid_return);
	mb_pack->operation.paramtypes = teec_param_types(TEEC_MEMREF_TEMP_INPUT,
		TEEC_VALUE_INOUT, TEEC_MEMREF_TEMP_OUTPUT, TEEC_VALUE_INOUT);

	smc_cmd->cmd_type = CMD_TYPE_GLOBAL;
	smc_cmd->cmd_id =
GLOBAL_CMD_ID_LOAD_SECURE_APP;
	smc_cmd->context_id = 0;
	smc_cmd->operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation);
	smc_cmd->operation_h_phys =
		(uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM;
}

/*
 * Copy one frame of the image into the mailbox load buffer, just past the
 * last-block flag.  Kernel-mode callers supply a kernel buffer (memcpy_s);
 * user-mode callers supply a __user pointer (copy_from_user).
 */
static int32_t load_image_copy_file(struct load_img_params *params, uint32_t load_size,
	int32_t load_flag, uint32_t loaded_size)
{
	if (params->dev_file->kernel_api == TEE_REQ_FROM_KERNEL_MODE) {
		if (memcpy_s(params->mb_load_mem + sizeof(load_flag),
			params->mb_load_size - sizeof(load_flag),
			params->file_buffer + loaded_size, load_size) != 0) {
			tloge("memcpy file buf get fail\n");
			return -EFAULT;
		}
		return 0;
	}
	if (copy_from_user(params->mb_load_mem + sizeof(load_flag),
		(const void __user *)params->file_buffer + loaded_size, load_size)) {
		tloge("file buf get fail\n");
		return -EFAULT;
	}
	return 0;
}

/*
 * Stream the image to the TEE in load_times frames.  Each frame carries a
 * last-block flag; the final frame triggers ION post-processing.  On SMC
 * failure, tee_ret (when supplied) and sec_load_err are filled for the caller.
 */
static int load_image_by_frame(struct load_img_params *params, unsigned int load_times,
	struct tc_ns_client_return *tee_ret, struct sec_file_info *sec_file_info)
{
	char *p = params->mb_load_mem;
	uint32_t load_size;
	int load_flag = 1; /* 0:it's last block, 1:not last block */
	uint32_t loaded_size = 0;
	unsigned int index;
	struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };
	int smc_ret;

	for (index = 0; index < load_times; index++) {
		smc_cmd.err_origin = TEEC_ORIGIN_COMMS;
		if (index == (load_times - 1)) {
			load_flag = 0;
			load_size = params->file_size - loaded_size;
		} else {
			load_size = params->mb_load_size - sizeof(load_flag);
		}
		*(int *)p = load_flag;
		if (load_size > params->mb_load_size - sizeof(load_flag)) {
			tloge("invalid load size %u/%u\n", load_size,
				params->mb_load_size);
			return -EINVAL;
		}

		if (load_image_copy_file(params, load_size, load_flag, loaded_size) != 0)
			return -EFAULT;

		pack_load_frame_cmd(load_size, params, &smc_cmd);
		params->mb_pack->operation.params[3].value.a = index;
		params->mb_pack->operation.params[1].value.a = sec_file_info->secfile_type;
		smc_cmd.dev_file_id = params->dev_file->dev_file_id;
		smc_ret = tc_ns_smc(&smc_cmd);
		tlogd("configid=%u, ret=%d, load_flag=%d, index=%u\n",
			params->mb_pack->operation.params[1].value.a, smc_ret,
			load_flag, index);

		if (smc_ret != 0) {
			if (tee_ret != NULL) {
				tee_ret->code = smc_ret;
				tee_ret->origin = smc_cmd.err_origin;
			}
			sec_file_info->sec_load_err = (int32_t)params->mb_pack->operation.params[3].value.b;
			return -EFAULT;
		}

		/* after the last frame succeeded, register the TA's ION memory */
		if (!smc_ret && !load_flag && load_image_for_ion(params, tee_ret ? &tee_ret->origin : NULL))
			return -EPERM;

		loaded_size += load_size;
	}
	return 0;
}

/*
 * Convenience wrapper: load an image of the given type while holding
 * g_load_app_lock (for callers outside the ioctl path).
 */
int tc_ns_load_image_with_lock(struct tc_ns_dev_file *dev, const char *file_buffer,
	unsigned int file_size, enum secfile_type_t type)
{
	int ret;
	struct sec_file_info sec_file = {0, 0, 0};

	if (!dev || !file_buffer) {
		tloge("dev or file buffer is NULL!\n");
		return -EINVAL;
	}

	sec_file.secfile_type = type;
	sec_file.file_size = file_size;

	mutex_lock(&g_load_app_lock);
	ret = tc_ns_load_image(dev, file_buffer, &sec_file, NULL);
	mutex_unlock(&g_load_app_lock);

	return ret;
}

/* Release the three mailbox buffers allocated by alloc_for_load_image. */
static void free_load_image_buffer(struct load_img_params *params)
{
	mailbox_free(params->mb_load_mem);
	mailbox_free(params->mb_pack);
	mailbox_free(params->uuid_return);
}

/*
 * Core image-load routine: size the mailbox load buffer (at most 1M,
 * otherwise the file size rounded up to 4K), compute the number of frames,
 * and stream them to the TEE.  Frees all mailbox buffers on every path.
 */
int load_image(struct load_img_params *params,
	struct sec_file_info *sec_file_info, struct tc_ns_client_return *tee_ret)
{
	int ret;
	unsigned int load_times;
	unsigned int file_size;

	/* tee_ret can be null */
	if (params == NULL || sec_file_info == NULL)
		return -1;

	file_size = params->file_size;

	params->mb_load_size = (file_size > (SZ_1M - sizeof(int))) ?
		SZ_1M : ALIGN(file_size, SZ_4K);

	ret = alloc_for_load_image(params);
	if (ret != 0) {
		tloge("Alloc load image buf fail!\n");
		return ret;
	}

	/* the shrink loop in alloc_for_load_image may have left a useless size */
	if (params->mb_load_size <= sizeof(int)) {
		tloge("mb load size is too small!\n");
		free_load_image_buffer(params);
		return -ENOMEM;
	}

	load_times = file_size / (params->mb_load_size - sizeof(int));
	if ((file_size % (params->mb_load_size - sizeof(int))) != 0)
		load_times += 1;

	ret = load_image_by_frame(params, load_times, tee_ret, sec_file_info);
	if (ret != 0) {
		tloge("load image by frame fail!\n");
		free_load_image_buffer(params);
		return ret;
	}

	free_load_image_buffer(params);
	return 0;
}

/*
 * Validate arguments, optionally push the certificate revocation list to
 * the TEE once (CONFIG_CRL_PATH), then load the image via load_image().
 */
int tc_ns_load_image(struct tc_ns_dev_file *dev, const char *file_buffer,
	struct sec_file_info *sec_file_info, struct tc_ns_client_return *tee_ret)
{
	int ret;
	unsigned int file_size;
	struct load_img_params params = { dev, file_buffer, 0, NULL, NULL, NULL, 0 };

	if (!dev || !file_buffer || !sec_file_info) {
		tloge("dev or file buffer or sec_file_info is NULL!\n");
		return -EINVAL;
	}

	file_size = sec_file_info->file_size;
	params.file_size = file_size;
#ifdef CONFIG_CRL_PATH
	/* one-shot CRL update: try the main path, fall back to the backup */
	if (g_update_crl_flag == 0) {
		if (tz_update_crl(CONFIG_CRL_PATH, dev) != 0) {
			tloge("tzdriver updates main crl failed\n");
			if (tz_update_crl(CONFIG_CRL_BAK_PATH, dev) != 0) {
				tloge("tzdriver updates backup crl failed\n");
			} else {
				g_update_crl_flag = 1;
				tloge("tzdriver updates backup crl successfully\n");
			}
		} else {
			g_update_crl_flag = 1;
			tloge("tzdriver updates main crl successfully\n");
		}
	}
#endif

	if (!is_valid_ta_size(file_buffer, file_size))
		return -EINVAL;

	return load_image(&params, sec_file_info, tee_ret);
}

/*
 * Ensure the TA for this open-session context is loaded in the TEE,
 * loading it from context->file_buffer when the TEE reports it missing.
 * Serialized by g_load_app_lock.
 */
static int load_ta_image(struct tc_ns_dev_file *dev_file,
	struct tc_ns_client_context *context)
{
	int ret;
	struct sec_file_info sec_file = {0, 0, 0};
	struct tc_ns_client_return tee_ret = {0};
	void *file_addr = NULL;

	tee_ret.origin = TEEC_ORIGIN_COMMS;

	mutex_lock(&g_load_app_lock);
	ret = tc_ns_need_load_image(dev_file->dev_file_id, context->uuid, (unsigned int)UUID_LEN);
	if (ret == 1) { /* 1 means we need to load image */
		if (!context->file_buffer) {
			tloge("context's file_buffer is NULL");
			mutex_unlock(&g_load_app_lock);
			return -1;
		}
		file_addr = (void *)(uintptr_t)(context->memref.file_addr |
			(((uint64_t)context->memref.file_h_addr) << ADDR_TRANS_NUM));
		sec_file.secfile_type = LOAD_TA;
		sec_file.file_size = context->file_size;
		ret = tc_ns_load_image(dev_file, file_addr, &sec_file, &tee_ret);
		if (ret != 0) {
			tloge("load image failed, ret=%x", ret);
			context->returns.code = tee_ret.code;
			if (tee_ret.origin != TEEC_ORIGIN_COMMS) {
				context->returns.origin = tee_ret.origin;
				/* positive EFAULT: caller uses context return code */
				ret = EFAULT;
			}
			mutex_unlock(&g_load_app_lock);
			return ret;
		}
	}
	mutex_unlock(&g_load_app_lock);

	return ret;
}

/*
 * Initialize a freshly opened session node (usage 1, owned by dev_file)
 * and link it into the service's session list.
 */
static void init_new_sess_node(struct tc_ns_dev_file *dev_file,
	const struct tc_ns_client_context *context,
	struct tc_ns_service *service,
	struct tc_ns_session *session)
{
	session->session_id = context->session_id;
	atomic_set(&session->usage, 1);
	session->owner = dev_file;

	session->wait_data.send_wait_flag = 0;
	init_waitqueue_head(&session->wait_data.send_cmd_wq);

	mutex_lock(&service->session_lock);
	list_add_tail(&session->head, &service->session_list);
	mutex_unlock(&service->session_lock);
}

/*
 * Perform the actual open-session call under the service's operation lock:
 * load the TA if needed, issue the client call, and on success link the
 * new session node.  On failure the session's ION memory is reclaimed.
 */
static int proc_open_session(struct tc_ns_dev_file *dev_file,
	struct tc_ns_client_context *context, struct tc_ns_service *service,
	struct tc_ns_session *session, uint8_t flags)
{
	int ret;
	struct tc_call_params params = {
		dev_file, context, session, flags
	};

	mutex_lock(&service->operation_lock);
	ret = load_ta_image(dev_file, context);
	if (ret != 0) {
		tloge("load ta image failed\n");
		mutex_unlock(&service->operation_lock);
		return ret;
	}

	ret = tc_client_call(&params);
	if (ret != 0) {
		/* Clean this session secure
information */
		kill_ion_by_uuid((struct tc_uuid *)context->uuid);
		mutex_unlock(&service->operation_lock);
		tloge("smc call returns error, ret=0x%x\n", ret);
		return ret;
	}
	init_new_sess_node(dev_file, context, service, session);
	/*
	 * session_id in tee is unique, but in concurrency scene
	 * same session_id may appear in tzdriver, put session_list
	 * add/del in service->operation_lock can avoid it.
	 */
	mutex_unlock(&service->operation_lock);
	return ret;
}

/* Scrub the login-info pointers planted into params 2/3 by set_login_information. */
static void clear_context_param(struct tc_ns_client_context *context)
{
	context->params[2].memref.size_addr = 0;
	context->params[2].memref.size_h_addr = 0;
	context->params[2].memref.buffer = 0;
	context->params[2].memref.buffer_h_addr = 0;
	context->params[3].memref.size_addr = 0;
	context->params[3].memref.size_h_addr = 0;
	context->params[3].memref.buffer = 0;
	context->params[3].memref.buffer_h_addr = 0;
}

/*
 * Open a session with the TA identified by context->uuid on behalf of
 * dev_file: validate the login method, bind (or create) the service,
 * allocate and authenticate the session, then perform the open call.
 * On any outcome the kernel pointers placed in the context are cleared
 * before returning to the caller.
 */
int tc_ns_open_session(struct tc_ns_dev_file *dev_file,
	struct tc_ns_client_context *context)
{
	int ret;
	struct tc_ns_service *service = NULL;
	struct tc_ns_session *session = NULL;
	uint8_t flags = TC_CALL_GLOBAL;

	if (!dev_file || !context) {
		tloge("invalid dev_file or context\n");
		return -EINVAL;
	}

	ret = check_login_method(dev_file, context, &flags);
	if (ret != 0)
		goto err_clear_param;

	context->cmd_id = GLOBAL_CMD_ID_OPEN_SESSION;

	service = find_service(dev_file, context);
	if (!service) {
		tloge("find service failed\n");
		ret = -ENOMEM;
		goto err_clear_param;
	}

	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)session)) {
		tloge("kzalloc failed\n");
		mutex_lock(&dev_file->service_lock);
		del_service_from_dev(dev_file, service);
		mutex_unlock(&dev_file->service_lock);
		ret = -ENOMEM;
		goto err_clear_param;
	}
	mutex_init(&session->ta_session_lock);

#ifndef CONFIG_LIBLINUX
	ret = calc_client_auth_hash(dev_file, context, session);
	if (ret != 0) {
		tloge("calc client auth hash failed\n");
		goto err_free_rsrc;
	}
#endif

	ret = proc_open_session(dev_file, context, service, session, flags);
	/* success: keep service/session, only clear the login params */
	if (ret == 0)
		goto err_clear_param;
err_free_rsrc:
	mutex_lock(&dev_file->service_lock);
	del_service_from_dev(dev_file, service);
	mutex_unlock(&dev_file->service_lock);

	kfree(session);
err_clear_param:
	clear_context_param(context);
	return ret;
}

/* Look up and reference this dev's session for context->session_id. */
static struct tc_ns_session *get_session(struct tc_ns_service *service,
	const struct tc_ns_dev_file *dev_file,
	const struct tc_ns_client_context *context)
{
	struct tc_ns_session *session = NULL;

	mutex_lock(&service->session_lock);
	session = tc_find_session_withowner(&service->session_list,
		context->session_id, dev_file);
	get_session_struct(session);
	mutex_unlock(&service->session_lock);

	return session;
}

/* Look up and reference this dev's service for context->uuid. */
static struct tc_ns_service *get_service(struct tc_ns_dev_file *dev_file,
	const struct tc_ns_client_context *context)
{
	struct tc_ns_service *service = NULL;

	mutex_lock(&dev_file->service_lock);
	service = tc_find_service_in_dev(dev_file, context->uuid, UUID_LEN);
	get_service_struct(service);
	mutex_unlock(&dev_file->service_lock);

	return service;
}

/*
 * Issue the synchronous close-session call to the TEE for one session and
 * reclaim its ION memory.  Builds a fresh context from uuid/session_id.
 */
static int close_session(struct tc_ns_dev_file *dev,
	struct tc_ns_session *session, const unsigned char *uuid,
	unsigned int uuid_len, unsigned int session_id)
{
	struct tc_ns_client_context context;
	int ret;
	struct tc_call_params params = {
		dev, &context, session, 0
	};

	if (uuid_len != UUID_LEN)
		return -EINVAL;

	if (memset_s(&context, sizeof(context), 0, sizeof(context)) != 0)
		return -EFAULT;

	if (memcpy_s(context.uuid, sizeof(context.uuid), uuid, uuid_len) != 0)
		return -EFAULT;

	context.session_id = session_id;
	context.cmd_id = GLOBAL_CMD_ID_CLOSE_SESSION;
	params.flags = TC_CALL_GLOBAL | TC_CALL_SYNC;
	ret = tc_client_call(&params);
	if (ret != 0)
		tloge("close session failed, ret=0x%x\n", ret);

	kill_ion_by_uuid((struct tc_uuid *)context.uuid);
	return ret;
}

/*
 * Close every session on this service that is owned by dev, unlinking each
 * node and dropping its reference.  Caller holds service->operation_lock.
 */
static void close_session_in_service_list(struct tc_ns_dev_file *dev,
	struct tc_ns_service *service)
{
	struct tc_ns_session *tmp_session = NULL;
	struct tc_ns_session *session = NULL;
	int ret;

	list_for_each_entry_safe(session, tmp_session,
		&service->session_list, head) {
		if (session->owner != dev)
			continue;
		ret = close_session(dev, session, service->uuid,
			(unsigned int)UUID_LEN, session->session_id);
		if (ret != 0)
			tloge("close session smc failed when close fd!\n");
		mutex_lock(&service->session_lock);
		list_del(&session->head);
		mutex_unlock(&service->session_lock);

		put_session_struct(session); /* pair with open session */
	}
}

/* True when any service on this device still has sessions on its list. */
static bool if_exist_unclosed_session(struct tc_ns_dev_file *dev)
{
	uint32_t index;

	for (index = 0; index < SERVICES_MAX_COUNT; index++) {
		if (dev->services[index] != NULL &&
			list_empty(&dev->services[index]->session_list) == 0)
			return true;
	}
	return false;
}

/*
 * Worker thread body: close every unclosed session on the device, drop the
 * matching service references, and signal dev->close_comp when done.
 */
static int close_session_thread_fn(void *arg)
{
	struct tc_ns_dev_file *dev = arg;
	uint32_t index;
	struct tc_ns_service *service = NULL;

	/* close unclosed session */
	for (index = 0; index < SERVICES_MAX_COUNT; index++) {
		if (dev->services[index] != NULL &&
			list_empty(&dev->services[index]->session_list) == 0) {
			service = dev->services[index];

			mutex_lock(&service->operation_lock);
			close_session_in_service_list(dev, service);
			mutex_unlock(&service->operation_lock);

			put_service_struct(service); /* pair with open session */
		}
	}

	tlogd("complete close all unclosed session\n");
	complete(&dev->close_comp);
	return 0;
}

/*
 * On device-file release: spawn a bound kthread to close any sessions the
 * process left open, and wait for it to finish.  Skipped entirely while the
 * TEE is rebooting (recovery closes sessions in the reboot path).
 */
void close_unclosed_session_in_kthread(struct tc_ns_dev_file *dev)
{
	struct task_struct *close_thread = NULL;

	if (!dev) {
		tloge("dev is invalid\n");
		return;
	}

	if (!if_exist_unclosed_session(dev))
		return;

	/* when self recovery, release session in reboot interface */
	if (is_tee_rebooting())
		return;
	close_thread = kthread_create(close_session_thread_fn,
		dev, "close_fn_%6d", dev->dev_file_id);
	if (unlikely(IS_ERR_OR_NULL(close_thread))) {
		tloge("fail to create close session thread\n");
		return;
	}

	tz_kthread_bind_mask(close_thread);
	wake_up_process(close_thread);
	wait_for_completion(&dev->close_comp);
	tlogd("wait for completion success\n");
}

/*
 * Close one session identified by (uuid, session_id) for this device file.
 * Rejects the call while the TEE is rebooting.
 */
int tc_ns_close_session(struct tc_ns_dev_file *dev_file,
	struct tc_ns_client_context *context)
{
	int ret = -EINVAL;
	struct tc_ns_service *service = NULL;
	struct tc_ns_session *session = NULL;

	if (!dev_file || !context) {
		tloge("invalid dev_file or context\n");
		return ret;
	}

	if (is_tee_rebooting()) {
		context->returns.code = TEE_ERROR_IS_DEAD;
		return TEE_ERROR_IS_DEAD;
	}

	service = get_service(dev_file, context);
	if (!service) {
		tloge("invalid service\n");
		return ret;
	}
	/*
	 * session_id in tee is unique, but in concurrency scene
	 * same session_id may appear in tzdriver, put session_list
	 * add/del in service->operation_lock can avoid it.
+ */ + mutex_lock(&service->operation_lock); + session = get_session(service, dev_file, context); + if (session) { + int ret2; + mutex_lock(&session->ta_session_lock); + ret2 = close_session(dev_file, session, context->uuid, + (unsigned int)UUID_LEN, context->session_id); + mutex_unlock(&session->ta_session_lock); + if (ret2 != 0) + tloge("close session smc failed!\n"); + mutex_lock(&service->session_lock); + list_del(&session->head); + mutex_unlock(&service->session_lock); + + put_session_struct(session); + put_session_struct(session); /* pair with open session */ + + ret = 0; + mutex_lock(&dev_file->service_lock); + del_service_from_dev(dev_file, service); + mutex_unlock(&dev_file->service_lock); + } else { + tloge("invalid session\n"); + } + mutex_unlock(&service->operation_lock); + put_service_struct(service); + return ret; +} + +int tc_ns_send_cmd(struct tc_ns_dev_file *dev_file, + struct tc_ns_client_context *context) +{ + int ret = -EINVAL; + struct tc_ns_service *service = NULL; + struct tc_ns_session *session = NULL; + struct tc_call_params params = { + dev_file, context, NULL, 0 + }; + + if (!dev_file || !context) { + tloge("invalid dev_file or context\n"); + return ret; + } + + if (is_tee_rebooting()) { + context->returns.code = TEE_ERROR_IS_DEAD; + return EFAULT; + } + + service = get_service(dev_file, context); + if (service) { + session = get_session(service, dev_file, context); + put_service_struct(service); + if (session) { + tlogd("send cmd find session id %x\n", + context->session_id); + goto find_session; + } + tloge("can't find session\n"); + } else { + tloge("can't find service\n"); + } + + return ret; +find_session: + mutex_lock(&session->ta_session_lock); + params.sess = session; + ret = tc_client_call(¶ms); + mutex_unlock(&session->ta_session_lock); + put_session_struct(session); + if (ret != 0) + tloge("smc call returns error, ret=0x%x\n", ret); + return ret; +} + +static int ioctl_session_send_cmd(struct tc_ns_dev_file *dev_file, + struct 
tc_ns_client_context *context, void *argp) +{ + int ret; + + ret = tc_ns_send_cmd(dev_file, context); + if (ret != 0) + tloge("send cmd failed ret is %d\n", ret); + if (copy_to_user(argp, context, sizeof(*context)) != 0) { + if (ret == 0) + ret = -EFAULT; + } + return ret; +} + +int tc_client_session_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret = -EINVAL; + void *argp = (void __user *)(uintptr_t)arg; + struct tc_ns_dev_file *dev_file = NULL; + struct tc_ns_client_context context; + + if (!argp || !file) { + tloge("invalid params\n"); + return -EINVAL; + } + + dev_file = file->private_data; + if (copy_from_user(&context, argp, sizeof(context)) != 0) { + tloge("copy from user failed\n"); + return -EFAULT; + } + + context.returns.origin = TEEC_ORIGIN_COMMS; + switch (cmd) { + case TC_NS_CLIENT_IOCTL_SES_OPEN_REQ: + ret = tc_ns_open_session(dev_file, &context); + if (ret != 0) + tloge("open session failed ret is %d\n", ret); + if (copy_to_user(argp, &context, sizeof(context)) != 0 && ret == 0) + ret = -EFAULT; + break; + case TC_NS_CLIENT_IOCTL_SES_CLOSE_REQ: + ret = tc_ns_close_session(dev_file, &context); + break; + case TC_NS_CLIENT_IOCTL_SEND_CMD_REQ: + tee_trace_add_event(INVOKE_CMD_START, 0); + ret = ioctl_session_send_cmd(dev_file, &context, argp); + tee_trace_add_event(INVOKE_CMD_END, 0); + break; + default: + tloge("invalid cmd:0x%x!\n", cmd); + return ret; + } + /* + * Don't leak ERESTARTSYS to user space. + * + * CloseSession is not reentrant, so convert to -EINTR. + * In other case, restart_syscall(). + * + * It is better to call it right after the error code + * is generated (in tc_client_call), but kernel CAs are + * still exist when these words are written. Setting TIF + * flags for callers of those CAs is very hard to analysis. + * + * For kernel CA, when ERESTARTSYS is seen, loop in kernel + * instead of notifying user. + * + * P.S. ret code in this function is in mixed naming space. + * See the definition of ret. 
However, this function never + * return its default value, so using -EXXX is safe. + */ + if (ret == -ERESTARTSYS) { + if (cmd == TC_NS_CLIENT_IOCTL_SES_CLOSE_REQ) + ret = -EINTR; + else + return restart_syscall(); + } + return ret; +} + +static void cleanup_session(struct tc_ns_service *service) +{ + struct tc_ns_session *session = NULL; + struct tc_ns_session *session_tmp = NULL; + + if (!service) + return; + + /* close unclosed session */ + if (list_empty(&service->session_list) == 0) { + mutex_lock(&service->operation_lock); + list_for_each_entry_safe(session, session_tmp, &service->session_list, head) { + tlogd("clean up session %u\n", session->session_id); + mutex_lock(&service->session_lock); + list_del(&session->head); + mutex_unlock(&service->session_lock); + put_session_struct(session); + } + mutex_unlock(&service->operation_lock); + } + put_service_struct(service); + + return; +} + +void free_all_session(void) +{ + struct tc_ns_dev_file *dev_file = NULL; + struct tc_ns_dev_file *dev_file_tmp = NULL; + struct tc_ns_dev_list *dev_list = NULL; + int i; + + dev_list = get_dev_list(); + if (!dev_list) { + tloge("cleanup session, dev list is null\n"); + return; + } + mutex_lock(&dev_list->dev_lock); + list_for_each_entry_safe(dev_file, dev_file_tmp, &dev_list->dev_file_list, head) { + mutex_lock(&dev_file->service_lock); + for (i = 0; i < SERVICES_MAX_COUNT; i++) { + if (dev_file->services[i] == NULL) + continue; + get_service_struct(dev_file->services[i]); + /* avoid dead lock in close session */ + mutex_unlock(&dev_file->service_lock); + cleanup_session(dev_file->services[i]); + mutex_lock(&dev_file->service_lock); + dev_file->services[i] = NULL; + } + mutex_unlock(&dev_file->service_lock); + } + mutex_unlock(&dev_list->dev_lock); + return; +} diff --git a/tzdriver/core/session_manager.h b/tzdriver/core/session_manager.h new file mode 100644 index 0000000000000000000000000000000000000000..f943434184d3c0cc3cd5119381195c03d0f6bffc --- /dev/null +++ 
b/tzdriver/core/session_manager.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: function declaration for session management. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef SESSION_MANAGER_H +#define SESSION_MANAGER_H + +#include +#include "tc_ns_client.h" +#include "teek_ns_client.h" + +int tc_client_session_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); +int tc_ns_open_session(struct tc_ns_dev_file *dev_file, + struct tc_ns_client_context *context); +int tc_ns_close_session(struct tc_ns_dev_file *dev_file, + struct tc_ns_client_context *context); +int tc_ns_send_cmd(struct tc_ns_dev_file *dev_file, + struct tc_ns_client_context *context); +int tc_ns_load_image(struct tc_ns_dev_file *dev, const char *file_buffer, + struct sec_file_info *sec_file_info, struct tc_ns_client_return *tee_ret); +int tc_ns_load_image_with_lock(struct tc_ns_dev_file *dev, + const char *file_buffer, unsigned int file_size, enum secfile_type_t type); +void close_unclosed_session_in_kthread(struct tc_ns_dev_file *dev); +struct tc_ns_session *tc_find_session_by_uuid(unsigned int dev_file_id, + const struct tc_ns_smc_cmd *cmd); +struct tc_ns_service *tc_find_service_in_dev(const struct tc_ns_dev_file *dev, + const unsigned char *uuid, int uuid_size); +struct tc_ns_session *tc_find_session_withowner( + const struct list_head *session_list, unsigned int session_id, + const struct tc_ns_dev_file *dev_file); +int tc_ns_load_secfile(struct tc_ns_dev_file *dev_file, + void __user *argp, bool is_from_client_node); 
+int load_image(struct load_img_params *params, + struct sec_file_info *sec_file_info, struct tc_ns_client_return *tee_ret); +void get_service_struct(struct tc_ns_service *service); +void put_service_struct(struct tc_ns_service *service); +void get_session_struct(struct tc_ns_session *session); +void put_session_struct(struct tc_ns_session *session); +void dump_services_status(const char *param); +void init_srvc_list(void); +void free_all_session(void); + +#endif diff --git a/tzdriver/core/shared_mem.c b/tzdriver/core/shared_mem.c new file mode 100644 index 0000000000000000000000000000000000000000..0e81ae47e655f9718136ebcbf341a599cc3f6e5c --- /dev/null +++ b/tzdriver/core/shared_mem.c @@ -0,0 +1,406 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include "shared_mem.h" +#include +#include +#include +#include +#include +#include +#include +#include "tc_ns_log.h" +#include "tc_ns_client.h" +#include "teek_ns_client.h" +#include "smc_smp.h" +#include "internal_functions.h" +#include "mailbox_mempool.h" +#include "ko_adapt.h" + +uint64_t get_reserved_cmd_vaddr_of(phys_addr_t cmd_phys, uint64_t cmd_size) +{ + if (cmd_phys == 0 || cmd_size == 0) { + tloge("cmd phy or cmd size is error\n"); + return 0; + } + uint64_t cmd_vaddr = (uint64_t)(uintptr_t)ioremap_cache(cmd_phys, cmd_size); + if (cmd_vaddr == 0) { + tloge("io remap for reserved cmd buffer failed\n"); + return 0; + } + (void)memset_s((void *)(uintptr_t)cmd_vaddr, cmd_size, 0, cmd_size); + return cmd_vaddr; +} + +#ifdef CONFIG_SHARED_MEM_RESERVED + +#define CMD_MEM_MIN_SIZE 0x1000 +#define SPI_MEM_MIN_SIZE 0x1000 +#define OPERATION_MEM_MIN_SIZE 0x1000 +uint64_t g_cmd_mem_paddr; +uint64_t g_cmd_mem_size; +uint64_t g_mailbox_paddr; +uint64_t g_mailbox_size; +uint64_t g_log_mem_paddr; +uint64_t g_log_mem_size; +uint64_t g_spi_mem_paddr; +uint64_t g_spi_mem_size; +static mailbox_page_t *g_mailbox_page; +static uintptr_t g_shmem_start_virt; +static uintptr_t g_page_offset; + +int load_tz_shared_mem(struct device_node *np) +{ + int rc; + + rc = of_property_read_u64(np, "tz_shmem_cmd_addr", &g_cmd_mem_paddr); + if (rc != 0) { + tloge("read tz_shmem_cmd_addr failed\n"); + return -ENODEV; + } + + rc = of_property_read_u64(np, "tz_shmem_cmd_size", &g_cmd_mem_size); + if (rc != 0 || g_cmd_mem_size < CMD_MEM_MIN_SIZE) { + tloge("read tz_shmem_cmd_size failed or size too short\n"); + return -ENODEV; + } + + rc = of_property_read_u64(np, "tz_shmem_mailbox_addr", &g_mailbox_paddr); + if (rc != 0) { + tloge("read tz_shmem_mailbox_addr failed\n"); + return -ENODEV; + } + + rc = of_property_read_u64(np, "tz_shmem_mailbox_size", &g_mailbox_size); + if (rc != 0 || g_mailbox_size < MAILBOX_POOL_SIZE + OPERATION_MEM_MIN_SIZE) { + tloge("read tz_shmem_mailbox_size failed 
or size too short\n"); + return -ENODEV; + } + + rc = of_property_read_u64(np, "tz_shmem_spi_addr", &g_spi_mem_paddr); + if (rc != 0) { + tloge("read tz_shmem_spi_addr failed\n"); + return -ENODEV; + } + + rc = of_property_read_u64(np, "tz_shmem_spi_size", &g_spi_mem_size); + if (rc != 0 || g_spi_mem_size < SPI_MEM_MIN_SIZE) { + tloge("read tz_shmem_spi_size failed or size too short\n"); + return -ENODEV; + } + + rc = of_property_read_u64(np, "tz_shmem_log_addr", &g_log_mem_paddr); + if (rc != 0) { + tloge("read tz_shmem_log_addr failed\n"); + return -ENODEV; + } + + rc = of_property_read_u64(np, "tz_shmem_log_size", &g_log_mem_size); + if (rc != 0 || g_log_mem_size < PAGES_LOG_MEM_LEN) { + tloge("read tz_shmem_log_size failed or size too short\n"); + return -ENODEV; + } + + return 0; +} + +mailbox_page_t *mailbox_alloc_pages(int order) +{ + uint32_t i; + uint32_t page_num = 1 << (unsigned int)order; + uint32_t page_size = page_num * sizeof(mailbox_page_t); + + g_page_offset = MAILBOX_POOL_SIZE / page_num; + g_mailbox_page = kmalloc(page_size, GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)g_mailbox_page)) { + tloge("Failed to allocate mailbox page\n"); + return NULL; + } + + g_shmem_start_virt = (uintptr_t)ioremap_cache(g_mailbox_paddr, g_mailbox_size); + if (g_shmem_start_virt == 0) { + tloge("io remap for mailbox page failed\n"); + kfree(g_mailbox_page); + g_mailbox_page = NULL; + return NULL; + } + (void)memset_s((void *)g_shmem_start_virt, g_mailbox_size, 0, g_mailbox_size); + g_mailbox_page[0] = (mailbox_page_t)g_shmem_start_virt; + for (i = 1; i < page_num; i++) + g_mailbox_page[i] = g_mailbox_page[i - 1] + g_page_offset; + + return g_mailbox_page; +} + +void mailbox_free_pages(mailbox_page_t *pages, int order) +{ + if (!pages || pages != g_mailbox_page) + return; + + (void)order; + kfree(pages); + g_mailbox_page = NULL; +} + +uintptr_t mailbox_page_address(mailbox_page_t *page) +{ + if (!page) + return 0; + + return *page; +} + +uintptr_t 
mailbox_virt_to_phys(uintptr_t addr) +{ + if (addr < g_shmem_start_virt || addr > g_shmem_start_virt + g_mailbox_size) + return 0; + + return g_mailbox_paddr + (addr - g_shmem_start_virt); +} + +mailbox_page_t *mailbox_virt_to_page(uint64_t ptr) +{ + if (ptr < g_shmem_start_virt || ptr > g_shmem_start_virt + g_mailbox_size) + return 0; + + return &g_mailbox_page[(ptr - g_shmem_start_virt) / g_page_offset]; +} + +uint64_t get_operation_vaddr(void) +{ + return g_shmem_start_virt + MAILBOX_POOL_SIZE; +} + +void free_operation(uint64_t op_vaddr) +{ + (void)op_vaddr; +} + +/* + * This function only for wireless platform, CONFIG_LOG_POOL + * macro cnotrols the log retention of soft reset feature. + * Enable CONFIG_LOG_POOL macro, this function won't memset + * log pool memory, and the old log before reset can be retention. + */ +uint64_t get_log_mem_vaddr(void) +{ + uint64_t log_vaddr = (uint64_t)(uintptr_t)ioremap_cache(g_log_mem_paddr, g_log_mem_size); + if (log_vaddr == 0) { + tloge("ioremap for log buffer failed\n"); + return 0; + } +#ifndef CONFIG_LOG_POOL + (void)memset_s((void *)(uintptr_t)log_vaddr, g_log_mem_size, 0, g_log_mem_size); +#endif + + return log_vaddr; +} + +uint64_t get_log_mem_paddr(uint64_t log_vaddr) +{ + (void)log_vaddr; + return g_log_mem_paddr; +} + +uint64_t get_log_mem_size(void) +{ + return g_log_mem_size; +} + +void free_log_mem(uint64_t log_vaddr) +{ + iounmap((void __iomem*)(uintptr_t)log_vaddr); +} + +uint64_t get_cmd_mem_vaddr(void) +{ + return get_reserved_cmd_vaddr_of(g_cmd_mem_paddr, g_cmd_mem_size); +} + +uint64_t get_cmd_mem_paddr(uint64_t cmd_vaddr) +{ + (void)cmd_vaddr; + return g_cmd_mem_paddr; +} + +void free_cmd_mem(uint64_t cmd_vaddr) +{ + iounmap((void __iomem*)(uintptr_t)cmd_vaddr); +} + +uint64_t get_spi_mem_vaddr(void) +{ + uint64_t spi_vaddr = (uint64_t)(uintptr_t)ioremap_cache(g_spi_mem_paddr, g_spi_mem_size); + if (spi_vaddr == 0) { + tloge("io remap for spi buffer failed\n"); + return 0; + } + (void)memset_s((void 
*)(uintptr_t)spi_vaddr, g_spi_mem_size, 0, g_spi_mem_size); + return spi_vaddr; +} + +uint64_t get_spi_mem_paddr(uintptr_t spi_vaddr) +{ + (void)spi_vaddr; + return g_spi_mem_paddr; +} + +void free_spi_mem(uint64_t spi_vaddr) +{ + iounmap((void __iomem*)(uintptr_t)spi_vaddr); +} + +#else + +int load_tz_shared_mem(struct device_node *np) +{ + (void)np; + return 0; +} + +mailbox_page_t *mailbox_alloc_pages(int order) +{ + return koadpt_alloc_pages(GFP_KERNEL, order); +} + +void mailbox_free_pages(mailbox_page_t *pages, int order) +{ + if (!pages) + return; + + __free_pages(pages, order); +} + +uintptr_t mailbox_page_address(mailbox_page_t *page) +{ + if (!page) + return 0; + + return page_address(page); +} + +uintptr_t mailbox_virt_to_phys(uintptr_t addr) +{ + if (!addr) + return 0; + + return virt_to_phys(addr); +} + +mailbox_page_t *mailbox_virt_to_page(uint64_t ptr) +{ + if (!ptr) + return NULL; + + return virt_to_page(ptr); +} + +uint64_t get_operation_vaddr(void) +{ + return kzalloc(sizeof(struct tc_ns_operation), GFP_KERNEL); +} + +void free_operation(uint64_t op_vaddr) +{ + if (!op_vaddr) + return; + + kfree(op_vaddr); +} + +uint64_t get_log_mem_vaddr(void) +{ + return __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(PAGES_LOG_MEM_LEN)); +} + +uint64_t get_log_mem_paddr(uint64_t log_vaddr) +{ + if (!log_vaddr) + return 0; + + return virt_to_phys((void *)(uintptr_t)log_vaddr); +} + +uint64_t get_log_mem_size(void) +{ + return 0; +} + +void free_log_mem(uint64_t log_vaddr) +{ + if (!log_vaddr) + return; + + free_pages(log_vaddr, get_order(PAGES_LOG_MEM_LEN)); +} + +#define PAGES_BIG_SESSION_CMD_LEN 6 +uint64_t get_cmd_mem_vaddr(void) +{ +#ifdef CONFIG_BIG_SESSION + /* we should map at least 64 pages for 1000 sessions, 2^6 > 40 */ + return (uint64_t)__get_free_pages(GFP_KERNEL | __GFP_ZERO, PAGES_BIG_SESSION_CMD_LEN); +#else + return (uint64_t)__get_free_page(GFP_KERNEL | __GFP_ZERO); +#endif +} + +uint64_t get_cmd_mem_paddr(uint64_t cmd_vaddr) +{ + if 
(!cmd_vaddr) + return 0; + + return virt_to_phys((void *)(uintptr_t)cmd_vaddr); +} + +void free_cmd_mem(uint64_t cmd_vaddr) +{ + if (!cmd_vaddr) + return; + +#ifdef CONFIG_BIG_SESSION + free_pages(cmd_vaddr, PAGES_BIG_SESSION_CMD_LEN); +#else + free_page(cmd_vaddr); +#endif +} + +uint64_t get_spi_mem_vaddr(void) +{ +#ifdef CONFIG_BIG_SESSION + /* we should map at least 3 pages for 100 sessions, 2^2 > 3 */ + return (uint64_t)__get_free_pages(GFP_KERNEL | __GFP_ZERO, CONFIG_NOTIFY_PAGE_ORDER); +#else + return (uint64_t)__get_free_page(GFP_KERNEL | __GFP_ZERO); +#endif +} + +uint64_t get_spi_mem_paddr(uintptr_t spi_vaddr) +{ + if (spi_vaddr == 0) + return 0; + + return virt_to_phys((void *)spi_vaddr); +} + +void free_spi_mem(uint64_t spi_vaddr) +{ + if (!spi_vaddr) + return; + +#ifdef CONFIG_BIG_SESSION + free_pages(spi_vaddr, CONFIG_NOTIFY_PAGE_ORDER); +#else + free_page(spi_vaddr); +#endif +} +#endif diff --git a/tzdriver/core/shared_mem.h b/tzdriver/core/shared_mem.h new file mode 100644 index 0000000000000000000000000000000000000000..4b6afb766000b99fbbd4a81cf5c5df236e2c9384 --- /dev/null +++ b/tzdriver/core/shared_mem.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef SHARED_MEM_H +#define SHARED_MEM_H + +#include +#include + +#ifdef CONFIG_512K_LOG_PAGES_MEM +#define PAGES_LOG_MEM_LEN (512 * SZ_1K) /* mem size: 512 k */ +#else +#define PAGES_LOG_MEM_LEN (256 * SZ_1K) /* mem size: 256 k */ +#endif + +#ifndef CONFIG_SHARED_MEM_RESERVED +typedef struct page mailbox_page_t; +#else +typedef uintptr_t mailbox_page_t; +#endif + +uint64_t get_reserved_cmd_vaddr_of(phys_addr_t cmd_phys, uint64_t cmd_size); +int load_tz_shared_mem(struct device_node *np); + +mailbox_page_t *mailbox_alloc_pages(int order); +void mailbox_free_pages(mailbox_page_t *pages, int order); +uintptr_t mailbox_page_address(mailbox_page_t *page); +mailbox_page_t *mailbox_virt_to_page(uint64_t ptr); +uint64_t get_operation_vaddr(void); +void free_operation(uint64_t op_vaddr); + +uint64_t get_log_mem_vaddr(void); +uint64_t get_log_mem_paddr(uint64_t log_vaddr); +uint64_t get_log_mem_size(void); +void free_log_mem(uint64_t log_vaddr); + +uint64_t get_cmd_mem_vaddr(void); +uint64_t get_cmd_mem_paddr(uint64_t cmd_vaddr); +void free_cmd_mem(uint64_t cmd_vaddr); + +uint64_t get_spi_mem_vaddr(void); +uint64_t get_spi_mem_paddr(uintptr_t spi_vaddr); +void free_spi_mem(uint64_t spi_vaddr); +#endif diff --git a/tzdriver/core/smc_abi.c b/tzdriver/core/smc_abi.c new file mode 100644 index 0000000000000000000000000000000000000000..2fab1bebd58161015331db5072e13f584bed3761 --- /dev/null +++ b/tzdriver/core/smc_abi.c @@ -0,0 +1,116 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include +#include "smc_call.h" +#include "smc_smp.h" +#include "teek_ns_client.h" +#include "smc_abi.h" + +#ifndef CONFIG_ARCH32 +void do_smc_transport(struct smc_in_params *in, struct smc_out_params *out, uint8_t wait) +{ + isb(); + wmb(); + do { + asm volatile( + "mov x0, %[fid]\n" + "mov x1, %[a1]\n" + "mov x2, %[a2]\n" + "mov x3, %[a3]\n" + "mov x4, %[a4]\n" + "mov x5, %[a5]\n" + "mov x6, %[a6]\n" + "mov x7, %[a7]\n" + SMCCC_SMC_INST"\n" + "str x0, [%[re0]]\n" + "str x1, [%[re1]]\n" + "str x2, [%[re2]]\n" + "str x3, [%[re3]]\n" : + [fid] "+r"(in->x0), + [a1] "+r"(in->x1), + [a2] "+r"(in->x2), + [a3] "+r"(in->x3), + [a4] "+r"(in->x4), + [a5] "+r"(in->x5), + [a6] "+r"(in->x6), + [a7] "+r"(in->x7): + [re0] "r"(&out->ret), + [re1] "r"(&out->exit_reason), + [re2] "r"(&out->ta), + [re3] "r"(&out->target) : + "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7"); + } while (out->ret == TSP_REQUEST && wait != 0); + isb(); + wmb(); +} +#else +void do_smc_transport(struct smc_in_params *in, struct smc_out_params *out, uint8_t wait) +{ + isb(); + wmb(); + do { + asm volatile( + "mov r0, %[fid]\n" + "mov r1, %[a1]\n" + "mov r2, %[a2]\n" + "mov r3, %[a3]\n" + ".arch_extension sec\n" + SMCCC_SMC_INST"\n" + "str r0, [%[re0]]\n" + "str r1, [%[re1]]\n" + "str r2, [%[re2]]\n" + "str r3, [%[re3]]\n" : + [fid] "+r"(in->x0), + [a1] "+r"(in->x1), + [a2] "+r"(in->x2), + [a3] "+r"(in->x3): + [re0] "r"(&out->ret), + [re1] "r"(&out->exit_reason), + [re2] "r"(&out->ta), + [re3] "r"(&out->target) : + "r0", "r1", "r2", "r3"); + } while (out->ret == TSP_REQUEST && wait != 0); + isb(); + wmb(); +} +#endif + +#ifdef CONFIG_THIRDPARTY_COMPATIBLE +static void fix_params_offset(struct smc_out_params *out_param) +{ + out_param->target = out_param->ta; + out_param->ta = out_param->exit_reason; + out_param->exit_reason = out_param->ret; + out_param->ret = TSP_RESPONSE; + if (out_param->exit_reason == TEE_EXIT_REASON_CRASH) { + union crash_inf temp_info; + temp_info.crash_reg[0] = out_param->ta; + 
temp_info.crash_reg[1] = 0; + temp_info.crash_reg[2] = out_param->target; + temp_info.crash_msg.far = temp_info.crash_msg.elr; + temp_info.crash_msg.elr = 0; + out_param->ret = TSP_CRASH; + out_param->exit_reason = temp_info.crash_reg[0]; + out_param->ta = temp_info.crash_reg[1]; + out_param->target = temp_info.crash_reg[2]; + } +} +#endif + +void smc_req(struct smc_in_params *in, struct smc_out_params *out, uint8_t wait) +{ + do_smc_transport(in, out, wait); +#ifdef CONFIG_THIRDPARTY_COMPATIBLE + fix_params_offset(out); +#endif +} diff --git a/tzdriver/core/smc_abi.h b/tzdriver/core/smc_abi.h new file mode 100644 index 0000000000000000000000000000000000000000..bf0bb2841ed01fdf693827d7915825d2eddb7eaa --- /dev/null +++ b/tzdriver/core/smc_abi.h @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef SMC_ABI_H +#define SMC_ABI_H + +#include "smc_call.h" +#define TEE_EXIT_REASON_CRASH 0x4 +void do_smc_transport(struct smc_in_params *in, struct smc_out_params *out, uint8_t wait); +#endif diff --git a/tzdriver/core/smc_call.h b/tzdriver/core/smc_call.h new file mode 100644 index 0000000000000000000000000000000000000000..9401a29a53e1ce6b71b8e49d99e9474165d1a235 --- /dev/null +++ b/tzdriver/core/smc_call.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef SMC_CALL_H +#define SMC_CALL_H + +#include + +struct smc_in_params { + unsigned long x0; + unsigned long x1; + unsigned long x2; + unsigned long x3; + unsigned long x4; + unsigned long x5; + unsigned long x6; + unsigned long x7; +}; + +struct smc_out_params { + unsigned long ret; + unsigned long exit_reason; + unsigned long ta; + unsigned long target; +}; + +void smc_req(struct smc_in_params *in, struct smc_out_params *out, uint8_t wait); + +#endif diff --git a/tzdriver/core/smc_smp.c b/tzdriver/core/smc_smp.c new file mode 100644 index 0000000000000000000000000000000000000000..31f7578495809de7451a7c3fcbc430cbbf556eed --- /dev/null +++ b/tzdriver/core/smc_smp.c @@ -0,0 +1,2128 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: function for sending smc cmd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include "smc_smp.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_SCHED_SMT_EXPELLING +#include +#endif + +#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE) +#include +#include +#endif +#include +#include + +#ifdef CONFIG_TEE_AUDIT +#include +#endif + +#ifdef CONFIG_TEE_LOG_EXCEPTION +#include +#define IMONITOR_TA_CRASH_EVENT_ID 901002003 +#endif + +#include "tc_ns_log.h" +#include "teek_client_constants.h" +#include "tc_ns_client.h" +#include "agent.h" +#include "teek_ns_client.h" +#include "mailbox_mempool.h" +#include "cmdmonitor.h" +#include "tlogger.h" +#include "ko_adapt.h" +#include "log_cfg_api.h" +#include "tee_compat_check.h" +#include "secs_power_ctrl.h" +#include "shared_mem.h" +#include "tui.h" +#include "internal_functions.h" +#ifdef CONFIG_SMC_HOOK +#include "smc_hook.h" +#endif +#include "smc_call.h" + +#define PREEMPT_COUNT 10000 +#define HZ_COUNT 10 +#define IDLED_COUNT 100 +/* + * when cannot find smc entry, will sleep 1ms + * because the task will be killed in 25s if it not return, + * so the retry count is 25s/1ms + */ +#define FIND_SMC_ENTRY_SLEEP 1 +#define FIND_SMC_ENTRY_RETRY_MAX_COUNT (CMD_MAX_EXECUTE_TIME * S_TO_MS / FIND_SMC_ENTRY_SLEEP) + +#define CPU_ZERO 0 +#define CPU_ONE 1 +#define CPU_FOUR 4 +#define CPU_FIVE 5 +#define CPU_SIX 6 +#define CPU_SEVEN 7 +#define LOW_BYTE 0xF + +#define PENDING2_RETRY (-1) + +#define RETRY_WITH_PM 1 +#define CLEAN_WITHOUT_PM 2 + +#define MAX_CHAR 0xff + +#define MAX_SIQ_NUM 4 + +/* Current state of the system */ +static bool g_sys_crash; + +struct shadow_work { + struct kthread_work kthwork; + struct work_struct work; + uint64_t target; +}; + +unsigned long g_shadow_thread_id = 0; +static struct task_struct *g_siq_thread; +static struct task_struct *g_smc_svc_thread; 
+static struct task_struct *g_ipi_helper_thread; +static DEFINE_KTHREAD_WORKER(g_ipi_helper_worker); + +enum cmd_reuse { + CLEAR, /* clear this cmd index */ + RESEND, /* use this cmd index resend */ +}; + +struct cmd_reuse_info { + int cmd_index; + int saved_index; + enum cmd_reuse cmd_usage; +}; + +#if (CONFIG_CPU_AFF_NR != 0) +static struct cpumask g_cpu_mask; +static int g_mask_flag = 0; +#endif + +#ifdef CONFIG_DRM_ADAPT +static struct cpumask g_drm_cpu_mask; +static int g_drm_mask_flag = 0; +#endif + +struct tc_ns_smc_queue *g_cmd_data; +phys_addr_t g_cmd_phys; + +static struct list_head g_pending_head; +static spinlock_t g_pend_lock; + +static DECLARE_WAIT_QUEUE_HEAD(siq_th_wait); +static DECLARE_WAIT_QUEUE_HEAD(ipi_th_wait); +static atomic_t g_siq_th_run; +static uint32_t g_siq_queue[MAX_SIQ_NUM]; +DEFINE_MUTEX(g_siq_lock); + +enum smc_ops_exit { + SMC_OPS_NORMAL = 0x0, + SMC_OPS_SCHEDTO = 0x1, + SMC_OPS_START_SHADOW = 0x2, + SMC_OPS_START_FIQSHD = 0x3, + SMC_OPS_PROBE_ALIVE = 0x4, + SMC_OPS_ABORT_TASK = 0x5, + SMC_EXIT_NORMAL = 0x0, + SMC_EXIT_PREEMPTED = 0x1, + SMC_EXIT_SHADOW = 0x2, + SMC_EXIT_ABORT = 0x3, +#ifdef CONFIG_THIRDPARTY_COMPATIBLE + SMC_EXIT_CRASH = 0x4, + SMC_EXIT_MAX = 0x5, +#else + SMC_EXIT_MAX = 0x4, +#endif +}; + +#define SHADOW_EXIT_RUN 0x1234dead +#define SMC_EXIT_TARGET_SHADOW_EXIT 0x1 + +#define compile_time_assert(cond, msg) typedef char g_assert_##msg[(cond) ? 
1 : -1] + +#ifndef CONFIG_BIG_SESSION +compile_time_assert(sizeof(struct tc_ns_smc_queue) <= PAGE_SIZE, + size_of_tc_ns_smc_queue_too_large); +#endif + +static bool g_reserved_cmd_buffer = false; +static u64 g_cmd_size = 0; +static bool g_tz_uefi_enable = false; + +static int __init tz_check_uefi_enable_func(char *str) +{ + if (str != NULL && *str == '1') + g_tz_uefi_enable = true; + + return 0; +} +early_param("tz_uefi_enable", tz_check_uefi_enable_func); + +#define MIN_CMDLINE_SIZE 0x1000 +static int reserved_cmdline(struct reserved_mem *rmem) +{ + if (g_tz_uefi_enable && rmem && rmem->size >= MIN_CMDLINE_SIZE) { + g_cmd_phys = rmem->base; + g_cmd_size = rmem->size; + g_reserved_cmd_buffer = true; + } else { + g_reserved_cmd_buffer = false; + } + + return 0; +} +RESERVEDMEM_OF_DECLARE(g_teeos_cmdline, "teeos-cmdline", reserved_cmdline); + +static void acquire_smc_buf_lock(smc_buf_lock_t *lock) +{ + int ret; + + preempt_disable(); + do + ret = (int)cmpxchg(lock, 0, 1); + while (ret != 0); +} + +static inline void release_smc_buf_lock(smc_buf_lock_t *lock) +{ + (void)cmpxchg(lock, 1, 0); + preempt_enable(); +} + +static void occupy_setbit_smc_in_doing_entry(int32_t i, int32_t *idx) +{ + g_cmd_data->in[i].event_nr = (unsigned int)i; + isb(); + wmb(); + set_bit((unsigned int)i, (unsigned long *)g_cmd_data->in_bitmap); + set_bit((unsigned int)i, (unsigned long *)g_cmd_data->doing_bitmap); + *idx = i; +} + +static int occupy_free_smc_in_entry(const struct tc_ns_smc_cmd *cmd) +{ + int idx = -1; + int i; + uint32_t retry_count = 0; + + if (!cmd) { + tloge("bad parameters! cmd is NULL\n"); + return -1; + } + /* + * Note: + * acquire_smc_buf_lock will disable preempt and kernel will forbid + * call mutex_lock in preempt disabled scenes. + * To avoid such case(update_timestamp and update_chksum will call + * mutex_lock), only cmd copy is done when preempt is disable, + * then do update_timestamp and update_chksum. 
+ * As soon as this idx of in_bitmap is set, gtask will see this + * cmd_in, but the cmd_in is not ready that lack of update_xxx, + * so we make a tricky here, set doing_bitmap and in_bitmap both + * at first, after update_xxx is done, clear doing_bitmap. + */ +get_smc_retry: + acquire_smc_buf_lock(&g_cmd_data->smc_lock); + for (i = 0; i < MAX_SMC_CMD; i++) { + if (test_bit(i, (unsigned long *)g_cmd_data->in_bitmap) != 0) + continue; + if (memcpy_s(&g_cmd_data->in[i], sizeof(g_cmd_data->in[i]), + cmd, sizeof(*cmd)) != EOK) { + tloge("memcpy failed,%s line:%d", __func__, __LINE__); + break; + } + occupy_setbit_smc_in_doing_entry(i, &idx); + break; + } + release_smc_buf_lock(&g_cmd_data->smc_lock); + if (idx == -1) { + if (retry_count <= FIND_SMC_ENTRY_RETRY_MAX_COUNT) { + msleep(FIND_SMC_ENTRY_SLEEP); + retry_count++; + tlogd("can't get any free smc entry and retry:%u\n", retry_count); + goto get_smc_retry; + } + tloge("can't get any free smc entry after retry:%u\n", retry_count); + return -1; + } + + acquire_smc_buf_lock(&g_cmd_data->smc_lock); + isb(); + wmb(); + clear_bit((uint32_t)idx, (unsigned long *)g_cmd_data->doing_bitmap); + release_smc_buf_lock(&g_cmd_data->smc_lock); + return idx; +} + +static int reuse_smc_in_entry(uint32_t idx) +{ + int rc = 0; + + acquire_smc_buf_lock(&g_cmd_data->smc_lock); + if (!(test_bit((int32_t)idx, (unsigned long *)g_cmd_data->in_bitmap) != 0 && + test_bit((int32_t)idx, (unsigned long *)g_cmd_data->doing_bitmap) != 0)) { + tloge("invalid cmd to reuse\n"); + rc = -1; + goto out; + } + if (memcpy_s(&g_cmd_data->in[idx], sizeof(g_cmd_data->in[idx]), + &g_cmd_data->out[idx], sizeof(g_cmd_data->out[idx])) != EOK) { + tloge("memcpy failed,%s line:%d", __func__, __LINE__); + rc = -1; + goto out; + } + + isb(); + wmb(); + clear_bit(idx, (unsigned long *)g_cmd_data->doing_bitmap); +out: + release_smc_buf_lock(&g_cmd_data->smc_lock); + return rc; +} + +static int copy_smc_out_entry(uint32_t idx, struct tc_ns_smc_cmd *copy, + enum 
cmd_reuse *usage) +{ + acquire_smc_buf_lock(&g_cmd_data->smc_lock); + if (test_bit((int)idx, (unsigned long *)g_cmd_data->out_bitmap) == 0) { + tloge("cmd out %u is not ready\n", idx); + release_smc_buf_lock(&g_cmd_data->smc_lock); + show_cmd_bitmap(); + return -ENOENT; + } + if (memcpy_s(copy, sizeof(*copy), &g_cmd_data->out[idx], + sizeof(g_cmd_data->out[idx])) != EOK) { + tloge("copy smc out failed\n"); + release_smc_buf_lock(&g_cmd_data->smc_lock); + return -EFAULT; + } + + isb(); + wmb(); + if (g_cmd_data->out[idx].ret_val == (int)TEEC_PENDING2 || + g_cmd_data->out[idx].ret_val == (int)TEEC_PENDING) { + *usage = RESEND; + } else { + clear_bit(idx, (unsigned long *)g_cmd_data->in_bitmap); + clear_bit(idx, (unsigned long *)g_cmd_data->doing_bitmap); + *usage = CLEAR; + } + clear_bit(idx, (unsigned long *)g_cmd_data->out_bitmap); + release_smc_buf_lock(&g_cmd_data->smc_lock); + + return 0; +} + +static inline void clear_smc_in_entry(uint32_t idx) +{ + acquire_smc_buf_lock(&g_cmd_data->smc_lock); + clear_bit(idx, (unsigned long *)g_cmd_data->in_bitmap); + release_smc_buf_lock(&g_cmd_data->smc_lock); +} + +static void release_smc_entry(uint32_t idx) +{ + acquire_smc_buf_lock(&g_cmd_data->smc_lock); + clear_bit(idx, (unsigned long *)g_cmd_data->in_bitmap); + clear_bit(idx, (unsigned long *)g_cmd_data->doing_bitmap); + clear_bit(idx, (unsigned long *)g_cmd_data->out_bitmap); + release_smc_buf_lock(&g_cmd_data->smc_lock); +} + +static bool is_cmd_working_done(uint32_t idx) +{ + bool ret = false; + + acquire_smc_buf_lock(&g_cmd_data->smc_lock); + if (test_bit((int)idx, (unsigned long *)g_cmd_data->out_bitmap) != 0) + ret = true; + release_smc_buf_lock(&g_cmd_data->smc_lock); + return ret; +} + +void occupy_clean_cmd_buf(void) +{ + acquire_smc_buf_lock(&g_cmd_data->smc_lock); + memset_s(g_cmd_data, sizeof(struct tc_ns_smc_queue), 0, sizeof(struct tc_ns_smc_queue)); + release_smc_buf_lock(&g_cmd_data->smc_lock); +} + +static void show_in_bitmap(int *cmd_in, uint32_t len) 
+{ + uint32_t idx; + uint32_t in = 0; + char bitmap[MAX_SMC_CMD + 1]; + + if (len != MAX_SMC_CMD || !g_cmd_data) + return; + + for (idx = 0; idx < MAX_SMC_CMD; idx++) { + if (test_bit((int32_t)idx, (unsigned long *)g_cmd_data->in_bitmap) != 0) { + bitmap[idx] = '1'; + cmd_in[in++] = (int)idx; + } else { + bitmap[idx] = '0'; + } + } + bitmap[MAX_SMC_CMD] = '\0'; + tloge("in bitmap: %s\n", bitmap); +} + +static void show_out_bitmap(int *cmd_out, uint32_t len) +{ + uint32_t idx; + uint32_t out = 0; + char bitmap[MAX_SMC_CMD + 1]; + + if (len != MAX_SMC_CMD || !g_cmd_data) + return; + + for (idx = 0; idx < MAX_SMC_CMD; idx++) { + if (test_bit((int32_t)idx, (unsigned long *)g_cmd_data->out_bitmap) != 0) { + bitmap[idx] = '1'; + cmd_out[out++] = (int)idx; + } else { + bitmap[idx] = '0'; + } + } + bitmap[MAX_SMC_CMD] = '\0'; + tloge("out bitmap: %s\n", bitmap); +} + +static void show_doing_bitmap(void) +{ + uint32_t idx; + char bitmap[MAX_SMC_CMD + 1]; + + if (!g_cmd_data) + return; + for (idx = 0; idx < MAX_SMC_CMD; idx++) { + if (test_bit((int)idx, (unsigned long *)g_cmd_data->doing_bitmap) != 0) + bitmap[idx] = '1'; + else + bitmap[idx] = '0'; + } + bitmap[MAX_SMC_CMD] = '\0'; + tloge("doing bitmap: %s\n", bitmap); +} + +static void show_single_cmd_info(const int *cmd, uint32_t len) +{ + uint32_t idx; + + if (len != MAX_SMC_CMD || !g_cmd_data) + return; + + for (idx = 0; idx < MAX_SMC_CMD; idx++) { + if (cmd[idx] == -1) + break; + tloge("cmd[%d]: cmd_id=%u, ca_pid=%u, dev_id = 0x%x, " + "event_nr=%u, ret_val=0x%x\n", + cmd[idx], + g_cmd_data->in[cmd[idx]].cmd_id, + g_cmd_data->in[cmd[idx]].ca_pid, + g_cmd_data->in[cmd[idx]].dev_file_id, + g_cmd_data->in[cmd[idx]].event_nr, + g_cmd_data->in[cmd[idx]].ret_val); + } +} + +void show_cmd_bitmap(void) +{ + int *cmd_in = NULL; + int *cmd_out = NULL; + + cmd_in = kzalloc(sizeof(int) * MAX_SMC_CMD, GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)cmd_in)) { + tloge("out of mem! 
cannot show in bitmap\n"); + return; + } + + cmd_out = kzalloc(sizeof(int) * MAX_SMC_CMD, GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)cmd_out)) { + kfree(cmd_in); + tloge("out of mem! cannot show out bitmap\n"); + return; + } + + if (memset_s(cmd_in, sizeof(int)* MAX_SMC_CMD, MAX_CHAR, sizeof(int)* MAX_SMC_CMD) != 0 || + memset_s(cmd_out, sizeof(int)* MAX_SMC_CMD, MAX_CHAR, sizeof(int)* MAX_SMC_CMD) != 0) { + tloge("memset failed\n"); + goto error; + } + + acquire_smc_buf_lock(&g_cmd_data->smc_lock); + + show_in_bitmap(cmd_in, MAX_SMC_CMD); + show_doing_bitmap(); + show_out_bitmap(cmd_out, MAX_SMC_CMD); + + tloge("cmd in value:\n"); + show_single_cmd_info(cmd_in, MAX_SMC_CMD); + + tloge("cmd_out value:\n"); + show_single_cmd_info(cmd_out, MAX_SMC_CMD); + + release_smc_buf_lock(&g_cmd_data->smc_lock); + +error: + kfree(cmd_in); + kfree(cmd_out); +} + +static struct pending_entry *init_pending_entry(void) +{ + struct pending_entry *pe = NULL; + + pe = kzalloc(sizeof(*pe), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)pe)) { + tloge("alloc pe failed\n"); + return NULL; + } + + atomic_set(&pe->users, 1); + get_task_struct(current); + pe->task = current; + +#ifdef CONFIG_TA_AFFINITY + cpumask_copy(&pe->ca_mask, CURRENT_CPUS_ALLOWED); + cpumask_copy(&pe->ta_mask, CURRENT_CPUS_ALLOWED); +#endif + + init_waitqueue_head(&pe->wq); + atomic_set(&pe->run, 0); + INIT_LIST_HEAD(&pe->list); + spin_lock(&g_pend_lock); + list_add_tail(&pe->list, &g_pending_head); + spin_unlock(&g_pend_lock); + + return pe; +} + +struct pending_entry *find_pending_entry(pid_t pid) +{ + struct pending_entry *pe = NULL; + + spin_lock(&g_pend_lock); + list_for_each_entry(pe, &g_pending_head, list) { + if (pe->task->pid == pid) { + atomic_inc(&pe->users); + spin_unlock(&g_pend_lock); + return pe; + } + } + spin_unlock(&g_pend_lock); + return NULL; +} + +void foreach_pending_entry(void (*func)(struct pending_entry *)) +{ + struct pending_entry *pe = NULL; + + if (!func) 
+ return; + + spin_lock(&g_pend_lock); + list_for_each_entry(pe, &g_pending_head, list) { + func(pe); + } + spin_unlock(&g_pend_lock); +} + +void put_pending_entry(struct pending_entry *pe) +{ + if (!pe) + return; + + if (!atomic_dec_and_test(&pe->users)) + return; + + put_task_struct(pe->task); + kfree(pe); +} + +#ifdef CONFIG_TA_AFFINITY +static void restore_cpu_mask(struct pending_entry *pe) +{ + if (cpumask_equal(&pe->ca_mask, &pe->ta_mask)) + return; + + set_cpus_allowed_ptr(current, &pe->ca_mask); +} +#endif + +static void release_pending_entry(struct pending_entry *pe) +{ +#ifdef CONFIG_TA_AFFINITY + restore_cpu_mask(pe); +#endif + spin_lock(&g_pend_lock); + list_del(&pe->list); + spin_unlock(&g_pend_lock); + put_pending_entry(pe); +} + +static inline bool is_shadow_exit(uint64_t target) +{ + return target & SMC_EXIT_TARGET_SHADOW_EXIT; +} + +/* + * check ca and ta's affinity is match in 2 scene: + * 1. when TA is blocked to REE + * 2. when CA is wakeup by SPI wakeup + * match_ta_affinity return true if affinity is changed + */ +#ifdef CONFIG_TA_AFFINITY +static bool match_ta_affinity(struct pending_entry *pe) +{ + if (!cpumask_equal(CURRENT_CPUS_ALLOWED, &pe->ta_mask)) { + if (set_cpus_allowed_ptr(current, &pe->ta_mask)) { + tlogw("set %s affinity failed\n", current->comm); + return false; + } + return true; + } + + return false; +} +#else +static inline bool match_ta_affinity(struct pending_entry *pe) +{ + (void)pe; + return false; +} +#endif + +struct smc_cmd_ret { + unsigned long exit; + unsigned long ta; + unsigned long target; +}; + +bool sigkill_pending(struct task_struct *tsk) +{ + bool flag = false; + + if (!tsk) { + tloge("tsk is null!\n"); + return false; + } + + flag = (sigismember(&tsk->pending.signal, SIGKILL) != 0) || + (sigismember(&tsk->pending.signal, SIGUSR1) != 0); + + if (tsk->signal) + return flag || sigismember(&tsk->signal->shared_pending.signal, + SIGKILL); + return flag; +} + +#if (CONFIG_CPU_AFF_NR != 0) +static void 
set_cpu_strategy(struct cpumask *old_mask) +{ + unsigned int i; + + if (g_mask_flag == 0) { + cpumask_clear(&g_cpu_mask); + for (i = 0; i < CONFIG_CPU_AFF_NR; i++) + cpumask_set_cpu(i, &g_cpu_mask); + g_mask_flag = 1; + } + cpumask_copy(old_mask, CURRENT_CPUS_ALLOWED); + set_cpus_allowed_ptr(current, &g_cpu_mask); +} +#endif + +#if (CONFIG_CPU_AFF_NR != 0) +static void restore_cpu(struct cpumask *old_mask) +{ + /* current equal old means no set cpu affinity, no need to restore */ + if (cpumask_equal(CURRENT_CPUS_ALLOWED, old_mask)) + return; + + set_cpus_allowed_ptr(current, old_mask); + schedule(); +} +#endif + +static bool is_ready_to_kill(bool need_kill) +{ + return (need_kill && sigkill_pending(current) && + is_thread_reported(current->pid)); +} + +static void set_smc_send_arg(struct smc_in_params *in_param, + const struct smc_cmd_ret *secret, unsigned long ops) +{ + if (secret->exit == SMC_EXIT_PREEMPTED) { + in_param->x1 = SMC_OPS_SCHEDTO; + in_param->x3 = secret->ta; + in_param->x4 = secret->target; + } + + if (ops == SMC_OPS_SCHEDTO || ops == SMC_OPS_START_FIQSHD) + in_param->x4 = secret->target; + + tlogd("[cpu %d]begin send x0=%lx x1=%lx x2=%lx x3=%lx x4=%lx\n", + raw_smp_processor_id(), in_param->x0, in_param->x1, + in_param->x2, in_param->x3, in_param->x4); +} + +static void send_asm_smc_cmd(struct smc_in_params *in_param, struct smc_out_params *out_param) +{ + smc_req(in_param, out_param, 0); +} + +#ifdef CONFIG_TEE_REBOOT +int send_smc_cmd_rebooting(uint32_t cmd_id, phys_addr_t cmd_addr, uint32_t cmd_type, const struct tc_ns_smc_cmd *in_cmd) +{ + struct tc_ns_smc_cmd cmd = { {0}, 0 }; + struct smc_in_params in_param = {cmd_id, cmd_addr, cmd_type, cmd_addr >> ADDR_TRANS_NUM, TEE_ERROR_IS_DEAD}; + struct smc_out_params out_param = {0}; + + if (in_cmd != NULL) { + if (memcpy_s(&cmd, sizeof(cmd), in_cmd, sizeof(*in_cmd)) != EOK) { + tloge("memcpy in cmd failed\n"); + return -EFAULT; + } + if (occupy_free_smc_in_entry(&cmd) == -1) { + tloge("there's no 
more smc entry\n"); + return -ENOMEM; + } + } +retry: + isb(); + wmb(); + send_asm_smc_cmd(&in_param, &out_param); + isb(); + wmb(); + if (out_param.exit_reason == SMC_EXIT_PREEMPTED) + goto retry; + + return out_param.exit_reason; +} +#else +int send_smc_cmd_rebooting(uint32_t cmd_id, phys_addr_t cmd_addr, uint32_t cmd_type, const struct tc_ns_smc_cmd *in_cmd) +{ + (void)cmd_id; + (void)cmd_addr; + (void)cmd_type; + (void)in_cmd; + return 0; +} +#endif + +static noinline int smp_smc_send(uint32_t cmd, unsigned long ops, unsigned long ca, + struct smc_cmd_ret *secret, bool need_kill) +{ + struct smc_in_params in_param = { cmd, ops, ca, 0, 0 }; + struct smc_out_params out_param = {0}; +#if (CONFIG_CPU_AFF_NR != 0) + struct cpumask old_mask; +#endif + +#if (CONFIG_CPU_AFF_NR != 0) + set_cpu_strategy(&old_mask); +#endif +retry: + set_smc_send_arg(&in_param, secret, ops); + tee_trace_add_event(SMC_SEND, 0); + send_asm_smc_cmd(&in_param, &out_param); + tee_trace_add_event(SMC_DONE, 0); + tlogd("[cpu %d] return val %lx exit_reason %lx ta %lx targ %lx\n", + raw_smp_processor_id(), out_param.ret, out_param.exit_reason, + out_param.ta, out_param.target); + + secret->exit = out_param.exit_reason; + secret->ta = out_param.ta; + secret->target = out_param.target; + + if (out_param.exit_reason == SMC_EXIT_PREEMPTED) { + /* + * There's 2 ways to send a terminate cmd to kill a running TA, + * in current context or another. If send terminate in another + * context, may encounter concurrency problem, as terminate cmd + * is send but not process, the original cmd has finished. + * So we send the terminate cmd in current context. 
+ */ + if (is_ready_to_kill(need_kill)) { + secret->exit = SMC_EXIT_ABORT; + tloge("receive kill signal\n"); + } else { +#if (!defined(CONFIG_PREEMPT)) || defined(CONFIG_RTOS_PREEMPT_OFF) + /* yield cpu to avoid soft lockup */ + cond_resched(); +#endif + goto retry; + } + } +#if (CONFIG_CPU_AFF_NR != 0) + restore_cpu(&old_mask); +#endif + return (int)out_param.ret; +} + +static uint64_t send_smc_cmd(uint32_t cmd, phys_addr_t cmd_addr, uint32_t cmd_type, uint8_t wait) +{ + uint64_t ret = 0; + struct smc_in_params in_param = { cmd, cmd_addr, cmd_type, cmd_addr >> ADDR_TRANS_NUM }; + struct smc_out_params out_param = { ret }; +#ifdef CONFIG_THIRDPARTY_COMPATIBLE + if (g_sys_crash) { + out_param.ret = TSP_CRASH; + return out_param.ret; + } +#endif + smc_req(&in_param, &out_param, wait); + ret = out_param.ret; + return ret; +} + +unsigned long raw_smc_send(uint32_t cmd, phys_addr_t cmd_addr, + uint32_t cmd_type, uint8_t wait) +{ + unsigned long x0; + +#if (CONFIG_CPU_AFF_NR != 0) + struct cpumask old_mask; + set_cpu_strategy(&old_mask); +#endif + + x0 = send_smc_cmd(cmd, cmd_addr, cmd_type, wait); + +#if (CONFIG_CPU_AFF_NR != 0) + restore_cpu(&old_mask); +#endif + return x0; +} + +static void siq_dump(phys_addr_t mode, uint32_t siq_mode) +{ + int ret = raw_smc_send(TSP_REE_SIQ, mode, 0, false); + if (ret == TSP_CRASH) { + tloge("TEEOS has crashed!\n"); + g_sys_crash = true; + cmd_monitor_ta_crash(TYPE_CRASH_TEE, NULL, 0); + } + + if (siq_mode == SIQ_DUMP_TIMEOUT) { + tz_log_write(); + } else if (siq_mode == SIQ_DUMP_SHELL) { +#ifdef CONFIG_TEE_LOG_DUMP_PATH + (void)tlogger_store_msg(CONFIG_TEE_LOG_DUMP_PATH, + sizeof(CONFIG_TEE_LOG_DUMP_PATH)); +#else + tz_log_write(); +#endif + } + do_cmd_need_archivelog(); +} + +static uint32_t get_free_siq_index(void) +{ + uint32_t i; + + for (i = 0; i < MAX_SIQ_NUM; i++) { + if (g_siq_queue[i] == 0) + return i; + } + + return MAX_SIQ_NUM; +} + +static uint32_t get_undo_siq_index(void) +{ + uint32_t i; + + for (i = 0; i < 
MAX_SIQ_NUM; i++) { + if (g_siq_queue[i] != 0) + return i; + } + + return MAX_SIQ_NUM; +} + +#define RUN_SIQ_THREAD 1 +#define STOP_SIQ_THREAD 2 +static int siq_thread_fn(void *arg) +{ + int ret; + uint32_t i; + (void)arg; + + while (true) { + ret = (int)wait_event_interruptible(siq_th_wait, + atomic_read(&g_siq_th_run)); + if (ret != 0) { + tloge("wait event interruptible failed!\n"); + return -EINTR; + } + if (atomic_read(&g_siq_th_run) == STOP_SIQ_THREAD) + return 0; + + mutex_lock(&g_siq_lock); + do { + i = get_undo_siq_index(); + if (i >= MAX_SIQ_NUM) + break; + siq_dump((phys_addr_t)(1), g_siq_queue[i]); + g_siq_queue[i] = 0; + } while (true); + atomic_set(&g_siq_th_run, 0); + mutex_unlock(&g_siq_lock); + } +} + +#ifdef CONFIG_TEE_AUDIT +#define MAX_UPLOAD_INFO_LEN 4 +#define INFO_HIGH_OFFSET 24U +#define INFO_MID_OFFSET 16U +#define INFO_LOW_OFFSET 8U + +static void upload_audit_event(unsigned int eventindex) +{ +#ifdef CONFIG_HW_KERNEL_STP + struct stp_item item; + int ret; + char att_info[MAX_UPLOAD_INFO_LEN + 1] = {0}; + + att_info[0] = (unsigned char)(eventindex >> INFO_HIGH_OFFSET); + att_info[1] = (unsigned char)(eventindex >> INFO_MID_OFFSET); + att_info[2] = (unsigned char)(eventindex >> INFO_LOW_OFFSET); + att_info[3] = (unsigned char)eventindex; + att_info[MAX_UPLOAD_INFO_LEN] = '\0'; + item.id = item_info[ITRUSTEE].id; /* 0x00000185 */ + item.status = STP_RISK; + item.credible = STP_REFERENCE; + item.version = 0; + ret = strcpy_s(item.name, STP_ITEM_NAME_LEN, STP_NAME_ITRUSTEE); + if (ret) { + tloge("strncpy failed %x\n", ret); + return; + } + tlogd("stp get size %lx succ\n", sizeof(item_info[ITRUSTEE].name)); + ret = kernel_stp_upload(item, att_info); + if (ret) + tloge("stp %x event upload failed\n", eventindex); + else + tloge("stp %x event upload succ\n", eventindex); +#else + (void)eventindex; +#endif +} +#endif + +static void cmd_result_check(const struct tc_ns_smc_cmd *cmd, int cmd_index) +{ + if (cmd->ret_val == (int)TEEC_PENDING || 
cmd->ret_val == (int)TEEC_PENDING2) + tlogd("wakeup command %u\n", cmd->event_nr); + + if (cmd->ret_val == (int)TEE_ERROR_TAGET_DEAD) { + bool ta_killed = g_cmd_data->in[cmd_index].cmd_id == GLOBAL_CMD_ID_KILL_TASK; + tloge("error smc call: ret = %x and cmd.err_origin=%x, [ta is %s]\n", + cmd->ret_val, cmd->err_origin, (ta_killed == true) ? "killed" : "crash"); + cmd_monitor_ta_crash((ta_killed == true) ? TYPE_KILLED_TA : TYPE_CRASH_TA, + cmd->uuid, sizeof(struct tc_uuid)); + ta_crash_report_log(); + } else if (cmd->ret_val == (int)TEEC_ERROR_TUI_NOT_AVAILABLE) { + do_ns_tui_release(); + } else if (cmd->ret_val == (int)TEE_ERROR_AUDIT_FAIL) { + tloge("error smc call: ret = %x and err-origin=%x\n", + cmd->ret_val, cmd->err_origin); +#ifdef CONFIG_TEE_AUDIT + tloge("error smc call: status = %x and err-origin=%x\n", + cmd->eventindex, cmd->err_origin); + upload_audit_event(cmd->eventindex); +#endif + } +} + +static void set_shadow_smc_param(struct smc_in_params *in_params, + const struct smc_out_params *out_params, int *n_idled) +{ + if (out_params->exit_reason == SMC_EXIT_PREEMPTED) { + in_params->x0 = TSP_REQUEST; + in_params->x1 = SMC_OPS_SCHEDTO; + in_params->x2 = (unsigned long)current->pid; + in_params->x3 = out_params->ta; + in_params->x4 = out_params->target; + } else if (out_params->exit_reason == SMC_EXIT_NORMAL) { + in_params->x0 = TSP_REQUEST; + in_params->x1 = SMC_OPS_SCHEDTO; + in_params->x2 = (unsigned long)current->pid; + in_params->x3 = 0; + in_params->x4 = 0; + if (*n_idled > IDLED_COUNT) { + *n_idled = 0; + in_params->x1 = SMC_OPS_PROBE_ALIVE; + } + } +} + +static void shadow_wo_pm(const void *arg, struct smc_out_params *out_params, + int *n_idled) +{ + struct smc_in_params in_params = { + TSP_REQUEST, SMC_OPS_START_SHADOW, current->pid, 0, *(unsigned long *)arg + }; + + set_shadow_smc_param(&in_params, out_params, n_idled); + tlogd("%s: [cpu %d] x0=%lx x1=%lx x2=%lx x3=%lx x4=%lx\n", + __func__, raw_smp_processor_id(), in_params.x0, in_params.x1, + 
in_params.x2, in_params.x3, in_params.x4); + +#ifdef CONFIG_THIRDPARTY_COMPATIBLE + if (g_sys_crash) { + out_params->ret = TSP_CRASH; + return; + } +#endif + smc_req(&in_params, out_params, 0); +} + +static void set_preempted_counter(int *n_preempted, int *n_idled, + struct pending_entry *pe) +{ + *n_idled = 0; + (*n_preempted)++; + + if (*n_preempted > PREEMPT_COUNT) { + tlogd("counter too large: retry 10K times on CPU%d\n", smp_processor_id()); + *n_preempted = 0; + } +#ifndef CONFIG_PREEMPT + /* yield cpu to avoid soft lockup */ + cond_resched(); +#endif + if (match_ta_affinity(pe)) + tloge("set shadow pid %d affinity after preempted\n", + pe->task->pid); +} + +static int proc_shadow_thread_normal_exit(struct pending_entry *pe, + int *n_preempted, int *n_idled, int *ret_val) +{ + long long timeout; + int rc; + + if (power_down_cc() != 0) { + tloge("power down cc failed\n"); + *ret_val = -1; + return CLEAN_WITHOUT_PM; + } + *n_preempted = 0; + + timeout = HZ * (long)(HZ_COUNT + ((uint8_t)current->pid & LOW_BYTE)); + rc = (int)wait_event_freezable_timeout(pe->wq, + atomic_read(&pe->run), (long)timeout); + if (rc == 0) + (*n_idled)++; + if (atomic_read(&pe->run) == SHADOW_EXIT_RUN) { + tlogd("shadow thread work quit, be killed\n"); + return CLEAN_WITHOUT_PM; + } else { + atomic_set(&pe->run, 0); + return RETRY_WITH_PM; + } + + return 0; +} + +static bool check_shadow_crash(uint64_t crash_reason, int *ret_val) +{ + if (crash_reason != TSP_CRASH) + return false; + + tloge("TEEOS shadow has crashed!\n"); + if (power_down_cc() != 0) + tloge("power down cc failed\n"); + + g_sys_crash = true; + cmd_monitor_ta_crash(TYPE_CRASH_TEE, NULL, 0); + report_log_system_error(); + *ret_val = -1; + return true; +} + +static void show_other_exit_reason(const struct smc_out_params *params) +{ + if (params->exit_reason == SMC_EXIT_SHADOW) { + tlogd("probe shadow thread non exit, just quit\n"); + return; + } + + tloge("exit on unknown code %ld\n", (long)params->exit_reason); +} + 
+static int shadow_thread_fn(void *arg) +{ + int n_preempted = 0; + int ret = 0; + struct smc_out_params params = { 0, SMC_EXIT_MAX, 0, 0 }; + int n_idled = 0; + struct pending_entry *pe = NULL; + + set_freezable(); + pe = init_pending_entry(); + if (!pe) { + kfree(arg); + tloge("init pending entry failed\n"); + return -ENOMEM; + } + isb(); + wmb(); + +retry: + if (power_on_cc() != 0) { + ret = -EINVAL; + tloge("power on cc failed\n"); + goto clean_wo_pm; + } + +retry_wo_pm: + shadow_wo_pm(arg, ¶ms, &n_idled); + if (check_shadow_crash(params.ret, &ret)) + goto clean_wo_pm; + + if (params.exit_reason == SMC_EXIT_PREEMPTED) { + set_preempted_counter(&n_preempted, &n_idled, pe); + goto retry_wo_pm; + } else if (params.exit_reason == SMC_EXIT_NORMAL) { + ret = proc_shadow_thread_normal_exit(pe, &n_preempted, &n_idled, &ret); + if (ret == CLEAN_WITHOUT_PM) { + goto clean_wo_pm; + } else if (ret == RETRY_WITH_PM) { + if (match_ta_affinity(pe)) + tlogd("set shadow pid %d\n", pe->task->pid); + goto retry; + } + } else { + show_other_exit_reason(¶ms); + } + + if (power_down_cc() != 0) { + tloge("power down cc failed\n"); + ret = -1; + } +clean_wo_pm: + kfree(arg); + release_pending_entry(pe); + return ret; +} + +static void shadow_work_func(struct kthread_work *work) +{ + struct task_struct *shadow_thread = NULL; + struct shadow_work *s_work = + container_of(work, struct shadow_work, kthwork); + uint64_t *target_arg = kzalloc(sizeof(uint64_t), GFP_KERNEL); + + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)target_arg)) { + tloge("%s: kmalloc failed\n", __func__); + return; + } + + *target_arg = s_work->target; + shadow_thread = kthread_create(shadow_thread_fn, + (void *)(uintptr_t)target_arg, "shadow th/%lu", + g_shadow_thread_id++); + if (IS_ERR_OR_NULL(shadow_thread)) { + kfree(target_arg); + tloge("couldn't create shadow_thread %ld\n", + PTR_ERR(shadow_thread)); + return; + } + tlogd("%s: create shadow thread %lu for target %llx\n", + __func__, g_shadow_thread_id, 
*target_arg); + tz_kthread_bind_mask(shadow_thread); +#if CONFIG_CPU_AFF_NR + struct cpumask shadow_mask; + unsigned int i; + + cpumask_clear(&shadow_mask); + for (i = 0; i < CONFIG_CPU_AFF_NR; i++) + cpumask_set_cpu(i, &shadow_mask); + + koadpt_kthread_bind_mask(shadow_thread, &shadow_mask); +#endif + wake_up_process(shadow_thread); +} + +static int proc_smc_wakeup_ca(pid_t ca, int which) +{ + if (ca <= 0) { + tlogw("wakeup for ca <= 0\n"); + } else { + struct pending_entry *pe = find_pending_entry(ca); + + if (!pe) { + (void)raw_smc_send(TSP_REE_SIQ, (phys_addr_t)ca, 0, false); + tlogd("invalid ca pid=%d for pending entry\n", + (int)ca); + return -1; + } + atomic_set(&pe->run, which); + wake_up(&pe->wq); + tlogd("wakeup pending thread %ld\n", (long)ca); + put_pending_entry(pe); + } + return 0; +} + +void wakeup_pe(struct pending_entry *pe) +{ + if (!pe) + return; + + atomic_set(&pe->run, 1); + wake_up(&pe->wq); +} + +int smc_wakeup_broadcast(void) +{ + foreach_pending_entry(wakeup_pe); + return 0; +} + +int smc_wakeup_ca(pid_t ca) +{ + tee_trace_add_event(SPI_WAKEUP, (uint64_t)ca); + return proc_smc_wakeup_ca(ca, 1); +} + +int smc_shadow_exit(pid_t ca) +{ + return proc_smc_wakeup_ca(ca, SHADOW_EXIT_RUN); +} + +void fiq_shadow_work_func(uint64_t target) +{ + struct smc_cmd_ret secret = { SMC_EXIT_MAX, 0, target }; + tee_trace_add_event(INTERRUPT_HANDLE_SPI_REE_SCHEDULED, target); + secs_suspend_status(target); + if (power_on_cc() != 0) { + tloge("power on cc failed\n"); + return; + } + + livepatch_down_read_sem(); + smp_smc_send(TSP_REQUEST, (unsigned long)SMC_OPS_START_FIQSHD, + (unsigned long)(uint32_t)(current->pid), &secret, false); + livepatch_up_read_sem(); + + if (power_down_cc() != 0) + tloge("power down cc failed\n"); + + return; +} + +int smc_queue_shadow_worker(uint64_t target) +{ + struct shadow_work work = { + KTHREAD_WORK_INIT(work.kthwork, shadow_work_func), + .target = target, + }; + +#if (KERNEL_VERSION(4, 9, 0) > LINUX_VERSION_CODE) + if 
(!queue_kthread_work(&g_ipi_helper_worker, &work.kthwork)) { +#else + if (!kthread_queue_work(&g_ipi_helper_worker, &work.kthwork)) { +#endif + tloge("ipi helper work fail queue, was already pending\n"); + return -1; + } + +#if (KERNEL_VERSION(4, 9, 0) > LINUX_VERSION_CODE) + flush_kthread_work(&work.kthwork); +#else + kthread_flush_work(&work.kthwork); +#endif + return 0; +} + +#ifdef CONFIG_DRM_ADAPT +#define DRM_USR_PRIOR (-5) +static void set_drm_strategy(void) +{ + if (!g_drm_mask_flag) { + cpumask_clear(&g_drm_cpu_mask); + cpumask_set_cpu(CPU_FOUR, &g_drm_cpu_mask); + cpumask_set_cpu(CPU_FIVE, &g_drm_cpu_mask); + cpumask_set_cpu(CPU_SIX, &g_drm_cpu_mask); + cpumask_set_cpu(CPU_SEVEN, &g_drm_cpu_mask); + g_drm_mask_flag = 1; + } + + if (current->group_leader && + strstr(current->group_leader->comm, "drm@1.")) { + set_cpus_allowed_ptr(current, &g_drm_cpu_mask); + set_user_nice(current, DRM_USR_PRIOR); + } +} +#endif + +static int smc_ops_normal(struct cmd_reuse_info *info, + const struct tc_ns_smc_cmd *cmd, u64 ops) +{ + if (ops != SMC_OPS_NORMAL) + return 0; + + if (info->cmd_usage == RESEND) { + if (reuse_smc_in_entry((uint32_t)info->cmd_index) != 0) { + tloge("reuse smc entry failed\n"); + release_smc_entry((uint32_t)info->cmd_index); + return -ENOMEM; + } + } else { + info->cmd_index = occupy_free_smc_in_entry(cmd); + if (info->cmd_index == -1) { + tloge("there's no more smc entry\n"); + return -ENOMEM; + } + } + + if (info->cmd_usage != CLEAR) { + info->cmd_index = info->saved_index; + info->cmd_usage = CLEAR; + } else { + info->saved_index = info->cmd_index; + } + + tlogd("submit new cmd: cmd.ca=%u cmd-id=%x ev-nr=%u " + "cmd-index=%u saved-index=%d\n", + cmd->ca_pid, cmd->cmd_id, + g_cmd_data->in[info->cmd_index].event_nr, info->cmd_index, + info->saved_index); + return 0; +} + +static int smp_smc_send_cmd_done(int cmd_index, struct tc_ns_smc_cmd *cmd, + struct tc_ns_smc_cmd *in) +{ + cmd_result_check(cmd, cmd_index); + switch (cmd->ret_val) { + case 
TEEC_PENDING2: { + unsigned int agent_id = cmd->agent_id; + /* If the agent does not exist post + * the answer right back to the TEE + */ + if (agent_process_work(cmd, agent_id) != 0) + tloge("agent process work failed\n"); + return PENDING2_RETRY; + } + case TEE_ERROR_TAGET_DEAD: + case TEEC_PENDING: + /* just copy out, and let out to proceed */ + default: + if (memcpy_s(in, sizeof(*in), cmd, sizeof(*cmd)) != EOK) { + tloge("memcpy failed,%s line:%d", __func__, __LINE__); + cmd->ret_val = -1; + } + + break; + } + + return 0; +} + +#define KERNEL_INDEX 5 +static void print_crash_msg(union crash_inf *crash_info) +{ + static const char *tee_critical_app[] = { + "gtask", + "teesmcmgr", + "hmsysmgr", + "hmfilemgr", + "platdrv", + "kernel", /* index must be same with KERNEL_INDEX */ + "vltmm_service", + "tee_drv_server" + }; + int app_num = sizeof(tee_critical_app) / sizeof(tee_critical_app[0]); + const char *crash_app_name = "NULL"; + uint16_t off = crash_info->crash_msg.off; + int app_index = crash_info->crash_msg.app & LOW_BYTE; + int halt_reason = crash_info->crash_msg.halt_reason; + + crash_info->crash_msg.off = 0; + + if (app_index >= 0 && app_index < app_num) + crash_app_name = tee_critical_app[app_index]; + else + tloge("index error: %x\n", crash_info->crash_msg.app); + + if (app_index == KERNEL_INDEX) { + tloge("====crash app:%s user sym:%s kernel crash off/size: " + "<0x%x/0x%x>\n", crash_app_name, + crash_info->crash_msg.sym_name, + off, crash_info->crash_msg.size); + tloge("====crash halt reason: 0x%x far:0x%x fault:0x%x " + "elr:0x%x (ret_ip: 0x%llx)\n", + halt_reason, crash_info->crash_msg.far, + crash_info->crash_msg.fault, crash_info->crash_msg.elr, + crash_info->crash_reg[2]); + } else { + char syms[SYM_NAME_LEN_MAX] = {0}; + + if (memcpy_s(syms, SYM_NAME_LEN_MAX, + crash_info->crash_msg.sym_name, SYM_NAME_LEN_1) != EOK) + tloge("memcpy sym name failed!\n"); + + if (memcpy_s(syms + SYM_NAME_LEN_1, + SYM_NAME_LEN_MAX - SYM_NAME_LEN_1, + 
crash_info->crash_msg.sym_name_append, SYM_NAME_LEN_2) != EOK) + tloge("memcpy sym_name_append failed!\n"); + tloge("====crash app:%s user_sym:%s + <0x%x/0x%x>\n", + crash_app_name, syms, off, crash_info->crash_msg.size); + tloge("====crash far:0x%x fault:%x\n", + crash_info->crash_msg.far, crash_info->crash_msg.fault); + } +} + +void clr_system_crash_flag(void) +{ + g_sys_crash = false; +} + +static int smp_smc_send_process(struct tc_ns_smc_cmd *cmd, u64 ops, + struct smc_cmd_ret *cmd_ret, int cmd_index) +{ + int ret; + tlogd("smc send start cmd_id = %u, ca = %u\n", + cmd->cmd_id, cmd->ca_pid); + + if (power_on_cc() != 0) { + tloge("power on cc failed\n"); + cmd->ret_val = -1; + return -1; + } + + ret = smp_smc_send(TSP_REQUEST, (unsigned long)ops, + (unsigned long)(uint32_t)(current->pid), cmd_ret, ops != SMC_OPS_ABORT_TASK); + + if (power_down_cc() != 0) { + tloge("power down cc failed\n"); + cmd->ret_val = -1; + return -1; + } + + tlogd("smc send ret = %x, cmd ret.exit=%ld, cmd index=%d\n", + ret, (long)cmd_ret->exit, cmd_index); + isb(); + wmb(); + if (ret == (int)TSP_CRASH) { + union crash_inf crash_info; + crash_info.crash_reg[0] = cmd_ret->exit; + crash_info.crash_reg[1] = cmd_ret->ta; + crash_info.crash_reg[2] = cmd_ret->target; + + tloge("TEEOS has crashed!\n"); + print_crash_msg(&crash_info); + + g_sys_crash = true; + cmd_monitor_ta_crash(TYPE_CRASH_TEE, NULL, 0); + + tee_wake_up_reboot(); +#ifndef CONFIG_TEE_REBOOT + report_log_system_error(); +#endif + cmd->ret_val = TEE_ERROR_IS_DEAD; + return -1; + } + + return 0; +} + +static int init_for_smc_send(struct tc_ns_smc_cmd *in, + struct pending_entry **pe, struct tc_ns_smc_cmd *cmd, + bool reuse) +{ +#ifdef CONFIG_DRM_ADAPT + set_drm_strategy(); +#endif + *pe = init_pending_entry(); + if (!(*pe)) { + tloge("init pending entry failed\n"); + return -ENOMEM; + } + + in->ca_pid = (unsigned int)current->pid; + if (reuse) + return 0; + + if (memcpy_s(cmd, sizeof(*cmd), in, sizeof(*in)) != EOK) { + 
tloge("memcpy in cmd failed\n"); + release_pending_entry(*pe); + return -EFAULT; + } + + return 0; +} + +static bool is_ca_killed(int cmd_index) +{ + (void)cmd_index; + /* if CA has not been killed */ + if (sigkill_pending(current)) { + /* signal pending, send abort cmd */ + tloge("wait event timeout and find pending signal\n"); + return true; + } + return false; +} + +static void clean_smc_resrc(struct cmd_reuse_info info, + const struct tc_ns_smc_cmd *cmd, + struct pending_entry *pe) +{ + if (info.cmd_usage != CLEAR && cmd->ret_val != (int)TEEC_PENDING) + release_smc_entry((uint32_t)info.cmd_index); + + release_pending_entry(pe); +} + +static int set_abort_cmd(int index) +{ + acquire_smc_buf_lock(&g_cmd_data->smc_lock); + if (test_bit(index, (unsigned long *)g_cmd_data->doing_bitmap) == 0) { + release_smc_buf_lock(&g_cmd_data->smc_lock); + tloge("can't abort an unprocess cmd\n"); + return -1; + } + + g_cmd_data->in[index].cmd_id = GLOBAL_CMD_ID_KILL_TASK; + g_cmd_data->in[index].cmd_type = CMD_TYPE_GLOBAL; + /* these phy addrs are not necessary, clear them to avoid gtask check err */ + g_cmd_data->in[index].operation_phys = 0; + g_cmd_data->in[index].operation_h_phys = 0; + g_cmd_data->in[index].login_data_phy = 0; + g_cmd_data->in[index].login_data_h_addr = 0; + + clear_bit((unsigned int)index, (unsigned long *)g_cmd_data->doing_bitmap); + release_smc_buf_lock(&g_cmd_data->smc_lock); + tloge("set abort cmd success\n"); + + return 0; +} + +static enum smc_ops_exit process_abort_cmd(int index, const struct pending_entry *pe) +{ + (void)pe; + if (set_abort_cmd(index) == 0) + return SMC_OPS_ABORT_TASK; + + return SMC_OPS_SCHEDTO; +} + +#define TO_STEP_SIZE 5 +#define INVALID_STEP_SIZE 0xFFFFFFFFU + +struct timeout_step_t { + unsigned long steps[TO_STEP_SIZE]; + uint32_t size; + uint32_t cur; + bool timeout_reset; +}; + +static void init_timeout_step(uint32_t timeout, struct timeout_step_t *step) +{ + uint32_t i = 0; + + if (timeout == 0) { + step->steps[0] = 
RESLEEP_TIMEOUT * HZ; + step->size = 1; + } else { + uint32_t timeout_in_jiffies; + + if (timeout > RESLEEP_TIMEOUT * MSEC_PER_SEC) + timeout = RESLEEP_TIMEOUT * MSEC_PER_SEC; + timeout_in_jiffies = (uint32_t)msecs_to_jiffies(timeout); + + /* + * [timeout_in_jiffies-1, timeout_in_jiffies+2] jiffies + * As REE and TEE tick have deviation, to make sure last REE timeout + * is after TEE timeout, we set a timeout step from + * 'timeout_in_jiffies -1' to 'timeout_in_jiffies + 2' + */ + if (timeout_in_jiffies > 1) { + step->steps[i++] = timeout_in_jiffies - 1; + step->steps[i++] = 1; + } else { + step->steps[i++] = timeout_in_jiffies; + } + step->steps[i++] = 1; + step->steps[i++] = 1; + + if (RESLEEP_TIMEOUT * HZ > (timeout_in_jiffies + 2)) + step->steps[i++] = RESLEEP_TIMEOUT * HZ - 2 - timeout_in_jiffies; + step->size = i; + } + step->cur = 0; +} + +enum pending_t { + PD_WAKEUP, + PD_TIMEOUT, + PD_DONE, + PD_RETRY, +}; + +enum smc_status_t { + ST_DONE, + ST_RETRY, +}; + +static long wait_event_internal(struct pending_entry *pe, struct timeout_step_t *step) +{ + if (!current->mm) { + /* + * smc svc thread need freezable, to solve the problem: + * When the system is in hibernation, the TEE image needs + * to be backed up in some scenarios, all smc cmds are not allowed to enter tee + */ + return wait_event_freezable_timeout(pe->wq, atomic_read(&pe->run), + step->steps[step->cur]); + } else { + return wait_event_timeout(pe->wq, atomic_read(&pe->run), + step->steps[step->cur]); + } +} +static enum pending_t proc_ta_pending(struct pending_entry *pe, + struct timeout_step_t *step, uint64_t pending_args, uint32_t cmd_index, + u64 *ops) +{ + bool kernel_call = false; + bool woke_up = false; + /* + * if ->mm is NULL, it's a kernel thread and a kthread will never + * receive a signal. + */ + uint32_t timeout = (uint32_t)pending_args; + bool timer_no_irq = (pending_args >> 32) == 0 ? 
false : true; + uint32_t cur_timeout; + if (step->cur == INVALID_STEP_SIZE) + init_timeout_step(timeout, step); + if (!current->mm) + kernel_call = true; +resleep: + cur_timeout = jiffies_to_msecs(step->steps[step->cur]); + tee_trace_add_event(SMC_SLEEP, 0); + if (wait_event_internal(pe, step) == 0) { + if (step->cur < (step->size - 1)) { + step->cur++; + /* + * As there may no timer irq in TEE, we need a chance to + * run timer's irq handler initiatively by SMC_OPS_SCHEDTO. + */ + if (timer_no_irq) { + *ops = SMC_OPS_SCHEDTO; + return PD_TIMEOUT; + } else { + goto resleep; + } + } + if (is_ca_killed(cmd_index)) { + *ops = (u64)process_abort_cmd(cmd_index, pe); + return PD_WAKEUP; + } + } else { + woke_up = true; + tlogd("%s woke up\n", __func__); + } + atomic_set(&pe->run, 0); + if (!is_cmd_working_done(cmd_index)) { + *ops = SMC_OPS_SCHEDTO; + return PD_WAKEUP; + } else if (!kernel_call && !woke_up) { + tloge("cmd done, may miss a spi!\n"); + show_cmd_bitmap(); + } + tlogd("cmd is done\n"); + return PD_DONE; +} + +static void set_timeout_step(struct timeout_step_t *timeout_step) +{ + if (!timeout_step->timeout_reset) + return; + + timeout_step->cur = INVALID_STEP_SIZE; + timeout_step->timeout_reset = false; +} + +static enum smc_status_t proc_normal_exit(struct pending_entry *pe, u64 *ops, + struct timeout_step_t *timeout_step, struct smc_cmd_ret *cmd_ret, + int cmd_index) +{ + enum pending_t pd_ret; + + /* notify and set affinity came first, goto retry directly */ + if (match_ta_affinity(pe)) { + *ops = SMC_OPS_SCHEDTO; + return ST_RETRY; + } + + pd_ret = proc_ta_pending(pe, timeout_step, + cmd_ret->ta, (uint32_t)cmd_index, ops); + if (pd_ret == PD_DONE) + return ST_DONE; + + if (pd_ret == PD_WAKEUP) + timeout_step->timeout_reset = true; + return ST_RETRY; +} + +static enum smc_status_t handle_cmd_working_done( + struct tc_ns_smc_cmd *cmd, u64 *ops, struct tc_ns_smc_cmd *in, + struct cmd_reuse_info *info) +{ + if (copy_smc_out_entry((uint32_t)info->cmd_index, 
cmd, &info->cmd_usage) != 0) { + cmd->ret_val = TEEC_ERROR_GENERIC; + return ST_DONE; + } + + if (smp_smc_send_cmd_done(info->cmd_index, cmd, in) != 0) { + *ops = SMC_OPS_NORMAL; /* cmd will be reused */ + return ST_RETRY; + } + + return ST_DONE; +} + +static int smp_smc_send_func(struct tc_ns_smc_cmd *in, bool reuse) +{ + struct cmd_reuse_info info = { 0, 0, CLEAR }; + struct smc_cmd_ret cmd_ret = {0}; + struct tc_ns_smc_cmd cmd = { {0}, 0 }; + struct pending_entry *pe = NULL; + u64 ops; + struct timeout_step_t timeout_step = + {{0, 0, 0, 0}, TO_STEP_SIZE, -1, false}; + + if (init_for_smc_send(in, &pe, &cmd, reuse) != 0) + return TEEC_ERROR_GENERIC; + + if (reuse) { + info.saved_index = (int)in->event_nr; + info.cmd_index = (int)in->event_nr; + info.cmd_usage = RESEND; + } + ops = SMC_OPS_NORMAL; + +#ifdef CONFIG_SCHED_SMT_EXPELLING + force_smt_expeller_prepare(); +#endif + +retry: +#ifdef CONFIG_TEE_REBOOT + if (is_tee_rebooting() && in->cmd_id == GLOBAL_CMD_ID_SET_SERVE_CMD) { + return TEE_ERROR_IS_DEAD; + } +#endif + + set_timeout_step(&timeout_step); + + if (smc_ops_normal(&info, &cmd, ops) != 0) { + release_pending_entry(pe); + return TEEC_ERROR_GENERIC; + } + + if (smp_smc_send_process(&cmd, ops, &cmd_ret, info.cmd_index) == -1) + goto clean; + + if (!is_cmd_working_done((uint32_t)info.cmd_index)) { + if (cmd_ret.exit == SMC_EXIT_NORMAL) { + if (proc_normal_exit(pe, &ops, &timeout_step, &cmd_ret, + info.cmd_index) == ST_RETRY) + goto retry; + } else if (cmd_ret.exit == SMC_EXIT_ABORT) { + ops = (u64)process_abort_cmd(info.cmd_index, pe); + goto retry; + } else { + tloge("invalid cmd work state\n"); + cmd.ret_val = TEEC_ERROR_GENERIC; + goto clean; + } + } + + if (handle_cmd_working_done(&cmd, &ops, in, &info) == ST_RETRY) + goto retry; +clean: + clean_smc_resrc(info, &cmd, pe); + return cmd.ret_val; +} + +static int smc_svc_thread_fn(void *arg) +{ + (void)arg; + set_freezable(); + while (!kthread_should_stop()) { + struct tc_ns_smc_cmd smc_cmd = { {0}, 0 }; 
+ int ret; + + smc_cmd.cmd_type = CMD_TYPE_GLOBAL; + smc_cmd.cmd_id = GLOBAL_CMD_ID_SET_SERVE_CMD; + ret = smp_smc_send_func(&smc_cmd, false); + tlogd("smc svc return 0x%x\n", ret); + } + tloge("smc svc thread stop\n"); + return 0; +} + +void wakeup_tc_siq(uint32_t siq_mode) +{ + uint32_t i; + + if (siq_mode == 0) + return; + + mutex_lock(&g_siq_lock); + i = get_free_siq_index(); + if (i >= MAX_SIQ_NUM) { + tloge("dump is too frequent\n"); + mutex_unlock(&g_siq_lock); + return; + } + g_siq_queue[i] = siq_mode; + atomic_set(&g_siq_th_run, RUN_SIQ_THREAD); + mutex_unlock(&g_siq_lock); + wake_up_interruptible(&siq_th_wait); +} + +/* + * This function first power on crypto cell, then send smc cmd to trustedcore. + * After finished, power off crypto cell. + */ +static int proc_tc_ns_smc(struct tc_ns_smc_cmd *cmd, bool reuse) +{ + int ret; + struct cmd_monitor *item = NULL; + + if (g_sys_crash) { + tloge("ERROR: sys crash happened!!!\n"); + return TEE_ERROR_IS_DEAD; + } + + if (!cmd) { + tloge("invalid cmd\n"); + return TEEC_ERROR_GENERIC; + } + tlogd(KERN_INFO "***smc call start on cpu %d ***\n", + raw_smp_processor_id()); + + item = cmd_monitor_log(cmd); + ret = smp_smc_send_func(cmd, reuse); + cmd_monitor_logend(item); + + return ret; +} + +int tc_ns_smc(struct tc_ns_smc_cmd *cmd) +{ + return proc_tc_ns_smc(cmd, false); +} + +int tc_ns_smc_with_no_nr(struct tc_ns_smc_cmd *cmd) +{ + return proc_tc_ns_smc(cmd, true); +} + +static void smc_work_no_wait(uint32_t type) +{ + (void) raw_smc_send(TSP_REQUEST, g_cmd_phys, type, true); +} + +void send_smc_reset_cmd_buffer(void) +{ + send_smc_cmd_rebooting(TSP_REQUEST, g_cmd_phys, TC_NS_CMD_TYPE_SECURE_CONFIG, NULL); +} + +static void smc_work_set_cmd_buffer(struct work_struct *work) +{ + (void)work; + smc_work_no_wait(TC_NS_CMD_TYPE_SECURE_CONFIG); +} + +void smc_set_cmd_buffer(void) +{ + struct work_struct work; + /* + * If the TEE supports independent reset, the "TEE reset" clears the cmd_buffer information in gtask. 
+	 * Therefore, the tzdriver needs to re-register the cmd_buffer.
+	 * Even if it has been registered in the UEFI phase.
+	 */
+#ifndef CONFIG_TEE_RESET
+	if (g_reserved_cmd_buffer)
+		return;
+#endif
+
+	INIT_WORK_ONSTACK(&work, smc_work_set_cmd_buffer);
+	/* Run work on CPU 0 */
+	schedule_work_on(0, &work);
+	flush_work(&work);
+	tlogd("smc set cmd buffer done\n");
+}
+
+static int alloc_cmd_buffer(void)
+{
+	if (g_reserved_cmd_buffer) {
+		tlogi("use reserved cmd buffer");
+		g_cmd_data = (struct tc_ns_smc_queue *)get_reserved_cmd_vaddr_of(g_cmd_phys, (uint64_t)g_cmd_size);
+		if (!g_cmd_data)
+			return -ENOMEM;
+
+		return 0;
+	}
+	g_cmd_data = (struct tc_ns_smc_queue *)(uintptr_t)get_cmd_mem_vaddr();
+	if (!g_cmd_data)
+		return -ENOMEM;
+
+	g_cmd_phys = get_cmd_mem_paddr((uint64_t)(uintptr_t)g_cmd_data);
+	return 0;
+}
+
+static int init_smc_related_rsrc(const struct device *class_dev)
+{
+	struct cpumask new_mask;
+	int ret;
+
+	/*
+	 * TEE Dump will disable IRQ/FIQ for about 500 ms, it's not
+	 * a good choice to ask CPU0/CPU1 to do the dump.
+	 * So, bind this kernel thread to other CPUs rather than CPU0/CPU1.
+ */ + cpumask_setall(&new_mask); + cpumask_clear_cpu(CPU_ZERO, &new_mask); + cpumask_clear_cpu(CPU_ONE, &new_mask); + koadpt_kthread_bind_mask(g_siq_thread, &new_mask); + /* some products specify the cpu that kthread need to bind */ + tz_kthread_bind_mask(g_siq_thread); + g_ipi_helper_thread = kthread_create(kthread_worker_fn, + &g_ipi_helper_worker, "ipihelper"); + if (IS_ERR_OR_NULL(g_ipi_helper_thread)) { + dev_err(class_dev, "couldn't create ipi helper threads %ld\n", + PTR_ERR(g_ipi_helper_thread)); + ret = (int)PTR_ERR(g_ipi_helper_thread); + return ret; + } + + tz_kthread_bind_mask(g_ipi_helper_thread); + wake_up_process(g_ipi_helper_thread); + wake_up_process(g_siq_thread); + init_cmd_monitor(); + INIT_LIST_HEAD(&g_pending_head); + spin_lock_init(&g_pend_lock); + + return 0; +} + +static int parse_params_from_tee(void) +{ + int ret; + void *buffer = NULL; + + /* enable uefi and reserved buffer, not check teeos compat level */ + if (g_reserved_cmd_buffer) { + tlogw("uefi mode, not check teeos compat level\n"); + return 0; + } + + buffer = (void *)(g_cmd_data->in); + ret = check_teeos_compat_level((uint32_t *)buffer, + COMPAT_LEVEL_BUF_LEN); + if (ret != 0) { + tloge("check teeos compatibility failed\n"); + return ret; + } + if (memset_s(buffer, sizeof(g_cmd_data->in), + 0, sizeof(g_cmd_data->in)) != EOK) { + tloge("Clean the command buffer failed\n"); + ret = -EFAULT; + return ret; + } + return 0; +} + +int smc_context_init(const struct device *class_dev) +{ + int ret; + + if (!class_dev || IS_ERR_OR_NULL(class_dev)) + return -ENOMEM; + + ret = alloc_cmd_buffer(); + if (ret != 0) + return ret; + + /* Send the allocated buffer to TrustedCore for init */ + smc_set_cmd_buffer(); + + ret = parse_params_from_tee(); + if (ret != 0) { + tloge("parse params from tee failed\n"); + goto free_mem; + } + + g_siq_thread = kthread_create(siq_thread_fn, NULL, "siqthread/%d", 0); + if (unlikely(IS_ERR_OR_NULL(g_siq_thread))) { + dev_err(class_dev, "couldn't create 
siqthread %ld\n", + PTR_ERR(g_siq_thread)); + ret = (int)PTR_ERR(g_siq_thread); + goto free_mem; + } + + ret = init_smc_related_rsrc(class_dev); + if (ret != 0) + goto free_siq_worker; + + return 0; + +free_siq_worker: + kthread_stop(g_siq_thread); + g_siq_thread = NULL; +free_mem: + free_cmd_mem((uint64_t)(uintptr_t)g_cmd_data); + g_cmd_data = NULL; + return ret; +} + +int init_smc_svc_thread(void) +{ + g_smc_svc_thread = kthread_create(smc_svc_thread_fn, NULL, + "smc_svc_thread"); + if (unlikely(IS_ERR_OR_NULL(g_smc_svc_thread))) { + tloge("couldn't create smc_svc_thread %ld\n", + PTR_ERR(g_smc_svc_thread)); + return (int)PTR_ERR(g_smc_svc_thread); + } +#ifdef CONFIG_SCHED_SMT_EXPELLING + set_task_expeller(g_smc_svc_thread, SMT_EXPELLER_FORCE_LONG); +#endif + tz_kthread_bind_mask(g_smc_svc_thread); + wake_up_process(g_smc_svc_thread); + return 0; +} + +int teeos_log_exception_archive(unsigned int eventid, + const char *exceptioninfo) +{ +#ifdef CONFIG_TEE_LOG_EXCEPTION + int ret; + struct imonitor_eventobj *teeos_obj = NULL; + + teeos_obj = imonitor_create_eventobj(eventid); + if (exceptioninfo) { + tlogi("upload exception info: [%s]\n", exceptioninfo); + ret = imonitor_set_param(teeos_obj, 0, (long)(uintptr_t)exceptioninfo); + } else { + ret = imonitor_set_param(teeos_obj, 0, (long)(uintptr_t)"teeos something crash"); + } + if (ret) { + tloge("imonitor_set_param failed\n"); + imonitor_destroy_eventobj(teeos_obj); + return ret; + } + ret = imonitor_add_dynamic_path(teeos_obj, "/data/vendor/log/hisi_logs/tee"); + if (ret) { + tloge("add path failed\n"); + imonitor_destroy_eventobj(teeos_obj); + return ret; + } + ret = imonitor_add_dynamic_path(teeos_obj, "/data/log/tee"); + if (ret) { + tloge("add path failed\n"); + imonitor_destroy_eventobj(teeos_obj); + return ret; + } + ret = imonitor_send_event(teeos_obj); + imonitor_destroy_eventobj(teeos_obj); + return ret; +#else + (void)eventid; + (void)exceptioninfo; + return 0; +#endif +} + +void svc_thread_release(void) 
+{ + if (!IS_ERR_OR_NULL(g_smc_svc_thread)) { + kthread_stop(g_smc_svc_thread); + g_smc_svc_thread = NULL; + } +} + +void free_smc_data(void) +{ + struct pending_entry *pe = NULL, *temp = NULL; + if (g_reserved_cmd_buffer) + iounmap((void __iomem *)g_cmd_data); + else + free_cmd_mem((uint64_t)(uintptr_t)g_cmd_data); + smc_wakeup_broadcast(); + svc_thread_release(); + if (!IS_ERR_OR_NULL(g_siq_thread)) { + atomic_set(&g_siq_th_run, STOP_SIQ_THREAD); + wake_up_interruptible(&siq_th_wait); + kthread_stop(g_siq_thread); + g_siq_thread = NULL; + } + +#if (KERNEL_VERSION(4, 9, 0) > LINUX_VERSION_CODE) + flush_kthread_worker(&g_ipi_helper_worker); +#else + kthread_flush_worker(&g_ipi_helper_worker); +#endif + if (!IS_ERR_OR_NULL(g_ipi_helper_thread)) { + kthread_stop(g_ipi_helper_thread); + g_ipi_helper_thread = NULL; + } + free_cmd_monitor(); + + spin_lock(&g_pend_lock); + list_for_each_entry_safe(pe, temp, &g_pending_head, list) { + list_del(&pe->list); + put_task_struct(pe->task); + kfree(pe); + } + spin_unlock(&g_pend_lock); +} diff --git a/tzdriver/core/smc_smp.h b/tzdriver/core/smc_smp.h new file mode 100644 index 0000000000000000000000000000000000000000..c4e24790fd35fd6952caf0c7a1f35aaef06a7b2f --- /dev/null +++ b/tzdriver/core/smc_smp.h @@ -0,0 +1,153 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: function declaration for sending smc cmd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+#ifndef SMC_SMP_H
+#define SMC_SMP_H
+
+#include
+#include "teek_client_constants.h"
+#include "teek_ns_client.h"
+
+#if (KERNEL_VERSION(5, 4, 0) <= LINUX_VERSION_CODE)
+#define CURRENT_CPUS_ALLOWED (&current->cpus_mask)
+#else
+#define CURRENT_CPUS_ALLOWED (&current->cpus_allowed)
+#endif
+
+enum tc_ns_cmd_type {
+	TC_NS_CMD_TYPE_INVALID = 0,
+	TC_NS_CMD_TYPE_NS_TO_SECURE,
+	TC_NS_CMD_TYPE_SECURE_TO_NS,
+	TC_NS_CMD_TYPE_SECURE_TO_SECURE,
+	TC_NS_CMD_TYPE_SECURE_CONFIG = 0xf,
+	TC_NS_CMD_TYPE_MAX
+};
+
+struct pending_entry {
+	atomic_t users;
+	struct task_struct *task;
+#ifdef CONFIG_TA_AFFINITY
+	struct cpumask ca_mask;
+	struct cpumask ta_mask;
+#endif
+	pid_t pid;
+	wait_queue_head_t wq;
+	atomic_t run;
+	struct list_head list;
+};
+
+#ifdef CONFIG_BIG_SESSION
+#define MAX_SMC_CMD CONFIG_BIG_SESSION
+#else
+#define MAX_SMC_CMD 18
+#endif
+
+#ifdef DIV_ROUND_UP
+#undef DIV_ROUND_UP
+#endif
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
+#define BITS_PER_BYTE 8
+
+#ifdef BITS_TO_LONGS
+#undef BITS_TO_LONGS
+#endif
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(uint64_t))
+
+#ifdef BIT_MASK
+#undef BIT_MASK
+#endif
+#define BIT_MASK(nr) (1UL << (((uint64_t)(nr)) % sizeof(uint64_t)))
+
+#ifdef BIT_WORD
+#undef BIT_WORD
+#endif
+#define BIT_WORD(nr) ((nr) / sizeof(uint64_t))
+
+#ifdef DECLARE_BITMAP
+#undef DECLARE_BITMAP
+#endif
+#define DECLARE_BITMAP(name, bits) uint64_t name[BITS_TO_LONGS(bits)]
+
+#define SIQ_DUMP_TIMEOUT 1U
+#define SIQ_DUMP_SHELL 2U
+
+typedef uint32_t smc_buf_lock_t;
+
+struct tc_ns_smc_queue {
+	/* set when CA send cmd_in, clear after cmd_out return */
+	DECLARE_BITMAP(in_bitmap, MAX_SMC_CMD);
+	/* set when gtask get cmd_in, clear after cmd_out return */
+	DECLARE_BITMAP(doing_bitmap, MAX_SMC_CMD);
+	/* set when gtask get cmd_out, clear after cmd_out return */
+	DECLARE_BITMAP(out_bitmap, MAX_SMC_CMD);
+	smc_buf_lock_t smc_lock;
+	volatile uint32_t last_in;
+	struct tc_ns_smc_cmd in[MAX_SMC_CMD];
+	volatile uint32_t
last_out; + struct tc_ns_smc_cmd out[MAX_SMC_CMD]; +}; + +#define SYM_NAME_LEN_MAX 16 +#define SYM_NAME_LEN_1 7 +#define SYM_NAME_LEN_2 4 +#define CRASH_REG_NUM 3 +#define LOW_FOUR_BITE 4 + +union crash_inf { + uint64_t crash_reg[CRASH_REG_NUM]; + struct { + uint8_t halt_reason : LOW_FOUR_BITE; + uint8_t app : LOW_FOUR_BITE; + char sym_name[SYM_NAME_LEN_1]; + uint16_t off; + uint16_t size; + uint32_t far; + uint32_t fault; + union { + char sym_name_append[SYM_NAME_LEN_2]; + uint32_t elr; + }; + } crash_msg; +}; + +#define RESLEEP_TIMEOUT 15 + +bool sigkill_pending(struct task_struct *tsk); +int smc_context_init(const struct device *class_dev); +void free_smc_data(void); +int tc_ns_smc(struct tc_ns_smc_cmd *cmd); +int tc_ns_smc_with_no_nr(struct tc_ns_smc_cmd *cmd); +int teeos_log_exception_archive(unsigned int eventid, const char *exceptioninfo); +void set_cmd_send_state(void); +int init_smc_svc_thread(void); +int smc_wakeup_ca(pid_t ca); +int smc_wakeup_broadcast(void); +int smc_shadow_exit(pid_t ca); +int smc_queue_shadow_worker(uint64_t target); +void fiq_shadow_work_func(uint64_t target); +struct pending_entry *find_pending_entry(pid_t pid); +void foreach_pending_entry(void (*func)(struct pending_entry *)); +void put_pending_entry(struct pending_entry *pe); +void show_cmd_bitmap(void); +void wakeup_tc_siq(uint32_t siq_mode); +void smc_set_cmd_buffer(void); +unsigned long raw_smc_send(uint32_t cmd, phys_addr_t cmd_addr, uint32_t cmd_type, uint8_t wait); +void occupy_clean_cmd_buf(void); +void clr_system_crash_flag(void); +void svc_thread_release(void); +int send_smc_cmd_rebooting(uint32_t cmd_id, phys_addr_t cmd_addr, uint32_t cmd_type, + const struct tc_ns_smc_cmd *in_cmd); +void send_smc_reset_cmd_buffer(void); + +#endif diff --git a/tzdriver/core/tc_client_driver.c b/tzdriver/core/tc_client_driver.c new file mode 100644 index 0000000000000000000000000000000000000000..5d3c2b851ccdd7eef836d2d5eda2b727cc3bbeed --- /dev/null +++ b/tzdriver/core/tc_client_driver.c 
@@ -0,0 +1,1500 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: function for proc open,close session and invoke. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include "tc_client_driver.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE) +#include +#include +#include +#endif +#include +#include +#include +#include +#include "smc_smp.h" +#include "teek_client_constants.h" +#include "agent.h" +#include "mem.h" +#include "gp_ops.h" +#include "tc_ns_log.h" +#include "tc_ns_client.h" +#include "mailbox_mempool.h" +#include "shared_mem.h" +#include "tz_spi_notify.h" +#include "client_hash_auth.h" +#include "auth_base_impl.h" +#include "tlogger.h" +#include "tzdebug.h" +#include "session_manager.h" +#include "internal_functions.h" +#include "ko_adapt.h" +#include "tz_pm.h" +#include "tui.h" +#include "dynamic_ion_mem.h" +#include "static_ion_mem.h" +#include "reserved_mempool.h" +#ifdef CONFIG_CMS_SIGNATURE +#include "tz_update_crl.h" +#endif +#ifdef CONFIG_TEE_REBOOT +#include "reboot.h" +#endif + +#ifdef CONFIG_FFA_SUPPORT +#include "ffa_abi.h" +#endif + +static struct class *g_driver_class; +static struct device_node *g_dev_node; + +struct dev_node g_tc_client; +struct dev_node 
g_tc_private; + +#ifdef CONFIG_ACPI +static int g_acpi_irq; +#endif + +static DEFINE_MUTEX(g_set_ca_hash_lock); + +/* dev_file_id rangde in (0, 32767), 32768 use 4k bitmap */ +#define DEV_FILE_ID_MAX 32768u + +unsigned long *g_dev_bit_map = NULL; +static DEFINE_MUTEX(g_dev_bit_map_lock); + +static int alloc_dev_bitmap(void) +{ + mutex_lock(&g_dev_bit_map_lock); + if (g_dev_bit_map == NULL) { + g_dev_bit_map = bitmap_alloc(DEV_FILE_ID_MAX, GFP_KERNEL | __GFP_ZERO); + if (g_dev_bit_map == NULL) { + tloge("alloc bit map failed\n"); + mutex_unlock(&g_dev_bit_map_lock); + return -1; + } + } + + mutex_unlock(&g_dev_bit_map_lock); + return 0; +} + +static void free_dev_bitmap(void) +{ + mutex_lock(&g_dev_bit_map_lock); + if (g_dev_bit_map != NULL) { + bitmap_free(g_dev_bit_map); + g_dev_bit_map = NULL; + } + mutex_unlock(&g_dev_bit_map_lock); +} + +static bool alloc_dev_file_id(unsigned int *dev_file_id) +{ + int pos; + + mutex_lock(&g_dev_bit_map_lock); + if (dev_file_id == NULL || g_dev_bit_map == NULL) { + tloge("invalid param\n"); + mutex_unlock(&g_dev_bit_map_lock); + return false; + } + + pos = bitmap_find_free_region(g_dev_bit_map, DEV_FILE_ID_MAX, 0); + if (pos < 0) { + tloge("dev file fd full, alloc failed, error = %d\n", pos); + mutex_unlock(&g_dev_bit_map_lock); + return false; + } + + *dev_file_id = (unsigned int)pos; + tlogd("alloc dev file id = %u", *dev_file_id); + mutex_unlock(&g_dev_bit_map_lock); + return true; +} + +static void free_dev_file_id(unsigned int dev_file_id) +{ + mutex_lock(&g_dev_bit_map_lock); + if (g_dev_bit_map == NULL) { + tloge("dev file fd bitmap is null\n"); + mutex_unlock(&g_dev_bit_map_lock); + return; + } + + if (dev_file_id >= DEV_FILE_ID_MAX) { + tloge("dev file fd invalid\n"); + mutex_unlock(&g_dev_bit_map_lock); + return; + } + + /* clear dev_file_id bit for reuse */ + bitmap_release_region(g_dev_bit_map, dev_file_id, 0); + tlogd("clear dev file id %u\n", dev_file_id); + mutex_unlock(&g_dev_bit_map_lock); +} + +/* dev node 
list and itself has mutex to avoid race */ +struct tc_ns_dev_list g_tc_ns_dev_list; + +static bool g_init_succ = false; + +static void set_tz_init_flag(void) +{ + g_init_succ = true; +} + +static void clear_tz_init_flag(void) +{ + g_init_succ = false; +} + +bool get_tz_init_flag(void) +{ + return g_init_succ; +} + +struct tc_ns_dev_list *get_dev_list(void) +{ + return &g_tc_ns_dev_list; +} + +static int tc_ns_get_tee_version(const struct tc_ns_dev_file *dev_file, + void __user *argp) +{ + unsigned int version; + struct tc_ns_smc_cmd smc_cmd = { {0}, 0 }; + int ret = 0; + struct mb_cmd_pack *mb_pack = NULL; + + if (!argp) { + tloge("error input parameter\n"); + return -EINVAL; + } + + mb_pack = mailbox_alloc_cmd_pack(); + if (!mb_pack) { + tloge("alloc mb pack failed\n"); + return -ENOMEM; + } + + mb_pack->operation.paramtypes = TEEC_VALUE_OUTPUT; + smc_cmd.cmd_type = CMD_TYPE_GLOBAL; + smc_cmd.cmd_id = GLOBAL_CMD_ID_GET_TEE_VERSION; + smc_cmd.dev_file_id = dev_file->dev_file_id; + smc_cmd.operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation); + smc_cmd.operation_h_phys = + (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM; + + if (tc_ns_smc(&smc_cmd) != 0) { + ret = -EPERM; + tloge("smc call returns error ret 0x%x\n", smc_cmd.ret_val); + } + + version = mb_pack->operation.params[0].value.a; + if (copy_to_user(argp, &version, sizeof(unsigned int)) != 0) + ret = -EFAULT; + mailbox_free(mb_pack); + + return ret; +} + +/* + * This is the login information + * and is set teecd when client opens a new session + */ +#define MAX_BUF_LEN 4096 + +static int get_pack_name_len(struct tc_ns_dev_file *dev_file, + const uint8_t *cert_buffer) +{ + uint32_t tmp_len = 0; + + dev_file->pkg_name_len = 0; + if (memcpy_s(&tmp_len, sizeof(tmp_len), cert_buffer, sizeof(tmp_len)) != 0) + return -EFAULT; + + if (tmp_len == 0 || tmp_len >= MAX_PACKAGE_NAME_LEN) { + tloge("invalid pack name len: %u\n", tmp_len); + return -EINVAL; + } + 
dev_file->pkg_name_len = tmp_len; + tlogd("package name len is %u\n", dev_file->pkg_name_len); + + return 0; +} + +static int get_public_key_len(struct tc_ns_dev_file *dev_file, + const uint8_t *cert_buffer) +{ + uint32_t tmp_len = 0; + + dev_file->pub_key_len = 0; + if (memcpy_s(&tmp_len, sizeof(tmp_len), cert_buffer, sizeof(tmp_len)) != 0) + return -EFAULT; + + if (tmp_len > MAX_PUBKEY_LEN) { + tloge("invalid public key len: %u\n", tmp_len); + return -EINVAL; + } + dev_file->pub_key_len = tmp_len; + tlogd("publick key len is %u\n", dev_file->pub_key_len); + + return 0; +} + +static int get_public_key(struct tc_ns_dev_file *dev_file, + const uint8_t *cert_buffer) +{ + /* get public key */ + if (dev_file->pub_key_len == 0) + return 0; + + if (memcpy_s(dev_file->pub_key, MAX_PUBKEY_LEN, cert_buffer, + dev_file->pub_key_len) != 0) { + tloge("failed to copy pub key len\n"); + return -EINVAL; + } + + return 0; +} + +static bool is_cert_buffer_size_valid(int cert_buffer_size) +{ + /* + * GET PACKAGE NAME AND APP CERTIFICATE: + * The proc_info format is as follows: + * package_name_len(4 bytes) || package_name || + * apk_cert_len(4 bytes) || apk_cert. + * or package_name_len(4 bytes) || package_name + * || exe_uid_len(4 bytes) || exe_uid. 
+ * The apk certificate format is as follows: + * modulus_size(4bytes) ||modulus buffer + * || exponent size || exponent buffer + */ + if (cert_buffer_size > MAX_BUF_LEN || cert_buffer_size == 0) { + tloge("cert buffer size is invalid!\n"); + return false; + } + + return true; +} + +static int alloc_login_buf(struct tc_ns_dev_file *dev_file, + uint8_t **cert_buffer, unsigned int *cert_buffer_size) +{ + *cert_buffer_size = (unsigned int)(MAX_PACKAGE_NAME_LEN + + MAX_PUBKEY_LEN + sizeof(dev_file->pkg_name_len) + + sizeof(dev_file->pub_key_len)); + + *cert_buffer = kmalloc(*cert_buffer_size, GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)(*cert_buffer))) { + tloge("failed to allocate login buffer!"); + return -ENOMEM; + } + + return 0; +} + +static int client_login_prepare(uint8_t *cert_buffer, + const void __user *buffer, unsigned int cert_buffer_size) +{ + if (!is_cert_buffer_size_valid(cert_buffer_size)) + return -EINVAL; + + if (copy_from_user(cert_buffer, buffer, cert_buffer_size) != 0) { + tloge("Failed to get user login info!\n"); + return -EINVAL; + } + + return 0; +} + +static int tc_login_check(const struct tc_ns_dev_file *dev_file) +{ + int ret = check_teecd_auth(); +#ifdef CONFIG_CADAEMON_AUTH + if (ret != 0) + ret = check_cadaemon_auth(); +#endif + if (ret != 0) { + tloge("teec auth failed, ret %d\n", ret); + return -EACCES; + } + + if (!dev_file) + return -EINVAL; + + return 0; +} + +static int tc_ns_client_login_func(struct tc_ns_dev_file *dev_file, + const void __user *buffer) +{ + int ret; + uint8_t *cert_buffer = NULL; + uint8_t *temp_cert_buffer = NULL; + unsigned int cert_buffer_size = 0; + + if (tc_login_check(dev_file) != 0) + return -EFAULT; + + if (!buffer) { + /* + * We accept no debug information + * because the daemon might have failed + */ + dev_file->pkg_name_len = 0; + dev_file->pub_key_len = 0; + return 0; + } + + mutex_lock(&dev_file->login_setup_lock); + if (dev_file->login_setup) { + tloge("login information cannot be 
set twice!\n"); + mutex_unlock(&dev_file->login_setup_lock); + return -EINVAL; + } + + ret = alloc_login_buf(dev_file, &cert_buffer, &cert_buffer_size); + if (ret != 0) { + mutex_unlock(&dev_file->login_setup_lock); + return ret; + } + + temp_cert_buffer = cert_buffer; + if (client_login_prepare(cert_buffer, buffer, cert_buffer_size) != 0) { + ret = -EINVAL; + goto error; + } + + ret = get_pack_name_len(dev_file, cert_buffer); + if (ret != 0) + goto error; + cert_buffer += sizeof(dev_file->pkg_name_len); + + if (strncpy_s(dev_file->pkg_name, MAX_PACKAGE_NAME_LEN, cert_buffer, + dev_file->pkg_name_len) != 0) { + ret = -ENOMEM; + goto error; + } + cert_buffer += dev_file->pkg_name_len; + + ret = get_public_key_len(dev_file, cert_buffer); + if (ret != 0) + goto error; + cert_buffer += sizeof(dev_file->pub_key_len); + + ret = get_public_key(dev_file, cert_buffer); + dev_file->login_setup = true; + +error: + kfree(temp_cert_buffer); + mutex_unlock(&dev_file->login_setup_lock); + return ret; +} + +int tc_ns_client_open(struct tc_ns_dev_file **dev_file, uint8_t kernel_api) +{ + struct tc_ns_dev_file *dev = NULL; + + tlogd("tc_client_open\n"); + if (!dev_file) { + tloge("dev_file is NULL\n"); + return -EINVAL; + } + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)dev)) { + tloge("dev malloc failed\n"); + return -ENOMEM; + } + + if (!alloc_dev_file_id(&(dev->dev_file_id))) { + kfree(dev); + return -ENOMEM; + } + + mutex_lock(&g_tc_ns_dev_list.dev_lock); + list_add_tail(&dev->head, &g_tc_ns_dev_list.dev_file_list); + mutex_unlock(&g_tc_ns_dev_list.dev_lock); + INIT_LIST_HEAD(&dev->shared_mem_list); + dev->login_setup = 0; +#ifdef CONFIG_AUTH_HASH + dev->cainfo_hash_setup = 0; +#endif + dev->kernel_api = kernel_api; + dev->load_app_flag = 0; + mutex_init(&dev->service_lock); + mutex_init(&dev->shared_mem_lock); + mutex_init(&dev->login_setup_lock); +#ifdef CONFIG_AUTH_HASH + mutex_init(&dev->cainfo_hash_setup_lock); +#endif + 
init_completion(&dev->close_comp); + *dev_file = dev; + + return 0; +} + +static void del_dev_node(struct tc_ns_dev_file *dev) +{ + if (!dev) + return; + + mutex_lock(&g_tc_ns_dev_list.dev_lock); + list_del(&dev->head); + mutex_unlock(&g_tc_ns_dev_list.dev_lock); +} + +void free_dev(struct tc_ns_dev_file *dev) +{ + del_dev_node(dev); + tee_agent_clear_dev_owner(dev); + free_dev_file_id(dev->dev_file_id); + if (memset_s(dev, sizeof(*dev), 0, sizeof(*dev)) != 0) + tloge("Caution, memset dev fail!\n"); + kfree(dev); +} + +int tc_ns_client_close(struct tc_ns_dev_file *dev) +{ + if (!dev) { + tloge("invalid dev(null)\n"); + return -EINVAL; + } + + close_unclosed_session_in_kthread(dev); + + if (dev->dev_file_id == tui_attach_device()) + free_tui_caller_info(); + + kill_ion_by_cafd(dev->dev_file_id); + /* for thirdparty agent, code runs here only when agent crashed */ + send_crashed_event_response_all(dev); + free_dev(dev); + + return 0; +} + +void shared_vma_open(struct vm_area_struct *vma) +{ + (void)vma; +} + +void shared_vma_close(struct vm_area_struct *vma) +{ + struct tc_ns_shared_mem *shared_mem = NULL; + struct tc_ns_shared_mem *shared_mem_temp = NULL; + bool find = false; + struct tc_ns_dev_file *dev_file = NULL; + if (!vma) { + tloge("vma is null\n"); + return; + } + dev_file = vma->vm_private_data; + if (!dev_file) { + tloge("vm private data is null\n"); + return; + } + + mutex_lock(&dev_file->shared_mem_lock); + list_for_each_entry_safe(shared_mem, shared_mem_temp, + &dev_file->shared_mem_list, head) { + if (shared_mem) { + if (shared_mem->user_addr == + (void *)(uintptr_t)vma->vm_start) { + shared_mem->user_addr = INVALID_MAP_ADDR; + find = true; + } else if (shared_mem->user_addr_ca == + (void *)(uintptr_t)vma->vm_start) { + shared_mem->user_addr_ca = INVALID_MAP_ADDR; + find = true; + } + + if ((shared_mem->user_addr == INVALID_MAP_ADDR) && + (shared_mem->user_addr_ca == INVALID_MAP_ADDR)) + list_del(&shared_mem->head); + + /* pair with tc client mmap */ + 
if (find) { + put_sharemem_struct(shared_mem); + break; + } + } + } + mutex_unlock(&dev_file->shared_mem_lock); +} + +static struct vm_operations_struct g_shared_remap_vm_ops = { + .open = shared_vma_open, + .close = shared_vma_close, +}; + +static struct tc_ns_shared_mem *find_sharedmem( + const struct vm_area_struct *vma, + const struct tc_ns_dev_file *dev_file, bool *only_remap) +{ + struct tc_ns_shared_mem *shm_tmp = NULL; + unsigned long len = vma->vm_end - vma->vm_start; + + /* + * using vma->vm_pgoff as share_mem index + * check if aready allocated + */ + list_for_each_entry(shm_tmp, &dev_file->shared_mem_list, head) { + if ((unsigned long)atomic_read(&shm_tmp->offset) == vma->vm_pgoff) { + tlogd("sharemem already alloc, shm tmp->offset=%d\n", + atomic_read(&shm_tmp->offset)); + /* + * args check: + * 1. this shared mem is already mapped + * 2. remap a different size shared_mem + */ + if ((shm_tmp->user_addr_ca != INVALID_MAP_ADDR) || + (vma->vm_end - vma->vm_start != shm_tmp->len)) { + tloge("already remap once!\n"); + return NULL; + } + /* return the same sharedmem specified by vm_pgoff */ + *only_remap = true; + get_sharemem_struct(shm_tmp); + return shm_tmp; + } + } + + /* if not find, alloc a new sharemem */ + return tc_mem_allocate(len); +} + +static int remap_shared_mem(struct vm_area_struct *vma, + const struct tc_ns_shared_mem *shared_mem) +{ + int ret; +#ifdef CONFIG_LIBLINUX + unsigned long pa = virt_to_phys(shared_mem->kernel_addr); + unsigned long va = vma->vm_start; + unsigned long size = vma->vm_end - vma->vm_start; + + pgprot_t pro; + pro.pgprot = VM_READ | VM_WRITE; + ret = remap_pfn_range(NULL, va, pa >> PAGE_SHIFT, size, pro); +#else + if (shared_mem->mem_type == RESERVED_TYPE) { + unsigned long pfn = res_mem_virt_to_phys((uintptr_t)(shared_mem->kernel_addr)) >> PAGE_SHIFT; + unsigned long size = vma->vm_end - vma->vm_start; + ret = remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot); // PAGE_SHARED + if (ret != 0) + 
tloge("remap pfn for user failed, ret %d", ret); + return ret; + } +#if (KERNEL_VERSION(6, 4, 0) <= LINUX_VERSION_CODE) + vma->__vm_flags |= VM_USERMAP; +#else + vma->vm_flags |= VM_USERMAP; +#endif + ret = remap_vmalloc_range(vma, shared_mem->kernel_addr, 0); +#endif + if (ret != 0) + tloge("can't remap to user, ret = %d\n", ret); + + return ret; +} + +/* + * in this func, we need to deal with follow cases: + * vendor CA alloc sharedmem (alloc and remap); + * HIDL alloc sharedmem (alloc and remap); + * system CA alloc sharedmem (only just remap); + */ +static int tc_client_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int ret; + struct tc_ns_dev_file *dev_file = NULL; + struct tc_ns_shared_mem *shared_mem = NULL; + bool only_remap = false; + + if (!filp || !vma || !filp->private_data) { + tloge("invalid args for tc mmap\n"); + return -EINVAL; + } + dev_file = filp->private_data; + + mutex_lock(&dev_file->shared_mem_lock); + shared_mem = find_sharedmem(vma, dev_file, &only_remap); + if (IS_ERR_OR_NULL(shared_mem)) { + tloge("alloc shared mem failed\n"); + mutex_unlock(&dev_file->shared_mem_lock); + return -ENOMEM; + } + + ret = remap_shared_mem(vma, shared_mem); + if (ret != 0) { + if (only_remap) + put_sharemem_struct(shared_mem); + else + tc_mem_free(shared_mem); + mutex_unlock(&dev_file->shared_mem_lock); + return ret; + } +#if (KERNEL_VERSION(6, 4, 0) <= LINUX_VERSION_CODE) + vma->__vm_flags |= VM_DONTCOPY; +#else + vma->vm_flags |= VM_DONTCOPY; +#endif + vma->vm_ops = &g_shared_remap_vm_ops; + shared_vma_open(vma); + vma->vm_private_data = (void *)dev_file; + + if (only_remap) { + shared_mem->user_addr_ca = (void *)(uintptr_t)vma->vm_start; + mutex_unlock(&dev_file->shared_mem_lock); + return ret; + } + shared_mem->user_addr = (void *)(uintptr_t)vma->vm_start; + atomic_set(&shared_mem->offset, vma->vm_pgoff); + get_sharemem_struct(shared_mem); + list_add_tail(&shared_mem->head, &dev_file->shared_mem_list); + mutex_unlock(&dev_file->shared_mem_lock); 
+ + return ret; +} + +static int ioctl_register_agent(struct tc_ns_dev_file *dev_file, unsigned long arg) +{ + int ret; + struct agent_ioctl_args args; + + if (arg == 0) { + tloge("arg is NULL\n"); + return -EFAULT; + } + + if (copy_from_user(&args, (void *)(uintptr_t)arg, sizeof(args)) != 0) { + tloge("copy agent args failed\n"); + return -EFAULT; + } + + ret = tc_ns_register_agent(dev_file, args.id, args.buffer_size, + &args.buffer, true); + if (ret == 0) { + if (copy_to_user((void *)(uintptr_t)arg, &args, sizeof(args)) != 0) + tloge("copy agent user addr failed\n"); + } + + return ret; +} + +static int ioctl_check_agent_owner(const struct tc_ns_dev_file *dev_file, + unsigned int agent_id) +{ + struct smc_event_data *event_data = NULL; + + event_data = find_event_control(agent_id); + if (event_data == NULL) { + tloge("invalid agent id\n"); + return -EINVAL; + } + + if (event_data->owner != dev_file) { + tloge("invalid request, access denied!\n"); + put_agent_event(event_data); + return -EPERM; + } + + put_agent_event(event_data); + return 0; +} + +/* ioctls for the secure storage daemon */ +static int public_ioctl(const struct file *file, unsigned int cmd, unsigned long arg, bool is_from_client_node) +{ + int ret = -EINVAL; + struct tc_ns_dev_file *dev_file = file->private_data; + void *argp = (void __user *)(uintptr_t)arg; + if (!dev_file) { + tloge("invalid params\n"); + return -EINVAL; + } + + switch (cmd) { + case TC_NS_CLIENT_IOCTL_WAIT_EVENT: + if (ioctl_check_agent_owner(dev_file, (unsigned int)arg) != 0) + return -EINVAL; + ret = tc_ns_wait_event((unsigned int)arg); + break; + case TC_NS_CLIENT_IOCTL_SEND_EVENT_RESPONSE: + if (ioctl_check_agent_owner(dev_file, (unsigned int)arg) != 0) + return -EINVAL; + ret = tc_ns_send_event_response((unsigned int)arg); + break; + case TC_NS_CLIENT_IOCTL_REGISTER_AGENT: + ret = ioctl_register_agent(dev_file, arg); + break; + case TC_NS_CLIENT_IOCTL_UNREGISTER_AGENT: + if (ioctl_check_agent_owner(dev_file, (unsigned 
int)arg) != 0) + return -EINVAL; + ret = tc_ns_unregister_agent((unsigned int)arg); + break; + case TC_NS_CLIENT_IOCTL_LOAD_APP_REQ: + ret = tc_ns_load_secfile(file->private_data, argp, is_from_client_node); + break; + default: + tloge("invalid cmd!"); + return ret; + } + tlogd("client ioctl ret = 0x%x\n", ret); + return ret; +} + +static int tc_ns_send_cancel_cmd(struct tc_ns_dev_file *dev_file, void *argp) +{ + struct tc_ns_client_context client_context = {{0}}; + (void)dev_file; + + if (!argp) { + tloge("argp is NULL input buffer\n"); + return -EINVAL; + } + if (copy_from_user(&client_context, argp, sizeof(client_context)) != 0) { + tloge("copy from user failed\n"); + return -ENOMEM; + } + + client_context.returns.code = TEEC_ERROR_GENERIC; + client_context.returns.origin = TEEC_ORIGIN_COMMS; + tloge("not support send cancel cmd now\n"); + if (copy_to_user(argp, &client_context, sizeof(client_context)) != 0) + return -EFAULT; + + return 0; +} + +static int get_agent_id(unsigned long arg, unsigned int cmd, uint32_t *agent_id) +{ + struct agent_ioctl_args args; + switch (cmd) { + case TC_NS_CLIENT_IOCTL_WAIT_EVENT: + case TC_NS_CLIENT_IOCTL_SEND_EVENT_RESPONSE: + case TC_NS_CLIENT_IOCTL_UNREGISTER_AGENT: + *agent_id = (unsigned int)arg; + break; + case TC_NS_CLIENT_IOCTL_REGISTER_AGENT: + if (copy_from_user(&args, (void *)(uintptr_t)arg, sizeof(args))) { + tloge("copy agent args failed\n"); + return -EFAULT; + } + *agent_id = args.id; + break; + default: + return -EFAULT; + } + return 0; +} + +static int tc_client_agent_ioctl(const struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret = -EFAULT; + uint32_t agent_id; + + switch (cmd) { + case TC_NS_CLIENT_IOCTL_SEND_EVENT_RESPONSE: + case TC_NS_CLIENT_IOCTL_WAIT_EVENT: + case TC_NS_CLIENT_IOCTL_REGISTER_AGENT: + case TC_NS_CLIENT_IOCTL_UNREGISTER_AGENT: + if (get_agent_id(arg, cmd, &agent_id) != 0) + return ret; + if (check_ext_agent_access(agent_id) != 0) { + tloge("the agent is not access\n"); + 
return -EPERM; + } + ret = public_ioctl(file, cmd, arg, true); + break; + default: + tloge("invalid cmd 0x%x!", cmd); + break; + } + + return ret; +} + +static void handle_cmd_prepare(unsigned int cmd) +{ + if (cmd != TC_NS_CLIENT_IOCTL_WAIT_EVENT && + cmd != TC_NS_CLIENT_IOCTL_SEND_EVENT_RESPONSE) + livepatch_down_read_sem(); +} + +static void handle_cmd_finish(unsigned int cmd) +{ + if (cmd != TC_NS_CLIENT_IOCTL_WAIT_EVENT && + cmd != TC_NS_CLIENT_IOCTL_SEND_EVENT_RESPONSE) + livepatch_up_read_sem(); +} + +static long tc_private_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret = -EFAULT; + void *argp = (void __user *)(uintptr_t)arg; + handle_cmd_prepare(cmd); + switch (cmd) { + case TC_NS_CLIENT_IOCTL_GET_TEE_VERSION: + ret = tc_ns_get_tee_version(file->private_data, argp); + break; + case TC_NS_CLIENT_IOCTL_SET_NATIVECA_IDENTITY: + mutex_lock(&g_set_ca_hash_lock); + ret = tc_ns_set_native_hash((unsigned long)(uintptr_t)argp, GLOBAL_CMD_ID_SET_CA_HASH); + mutex_unlock(&g_set_ca_hash_lock); + break; + case TC_NS_CLIENT_IOCTL_LATEINIT: + ret = tc_ns_late_init(arg); + break; + case TC_NS_CLIENT_IOCTL_SYC_SYS_TIME: + ret = sync_system_time_from_user( + (struct tc_ns_client_time *)(uintptr_t)arg); + break; + default: + ret = public_ioctl(file, cmd, arg, false); + break; + } + + handle_cmd_finish(cmd); + + return ret; +} + +static long tc_client_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret = -EFAULT; + void *argp = (void __user *)(uintptr_t)arg; + + handle_cmd_prepare(cmd); + switch (cmd) { + case TC_NS_CLIENT_IOCTL_SES_OPEN_REQ: + case TC_NS_CLIENT_IOCTL_SES_CLOSE_REQ: + case TC_NS_CLIENT_IOCTL_SEND_CMD_REQ: + ret = tc_client_session_ioctl(file, cmd, arg); + break; + case TC_NS_CLIENT_IOCTL_CANCEL_CMD_REQ: + ret = tc_ns_send_cancel_cmd(file->private_data, argp); + break; + case TC_NS_CLIENT_IOCTL_LOGIN: + ret = tc_ns_client_login_func(file->private_data, argp); + break; + case TC_NS_CLIENT_IOCTL_LOAD_APP_REQ: 
+ ret = public_ioctl(file, cmd, arg, true); + break; +#ifdef CONFIG_TEE_TUI + case TC_NS_CLIENT_IOCTL_TUI_EVENT: + ret = tc_ns_tui_event(file->private_data, argp); + break; +#endif +#ifdef CONFIG_CMS_SIGNATURE + case TC_NS_CLIENT_IOCTL_UPDATE_TA_CRL: + ret = tc_ns_update_ta_crl(file->private_data, argp); + break; +#endif + default: { + if (check_teecd_auth() == 0) + ret = tc_private_ioctl(file, cmd, arg); + else + ret = tc_client_agent_ioctl(file, cmd, arg); + break; + } + } + + handle_cmd_finish(cmd); + + tlogd("tc client ioctl ret = 0x%x\n", ret); + return (long)ret; +} + +static int tc_client_open(struct inode *inode, struct file *file) +{ + int ret; + struct tc_ns_dev_file *dev = NULL; + (void)inode; + + ret = check_teecd_auth(); +#ifdef CONFIG_CADAEMON_AUTH + if (ret != 0) + ret = check_cadaemon_auth(); +#endif + if (ret != 0) { + tloge("teec auth failed, ret %d\n", ret); + return -EACCES; + } + + file->private_data = NULL; + ret = tc_ns_client_open(&dev, TEE_REQ_FROM_USER_MODE); + if (ret == 0) + file->private_data = dev; +#ifdef CONFIG_TEE_REBOOT + get_teecd_pid(); +#endif + return ret; +} + +static int tc_client_close(struct inode *inode, struct file *file) +{ + int ret = 0; + struct tc_ns_dev_file *dev = file->private_data; +#ifdef CONFIG_TEE_TUI + /* release tui resource */ + struct teec_tui_parameter tui_param = {0}; +#endif + (void)inode; + +#ifdef CONFIG_TEE_TUI + if (dev->dev_file_id == tui_attach_device()) { + ret = tui_send_event(TUI_POLL_CANCEL, &tui_param); + /* tee tui service is dead, but we need release the buffer in ree */ + if (ret == TEEC_ERROR_TUI_NOT_AVAILABLE) + do_ns_tui_release(); + } +#endif + + livepatch_down_read_sem(); + ret = tc_ns_client_close(dev); + livepatch_up_read_sem(); + file->private_data = NULL; + + return ret; +} + +static int tc_private_close(struct inode *inode, struct file *file) +{ + struct tc_ns_dev_file *dev = file->private_data; + (void)inode; + + /* for teecd close fd */ + if (is_system_agent(dev)) { + /* for 
teecd agent close fd */ + send_event_response_single(dev); + free_dev(dev); + } else { + /* for ca damon close fd */ + free_dev(dev); + } + file->private_data = NULL; + + return 0; +} + +struct tc_ns_dev_file *tc_find_dev_file(unsigned int dev_file_id) +{ + struct tc_ns_dev_file *dev_file = NULL; + + mutex_lock(&g_tc_ns_dev_list.dev_lock); + list_for_each_entry(dev_file, &g_tc_ns_dev_list.dev_file_list, head) { + if (dev_file->dev_file_id == dev_file_id) { + mutex_unlock(&g_tc_ns_dev_list.dev_lock); + return dev_file; + } + } + mutex_unlock(&g_tc_ns_dev_list.dev_lock); + return NULL; +} + +#ifdef CONFIG_COMPAT +long tc_compat_client_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + long ret; + + if (!file) + return -EINVAL; + + arg = (unsigned long)(uintptr_t)compat_ptr(arg); + ret = tc_client_ioctl(file, cmd, arg); + return ret; +} + +long tc_compat_private_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + long ret; + + if (!file) + return -EINVAL; + + arg = (unsigned long)(uintptr_t)compat_ptr(arg); + ret = tc_private_ioctl(file, cmd, arg); + return ret; +} +#endif + +static const struct file_operations g_tc_ns_client_fops = { + .owner = THIS_MODULE, + .open = tc_client_open, + .release = tc_client_close, + .unlocked_ioctl = tc_client_ioctl, + .mmap = tc_client_mmap, +#ifdef CONFIG_COMPAT + .compat_ioctl = tc_compat_client_ioctl, +#endif +}; + +static const struct file_operations g_teecd_fops = { + .owner = THIS_MODULE, + .open = tc_client_open, + .release = tc_private_close, + .unlocked_ioctl = tc_private_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = tc_compat_private_ioctl, +#endif +}; +#ifdef CONFIG_ACPI + +static int tzdriver_probe(struct platform_device *pdev) +{ + tlogd("tzdriver probe is running"); + g_acpi_irq = platform_get_irq(pdev, 0); + if (g_acpi_irq < 0) { + dev_err(&pdev->dev, "get irq fail; irq:%d\n", g_acpi_irq); + return g_acpi_irq; + } + + return 0; +} + +int get_acpi_tz_irq(void) +{ + return g_acpi_irq; 
+} + +static const struct acpi_device_id g_tzdriver_acpi_match[] = { + { "HISI03C1", 0 }, + {}, +}; + +MODULE_DEVICE_TABLE(acpi, g_tzdriver_acpi_match); + +#else + +static int tzdriver_probe(struct platform_device *pdev) +{ + (void)pdev; + return 0; +} + +struct of_device_id g_tzdriver_platform_match[] = { + { .compatible = "trusted_core" }, + {}, +}; + +MODULE_DEVICE_TABLE(of, g_tzdriver_platform_match); + +#endif + +const struct dev_pm_ops g_tzdriver_pm_ops = { + .freeze_noirq = tc_s4_pm_suspend, + .restore_noirq = tc_s4_pm_resume, +}; + +static struct platform_driver g_tz_platform_driver = { + .driver = { + .name = "trusted_core", + .owner = THIS_MODULE, +#ifdef CONFIG_ACPI + .acpi_match_table = ACPI_PTR(g_tzdriver_acpi_match), +#else + .of_match_table = of_match_ptr(g_tzdriver_platform_match), +#endif + .pm = &g_tzdriver_pm_ops, + }, + .probe = tzdriver_probe, +}; + +static int load_hw_info(void) +{ + if (platform_driver_register(&g_tz_platform_driver) != 0) { + tloge("platform register driver failed\n"); + return -EFAULT; + } + + /* load hardware info from dts and acpi */ + g_dev_node = of_find_compatible_node(NULL, NULL, "trusted_core"); + if (!g_dev_node) { + tloge("no trusted_core compatible node found\n"); +#ifndef CONFIG_ACPI + platform_driver_unregister(&g_tz_platform_driver); + return -ENODEV; +#endif + } + + return 0; +} + +static int create_dev_node(struct dev_node *node) +{ + int ret; + if (!node || !(node->node_name)) { + tloge("node or member is null\n"); + return -EFAULT; + } + if (alloc_chrdev_region(&(node->devt), 0, 1, + node->node_name) != 0) { + tloge("alloc chrdev region failed"); + ret = -EFAULT; + return ret; + } + node->class_dev = device_create(node->driver_class, NULL, node->devt, + NULL, node->node_name); + if (IS_ERR_OR_NULL(node->class_dev)) { + tloge("class device create failed"); + ret = -ENOMEM; + goto chrdev_region_unregister; + } + node->class_dev->of_node = g_dev_node; + + cdev_init(&(node->char_dev), node->fops); + 
(node->char_dev).owner = THIS_MODULE; + + return 0; + +chrdev_region_unregister: + unregister_chrdev_region(node->devt, 1); + return ret; +} + +static int init_dev_node(struct dev_node *node, char *node_name, + struct class *driver_class, const struct file_operations *fops) +{ + int ret = -1; + if (!node) { + tloge("node is NULL\n"); + return ret; + } + node->node_name = node_name; + node->driver_class = driver_class; + node->fops = fops; + + ret = create_dev_node(node); + return ret; +} + +static void destory_dev_node(struct dev_node *node, struct class *driver_class) +{ + device_destroy(driver_class, node->devt); + unregister_chrdev_region(node->devt, 1); + return; +} + +static int enable_dev_nodes(void) +{ + int ret; + + ret = cdev_add(&(g_tc_private.char_dev), + MKDEV(MAJOR(g_tc_private.devt), 0), 1); + if (ret < 0) { + tloge("cdev add failed %d", ret); + return ret; + } + + ret = cdev_add(&(g_tc_client.char_dev), + MKDEV(MAJOR(g_tc_client.devt), 0), 1); + if (ret < 0) { + tloge("cdev add failed %d", ret); + cdev_del(&(g_tc_private.char_dev)); + return ret; + } + + return 0; +} + +static int tc_ns_client_init(void) +{ + int ret; + ret = load_hw_info(); + if (ret != 0) + return ret; + + ret = load_reserved_mem(); + if (ret != 0) + return ret; + + ret = load_tz_shared_mem(g_dev_node); + if (ret != 0) + goto unmap_res_mem; + g_driver_class = class_create(THIS_MODULE, TC_NS_CLIENT_DEV); + if (IS_ERR_OR_NULL(g_driver_class)) { + tloge("class create failed"); + ret = -ENOMEM; + goto unmap_res_mem; + } + + ret = init_dev_node(&g_tc_client, TC_NS_CLIENT_DEV, g_driver_class, &g_tc_ns_client_fops); + if (ret != 0) { + class_destroy(g_driver_class); + goto unmap_res_mem; + } + ret = init_dev_node(&g_tc_private, TC_PRIV_DEV, g_driver_class, &g_teecd_fops); + if (ret != 0) { + destory_dev_node(&g_tc_client, g_driver_class); + class_destroy(g_driver_class); + goto unmap_res_mem; + } + + INIT_LIST_HEAD(&g_tc_ns_dev_list.dev_file_list); + 
mutex_init(&g_tc_ns_dev_list.dev_lock); + init_crypto_hash_lock(); + init_srvc_list(); + return ret; +unmap_res_mem: + unmap_res_mem(); + return ret; +} + +static int tc_teeos_init(struct device *class_dev) +{ + int ret; + + ret = smc_context_init(class_dev); + if (ret != 0) { + tloge("smc context init failed\n"); + return ret; + } + + ret = tee_init_reboot_thread(); + if (ret != 0) { + tloge("init reboot thread failed\n"); + goto smc_data_free; + } + + ret = reserved_mempool_init(); + if (ret != 0) { + tloge("reserved memory init failed\n"); + goto reboot_thread_free; + } + + ret = mailbox_mempool_init(); + if (ret != 0) { + tloge("tz mailbox init failed\n"); + goto release_resmem; + } + + ret = tz_spi_init(class_dev, g_dev_node); + if (ret != 0) { + tloge("tz spi init failed\n"); + goto release_mempool; + } + + return 0; +release_mempool: + free_mailbox_mempool(); +release_resmem: + free_reserved_mempool(); +reboot_thread_free: + free_reboot_thread(); +smc_data_free: + free_smc_data(); + return ret; +} + +static void tc_re_init(const struct device *class_dev) +{ + int ret; + + agent_init(); + ret = tc_ns_register_ion_mem(); + if (ret != 0) + tloge("Failed to register ion mem in tee\n"); + +#ifdef CONFIG_TZDRIVER_MODULE + ret = init_tlogger_service(); + if (ret != 0) + tloge("tlogger init failed\n"); +#endif + if (tzdebug_init() != 0) + tloge("tzdebug init failed\n"); + + ret = init_tui(class_dev); + if (ret != 0) + tloge("init_tui failed 0x%x\n", ret); + +#ifndef CONFIG_DISABLE_SVC + if (init_smc_svc_thread() != 0) { + tloge("init svc thread\n"); + ret = -EFAULT; + } +#endif + + if (init_dynamic_mem() != 0) { + tloge("init dynamic mem Failed\n"); + ret = -EFAULT; + } + +#ifdef CONFIG_LIVEPATCH_ENABLE + /* + * access this sys node only after this function is initialized + */ + if (livepatch_init(class_dev)) { + tloge("livepatch init failed\n"); + ret = -EFAULT; + } +#endif + + if (ret != 0) + tloge("Caution! 
Running environment init failed!\n"); +} + +static __init int tc_init(void) +{ + int ret = 0; + + init_kthread_cpumask(); + ret = tc_ns_client_init(); + if (ret != 0) + return ret; + +#ifdef CONFIG_FFA_SUPPORT + ffa_abi_register(); +#endif + + ret = tc_teeos_init(g_tc_client.class_dev); + if (ret != 0) { + tloge("tc teeos init failed\n"); + goto class_device_destroy; + } + /* run-time environment init failure don't block tzdriver init proc */ + tc_re_init(g_tc_client.class_dev); + + /* + * Note: the enable_dev_nodes function must be called + * at the end of tc_init + */ + ret = enable_dev_nodes(); + if (ret != 0) { + tloge("enable dev nodes failed\n"); + goto class_device_destroy; + } + + ret = alloc_dev_bitmap(); + if (ret != 0) { + tloge("alloc dev file id bitmap failed\n"); + goto class_device_destroy; + } + + set_tz_init_flag(); + return 0; + +class_device_destroy: + free_dev_bitmap(); + destory_dev_node(&g_tc_client, g_driver_class); + destory_dev_node(&g_tc_private, g_driver_class); + class_destroy(g_driver_class); + platform_driver_unregister(&g_tz_platform_driver); + return ret; +} + +static void free_dev_list(void) +{ + struct tc_ns_dev_file *dev_file = NULL, *temp = NULL; + + mutex_lock(&g_tc_ns_dev_list.dev_lock); + list_for_each_entry_safe(dev_file, temp, &g_tc_ns_dev_list.dev_file_list, head) { + list_del(&dev_file->head); + kfree(dev_file); + } + mutex_unlock(&g_tc_ns_dev_list.dev_lock); +} + +static void tc_exit(void) +{ + tlogi("tz client exit"); + clear_tz_init_flag(); + /* + * You should first execute "cdev_del" to + * prevent access to the device node when uninstalling "tzdriver". 
+ */ + cdev_del(&(g_tc_private.char_dev)); + cdev_del(&(g_tc_client.char_dev)); + free_agent(); + free_reboot_thread(); + free_tui(); + free_tz_spi(g_tc_client.class_dev); + /* run-time environment exit should before teeos exit */ + + destory_dev_node(&g_tc_client, g_driver_class); + destory_dev_node(&g_tc_private, g_driver_class); + free_dev_bitmap(); + platform_driver_unregister(&g_tz_platform_driver); + class_destroy(g_driver_class); + free_smc_data(); + free_event_mem(); +#ifdef CONFIG_TZDRIVER_MODULE + free_tzdebug(); + free_tlogger_service(); +#endif + free_interrupt_trace(); + free_mailbox_mempool(); + free_reserved_mempool(); + free_shash_handle(); + fault_monitor_end(); + free_livepatch(); + free_all_session(); + free_dev_list(); +#ifdef CONFIG_FFA_SUPPORT + ffa_abi_unregister(); +#endif + tlogi("tz client exit finished"); +} + +MODULE_AUTHOR("iTrustee"); +MODULE_DESCRIPTION("TrustCore ns-client driver"); +MODULE_VERSION("1.10"); + +#ifdef CONFIG_TZDRIVER_MODULE +module_init(tc_init); +#else +fs_initcall_sync(tc_init); +#endif +module_exit(tc_exit); +MODULE_LICENSE("GPL"); diff --git a/tzdriver/core/tc_client_driver.h b/tzdriver/core/tc_client_driver.h new file mode 100644 index 0000000000000000000000000000000000000000..c5a53ff2cdce1f5927e3ad4f3ad07f5f04c1611d --- /dev/null +++ b/tzdriver/core/tc_client_driver.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: function declaration for proc open,close session and invoke. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef TC_CLIENT_DRIVER_H +#define TC_CLIENT_DRIVER_H + +#include +#include +#include "teek_ns_client.h" + +struct dev_node { + struct class *driver_class; + struct cdev char_dev; + dev_t devt; + struct device *class_dev; + const struct file_operations *fops; + char *node_name; +}; + +bool get_tz_init_flag(void); +struct tc_ns_dev_list *get_dev_list(void); +struct tc_ns_dev_file *tc_find_dev_file(unsigned int dev_file_id); +int tc_ns_client_open(struct tc_ns_dev_file **dev_file, uint8_t kernel_api); +int tc_ns_client_close(struct tc_ns_dev_file *dev); +int is_agent_alive(unsigned int agent_id); + +#ifdef CONFIG_ACPI +int get_acpi_tz_irq(void); +#endif + +#endif diff --git a/tzdriver/core/tee_compat_check.c b/tzdriver/core/tee_compat_check.c new file mode 100644 index 0000000000000000000000000000000000000000..d59659188c2909869649845f983566e7637044db --- /dev/null +++ b/tzdriver/core/tee_compat_check.c @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: check compatibility between tzdriver and tee. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
 + */
+
+#include "tee_compat_check.h"
+#include 
+#include 
+#include "teek_ns_client.h"
+#include "tc_ns_log.h"
+
+int32_t check_teeos_compat_level(const uint32_t *buffer, uint32_t size)
+{
+ const uint16_t major = TEEOS_COMPAT_LEVEL_MAJOR;
+ const uint16_t minor = TEEOS_COMPAT_LEVEL_MINOR;
+
+ if (!buffer || size != COMPAT_LEVEL_BUF_LEN) {
+ tloge("check teeos compat level failed, invalid param\n");
+ return -EINVAL;
+ }
+
+ if (buffer[0] != VER_CHECK_MAGIC_NUM) {
+ tloge("check ver magic num %u failed\n", buffer[0]);
+ return -EPERM;
+ }
+ if (buffer[1] != major) {
+ tloge("check major ver failed, tzdriver expect teeos version=%u, actual teeos version=%u\n",
+ major, buffer[1]);
+ return -EPERM;
+ }
+ /* just print warning */
+ if (buffer[2] != minor)
+ tlogw("check minor ver failed, tzdriver expect teeos minor version=%u, actual minor teeos version=%u\n",
+ minor, buffer[2]);
+
+ return 0;
+}
\ No newline at end of file
diff --git a/tzdriver/core/tee_compat_check.h b/tzdriver/core/tee_compat_check.h
new file mode 100644
index 0000000000000000000000000000000000000000..3eb4ebb464043e6bfccae855b722c04a4a82d96a
--- /dev/null
+++ b/tzdriver/core/tee_compat_check.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: check compatibility between tzdriver and teeos.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details. 
 + */
+#ifndef TEE_COMPAT_CHECK_H
+#define TEE_COMPAT_CHECK_H
+
+#include 
+
+/*
+ * this version number MAJOR.MINOR is used
+ * to identify the compatibility of tzdriver and teeos
+ */
+#define TEEOS_COMPAT_LEVEL_MAJOR 2
+#define TEEOS_COMPAT_LEVEL_MINOR 0
+
+#define VER_CHECK_MAGIC_NUM 0x5A5A5A5A
+#define COMPAT_LEVEL_BUF_LEN 12
+
+int32_t check_teeos_compat_level(const uint32_t *buffer, uint32_t size);
+#endif
diff --git a/tzdriver/core/teek_app_load.c b/tzdriver/core/teek_app_load.c
new file mode 100644
index 0000000000000000000000000000000000000000..7f3bace5eeda72aa878fc48dc9370684ab53ef32
--- /dev/null
+++ b/tzdriver/core/teek_app_load.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2022 Huawei Technologies Co., Ltd.
+ * Description: function declaration for load app operations for kernel CA.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details. 
+ */ +#include "teek_app_load.h" +#include +#include +#include +#include "session_manager.h" +#include "ko_adapt.h" + +static int32_t teek_open_app_file(struct file *fp, char **file_buf, uint32_t total_img_len) +{ + loff_t pos = 0; + uint32_t read_size; + char *file_buffer = NULL; + + if (total_img_len == 0 || total_img_len > MAX_IMAGE_LEN) { + tloge("img len is invalied %u\n", total_img_len); + return TEEC_ERROR_BAD_PARAMETERS; + } + + file_buffer = vmalloc(total_img_len); + if (!file_buffer) { + tloge("alloc TA file buffer(size=%u) failed\n", total_img_len); + return TEEC_ERROR_GENERIC; + } + + read_size = (uint32_t)kernel_read(fp, file_buffer, total_img_len, &pos); + if (read_size != total_img_len) { + tloge("read ta file failed, read size/total size=%u/%u\n", read_size, total_img_len); + vfree(file_buffer); + return TEEC_ERROR_GENERIC; + } + + *file_buf = file_buffer; + + return TEEC_SUCCESS; +} + +static int32_t teek_read_app(const char *load_file, char **file_buf, uint32_t *file_len) +{ + int32_t ret; + struct file *fp = NULL; + + fp = filp_open(load_file, O_RDONLY, 0); + if (!fp || IS_ERR(fp)) { + tloge("open file error %ld\n", PTR_ERR(fp)); + return TEEC_ERROR_BAD_PARAMETERS; + } + + if (!fp->f_inode) { + tloge("node is NULL\n"); + filp_close(fp, 0); + return TEEC_ERROR_BAD_PARAMETERS; + } + + *file_len = (uint32_t)(fp->f_inode->i_size); + + ret = teek_open_app_file(fp, file_buf, *file_len); + if (ret != TEEC_SUCCESS) + tloge("do read app fail\n"); + + if (fp != NULL) { + filp_close(fp, 0); + fp = NULL; + } + + return ret; +} + +void teek_free_app(bool load_app_flag, char **file_buf) +{ + if (load_app_flag && file_buf != NULL && *file_buf != NULL) { + vfree(*file_buf); + *file_buf = NULL; + } +} + +int32_t teek_get_app(const char *ta_path, char **file_buf, uint32_t *file_len) +{ + int32_t ret; + + /* ta path is NULL means no need to load TA */ + if (!ta_path) + return TEEC_SUCCESS; + + if (!file_buf || !file_len) { + tloge("load app params invalied\n"); + 
return TEEC_ERROR_BAD_PARAMETERS; + } + + ret = teek_read_app(ta_path, file_buf, file_len); + if (ret != TEEC_SUCCESS) + tloge("teec load app error %d\n", ret); + + return ret; +} diff --git a/tzdriver/core/teek_app_load.h b/tzdriver/core/teek_app_load.h new file mode 100644 index 0000000000000000000000000000000000000000..deabb4a1014114af56118ffea1b01cff1bef6fe8 --- /dev/null +++ b/tzdriver/core/teek_app_load.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: function declaration for load app operations for kernel CA. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef TEEK_APP_LOAD_H +#define TEEK_APP_LOAD_H + +#include "teek_client_api.h" +#include "tc_ns_client.h" + +#define MAX_IMAGE_LEN 0x800000 /* max image len */ + +int32_t teek_get_app(const char *ta_path, char **file_buf, uint32_t *file_len); +void teek_free_app(bool load_app_flag, char **file_buf); + +#endif diff --git a/tzdriver/core/teek_client_api.c b/tzdriver/core/teek_client_api.c new file mode 100644 index 0000000000000000000000000000000000000000..45e82bfc746549d0855e3b37738494448dd3ecec --- /dev/null +++ b/tzdriver/core/teek_client_api.c @@ -0,0 +1,800 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: function definition for libteec interface for kernel CA. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "teek_client_api.h"
+/* NOTE(review): the <linux/...> include targets below were lost in extraction;
+ * restore them from the original file. */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "tc_ns_log.h"
+#include "tc_ns_client.h"
+#include "gp_ops.h"
+#include "internal_functions.h"
+#include "session_manager.h"
+#include "tc_client_driver.h"
+#include "teek_app_load.h"
+#include "dynamic_ion_mem.h"
+
+/*
+ * Encode one WHOLE/PARTIAL memref operation parameter into the driver's
+ * client-context layout: split buffer/size addresses into low/high 32-bit
+ * halves and translate WHOLE into the matching PARTIAL type (or, for
+ * non-allocated parents, into the matching TEMP type).
+ */
+static void encode_for_part_mem(struct tc_ns_client_context *context,
+	const struct teec_operation *oper, uint32_t idex, uint32_t *param_type)
+{
+	/* distance between the PARTIAL_* and TEMP_* type constants */
+	uint32_t diff = (uint32_t)TEEC_MEMREF_PARTIAL_INPUT -
+		(uint32_t)TEEC_MEMREF_TEMP_INPUT;
+	uint64_t size_addr, buffer_addr;
+
+	if (idex >= TEE_PARAM_NUM)
+		return;
+
+	if (param_type[idex] == TEEC_MEMREF_WHOLE) {
+		/* WHOLE maps the entire parent: offset 0, size taken from the parent */
+		context->params[idex].memref.offset = 0;
+		size_addr = (__u64)(uintptr_t)(&(oper->params[idex].memref.parent->size));
+	} else {
+		context->params[idex].memref.offset = oper->params[idex].memref.offset;
+		size_addr = (__u64)(uintptr_t)(&(oper->params[idex].memref.size));
+	}
+	context->params[idex].memref.size_addr = (__u32)size_addr;
+	context->params[idex].memref.size_h_addr = (__u32)(size_addr >> ADDR_TRANS_NUM);
+
+	if (oper->params[idex].memref.parent->is_allocated) {
+		buffer_addr = (__u64)(uintptr_t)oper->params[idex].memref.parent->buffer;
+	} else {
+		/* registered (not allocated) memory: fold the offset into the address */
+		buffer_addr = (__u64)(uintptr_t)
+			oper->params[idex].memref.parent->buffer +
+			oper->params[idex].memref.offset;
+		context->params[idex].memref.offset = 0;
+	}
+	context->params[idex].memref.buffer = (__u32)buffer_addr;
+	context->params[idex].memref.buffer_h_addr = (__u32)(buffer_addr >> ADDR_TRANS_NUM);
+
+	/* translate the paramType to know the driver */
+	if (param_type[idex] == TEEC_MEMREF_WHOLE) {
+		switch (oper->params[idex].memref.parent->flags) {
+		case TEEC_MEM_INPUT:
+			param_type[idex] = TEEC_MEMREF_PARTIAL_INPUT;
+			break;
+		case TEEC_MEM_OUTPUT:
+			param_type[idex] = TEEC_MEMREF_PARTIAL_OUTPUT;
+			break;
+		case TEEC_MEM_INOUT:
+			param_type[idex] = TEEC_MEMREF_PARTIAL_INOUT;
+			break;
+		default:
+			param_type[idex] = TEEC_MEMREF_PARTIAL_INOUT;
+			break;
+		}
+	}
+
+	/* if not allocated, trans PARTIAL_XXX to MEMREF_TEMP_XXX */
+	if (!oper->params[idex].memref.parent->is_allocated)
+		param_type[idex] = param_type[idex] - diff;
+}
+
+/*
+ * Encode all four operation parameters (temp/registered memrefs, values,
+ * ION refs) into the driver client context. Returns TEEC_SUCCESS or
+ * TEEC_ERROR_BAD_PARAMETERS on an unsupported parameter type.
+ */
+static uint32_t proc_teek_encode(struct tc_ns_client_context *cli_context,
+	const struct teec_operation *operation)
+{
+	uint32_t param_type[TEE_PARAM_NUM];
+	uint32_t idex;
+	uint64_t buffer_addr, size_addr, a_addr, b_addr;
+
+	for (idex = 0; idex < TEE_PARAM_NUM; idex++)
+		param_type[idex] = teec_param_type_get(operation->paramtypes, idex);
+
+	for (idex = 0; idex < TEE_PARAM_NUM; idex++) {
+		if (is_tmp_mem(param_type[idex])) {
+			buffer_addr = (__u64)(uintptr_t)(operation->params[idex].tmpref.buffer);
+			size_addr = (__u64)(uintptr_t)(&operation->params[idex].tmpref.size);
+			cli_context->params[idex].memref.buffer = (__u32)buffer_addr;
+			cli_context->params[idex].memref.buffer_h_addr = (__u32)(buffer_addr >> ADDR_TRANS_NUM);
+			cli_context->params[idex].memref.size_addr = (__u32)size_addr;
+			cli_context->params[idex].memref.size_h_addr = (__u32)(size_addr >> ADDR_TRANS_NUM);
+		} else if (is_ref_mem(param_type[idex])) {
+			encode_for_part_mem(cli_context, operation,
+				idex, param_type);
+		} else if (is_val_param(param_type[idex])) {
+			a_addr = (__u64)(uintptr_t)(&(operation->params[idex].value.a));
+			b_addr = (__u64)(uintptr_t)(&(operation->params[idex].value.b));
+			cli_context->params[idex].value.a_addr = (__u32)a_addr;
+			cli_context->params[idex].value.a_h_addr = (__u32)(a_addr >> ADDR_TRANS_NUM);
+			cli_context->params[idex].value.b_addr = (__u32)b_addr;
+			cli_context->params[idex].value.b_h_addr = (__u32)(b_addr >> ADDR_TRANS_NUM);
+		} else if (is_ion_param(param_type[idex])) {
+			/* ION refs reuse the value slots: a = share fd, b = size */
+			a_addr = (__u64)(uintptr_t)(&(operation->params[idex].ionref.ion_share_fd));
+			b_addr = (__u64)(uintptr_t)(&(operation->params[idex].ionref.ion_size));
+			cli_context->params[idex].value.a_addr = (__u32)a_addr;
+			cli_context->params[idex].value.a_h_addr = (__u32)(a_addr >> ADDR_TRANS_NUM);
+			cli_context->params[idex].value.b_addr = (__u32)b_addr;
+			cli_context->params[idex].value.b_h_addr = (__u32)(b_addr >> ADDR_TRANS_NUM);
+		} else if (param_type[idex] == TEEC_NONE) {
+			/* do nothing */
+		} else {
+			tloge("param_type[%u]=%u not correct\n", idex,
+				param_type[idex]);
+			return TEEC_ERROR_BAD_PARAMETERS;
+		}
+	}
+	cli_context->param_types = teec_param_types(param_type[0],
+		param_type[1], param_type[2], param_type[3]);
+
+	tlogv("cli param type %u\n", cli_context->param_types);
+	return TEEC_SUCCESS;
+}
+
+/*
+ * Zero a client context and fill in the session identity (uuid, session id,
+ * command id) and login data. Returns TEEC_SUCCESS on success.
+ * (An unused local "diff" computation was removed - dead code.)
+ */
+static uint32_t teek_init_context(struct tc_ns_client_context *cli_context,
+	struct teec_uuid service_id, uint32_t session_id, uint32_t cmd_id,
+	const struct tc_ns_client_login *cli_login)
+{
+	if (memset_s(cli_context, sizeof(*cli_context),
+		0x00, sizeof(*cli_context)) != 0) {
+		tloge("memset error, init cli context failed\n");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+
+	cli_context->returns.origin = TEEC_ORIGIN_COMMS;
+
+	if (memcpy_s(cli_context->uuid, sizeof(cli_context->uuid),
+		(uint8_t *)&service_id, sizeof(service_id)) != 0) {
+		tloge("memcpy error, init cli context failed\n");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+	cli_context->session_id = session_id;
+	cli_context->cmd_id = cmd_id;
+	cli_context->returns.code = 0;
+	cli_context->login.method = cli_login->method;
+	cli_context->login.mdata = cli_login->mdata;
+
+	return TEEC_SUCCESS;
+}
+
+static
uint32_t teek_check_tmp_mem( + const struct teec_tempmemory_reference *tmpref) +{ + if (!tmpref->buffer || (tmpref->size == 0)) { + tloge("tmpref buffer is null, or size is zero\n"); + return TEEC_ERROR_BAD_PARAMETERS; + } + + return TEEC_SUCCESS; +} + +static bool is_partical_mem(uint32_t param_type) +{ + if (param_type == TEEC_MEMREF_PARTIAL_INPUT || + param_type == TEEC_MEMREF_PARTIAL_OUTPUT || + param_type == TEEC_MEMREF_PARTIAL_INOUT) + return true; + + return false; +} + +static bool is_offset_invalid( + const struct teec_registeredmemory_reference *memref) +{ + if ((memref->offset + memref->size > memref->parent->size) || + (memref->offset + memref->size < memref->offset) || + (memref->offset + memref->size < memref->size)) + return true; + + return false; +} + +static uint32_t teek_check_ref_mem( + const struct teec_registeredmemory_reference *memref, + uint32_t param_type) +{ + if (!memref->parent || !memref->parent->buffer) { + tloge("parent of memref is null, or the buffer is zero\n"); + return TEEC_ERROR_BAD_PARAMETERS; + } + if (param_type == TEEC_MEMREF_PARTIAL_INPUT) { + if ((memref->parent->flags & TEEC_MEM_INPUT) == 0) + return TEEC_ERROR_BAD_PARAMETERS; + } else if (param_type == TEEC_MEMREF_PARTIAL_OUTPUT) { + if ((memref->parent->flags & TEEC_MEM_OUTPUT) == 0) + return TEEC_ERROR_BAD_PARAMETERS; + } else if (param_type == TEEC_MEMREF_PARTIAL_INOUT) { + if ((memref->parent->flags & TEEC_MEM_INPUT) == 0) + return TEEC_ERROR_BAD_PARAMETERS; + if ((memref->parent->flags & TEEC_MEM_OUTPUT) == 0) + return TEEC_ERROR_BAD_PARAMETERS; + } else if (param_type == TEEC_MEMREF_WHOLE) { + /* if type is TEEC_MEMREF_WHOLE, ignore it */ + } else { + return TEEC_ERROR_BAD_PARAMETERS; + } + + if (is_partical_mem(param_type)) { + if (is_offset_invalid(memref)) { + tloge("offset + size exceed the parent size\n"); + return TEEC_ERROR_BAD_PARAMETERS; + } + } + return TEEC_SUCCESS; +} + +/* + * This function checks a operation is valid or not. 
+ */
+uint32_t teek_check_operation(const struct teec_operation *operation)
+{
+	uint32_t types[TEE_PARAM_NUM] = {0};
+	uint32_t i;
+
+	/*
+	 * GP allows a NULL operation: it means there is no payload to send
+	 * or the command does not need to support cancellation.
+	 */
+	if (operation == NULL)
+		return TEEC_SUCCESS;
+
+	if (operation->started == 0) {
+		tloge("sorry, cancellation not support\n");
+		return TEEC_ERROR_NOT_IMPLEMENTED;
+	}
+
+	for (i = 0; i < TEE_PARAM_NUM; i++)
+		types[i] = teec_param_type_get(operation->paramtypes, i);
+
+	for (i = 0; i < TEE_PARAM_NUM; i++) {
+		if (is_tmp_mem(types[i])) {
+			uint32_t rc = teek_check_tmp_mem(
+				&(operation->params[i].tmpref));
+
+			if (rc != TEEC_SUCCESS)
+				return rc;
+		} else if (is_ref_mem(types[i])) {
+			uint32_t rc = teek_check_ref_mem(
+				&(operation->params[i].memref), types[i]);
+
+			if (rc != TEEC_SUCCESS)
+				return rc;
+		} else if (is_val_param(types[i])) {
+			/* value parameters need no extra validation */
+		} else if (is_ion_param(types[i])) {
+			if (operation->params[i].ionref.ion_share_fd < 0) {
+				tloge("ion_handle is invalid!\n");
+				return TEEC_ERROR_BAD_PARAMETERS;
+			}
+		} else if (types[i] == TEEC_NONE) {
+			/* empty parameter slot */
+		} else {
+			tloge("paramType[%u]=%x is not support\n", i,
+				types[i]);
+			return TEEC_ERROR_BAD_PARAMETERS;
+		}
+	}
+
+	return TEEC_SUCCESS;
+}
+
+/*
+ * This function checks whether the given agent is launched. Used for HDCP key:
+ * e.g. if the sfs agent is not alive, HDCP keys cannot be written to SRAM.
+ */
+int teek_is_agent_alive(unsigned int agent_id)
+{
+	return is_agent_alive(agent_id);
+}
+
+/*
+ * Initialize a new TEE context, forming the connection between this
+ * kernel client application and the TEE. The name identifier is accepted
+ * for API compatibility but currently ignored.
+ */
+uint32_t teek_initialize_context(const char *name,
+	struct teec_context *context)
+{
+	int32_t rc;
+
+	/* name current not used */
+	(void)(name);
+
+	if (!get_tz_init_flag())
+		return (uint32_t)TEEC_ERROR_BUSY;
+
+	tlogd("teek_initialize_context Started:\n");
+
+	/* validate the parameter before touching the device */
+	if (context == NULL) {
+		tloge("context is null, not correct\n");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+
+	context->dev = NULL;
+	context->ta_path = NULL;
+
+	rc = tc_ns_client_open((struct tc_ns_dev_file **)&context->dev,
+		TEE_REQ_FROM_KERNEL_MODE);
+	if (rc != TEEC_SUCCESS) {
+		tloge("open device failed\n");
+		return TEEC_ERROR_GENERIC;
+	}
+	tlogd("open device success\n");
+	return TEEC_SUCCESS;
+}
+EXPORT_SYMBOL(teek_initialize_context);
+
+/*
+ * This function finalizes an initialized TEE Context.
+ */
+void teek_finalize_context(struct teec_context *context)
+{
+	if (!get_tz_init_flag())
+		return;
+	tlogd("teek_finalize_context started\n");
+	if (!context || !context->dev) {
+		tloge("context or dev is null, not correct\n");
+		return;
+	}
+
+	tlogd("close device\n");
+	tc_ns_client_close(context->dev);
+	context->dev = NULL;
+}
+EXPORT_SYMBOL(teek_finalize_context);
+
+/* Params 2 and 3 must be TEMP_INPUT memrefs carrying non-empty buffers. */
+static bool is_oper_param_valid(const struct teec_operation *operation)
+{
+	uint32_t param_type[TEE_PARAM_NUM] = {0};
+
+	param_type[3] =
+		teec_param_type_get(operation->paramtypes, 3);
+	param_type[2] =
+		teec_param_type_get(operation->paramtypes, 2);
+
+	if (param_type[3] != TEEC_MEMREF_TEMP_INPUT ||
+		param_type[2] != TEEC_MEMREF_TEMP_INPUT) {
+		tloge("invalid param type 0x%x\n", operation->paramtypes);
+		return false;
+	}
+
+	if (!operation->params[3].tmpref.buffer ||
+		!operation->params[2].tmpref.buffer ||
+		operation->params[3].tmpref.size == 0 ||
+		operation->params[2].tmpref.size == 0) {
+		tloge("invalid operation params(NULL)\n");
+		return false;
+	}
+	return true;
+}
+
+/*
+ * Validate open-session inputs and record the caller's package name in
+ * dev_file. Fix vs original: pkg_name_len is now stored only after the
+ * length check and the copy succeed, so a failed call no longer leaves
+ * a stale length in the device file.
+ */
+static uint32_t check_open_sess_params(struct teec_context *context,
+	const struct teec_operation *operation, const struct teec_uuid *destination,
+	uint32_t connection_method)
+{
+	struct tc_ns_dev_file *dev_file = NULL;
+	uint32_t teec_ret;
+
+	if (!context || !operation || !destination ||
+		connection_method != TEEC_LOGIN_IDENTIFY) {
+		tloge("invalid input params\n");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+
+	if (!is_oper_param_valid(operation))
+		return TEEC_ERROR_BAD_PARAMETERS;
+
+	dev_file = (struct tc_ns_dev_file *)(context->dev);
+	if (!dev_file) {
+		tloge("invalid context->dev (NULL)\n");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+
+	/* reserve one byte for the terminating NUL */
+	if (operation->params[3].tmpref.size > MAX_PACKAGE_NAME_LEN - 1)
+		return TEEC_ERROR_BAD_PARAMETERS;
+
+	if (memset_s(dev_file->pkg_name, sizeof(dev_file->pkg_name),
+		0, MAX_PACKAGE_NAME_LEN) != 0) {
+		tloge("memset error\n");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+	if (memcpy_s(dev_file->pkg_name, sizeof(dev_file->pkg_name),
+		operation->params[3].tmpref.buffer,
+		operation->params[3].tmpref.size) != 0) {
+		tloge("memcpy error\n");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+	dev_file->pkg_name_len = operation->params[3].tmpref.size;
+
+	dev_file->pub_key_len = 0;
+	dev_file->login_setup = 1;
+	teec_ret = teek_check_operation(operation);
+	if (teec_ret != TEEC_SUCCESS) {
+		tloge("operation is invalid\n");
+		return TEEC_ERROR_BAD_PARAMETERS;
+	}
+
+	return teec_ret;
+}
+
+/*
+ * Issue the open-session call and translate kernel errnos into TEEC
+ * error codes; on success fill in the session bookkeeping fields.
+ */
+static uint32_t open_session_and_switch_ret(struct teec_session *session,
+	struct teec_context *context, const struct teec_uuid *destination,
+	struct tc_ns_client_context *cli_context, uint32_t *origin)
+{
+	int32_t ret;
+	uint32_t teec_ret;
+
+	ret = tc_ns_open_session(context->dev, cli_context);
+	if (ret == 0) {
+		tlogd("open session success\n");
+		session->session_id = cli_context->session_id;
+		session->service_id = *destination;
+		session->ops_cnt = 0;
+		session->context = context;
+		return TEEC_SUCCESS;
+	} else if (ret < 0) {
+		tloge("open session failed, ioctl errno = %d\n", ret);
+		if (ret == -EFAULT)
+			teec_ret = TEEC_ERROR_ACCESS_DENIED;
+		else if (ret == -ENOMEM)
+			teec_ret = TEEC_ERROR_OUT_OF_MEMORY;
+		else if (ret == -EINVAL)
+			teec_ret = TEEC_ERROR_BAD_PARAMETERS;
+		else if (ret == -ERESTARTSYS)
+			teec_ret = TEEC_CLIENT_INTR;
+		else
+			teec_ret = TEEC_ERROR_GENERIC;
+		*origin = TEEC_ORIGIN_COMMS;
+		return teec_ret;
+	} else {
+		/* positive return: the TEE itself rejected the session */
+		tloge("open session failed, code=0x%x, origin=%u\n",
+			cli_context->returns.code,
+			cli_context->returns.origin);
+		teec_ret = (uint32_t)cli_context->returns.code;
+		*origin = cli_context->returns.origin;
+	}
+	return teec_ret;
+}
+
+static uint32_t proc_teek_open_session(struct teec_context *context,
+	struct teec_session *session, const struct teec_uuid *destination,
+	uint32_t connection_method, const void *connection_data,
+	const struct teec_operation *operation, uint32_t *return_origin)
+{
+	uint32_t
teec_ret;
+	uint32_t origin = TEEC_ORIGIN_API;
+	struct tc_ns_client_context cli_context;
+	struct tc_ns_client_login cli_login = {0};
+	bool load_app_flag = false;
+	char *file_buffer = NULL;
+
+	/* connectionData current not used */
+	(void)(connection_data);
+	if (return_origin)
+		*return_origin = origin;
+
+	/* First, check parameters is valid or not */
+	if (!session) {
+		tloge("invalid session\n");
+		teec_ret = TEEC_ERROR_BAD_PARAMETERS;
+		goto set_ori;
+	}
+
+	/*
+	 * ca may call closesession even if opensession failed,
+	 * we set session->context here to avoid receive a illegal ptr,
+	 * same as libteec_vendor
+	 */
+	session->context = context;
+
+	cli_login.method = TEEC_LOGIN_IDENTIFY;
+	teec_ret = check_open_sess_params(context, operation, destination, connection_method);
+	if (teec_ret != TEEC_SUCCESS)
+		goto set_ori;
+
+	teec_ret = teek_init_context(&cli_context, *destination, 0,
+		GLOBAL_CMD_ID_OPEN_SESSION, &cli_login);
+	if (teec_ret != TEEC_SUCCESS)
+		goto set_ori;
+
+	/* support when operation is null */
+	if (operation) {
+		cli_context.started = operation->cancel_flag;
+		teec_ret = proc_teek_encode(&cli_context, operation);
+		if (teec_ret != TEEC_SUCCESS)
+			goto set_ori;
+	}
+
+	/* load the TA image (if context->ta_path is set) into a vmalloc buffer */
+	teec_ret = (uint32_t)teek_get_app(context->ta_path, &file_buffer,
+		&cli_context.file_size);
+	if (teec_ret != TEEC_SUCCESS)
+		goto set_ori;
+	/* pass the image buffer address to the driver as low/high 32-bit halves */
+	cli_context.memref.file_addr = (uint32_t)(uintptr_t)file_buffer;
+	cli_context.memref.file_h_addr = (uint32_t)(((uint64_t)(uintptr_t)file_buffer) >> ADDR_TRANS_NUM);
+	load_app_flag = true;
+
+	/* hold the livepatch read lock across the TEE transition */
+	livepatch_down_read_sem();
+	teec_ret = open_session_and_switch_ret(session, context,
+		destination, &cli_context, &origin);
+	livepatch_up_read_sem();
+
+set_ori:
+	if (teec_ret != TEEC_SUCCESS && return_origin != NULL)
+		*return_origin = origin;
+
+	/* always release the TA image buffer (no-op if none was loaded) */
+	teek_free_app(load_app_flag, &file_buffer);
+	return teec_ret;
+}
+
+#define RETRY_TIMES 5
+/* Open a session, retrying up to RETRY_TIMES times if interrupted (TEEC_CLIENT_INTR). */
+uint32_t teek_open_session(struct teec_context *context,
+	struct teec_session *session, const struct teec_uuid *destination,
+	uint32_t connection_method, const void *connection_data,
+	const struct teec_operation *operation, uint32_t *return_origin)
+{
+	int i;
+	uint32_t ret;
+	if (!get_tz_init_flag()) return (uint32_t)TEEC_ERROR_BUSY;
+	for (i = 0; i < RETRY_TIMES; i++) {
+		ret = proc_teek_open_session(context, session,
+			destination, connection_method, connection_data,
+			operation, return_origin);
+		if (ret != (uint32_t)TEEC_CLIENT_INTR)
+			return ret;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(teek_open_session);
+
+/*
+ * This function closes an opened Session.
+ */
+
+static bool is_close_sess_param_valid(const struct teec_session *session)
+{
+	tlogd("teek_close_session started\n");
+
+	if (!session || !session->context || !session->context->dev) {
+		tloge("input invalid param\n");
+		return false;
+	}
+
+	return true;
+}
+
+void teek_close_session(struct teec_session *session)
+{
+	int32_t ret;
+	struct tc_ns_client_context cli_context;
+	struct tc_ns_client_login cli_login = {0};
+
+	if (!get_tz_init_flag()) return;
+	if (!is_close_sess_param_valid(session))
+		return;
+
+	if (teek_init_context(&cli_context, session->service_id,
+		session->session_id, GLOBAL_CMD_ID_CLOSE_SESSION,
+		&cli_login) != TEEC_SUCCESS) {
+		tloge("init cli context failed just return\n");
+		return;
+	}
+	livepatch_down_read_sem();
+	ret = tc_ns_close_session(session->context->dev, &cli_context);
+	livepatch_up_read_sem();
+	if (ret == 0) {
+		/* wipe the session bookkeeping only after a successful close */
+		session->session_id = 0;
+		if (memset_s((uint8_t *)(&session->service_id),
+			sizeof(session->service_id), 0x00, UUID_LEN) != 0)
+			tloge("memset error\n");
+		session->ops_cnt = 0;
+		session->context = NULL;
+	} else {
+		tloge("close session failed\n");
+	}
+}
+EXPORT_SYMBOL(teek_close_session);
+
+/* Send an invoke command to the driver and map the result to a TEEC code. */
+static uint32_t proc_invoke_cmd(struct teec_session *session,
+	struct tc_ns_client_context *cli_context, uint32_t *origin)
+{
+	int32_t ret;
+	uint32_t teec_ret;
+
+	livepatch_down_read_sem();
+	ret = tc_ns_send_cmd(session->context->dev, cli_context);
livepatch_up_read_sem(); + + if (ret == 0) { + tlogd("invoke cmd success\n"); + teec_ret = TEEC_SUCCESS; + } else if (ret < 0) { + tloge("invoke cmd failed, ioctl errno = %d\n", ret); + if (ret == -EFAULT) + teec_ret = TEEC_ERROR_ACCESS_DENIED; + else if (ret == -ENOMEM) + teec_ret = TEEC_ERROR_OUT_OF_MEMORY; + else if (ret == -EINVAL) + teec_ret = TEEC_ERROR_BAD_PARAMETERS; + else + teec_ret = TEEC_ERROR_GENERIC; + *origin = TEEC_ORIGIN_COMMS; + } else { + tloge("invoke cmd failed, code=0x%x, origin=%d\n", + cli_context->returns.code, + cli_context->returns.origin); + teec_ret = (uint32_t)cli_context->returns.code; + *origin = cli_context->returns.origin; + } + return teec_ret; +} + +/* This function invokes a Command within the specified Session. */ +uint32_t teek_invoke_command(struct teec_session *session, uint32_t cmd_id, + struct teec_operation *operation, uint32_t *return_origin) +{ + uint32_t teec_ret = TEEC_ERROR_BAD_PARAMETERS; + uint32_t origin = TEEC_ORIGIN_API; + struct tc_ns_client_context cli_context; + struct tc_ns_client_login cli_login = { 0, 0 }; + + if (!get_tz_init_flag()) return (uint32_t)TEEC_ERROR_BUSY; + /* First, check parameters is valid or not */ + if (!session || !session->context) { + tloge("input invalid session or session->context is null\n"); + goto set_ori; + } + + teec_ret = teek_check_operation(operation); + if (teec_ret != 0) { + tloge("operation is invalid\n"); + goto set_ori; + } + + /* Paramters all right, start execution */ + teec_ret = teek_init_context(&cli_context, session->service_id, + session->session_id, cmd_id, &cli_login); + if (teec_ret != 0) { + tloge("init cli context failed\n"); + goto set_ori; + } + + /* support when operation is null */ + if (operation) { + cli_context.started = operation->cancel_flag; + teec_ret = proc_teek_encode(&cli_context, operation); + if (teec_ret != 0) { + goto set_ori; + } + } + + teec_ret = proc_invoke_cmd(session, &cli_context, &origin); + +set_ori: + if ((teec_ret != 0) && 
return_origin) + *return_origin = origin; + return teec_ret; +} +EXPORT_SYMBOL(teek_invoke_command); + +uint32_t teek_send_secfile(struct teec_session *session, + const char *file_buffer, unsigned int file_size) +{ + uint32_t ret; + + if (!get_tz_init_flag()) return (uint32_t)TEEC_ERROR_BUSY; + if (!file_buffer || (file_size == 0) || !session || + !session->context || !session->context->dev) { + tloge("params error!\n"); + return TEEC_ERROR_BAD_PARAMETERS; + } + livepatch_down_read_sem(); + ret = (uint32_t)tc_ns_load_image_with_lock(session->context->dev, + file_buffer, file_size, LOAD_TA); + livepatch_up_read_sem(); + return ret; +} +EXPORT_SYMBOL(teek_send_secfile); + +TEEC_Result TEEK_SendSecfile(TEEC_Session *session, + const char *file_buffer, unsigned int file_size) +{ + return (TEEC_Result)teek_send_secfile((struct teec_session *)session, + file_buffer, file_size); +} +EXPORT_SYMBOL(TEEK_SendSecfile); + +/* + * This function registers a block of existing Client Application memory + * as a block of Shared Memory within the scope of the specified TEE Context. 
+ */ +uint32_t teek_register_shared_memory(struct teec_context *context, + struct teec_sharedmemory *sharedmem) +{ + (void)context; + (void)sharedmem; + tloge("teek_register_shared_memory not supported\n"); + return TEEC_ERROR_NOT_SUPPORTED; +} + +/* begin: for KERNEL-HAL out interface */ +int TEEK_IsAgentAlive(unsigned int agent_id) +{ + return teek_is_agent_alive(agent_id); +} +EXPORT_SYMBOL(TEEK_IsAgentAlive); + +TEEC_Result TEEK_InitializeContext(const char *name, TEEC_Context *context) +{ + return (TEEC_Result)teek_initialize_context(name, + (struct teec_context *)context); +} +EXPORT_SYMBOL(TEEK_InitializeContext); + +void TEEK_FinalizeContext(TEEC_Context *context) +{ + teek_finalize_context((struct teec_context *)context); +} +EXPORT_SYMBOL(TEEK_FinalizeContext); + +/* + * Function: TEEK_OpenSession + * Description: This function opens a new Session + * Parameters: context: a pointer to an initialized TEE Context. + * session: a pointer to a Session structure to open. + * destination: a pointer to a UUID structure. + * connectionMethod: the method of connection to use. + * connectionData: any necessary data required to support the connection method chosen. + * operation: a pointer to an Operation containing a set of Parameters. + * returnOrigin: a pointer to a variable which will contain the return origin. 
+ * Return: TEEC_SUCCESS: success other: failure + */ + +TEEC_Result TEEK_OpenSession(TEEC_Context *context, TEEC_Session *session, + const TEEC_UUID *destination, uint32_t connectionMethod, + const void *connectionData, TEEC_Operation *operation, + uint32_t *returnOrigin) +{ + return (TEEC_Result)teek_open_session( + (struct teec_context *)context, (struct teec_session *)session, + (const struct teec_uuid *)destination, connectionMethod, connectionData, + (struct teec_operation *)operation, returnOrigin); +} +EXPORT_SYMBOL(TEEK_OpenSession); + +void TEEK_CloseSession(TEEC_Session *session) +{ + teek_close_session((struct teec_session *)session); +} +EXPORT_SYMBOL(TEEK_CloseSession); + +TEEC_Result TEEK_InvokeCommand(TEEC_Session *session, uint32_t commandID, + TEEC_Operation *operation, uint32_t *returnOrigin) +{ + return (TEEC_Result)teek_invoke_command( + (struct teec_session *)session, commandID, + (struct teec_operation *)operation, returnOrigin); +} +EXPORT_SYMBOL(TEEK_InvokeCommand); + +/* end: for KERNEL-HAL out interface */ diff --git a/tzdriver/core/teek_client_ext.c b/tzdriver/core/teek_client_ext.c new file mode 100644 index 0000000000000000000000000000000000000000..b49cca8ffc6a9d92d718d76eb4db1a3d9d8ee44f --- /dev/null +++ b/tzdriver/core/teek_client_ext.c @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: exported funcs for teek client ext. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include "teek_client_ext.h" +#include "smc_smp.h" +#include "mailbox_mempool.h" +#include "teek_client_constants.h" +#include "tz_update_crl.h" +#include "internal_functions.h" +#include "tc_client_driver.h" + +#ifdef CONFIG_CMS_SIGNATURE +/* update crl */ +uint32_t teek_update_crl(uint8_t *crl, uint32_t crl_len) +{ + if (!get_tz_init_flag()) return EFAULT; + if (crl == NULL || crl_len == 0 || crl_len > DEVICE_CRL_MAX) { + tloge("bad params\n"); + return -EINVAL; + } + + livepatch_down_read_sem(); + int ret = send_crl_to_tee(crl, crl_len, NULL); + livepatch_up_read_sem(); + if (ret != 0) + tloge("update crl failed, ret %d\n", ret); + + return ret; +} +EXPORT_SYMBOL(teek_update_crl); +#endif \ No newline at end of file diff --git a/tzdriver/core/tz_pm.c b/tzdriver/core/tz_pm.c new file mode 100644 index 0000000000000000000000000000000000000000..0fd0a2a606b269f109ae3ae3984c994fadfb7243 --- /dev/null +++ b/tzdriver/core/tz_pm.c @@ -0,0 +1,240 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: function for proc open,close session and invoke. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+#include "tz_pm.h"
+/* NOTE(review): five <linux/...> include targets were lost in extraction;
+ * restore them from the original file. */
+#include
+#include
+#include
+#include
+#include
+#include "tc_ns_client.h"
+#include "teek_ns_client.h"
+#include "tc_ns_log.h"
+#include "smc_call.h"
+
+#define S4_ADDR_4G 0xffffffff
+#define RESERVED_SECOS_PHYMEM_BASE 0x22800000
+#define RESERVED_SECOS_PHYMEM_SIZE (0x3000000)
+#define RESERVED_SECOS_S4_BASE 0x27760000
+#define RESERVED_SECOS_S4_SIZE (0x100000)
+
+/* staging state shared between tc_s4_pm_suspend and tc_s4_pm_resume */
+static char *g_s4_kernel_mem_addr;
+static char *g_s4_buffer_vaddr;
+static uint64_t g_s4_buffer_paddr;
+static uint32_t g_s4_buffer_size;
+
+/*
+ * Map the physical range [paddr, paddr+size) into kernel virtual address
+ * space via vmap(). Returns the virtual address of paddr or NULL on failure.
+ */
+static void *tc_vmap(phys_addr_t paddr, size_t size)
+{
+	uint32_t i;
+	void *vaddr = NULL;
+	pgprot_t pgprot = PAGE_KERNEL;
+	uintptr_t offset;
+	uint32_t pages_count;
+	struct page **pages = NULL;
+
+	offset = paddr & ~PAGE_MASK;
+	paddr &= PAGE_MASK;
+	pages_count = (uint32_t)(PAGE_ALIGN(size + offset) / PAGE_SIZE);
+
+	/* kcalloc instead of kzalloc(n * size): overflow-checked multiply */
+	pages = kcalloc(pages_count, sizeof(struct page *), GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)pages))
+		return NULL;
+
+	for (i = 0; i < pages_count; i++)
+		*(pages + i) = phys_to_page((uintptr_t)(paddr + PAGE_SIZE * i));
+
+	vaddr = vmap(pages, pages_count, VM_MAP, pgprot);
+	kfree(pages);
+	if (vaddr == NULL)
+		return NULL;
+
+	return offset + (char *)vaddr;
+}
+
+/*
+ * Map the reserved S4 middle buffer and vmalloc a kernel-side buffer large
+ * enough to hold the whole secure-OS physical memory image.
+ */
+static int tc_s4_alloc_crypto_buffer(struct device *dev,
+	char **kernel_mem_addr)
+{
+	(void)dev;
+	if (RESERVED_SECOS_S4_BASE > S4_ADDR_4G) {
+		tloge("addr is invalid\n");
+		return -EFAULT;
+	}
+
+	g_s4_buffer_vaddr = tc_vmap(RESERVED_SECOS_S4_BASE, RESERVED_SECOS_S4_SIZE);
+	if (g_s4_buffer_vaddr == NULL) {
+		tloge("vmap failed for s4\n");
+		return -EFAULT;
+	}
+	g_s4_buffer_paddr = RESERVED_SECOS_S4_BASE;
+	g_s4_buffer_size = RESERVED_SECOS_S4_SIZE;
+
+	*kernel_mem_addr = vmalloc(RESERVED_SECOS_PHYMEM_SIZE);
+	if (*kernel_mem_addr == NULL) {
+		/* roll back the vmap on failure */
+		vunmap(g_s4_buffer_vaddr);
+		g_s4_buffer_paddr = 0;
+		g_s4_buffer_vaddr = NULL;
+		g_s4_buffer_size = 0;
+		tloge("vmalloc failed for s4\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* Release the middle-buffer mapping and the kernel staging buffer. */
+static void free_resource(const char *kernel_mem_addr)
+{
+	vunmap(g_s4_buffer_vaddr);
+	vfree(kernel_mem_addr);
+	g_s4_kernel_mem_addr = NULL;
+	g_s4_buffer_paddr = 0;
+	g_s4_buffer_vaddr = NULL;
+	g_s4_buffer_size = 0;
+}
+
+/* Issue the suspend/resume SMC (power_op is TSP_S4_SUSPEND or TSP_S4_RESUME). */
+static uint64_t tc_s4_suspend_or_resume(uint32_t power_op)
+{
+	u64 smc_id = (u64)power_op;
+	u64 smc_ret = 0xffff;
+	struct smc_in_params in_param = { smc_id };
+	struct smc_out_params out_param = { smc_ret };
+
+	smc_req(&in_param, &out_param, 0);
+	smc_ret = out_param.ret;
+	return smc_ret;
+}
+
+/* Ask the TEE to (de)crypt one chunk between the middle buffer and secure memory. */
+static uint64_t tc_s4_crypto_and_copy(uint32_t crypt_op,
+	uint64_t middle_mem_addr,
+	uintptr_t secos_mem,
+	uint32_t size, uint32_t index)
+{
+	u64 smc_id = (u64)crypt_op;
+	u64 arg0 = (u64)middle_mem_addr;
+	u64 arg1 = (u64)secos_mem;
+	u64 arg2 = (u64)size;
+	u64 arg3 = (u64)index;
+	u64 smc_ret = 0xffff;
+	struct smc_in_params in_param = { smc_id, arg0, arg1, arg2, arg3 };
+	struct smc_out_params out_param = { smc_ret };
+
+	smc_req(&in_param, &out_param, 0);
+	smc_ret = out_param.ret;
+	return smc_ret;
+}
+
+/*
+ * Move the whole secure-OS physical image chunk by chunk through the middle
+ * buffer: encrypt-and-copy out on suspend, copy-in-and-decrypt on resume.
+ * Assumes RESERVED_SECOS_PHYMEM_SIZE is a multiple of g_s4_buffer_size
+ * (0x3000000 / 0x100000 holds for the current constants).
+ */
+static int tc_s4_transfer_data(char *kernel_mem_addr, uint32_t crypt_op)
+{
+	uint32_t index = 0;
+	uint32_t copied_size = 0;
+
+	while (copied_size < RESERVED_SECOS_PHYMEM_SIZE) {
+		if (crypt_op == TSP_S4_DECRYPT_AND_COPY) {
+			if (memcpy_s(g_s4_buffer_vaddr, g_s4_buffer_size,
+				kernel_mem_addr + copied_size,
+				g_s4_buffer_size) != EOK) {
+				tloge("mem copy for decrypt failed\n");
+				return -EFAULT;
+			}
+		}
+
+		if (tc_s4_crypto_and_copy(crypt_op, g_s4_buffer_paddr,
+			RESERVED_SECOS_PHYMEM_BASE + copied_size,
+			g_s4_buffer_size, index) != 0) {
+			tloge("crypto and copy failed\n");
+			return -EFAULT;
+		}
+
+		if (crypt_op == TSP_S4_ENCRYPT_AND_COPY) {
+			if (memcpy_s(kernel_mem_addr + copied_size,
+				g_s4_buffer_size, g_s4_buffer_vaddr,
+				g_s4_buffer_size) != EOK) {
+				tloge("mem copy for encrypt failed\n");
+				return -EFAULT;
+			}
+		}
+
+		copied_size += g_s4_buffer_size;
+		index++;
+	}
+
+	return 0;
+}
+
+static int tc_s4_pm_ops(struct device *dev, uint32_t
power_op, + uint32_t crypt_op, char *kernel_mem_addr) +{ + int ret; + (void)dev; + + if (power_op == TSP_S4_SUSPEND) + g_s4_kernel_mem_addr = kernel_mem_addr; + else + kernel_mem_addr = g_s4_kernel_mem_addr; + + /* notify TEEOS to suspend all pm driver */ + if (power_op == TSP_S4_SUSPEND) { + ret = (int)tc_s4_suspend_or_resume(power_op); + if (ret != 0) { + tloge("tc s4 suspend failed\n"); + return ret; + } + } + + ret = tc_s4_transfer_data(kernel_mem_addr, crypt_op); + if (ret != 0) { + tloge("transfer data failed, power_op=0x%x\n", power_op); + return ret; + } + + /* notify TEEOS to resume all pm driver */ + if (power_op == TSP_S4_RESUME) { + ret = (int)tc_s4_suspend_or_resume(power_op); + if (ret != 0) { + tloge("tc s4 resume failed\n"); + return ret; + } + } + + return 0; +} + +int tc_s4_pm_suspend(struct device *dev) +{ + int ret; + char *kernel_mem_addr = NULL; + + ret = tc_s4_alloc_crypto_buffer(dev, &kernel_mem_addr); + if (ret != 0) { + tloge("alloc buffer failed\n"); + return ret; + } + + ret = tc_s4_pm_ops(dev, TSP_S4_SUSPEND, TSP_S4_ENCRYPT_AND_COPY, kernel_mem_addr); + if (ret != 0) { + free_resource(kernel_mem_addr); + tloge("s4 suspend failed\n"); + } + + return ret; +} + +int tc_s4_pm_resume(struct device *dev) +{ + int ret; + + ret = tc_s4_pm_ops(dev, TSP_S4_RESUME, TSP_S4_DECRYPT_AND_COPY, g_s4_kernel_mem_addr); + if (ret != 0) + tloge("s4 resume failed\n"); + + free_resource(g_s4_kernel_mem_addr); + return ret; +} diff --git a/tzdriver/core/tz_pm.h b/tzdriver/core/tz_pm.h new file mode 100644 index 0000000000000000000000000000000000000000..79a3bb7f22a134953a6792e442c5e8cc3e42da77 --- /dev/null +++ b/tzdriver/core/tz_pm.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: suspend or freeze func declaration for tzdriver. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef TZ_PM_H +#define TZ_PM_H +#include + +#define TSP_S4_SUSPEND 0xB200000C +#define TSP_S4_RESUME 0xB200000D +#define TSP_S4_ENCRYPT_AND_COPY 0xB2000010 +#define TSP_S4_DECRYPT_AND_COPY 0xB2000011 + +int tc_s4_pm_suspend(struct device *dev); + +int tc_s4_pm_resume(struct device *dev); + +#endif diff --git a/tzdriver/core/tz_spi_notify.c b/tzdriver/core/tz_spi_notify.c new file mode 100644 index 0000000000000000000000000000000000000000..1f5f33fbcd21726c132b98886077be760c3ebb58 --- /dev/null +++ b/tzdriver/core/tz_spi_notify.c @@ -0,0 +1,731 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: exported funcs for spi interrupt actions. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include "tz_spi_notify.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "teek_client_constants.h" +#include "tc_ns_client.h" +#include "tc_ns_log.h" +#include "tc_client_driver.h" +#include "gp_ops.h" +#include "mailbox_mempool.h" +#include "smc_smp.h" +#include "session_manager.h" +#include "internal_functions.h" +#include "shared_mem.h" + +#define DEFAULT_SPI_NUM 111 + +#define MAX_CALLBACK_COUNT 100 +#define UUID_SIZE 16 +struct teec_timer_property; + +enum timer_class_type { + /* timer event using timer10 */ + TIMER_GENERIC, + /* timer event using RTC */ + TIMER_RTC +}; + +struct teec_timer_property { + unsigned int type; + unsigned int timer_id; + unsigned int timer_class; + unsigned int reserved2; +}; + +struct notify_context_timer { + unsigned int dev_file_id; + unsigned char uuid[UUID_SIZE]; + unsigned int session_id; + struct teec_timer_property property; + uint32_t expire_time; +}; + + +struct notify_context_wakeup { + pid_t ca_thread_id; +}; + +struct notify_context_shadow { + uint64_t target_tcb; +}; + +#ifdef CONFIG_TA_AFFINITY + +#define AFF_BITS_SIZE 64 + +#define AFF_BITS_NUM ((CONFIG_TA_AFFINITY_CPU_NUMS % AFF_BITS_SIZE == 0) ? 
\ + (CONFIG_TA_AFFINITY_CPU_NUMS / AFF_BITS_SIZE) : \ + (CONFIG_TA_AFFINITY_CPU_NUMS / AFF_BITS_SIZE + 1)) + +#define aff_bits_mask(cpuid) \ + (1LLU << (cpuid - (cpuid / AFF_BITS_SIZE) * AFF_BITS_SIZE)) + +struct aff_bits_t { + uint64_t aff_bits[AFF_BITS_NUM]; +}; + +struct notify_context_set_affinity { + pid_t ca_thread_id; + struct aff_bits_t aff; +}; + +#endif + +struct notify_context_stats { + uint32_t send_s; + uint32_t recv_s; + uint32_t send_w; + uint32_t recv_w; +#ifdef CONFIG_TA_AFFINITY + uint32_t send_af; + uint32_t recv_af; +#endif + uint32_t missed; +}; + +union notify_context { + struct notify_context_timer timer; + struct notify_context_wakeup wakeup; + struct notify_context_shadow shadow; +#ifdef CONFIG_TA_AFFINITY + struct notify_context_set_affinity affinity; +#endif + struct notify_context_stats meta; +}; +#ifndef CONFIG_BIG_ENDIAN +struct notify_data_entry { + uint32_t entry_type : 31; + uint32_t filled : 1; + union notify_context context; +}; +#else +struct notify_data_entry { + uint32_t resv; + uint32_t filled : 1; + uint32_t entry_type : 31; + union notify_context context; +}; +#endif + +#ifdef CONFIG_BIG_SESSION + +#define NOTIFY_DATA_ENTRY_COUNT \ + (((PAGE_SIZE * ((1U) << (CONFIG_NOTIFY_PAGE_ORDER))) \ + / sizeof(struct notify_data_entry)) - 1) +#else +#define NOTIFY_DATA_ENTRY_COUNT \ + ((PAGE_SIZE / sizeof(struct notify_data_entry)) - 1) +#endif + +struct notify_data_struct { + struct notify_data_entry entry[NOTIFY_DATA_ENTRY_COUNT]; + struct notify_data_entry meta; +}; + +static struct notify_data_struct *g_notify_data; +static struct notify_data_entry *g_notify_data_entry_shadow; +static spinlock_t g_notify_lock; + +enum notify_data_type { + NOTIFY_DATA_ENTRY_UNUSED, + NOTIFY_DATA_ENTRY_TIMER, + NOTIFY_DATA_ENTRY_RTC, + NOTIFY_DATA_ENTRY_WAKEUP, + NOTIFY_DATA_ENTRY_SHADOW, + NOTIFY_DATA_ENTRY_FIQSHD, + NOTIFY_DATA_ENTRY_SHADOW_EXIT, +#ifdef CONFIG_TA_AFFINITY + NOTIFY_DATA_ENTRY_SET_AFFINITY, +#endif + NOTIFY_DATA_ENTRY_MAX, +}; + 
+struct tc_ns_callback { + unsigned char uuid[UUID_SIZE]; + struct mutex callback_lock; + void (*callback_func)(void *); + struct list_head head; +}; + +struct tc_ns_callback_list { + unsigned int callback_count; + struct mutex callback_list_lock; + struct list_head callback_list; +}; + +static void tc_notify_fn(struct work_struct *dummy); +static struct tc_ns_callback_list g_ta_callback_func_list; +static DECLARE_WORK(tc_notify_work, tc_notify_fn); +static struct workqueue_struct *g_tz_spi_wq; + +static void walk_callback_list( + struct notify_context_timer *tc_notify_data_timer) +{ + struct tc_ns_callback *callback_func_t = NULL; + + mutex_lock(&g_ta_callback_func_list.callback_list_lock); + list_for_each_entry(callback_func_t, + &g_ta_callback_func_list.callback_list, head) { + if (memcmp(callback_func_t->uuid, tc_notify_data_timer->uuid, + UUID_SIZE) != 0) + continue; + + if (tc_notify_data_timer->property.timer_class == + TIMER_RTC) { + tlogd("start to call callback func\n"); + callback_func_t->callback_func( + &(tc_notify_data_timer->property)); + tlogd("end to call callback func\n"); + } else if (tc_notify_data_timer->property.timer_class == + TIMER_GENERIC) { + tlogd("timer60 no callback func\n"); + } + } + mutex_unlock(&g_ta_callback_func_list.callback_list_lock); +} + +static int find_notify_sess( + const struct notify_context_timer *tc_notify_data_timer, + struct tc_ns_session **temp_ses, bool *enc_found) +{ + struct tc_ns_dev_file *temp_dev_file = NULL; + struct tc_ns_dev_list *dev_list = NULL; + struct tc_ns_service *temp_svc = NULL; + + dev_list = get_dev_list(); + if (!dev_list) { + tloge("dev list is invalid\n"); + return -ENOENT; + } + + mutex_lock(&dev_list->dev_lock); + list_for_each_entry(temp_dev_file, &dev_list->dev_file_list, head) { + tlogd("dev file id1 = %u, id2 = %u\n", + temp_dev_file->dev_file_id, + tc_notify_data_timer->dev_file_id); + if (temp_dev_file->dev_file_id == + tc_notify_data_timer->dev_file_id) { + 
mutex_lock(&temp_dev_file->service_lock); + temp_svc = + tc_find_service_in_dev(temp_dev_file, + tc_notify_data_timer->uuid, UUID_LEN); + get_service_struct(temp_svc); + mutex_unlock(&temp_dev_file->service_lock); + if (!temp_svc) + break; + mutex_lock(&temp_svc->session_lock); + *temp_ses = + tc_find_session_withowner( + &temp_svc->session_list, + tc_notify_data_timer->session_id, + temp_dev_file); + get_session_struct(*temp_ses); + mutex_unlock(&temp_svc->session_lock); + put_service_struct(temp_svc); + temp_svc = NULL; + if (*temp_ses) { + tlogd("send cmd ses id %u\n", + (*temp_ses)->session_id); + *enc_found = true; + break; + } + break; + } + } + mutex_unlock(&dev_list->dev_lock); + + return 0; +} + +static void tc_notify_timer_fn(struct notify_data_entry *notify_data_entry) +{ + struct tc_ns_session *temp_ses = NULL; + bool enc_found = false; + struct notify_context_timer *tc_notify_data_timer = NULL; + + tc_notify_data_timer = &(notify_data_entry->context.timer); + notify_data_entry->filled = 0; + tlogd("notify data timer type is 0x%x, timer ID is 0x%x\n", + tc_notify_data_timer->property.type, + tc_notify_data_timer->property.timer_id); + walk_callback_list(tc_notify_data_timer); + + if (find_notify_sess(tc_notify_data_timer, &temp_ses, &enc_found) != 0) + return; + + if (tc_notify_data_timer->property.timer_class == TIMER_GENERIC) { + tlogd("timer60 wake up event\n"); + if (enc_found && temp_ses) { + temp_ses->wait_data.send_wait_flag = 1; + wake_up(&temp_ses->wait_data.send_cmd_wq); + put_session_struct(temp_ses); + temp_ses = NULL; + } + } else { + tlogd("RTC do not need to wakeup\n"); + } +} + +static noinline int get_notify_data_entry(struct notify_data_entry *copy) +{ + uint32_t i; + int filled; + int ret = -1; + + if (!copy || !g_notify_data) { + tloge("bad parameters or notify data is NULL"); + return ret; + } + + spin_lock(&g_notify_lock); + /* TIMER and RTC use fix entry, skip them. 
*/ + for (i = NOTIFY_DATA_ENTRY_UNUSED; i < NOTIFY_DATA_ENTRY_COUNT; i++) { + struct notify_data_entry *e = &g_notify_data->entry[i]; + filled = e->filled; + smp_mb(); + if (filled == 0) + continue; + switch (e->entry_type) { + case NOTIFY_DATA_ENTRY_TIMER: + case NOTIFY_DATA_ENTRY_RTC: + break; + case NOTIFY_DATA_ENTRY_SHADOW: + case NOTIFY_DATA_ENTRY_SHADOW_EXIT: + case NOTIFY_DATA_ENTRY_FIQSHD: + g_notify_data->meta.context.meta.recv_s++; + break; + case NOTIFY_DATA_ENTRY_WAKEUP: + g_notify_data->meta.context.meta.recv_w++; + break; +#ifdef CONFIG_TA_AFFINITY + case NOTIFY_DATA_ENTRY_SET_AFFINITY: + g_notify_data->meta.context.meta.recv_af++; + break; +#endif + default: + tloge("invalid notify type=%u\n", e->entry_type); + goto exit; + } + if (memcpy_s(copy, sizeof(*copy), e, sizeof(*e)) != EOK) { + tloge("memcpy entry failed\n"); + break; + } + smp_mb(); + e->filled = 0; + ret = 0; + break; + } +exit: + spin_unlock(&g_notify_lock); + return ret; +} + +static void tc_notify_wakeup_fn(const struct notify_data_entry *entry) +{ + const struct notify_context_wakeup *tc_notify_wakeup = NULL; + + tc_notify_wakeup = &(entry->context.wakeup); + smc_wakeup_ca(tc_notify_wakeup->ca_thread_id); + tlogd("notify data entry wakeup ca: %d\n", + tc_notify_wakeup->ca_thread_id); +} + +static void tc_notify_shadow_fn(const struct notify_data_entry *entry) +{ + const struct notify_context_shadow *tc_notify_shadow = NULL; + + tc_notify_shadow = &(entry->context.shadow); + smc_queue_shadow_worker(tc_notify_shadow->target_tcb); +} + +static void tc_notify_fiqshd_fn(const struct notify_data_entry *entry) +{ + const struct notify_context_shadow *tc_notify_shadow = NULL; + + if (!entry) { + /* for NOTIFY_DATA_ENTRY_FIQSHD missed */ + fiq_shadow_work_func(0); + return; + } + tc_notify_shadow = &(entry->context.shadow); + fiq_shadow_work_func(tc_notify_shadow->target_tcb); +} + +static void tc_notify_shadowexit_fn(const struct notify_data_entry *entry) +{ + const struct 
notify_context_wakeup *tc_notify_wakeup = NULL; + + tc_notify_wakeup = &(entry->context.wakeup); + if (smc_shadow_exit(tc_notify_wakeup->ca_thread_id) != 0) + tloge("shadow ca exit failed: %d\n", + (int)tc_notify_wakeup->ca_thread_id); +} + +#ifdef CONFIG_TA_AFFINITY +static void tc_notify_set_affinity(struct notify_data_entry *entry) +{ + struct notify_context_set_affinity *af_data = NULL; + struct pending_entry *pe = NULL; + + af_data = &(entry->context.affinity); + pe = find_pending_entry(af_data->ca_thread_id); + if (pe != NULL) { + struct cpumask mask; + uint32_t i; + + cpumask_clear(&mask); + for_each_online_cpu(i) { + struct aff_bits_t *aff = &af_data->aff; + if (aff->aff_bits[i / AFF_BITS_SIZE] & aff_bits_mask(i)) + cpumask_set_cpu(i, &mask); + } + + /* + * we don't set ca's cpumask here but in ca's own thread + * context after ca is wakeup in smc_send_func, or + * scheduler will set task's allow cpumask failure in that case. + */ + cpumask_copy(&pe->ta_mask, &mask); + smc_wakeup_ca(af_data->ca_thread_id); + tlogd("set affinity for ca thread id %u\n", af_data->ca_thread_id); + put_pending_entry(pe); + } else { + tloge("invalid ca thread id %u for set affinity\n", + af_data->ca_thread_id); + /* + * if a TEE tcb without CA bind(CA is 0) cause a affinity set, + * the CA tid(current cpu context) may wrong + * (in tc_notify_fiqshd_fn, don't init_pending_entry, + * in this case, cannot find pending_entry), + * but we must set affinity for CA otherwise the TA can't run, + * so we wakeup all blocked CA. 
+ */ + (void)smc_wakeup_broadcast(); + } +} +#endif + +#define MISSED_COUNT 4 +static void spi_broadcast_notifications(void) +{ + uint32_t missed; + + smp_mb(); + + if (!g_notify_data) { + tloge("notify data is NULL\n"); + return; + } + + missed = (uint32_t)__xchg(0, &g_notify_data->meta.context.meta.missed, MISSED_COUNT); + if (missed == 0) + return; + if ((missed & (1U << NOTIFY_DATA_ENTRY_WAKEUP)) != 0) { + smc_wakeup_broadcast(); + missed &= ~(1U << NOTIFY_DATA_ENTRY_WAKEUP); + } + if ((missed & (1U << NOTIFY_DATA_ENTRY_FIQSHD)) != 0) { + tc_notify_fiqshd_fn(NULL); + missed &= ~(1U << NOTIFY_DATA_ENTRY_FIQSHD); + } + if (missed != 0) + tloge("missed spi notification mask %x\n", missed); +} + +static void tc_notify_fn(struct work_struct *dummy) +{ + struct notify_data_entry copy = {0}; + (void)dummy; + + while (get_notify_data_entry(©) == 0) { + switch (copy.entry_type) { + case NOTIFY_DATA_ENTRY_TIMER: + case NOTIFY_DATA_ENTRY_RTC: + tc_notify_timer_fn(©); + break; + case NOTIFY_DATA_ENTRY_WAKEUP: + tc_notify_wakeup_fn(©); + break; + case NOTIFY_DATA_ENTRY_SHADOW: + tc_notify_shadow_fn(©); + break; + case NOTIFY_DATA_ENTRY_FIQSHD: + tc_notify_fiqshd_fn(©); + break; + case NOTIFY_DATA_ENTRY_SHADOW_EXIT: + tc_notify_shadowexit_fn(©); + break; +#ifdef CONFIG_TA_AFFINITY + case NOTIFY_DATA_ENTRY_SET_AFFINITY: + tc_notify_set_affinity(©); + break; +#endif + default: + tloge("invalid entry type = %u\n", copy.entry_type); + } + if (memset_s(©, sizeof(copy), 0, sizeof(copy)) != 0) + tloge("memset copy failed\n"); + } + spi_broadcast_notifications(); +} + +static irqreturn_t tc_secure_notify(int irq, void *dev_id) +{ +#define N_WORK 8 + int i; + int queued = 0; + static struct work_struct tc_notify_works[N_WORK]; + static int init; + (void)dev_id; + + if (init == 0) { + for (i = 0; i < N_WORK; i++) + INIT_WORK(&tc_notify_works[i], tc_notify_fn); + init = 1; + } + for (i = 0; i < N_WORK; i++) { + if (queue_work(g_tz_spi_wq, &tc_notify_works[i])) { + queued = 1; + break; 
+ } + } + if (queued == 1) + tee_trace_add_event(INTERRUPT_HANDLE_SPI_REE_RESPONSE, (uint64_t)irq); + else + tee_trace_add_event(INTERRUPT_HANDLE_SPI_REE_MISS, (uint64_t)irq); +#undef N_WORK + + return IRQ_HANDLED; +} + +int tc_ns_register_service_call_back_func(const char *uuid, void *func, + const void *private_data) +{ + struct tc_ns_callback *callback_func = NULL; + struct tc_ns_callback *new_callback = NULL; + int ret = 0; + + if (!uuid || !func) + return -EINVAL; + + (void)private_data; + mutex_lock(&g_ta_callback_func_list.callback_list_lock); + if (g_ta_callback_func_list.callback_count > MAX_CALLBACK_COUNT) { + mutex_unlock(&g_ta_callback_func_list.callback_list_lock); + tloge("callback_count is out\n"); + return -ENOMEM; + } + list_for_each_entry(callback_func, + &g_ta_callback_func_list.callback_list, head) { + if (memcmp(callback_func->uuid, uuid, UUID_SIZE) == 0) { + callback_func->callback_func = (void (*)(void *))func; + tlogd("succeed to find uuid ta_callback_func_list\n"); + goto find_callback; + } + } + /* create a new callback struct if we couldn't find it in list */ + new_callback = kzalloc(sizeof(*new_callback), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)new_callback)) { + tloge("kzalloc failed\n"); + ret = -ENOMEM; + goto find_callback; + } + + if (memcpy_s(new_callback->uuid, UUID_SIZE, uuid, UUID_SIZE) != 0) { + kfree(new_callback); + new_callback = NULL; + ret = -ENOMEM; + goto find_callback; + } + g_ta_callback_func_list.callback_count++; + tlogd("callback count is %u\n", + g_ta_callback_func_list.callback_count); + INIT_LIST_HEAD(&new_callback->head); + new_callback->callback_func = (void (*)(void *))func; + mutex_init(&new_callback->callback_lock); + list_add_tail(&new_callback->head, + &g_ta_callback_func_list.callback_list); +find_callback: + mutex_unlock(&g_ta_callback_func_list.callback_list_lock); + return ret; +} + +int TC_NS_RegisterServiceCallbackFunc(const char *uuid, void *func, + const void *private_data) +{ 
+ const char *uuid_in = uuid; + + if (!get_tz_init_flag()) return EFAULT; + return tc_ns_register_service_call_back_func(uuid_in, + func, private_data); +} +EXPORT_SYMBOL(TC_NS_RegisterServiceCallbackFunc); + +int send_notify_cmd(unsigned int cmd_id) +{ + struct tc_ns_smc_cmd smc_cmd = { {0}, 0 }; + int ret = 0; + struct mb_cmd_pack *mb_pack = NULL; + + mb_pack = mailbox_alloc_cmd_pack(); + if (!mb_pack) + return -ENOMEM; + + mb_pack->operation.paramtypes = + TEE_PARAM_TYPE_VALUE_INPUT | + TEE_PARAM_TYPE_VALUE_INPUT << TEE_PARAM_NUM; + mb_pack->operation.params[0].value.a = + (unsigned int)(get_spi_mem_paddr((uintptr_t)g_notify_data)); + mb_pack->operation.params[0].value.b = + (unsigned int)(get_spi_mem_paddr((uintptr_t)g_notify_data) >> ADDR_TRANS_NUM); + mb_pack->operation.params[1].value.a = SZ_4K; + smc_cmd.cmd_type = CMD_TYPE_GLOBAL; + smc_cmd.cmd_id = cmd_id; + smc_cmd.operation_phys = + (unsigned int)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation); + smc_cmd.operation_h_phys = + (unsigned int)((uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM); + + if (is_tee_rebooting()) + ret = send_smc_cmd_rebooting(TSP_REQUEST, 0, 0, &smc_cmd); + else + ret = tc_ns_smc(&smc_cmd); + + if (ret != 0) { + ret = -EPERM; + tloge("register notify mem failed\n"); + } + + mailbox_free(mb_pack); + + return ret; +} + +static unsigned int g_irq = DEFAULT_SPI_NUM; +static int config_spi_context(struct device *class_dev, struct device_node *np) +{ + int ret; + +#ifndef CONFIG_ACPI + if (!np) { + tloge("device node not found\n"); + return -EINVAL; + } +#endif + + /* Map IRQ 0 from the OF interrupts list */ +#ifdef CONFIG_ACPI + g_irq = (unsigned int)get_acpi_tz_irq(); +#else + g_irq = irq_of_parse_and_map(np, 0); +#endif + ret = devm_request_irq(class_dev, g_irq, tc_secure_notify, + IRQF_NO_SUSPEND, TC_NS_CLIENT_DEV, NULL); + if (ret < 0) { + tloge("device irq %u request failed %d", g_irq, ret); + return ret; + } + + 
g_ta_callback_func_list.callback_count = 0; + INIT_LIST_HEAD(&g_ta_callback_func_list.callback_list); + mutex_init(&g_ta_callback_func_list.callback_list_lock); + + return 0; +} + +int tz_spi_init(struct device *class_dev, struct device_node *np) +{ + int ret; + + if (!class_dev) /* here np can be NULL */ + return -EINVAL; + + spin_lock_init(&g_notify_lock); + g_tz_spi_wq = alloc_workqueue("g_tz_spi_wq", + WQ_UNBOUND | WQ_HIGHPRI, TZ_WQ_MAX_ACTIVE); + if (!g_tz_spi_wq) { + tloge("it failed to create workqueue g_tz_spi_wq\n"); + return -ENOMEM; + } + tz_workqueue_bind_mask(g_tz_spi_wq, WQ_HIGHPRI); + + ret = config_spi_context(class_dev, np); + if (ret != 0) + goto clean; + + if (!g_notify_data) { + g_notify_data = (struct notify_data_struct *)(uintptr_t)get_spi_mem_vaddr(); + if (!g_notify_data) { + tloge("get free page failed for notification data\n"); + ret = -ENOMEM; + goto clean; + } + + ret = send_notify_cmd(GLOBAL_CMD_ID_REGISTER_NOTIFY_MEMORY); + if (ret != 0) { + tloge("shared memory failed ret is 0x%x\n", ret); + ret = -EFAULT; + free_spi_mem((uint64_t)(uintptr_t)g_notify_data); + g_notify_data = NULL; + goto clean; + } + + g_notify_data_entry_shadow = + &g_notify_data->entry[NOTIFY_DATA_ENTRY_SHADOW - 1]; + tlogd("target is: %llx\n", + g_notify_data_entry_shadow->context.shadow.target_tcb); + } + + return 0; +clean: + free_tz_spi(class_dev); + return ret; +} + +void free_tz_spi(struct device *class_dev) +{ + if (g_notify_data) { + free_spi_mem((uint64_t)(uintptr_t)g_notify_data); + g_notify_data = NULL; + } + + if (g_tz_spi_wq) { + flush_workqueue(g_tz_spi_wq); + destroy_workqueue(g_tz_spi_wq); + g_tz_spi_wq = NULL; + } + if (!class_dev) + return; + + devm_free_irq(class_dev, g_irq, NULL); +} diff --git a/tzdriver/core/tz_spi_notify.h b/tzdriver/core/tz_spi_notify.h new file mode 100644 index 0000000000000000000000000000000000000000..41e1f4b12557e5aacf9922c8246114f5f0007c7c --- /dev/null +++ b/tzdriver/core/tz_spi_notify.h @@ -0,0 +1,24 @@ +/* + * 
Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: exported funcs for spi interrupt actions. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef TZ_SPI_NOTIFY_H +#define TZ_SPI_NOTIFY_H +#include +#include +#include "teek_ns_client.h" + +int tz_spi_init(struct device *class_dev, struct device_node *np); +void free_tz_spi(struct device *class_dev); +int send_notify_cmd(unsigned int cmd_id); + +#endif diff --git a/tzdriver/core/tz_update_crl.c b/tzdriver/core/tz_update_crl.c new file mode 100644 index 0000000000000000000000000000000000000000..d12afa60798b2f412a8e66bb600230a327ac831a --- /dev/null +++ b/tzdriver/core/tz_update_crl.c @@ -0,0 +1,209 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: function for update crl. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include "tz_update_crl.h" +#include +#include +#include +#include +#include +#include "mailbox_mempool.h" +#include "smc_smp.h" + +#define D_PATH_LEN 256 + +static DEFINE_MUTEX(g_cms_crl_update_lock); + +int send_crl_to_tee(const char *crl_buffer, uint32_t crl_len, const struct tc_ns_dev_file *dev_file) +{ + int ret; + struct tc_ns_smc_cmd smc_cmd = { {0}, 0 }; + struct mb_cmd_pack *mb_pack = NULL; + char *mb_param = NULL; + + /* dev_file not need check null */ + if (crl_buffer == NULL || crl_len == 0 || crl_len > DEVICE_CRL_MAX) { + tloge("invalid params\n"); + return -EINVAL; + } + + mb_pack = mailbox_alloc_cmd_pack(); + if (!mb_pack) { + tloge("alloc mb pack failed\n"); + return -ENOMEM; + } + mb_param = mailbox_copy_alloc(crl_buffer, crl_len); + if (!mb_param) { + tloge("alloc mb param failed\n"); + ret = -ENOMEM; + goto clean; + } + mb_pack->operation.paramtypes = TEEC_MEMREF_TEMP_INPUT; + mb_pack->operation.params[0].memref.buffer = (unsigned int )mailbox_virt_to_phys((uintptr_t)mb_param); + mb_pack->operation.buffer_h_addr[0] = + (unsigned int)((uint64_t)mailbox_virt_to_phys((uintptr_t)mb_param) >> ADDR_TRANS_NUM); + mb_pack->operation.params[0].memref.size = crl_len; + smc_cmd.cmd_id = GLOBAL_CMD_ID_UPDATE_TA_CRL; + smc_cmd.cmd_type = CMD_TYPE_GLOBAL; + if (dev_file != NULL) + smc_cmd.dev_file_id = dev_file->dev_file_id; + smc_cmd.context_id = 0; + smc_cmd.operation_phys = (unsigned int)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation); + smc_cmd.operation_h_phys = + (unsigned int)((uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM); + + mutex_lock(&g_cms_crl_update_lock); + ret = tc_ns_smc(&smc_cmd); + mutex_unlock(&g_cms_crl_update_lock); + if (ret != 0) + tloge("smc call returns error ret 0x%x\n", ret); +clean: + mailbox_free(mb_pack); + mb_pack = NULL; + if (mb_param) + mailbox_free(mb_param); + + return ret; +} + +int tc_ns_update_ta_crl(const struct tc_ns_dev_file *dev_file, void __user *argp) +{ + int ret; + 
struct tc_ns_client_crl context = {0}; + void *buffer_addr = NULL; + uint8_t *crl_buffer = NULL; + + if (!dev_file || !argp) { + tloge("invalid params\n"); + return -EINVAL; + } + + if (copy_from_user(&context, argp, sizeof(context)) != 0) { + tloge("copy from user failed\n"); + return -ENOMEM; + } + + if (context.size > DEVICE_CRL_MAX) { + tloge("crl size is too long\n"); + return -ENOMEM; + } + + crl_buffer = kmalloc(context.size, GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)(crl_buffer))) { + tloge("failed to allocate crl buffer\n"); + return -ENOMEM; + } + + buffer_addr = (void *)(uintptr_t)(context.memref.buffer_addr | + (((uint64_t)context.memref.buffer_h_addr) << ADDR_TRANS_NUM)); + if (copy_from_user(crl_buffer, buffer_addr, context.size) != 0) { + tloge("copy from user failed\n"); + goto clean; + } + + ret = send_crl_to_tee(crl_buffer, context.size, dev_file); + if (ret != 0) { + tloge("send crl to tee failed\n"); + goto clean; + } + +clean: + kfree(crl_buffer); + return ret; +} + +static struct file *crl_file_open(const char *file_path) +{ + struct file *fp = NULL; + int ret; + char *dpath = NULL; + char tmp_buf[D_PATH_LEN] = {0}; + struct path base_path = { + .mnt = NULL, + .dentry = NULL + }; + + ret = kern_path(file_path, LOOKUP_FOLLOW, &base_path); + if (ret != 0) + return NULL; + + dpath = d_path(&base_path, tmp_buf, D_PATH_LEN); + if (!dpath || IS_ERR(dpath)) + goto clean; + + fp = filp_open(dpath, O_RDONLY, 0); + +clean: + path_put(&base_path); + return fp; +} + +int tz_update_crl(const char *file_path, const struct tc_ns_dev_file *dev_file) +{ + struct file *fp = NULL; + uint32_t crl_len; + char *crl_buffer = NULL; + uint32_t read_size; + loff_t pos = 0; + int ret = 0; + + if (!dev_file || !file_path) { + tloge("invalid params\n"); + return -EINVAL; + } + + fp = crl_file_open(file_path); + if (!fp || IS_ERR(fp)) { + tloge("open crl file error, ret = %ld\n", PTR_ERR(fp)); + return -ENOENT; + } + if (!fp->f_inode) { + tloge("node is 
NULL\n"); + ret = -EINVAL; + goto clean; + } + + crl_len = (uint32_t)(fp->f_inode->i_size); + if (crl_len > DEVICE_CRL_MAX) { + tloge("crl file len is invalid %u\n", crl_len); + ret = -EINVAL; + goto clean; + } + + crl_buffer = vmalloc(crl_len); + if (!crl_buffer) { + tloge("alloc crl file buffer(size=%u) failed\n", crl_len); + ret = -ENOMEM; + goto clean; + } + + read_size = (uint32_t)kernel_read(fp, crl_buffer, crl_len, &pos); + if (read_size != crl_len) { + tloge("read crl file failed, read size/total size=%u/%u\n", read_size, crl_len); + ret = -ENOENT; + goto clean; + } + + ret = send_crl_to_tee(crl_buffer, crl_len, dev_file); + if (ret != 0) { + tloge("send crl to tee failed\n"); + goto clean; + } + +clean: + filp_close(fp, 0); + fp = NULL; + if (crl_buffer) + vfree(crl_buffer); + return ret; +} \ No newline at end of file diff --git a/tzdriver/core/tz_update_crl.h b/tzdriver/core/tz_update_crl.h new file mode 100644 index 0000000000000000000000000000000000000000..ec3b8cbca892c97743d96928f3deaf95b71203ed --- /dev/null +++ b/tzdriver/core/tz_update_crl.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: function for update crl. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef TZ_UPDATE_CRL_H +#define TZ_UPDATE_CRL_H +#include "teek_ns_client.h" + +#define DEVICE_CRL_MAX 0x4000 /* 16KB */ +int send_crl_to_tee(const char *crl_buffer, uint32_t crl_len, const struct tc_ns_dev_file *dev_file); +int tc_ns_update_ta_crl(const struct tc_ns_dev_file *dev_file, void __user *argp); +int tz_update_crl(const char *file_path, const struct tc_ns_dev_file *dev_file); + +#endif \ No newline at end of file diff --git a/tzdriver/core/tzdebug.c b/tzdriver/core/tzdebug.c new file mode 100644 index 0000000000000000000000000000000000000000..f57562ed1cb3aa41b63df2f3d194ea4d0d0adc00 --- /dev/null +++ b/tzdriver/core/tzdebug.c @@ -0,0 +1,458 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: for tzdriver debug. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include "tzdebug.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tc_ns_log.h" +#include "tc_ns_client.h" +#include "tc_client_driver.h" +#include "teek_ns_client.h" +#include "smc_smp.h" +#include "teek_client_constants.h" +#include "mailbox_mempool.h" +#include "tlogger.h" +#include "cmdmonitor.h" +#include "session_manager.h" +#include "internal_functions.h" + +#define DEBUG_OPT_LEN 128 + +#ifdef CONFIG_TA_MEM_INUSE_ONLY +#define TA_MEMSTAT_ALL 0 +#else +#define TA_MEMSTAT_ALL 1 +#endif + +static struct dentry *g_tz_dbg_dentry; + +typedef void (*tzdebug_opt_func)(const char *param); + +struct opt_ops { + char *name; + tzdebug_opt_func func; +}; + +static DEFINE_MUTEX(g_meminfo_lock); +static struct tee_mem g_tee_meminfo; +static void tzmemdump(const char *param); +static int send_dump_mem(int flag, int history, const struct tee_mem *statmem) +{ + struct tc_ns_smc_cmd smc_cmd = { {0}, 0 }; + struct mb_cmd_pack *mb_pack = NULL; + int ret = 0; + + if (!statmem) { + tloge("statmem is NULL\n"); + return -EINVAL; + } + + mb_pack = mailbox_alloc_cmd_pack(); + if (!mb_pack) + return -ENOMEM; + + smc_cmd.cmd_id = GLOBAL_CMD_ID_DUMP_MEMINFO; + smc_cmd.cmd_type = CMD_TYPE_GLOBAL; + mb_pack->operation.paramtypes = teec_param_types( + TEE_PARAM_TYPE_MEMREF_INOUT, TEE_PARAM_TYPE_VALUE_INPUT, + TEE_PARAM_TYPE_NONE, TEE_PARAM_TYPE_NONE); + mb_pack->operation.params[0].memref.buffer = (unsigned int)mailbox_virt_to_phys((uintptr_t)statmem); + mb_pack->operation.params[0].memref.size = sizeof(*statmem); + mb_pack->operation.buffer_h_addr[0] = + (unsigned int)((uint64_t)mailbox_virt_to_phys((uintptr_t)statmem) >> ADDR_TRANS_NUM); + mb_pack->operation.params[1].value.a = (unsigned int)flag; + mb_pack->operation.params[1].value.b = (unsigned int)history; + smc_cmd.operation_phys = + (unsigned 
int)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation); + smc_cmd.operation_h_phys = + (unsigned int)((uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM); + + livepatch_down_read_sem(); + if (tc_ns_smc(&smc_cmd) != 0) { + ret = -EPERM; + tloge("send dump mem failed\n"); + } + livepatch_up_read_sem(); + + tz_log_write(); + mailbox_free(mb_pack); + return ret; +} + +void tee_dump_mem(void) +{ + tzmemdump(NULL); + if (tlogger_store_msg(CONFIG_TEE_LOG_ACHIVE_PATH, + sizeof(CONFIG_TEE_LOG_ACHIVE_PATH)) < 0) + tloge("[cmd_monitor_tick]tlogger store lastmsg failed\n"); +} + +/* get meminfo (tee_mem + N * ta_mem < 4Kbyte) from tee */ +static int get_tee_meminfo_cmd(void) +{ + int ret; + struct tee_mem *mem = NULL; + + mem = mailbox_alloc(sizeof(*mem), MB_FLAG_ZERO); + if (!mem) + return -ENOMEM; + + ret = send_dump_mem(0, TA_MEMSTAT_ALL, mem); + if (ret != 0) { + tloge("send dump failed\n"); + mailbox_free(mem); + return ret; + } + + mutex_lock(&g_meminfo_lock); + ret = memcpy_s(&g_tee_meminfo, sizeof(g_tee_meminfo), mem, sizeof(*mem)); + if (ret != 0) + tloge("memcpy failed\n"); + + mutex_unlock(&g_meminfo_lock); + mailbox_free(mem); + + return ret; +} + +static atomic_t g_cmd_send = ATOMIC_INIT(1); + +void set_cmd_send_state(void) +{ + atomic_set(&g_cmd_send, 1); +} + +int get_tee_meminfo(struct tee_mem *meminfo) +{ + errno_t s_ret; + + if (!get_tz_init_flag()) return EFAULT; + + if (!meminfo) + return -EINVAL; + + if (atomic_read(&g_cmd_send) != 0) { + if (get_tee_meminfo_cmd() != 0) + return -EFAULT; + } else { + atomic_set(&g_cmd_send, 0); + } + + mutex_lock(&g_meminfo_lock); + s_ret = memcpy_s(meminfo, sizeof(*meminfo), + &g_tee_meminfo, sizeof(g_tee_meminfo)); + mutex_unlock(&g_meminfo_lock); + if (s_ret != 0) + return -1; + + return 0; +} +EXPORT_SYMBOL(get_tee_meminfo); + +static void tzdump(const char *param) +{ + (void)param; + show_cmd_bitmap(); + wakeup_tc_siq(SIQ_DUMP_SHELL); +} + +static void tzmemdump(const char *param) +{ + 
struct tee_mem *mem = NULL; + + (void)param; + mem = mailbox_alloc(sizeof(*mem), MB_FLAG_ZERO); + if (!mem) { + tloge("mailbox alloc failed\n"); + return; + } + + if (send_dump_mem(1, 1, mem) != 0) + tloge("send dump mem failed\n"); + + mailbox_free(mem); +} + +static struct opt_ops g_opt_arr[] = { + {"dump", tzdump}, + {"memdump", tzmemdump}, + {"dump_service", dump_services_status}, +}; + +static ssize_t tz_dbg_opt_read(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char *obuf = NULL; + char *p = NULL; + ssize_t ret; + uint32_t oboff = 0; + uint32_t i; + + (void)(filp); + + obuf = kzalloc(DEBUG_OPT_LEN, GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)obuf)) + return -ENOMEM; + p = obuf; + + for (i = 0; i < ARRAY_SIZE(g_opt_arr); i++) { + int len = snprintf_s(p, DEBUG_OPT_LEN - oboff, DEBUG_OPT_LEN -oboff -1, + "%s ", g_opt_arr[i].name); + if (len < 0) { + kfree(obuf); + tloge("snprintf opt name of idx %d failed\n", i); + return -EINVAL; + } + p += len; + oboff += (uint32_t)len; + } + obuf[oboff - 1] = '\n'; + + ret = simple_read_from_buffer(ubuf, cnt, ppos, obuf, oboff); + kfree(obuf); + + return ret; +} + +static ssize_t tz_dbg_opt_write(struct file *filp, + const char __user *ubuf, size_t cnt, loff_t *ppos) +{ + char buf[128] = {0}; + char *value = NULL; + char *p = NULL; + uint32_t i = 0; + + if (!ubuf || !filp || !ppos) + return -EINVAL; + + if (cnt >= sizeof(buf)) + return -EINVAL; + + if (cnt == 0) + return -EINVAL; + + if (copy_from_user(buf, ubuf, cnt) != 0) + return -EFAULT; + + buf[cnt] = 0; + if (cnt > 0 && buf[cnt -1] == '\n') + buf[cnt - 1] = 0; + value = buf; + p = strsep(&value, ":"); /* when buf has no :, value may be NULL */ + if (!p) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(g_opt_arr); i++) { + if ((strncmp(p, g_opt_arr[i].name, + strlen(g_opt_arr[i].name)) ==0) && + strlen(p) == strlen(g_opt_arr[i].name)) { + g_opt_arr[i].func(value); + return (ssize_t)cnt; + } + } + return -EFAULT; +} + +static 
const struct file_operations g_tz_dbg_opt_fops = { + .owner = THIS_MODULE, + .read = tz_dbg_opt_read, + .write = tz_dbg_opt_write, +}; + +#ifdef CONFIG_MEMSTAT_DEBUGFS +static int memstat_debug_show(struct seq_file *m, void *v) +{ + struct tee_mem *mem_stat = NULL; + int ret; + uint32_t i; + (void)v; + + mem_stat = kzalloc(sizeof(*mem_stat), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)mem_stat)) + return -ENOMEM; + + ret = get_tee_meminfo(mem_stat); + if (ret != 0) { + tloge("get tee meminfo failed\n"); + kfree(mem_stat); + mem_stat = NULL; + return -EINVAL; + } + + seq_printf(m, "TotalMem:%u Pmem:%u Free_Mem:%u Free_Mem_Min:%u TA_Num:%u\n", + mem_stat->total_mem, mem_stat->pmem, mem_stat->free_mem, mem_stat->free_mem_min, mem_stat->ta_num); + + for (i = 0; i < mem_stat->ta_num; i++) + seq_printf(m, "ta_name:%s ta_pmem:%u pmem_max:%u pmem_limit:%u\n", + mem_stat->ta_mem_info[i].ta_name, mem_stat->ta_mem_info[i].pmem, + mem_stat->ta_mem_info[i].pmem_max, mem_stat->ta_mem_info[i].pmem_limit); + + kfree(mem_stat); + mem_stat = NULL; + return 0; +} + +static int tz_memstat_open(struct inode *inode, struct file *file) +{ + (void)inode; + return single_open(file, memstat_debug_show, NULL); +} + +static const struct file_operations g_tz_dbg_memstat_fops = { + .owner = THIS_MODULE, + .open = tz_memstat_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + +#ifdef CONFIG_TEE_TRACE +static int tee_trace_event_show(struct seq_file *m, void *v) +{ + struct tee_trace_view_t view = { 0, 0, 0, 0, { 0 }, { 0 } }; + struct trace_log_info log_info; + (void)v; + + get_tee_trace_start(&view); + if (view.buffer_is_full == 1) + seq_printf(m, "Total Trace Events: %u (Notice: Buffer is full)\n", view.total); + else + seq_printf(m, "Total Trace Events: %u\n", view.total); + + if (view.total > 0) { + uint32_t i = 0; + + while (get_tee_trace_next(&view, &log_info, false) != -1) { + uint32_t task_ca = (uint32_t)(log_info.add_info); + 
uint32_t task_idx = (uint32_t)(log_info.add_info >> 32); + + if (log_info.event_id == SCHED_IN || log_info.event_id == SCHED_OUT) { + seq_printf(m, "[%4u][cpu%3u][ca-%5u] %10llu : %s %u %s\n", + i++, log_info.cpu, log_info.ca_pid, log_info.time, log_info.event_name, + task_ca, get_tee_trace_task_name(task_idx)); + } else { + seq_printf(m, "[%4u][cpu%3u][ca-%5u] %10llu : %s %llu\n", + i++, log_info.cpu, log_info.ca_pid, log_info.time, log_info.event_name, + log_info.add_info); + } + } + } + + return 0; +} + +static int tee_trace_event_open(struct inode *inode, struct file *file) +{ + return single_open(file, tee_trace_event_show, NULL); +} + +struct tee_trace_cmd_t { + const char *cmd; + int (*func)(void); +} tee_trace_cmd[] = { + {"start", tee_trace_event_start}, + {"loop_record", tee_trace_event_start_loop_record}, + {"stop", tee_trace_event_stop} +}; + +static ssize_t tee_trace_event_write(struct file *filp, + const char __user *ubuf, size_t cnt, loff_t *ppos) +{ + char buf[32] = {0}; + uint32_t i = 0; + + if (cnt >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(buf, ubuf, cnt)) + return -EINVAL; + + buf[cnt] = 0; + if (cnt > 0 && buf[cnt - 1] == '\n') + buf[cnt - 1] = 0; + + for (i = 0; i < ARRAY_SIZE(tee_trace_cmd); i++) { + if (!strncmp(buf, tee_trace_cmd[i].cmd, + strlen(tee_trace_cmd[i].cmd))) { + tee_trace_cmd[i].func(); + return cnt; + } + } + return -EINVAL; +} + +static const struct file_operations tee_trace_event_fops = { + .owner = THIS_MODULE, + .open = tee_trace_event_open, + .read = seq_read, + .write = tee_trace_event_write, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + +int tzdebug_init(void) +{ +#if defined(DEF_ENG) || defined(CONFIG_TZDRIVER_MODULE) + g_tz_dbg_dentry = debugfs_create_dir("tzdebug", NULL); + if (!g_tz_dbg_dentry) + return -1; + + debugfs_create_file("opt", 0660, g_tz_dbg_dentry, NULL, + &g_tz_dbg_opt_fops); + +#ifdef CONFIG_MEMSTAT_DEBUGFS + debugfs_create_file("memstat", 0444, g_tz_dbg_dentry, NULL, + 
&g_tz_dbg_memstat_fops); +#endif + +#ifdef CONFIG_TEE_TRACE + debugfs_create_file("tee_trace", 0660, g_tz_dbg_dentry, NULL, + &tee_trace_event_fops); + tee_trace_event_enable(); +#endif + +#else + (void)g_tz_dbg_dentry; + (void)g_tz_dbg_opt_fops; +#endif + return 0; +} + +void free_tzdebug(void) +{ +#if defined(DEF_ENG) || defined(CONFIG_TZDRIVER_MODULE) + if (!g_tz_dbg_dentry) + return; + + debugfs_remove_recursive(g_tz_dbg_dentry); + g_tz_dbg_dentry = NULL; +#endif +} \ No newline at end of file diff --git a/tzdriver/core/tzdebug.h b/tzdriver/core/tzdebug.h new file mode 100644 index 0000000000000000000000000000000000000000..e544235de5c627a843fb4d12a51475034a878dd0 --- /dev/null +++ b/tzdriver/core/tzdebug.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef TZDEBUG_H +#define TZDEBUG_H + +#include +struct ta_mem { + char ta_name[64]; + uint32_t pmem; + uint32_t pmem_max; + uint32_t pmem_limit; +}; +#define MEMINFO_TA_MAX 100 +struct tee_mem { + uint32_t total_mem; + uint32_t pmem; + uint32_t free_mem; + uint32_t free_mem_min; + uint32_t ta_num; + struct ta_mem ta_mem_info[MEMINFO_TA_MAX]; +}; + +int get_tee_meminfo(struct tee_mem *meminfo); +void tee_dump_mem(void); +int tzdebug_init(void); +void free_tzdebug(void); + +#endif \ No newline at end of file diff --git a/tzdriver/dynamic_mem.h b/tzdriver/dynamic_mem.h new file mode 120000 index 0000000000000000000000000000000000000000..3dc5dd8d71bccb27fff362e5c0582cd5c03917f2 --- /dev/null +++ b/tzdriver/dynamic_mem.h @@ -0,0 +1 @@ +ion/dynamic_ion_mem.h \ No newline at end of file diff --git a/tzdriver/figures/tzdriver.drawio.png b/tzdriver/figures/tzdriver.drawio.png new file mode 100644 index 0000000000000000000000000000000000000000..da8614e5d3b9e03811c3ad84e9ca4f0f778e89ee Binary files /dev/null and b/tzdriver/figures/tzdriver.drawio.png differ diff --git a/tzdriver/figures/tzdriver.drawio_en.png b/tzdriver/figures/tzdriver.drawio_en.png new file mode 100644 index 0000000000000000000000000000000000000000..8f4329fd4b47b81e4141eb0732a23d34dc7a4373 Binary files /dev/null and b/tzdriver/figures/tzdriver.drawio_en.png differ diff --git a/tzdriver/include/internal_functions.h b/tzdriver/include/internal_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..336da09e6ef5b62028ab0ced8bf939138584dccd --- /dev/null +++ b/tzdriver/include/internal_functions.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2012-2022 Huawei Technologies Co., Ltd. + * Description: tzdriver internal functions. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef INTERNAL_FUNCTIONS_H +#define INTERNAL_FUNCTIONS_H + +#include +#include +#include "teek_ns_client.h" +#include "teek_client_constants.h" + +#ifndef CONFIG_TEE_FAULT_MANAGER +static inline void fault_monitor_start(int32_t type) +{ + (void)type; + return; +} + +static inline void fault_monitor_end(void) +{ + return; +} +#endif + +#ifdef CONFIG_KTHREAD_AFFINITY +#include "tz_kthread_affinity.h" +#else +static inline void init_kthread_cpumask(void) +{ +} + +static inline void tz_kthread_bind_mask(struct task_struct *kthread) +{ + (void)kthread; +} + +static inline void tz_workqueue_bind_mask(struct workqueue_struct *wq, + uint32_t flag) +{ + (void)wq; + (void)flag; +} +#endif + +#ifdef CONFIG_LIVEPATCH_ENABLE +#include "livepatch_cmd.h" +#else +static inline int livepatch_init(const struct device *dev) +{ + (void)dev; + return 0; +} +static inline void livepatch_down_read_sem(void) +{ +} +static inline void livepatch_up_read_sem(void) +{ +} +static inline void free_livepatch(void) +{ +} +#endif + +#ifdef CONFIG_TEE_TRACE +#include "tee_trace_event.h" +#include "tee_trace_interrupt.h" +#else +static inline void tee_trace_add_event(enum tee_event_id id, uint64_t add_info) +{ + (void)id; + (void)add_info; +} +static inline void free_event_mem(void) +{ +} +static inline void free_interrupt_trace(void) +{ +} +#endif + +#ifdef CONFIG_TEE_REBOOT +#include "reboot.h" +#else +static inline bool is_tee_rebooting(void) +{ + return false; +} +static inline int tee_init_reboot_thread(void) +{ + return 0; +} +static inline int tee_wake_up_reboot(void) +{ + return 0; +} +static inline void free_reboot_thread(void) +{ + return; +} +#endif +#endif \ No newline at end of file diff --git a/tzdriver/ion/Kconfig 
b/tzdriver/ion/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..04838bac5f87a779c5d3f60cbb988fe922974b67 --- /dev/null +++ b/tzdriver/ion/Kconfig @@ -0,0 +1,13 @@ +config DYNAMIC_ION + bool "Dynamic Ion Feature" + default n + depends on TZDRIVER + help + TEEOS dynamic ion + +config STATIC_ION + bool "Static Ion Feature" + default n + depends on TZDRIVER + help + TEEOS static ion diff --git a/tzdriver/ion/Makefile b/tzdriver/ion/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..f851cc267ddebb3d9cc5c4623be8d5fe1b312f30 --- /dev/null +++ b/tzdriver/ion/Makefile @@ -0,0 +1,20 @@ +KERNEL_DIR :=$(srctree) + +ifneq ($(TARGET_BUILD_VARIANT),user) + ccflags-y += -DDEF_ENG +endif + +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/core +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/staging/android + +obj-$(CONFIG_STATIC_ION) += static_ion_mem.o +obj-$(CONFIG_DYNAMIC_ION) += dynamic_ion_mem.o +ifneq ($(CONFIG_MTK_PLATFORM), ) +obj-$(CONFIG_STATIC_ION) += mplat/declare_static_ion.o +else +obj-$(CONFIG_STATIC_ION) += generic/declare_static_ion.o +endif + +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/include +EXTRA_CFLAGS += -include internal_functions.h \ No newline at end of file diff --git a/tzdriver/ion/declare_static_ion.h b/tzdriver/ion/declare_static_ion.h new file mode 100644 index 0000000000000000000000000000000000000000..0e79e3b84748bc7cf9f23ba4c7b7f42c63658592 --- /dev/null +++ b/tzdriver/ion/declare_static_ion.h @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2012-2022 Huawei Technologies Co., Ltd. + * Description: set static mem info. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef DECLARE_STATIC_ION_H +#define DECLARE_STATIC_ION_H +#include "static_ion_mem.h" + +void set_ion_mem_info(struct register_ion_mem_tag *memtag); + +#endif \ No newline at end of file diff --git a/tzdriver/ion/dynamic_ion_mem.c b/tzdriver/ion/dynamic_ion_mem.c new file mode 100644 index 0000000000000000000000000000000000000000..dce1516c693874b044a96a1b635c5c263a9397c9 --- /dev/null +++ b/tzdriver/ion/dynamic_ion_mem.c @@ -0,0 +1,653 @@ +/* + * Copyright (c) 2012-2022 Huawei Technologies Co., Ltd. + * Description: dynamic Ion memory allocation and free. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "dynamic_ion_mem.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef CONFIG_DMABUF_MM +#include +#endif +#include +#include +#include +#include +#if ((defined CONFIG_ION_MM) || (defined CONFIG_ION_MM_SECSG)) +#include +#endif +#ifdef CONFIG_DMABUF_MM +#include +#endif +#include "tc_ns_log.h" +#include "tc_ns_client.h" +#include "smc_smp.h" +#include "gp_ops.h" +#include "teek_client_constants.h" +#include "mailbox_mempool.h" +#include "dynamic_ion_uuid.h" + +static DEFINE_MUTEX(dynamic_mem_lock); +struct dynamic_mem_list { + struct list_head list; +}; + +static const struct dynamic_mem_config g_dyn_mem_config[] = { + #ifdef DEF_ENG + {TEE_SERVICE_UT, SEC_EID}, + {TEE_SERVICE_TEST_DYNION, SEC_AI_ION}, + #endif + {TEE_SECIDENTIFICATION1, SEC_EID}, + {TEE_SECIDENTIFICATION3, SEC_EID}, + {TEE_SERVICE_AI, SEC_AI_ION}, + {TEE_SERVICE_AI_TINY, SEC_AI_ION}, + {TEE_SERVICE_VCODEC, SEC_DRM_TEE}, +}; + +static struct dynamic_mem_list g_dynamic_mem_list; +static const uint32_t g_dyn_mem_config_num = ARRAY_SIZE(g_dyn_mem_config); + +static int release_ion_srv(const struct tc_uuid *uuid) +{ + struct tc_ns_smc_cmd smc_cmd = {{0}, 0}; + + smc_cmd.err_origin = TEEC_ORIGIN_COMMS; + smc_cmd.cmd_type = CMD_TYPE_GLOBAL; + smc_cmd.cmd_id = GLOBAL_CMD_ID_RELEASE_ION_SRV; + if (memcpy_s(&smc_cmd.uuid, sizeof(smc_cmd.uuid), uuid, sizeof(*uuid))) { + tloge("copy uuid failed\n"); + return -ENOMEM; + } + + if (tc_ns_smc(&smc_cmd)) { + tloge("send release ion srv cmd failed\n"); + return -EPERM; + } + return 0; +} + + +static int get_ion_sglist(struct dynamic_mem_item *mem_item) +{ + struct sglist *tmp_sglist = NULL; + struct scatterlist *sg = NULL; + struct page *page = NULL; + uint32_t sglist_size; + uint32_t i = 0; + struct sg_table *ion_sg_table = mem_item->memory.dyn_sg_table; + + if (!ion_sg_table) + return -EINVAL; + + if (ion_sg_table->nents <= 0 || ion_sg_table->nents > 
MAX_ION_NENTS) + return -EINVAL; + + for_each_sg(ion_sg_table->sgl, sg, ion_sg_table->nents, i) { + if (!sg) { + tloge("an error sg when get ion sglist\n"); + return -EINVAL; + } + } + + sglist_size = sizeof(struct ion_page_info) * ion_sg_table->nents + sizeof(*tmp_sglist); + tmp_sglist = (struct sglist *)mailbox_alloc(sglist_size, MB_FLAG_ZERO); + if (!tmp_sglist) { + tloge("mailbox alloc failed\n"); + return -ENOMEM; + } + + tmp_sglist->sglist_size = (uint64_t)sglist_size; + tmp_sglist->ion_size = (uint64_t)mem_item->size; + tmp_sglist->info_length = (uint64_t)ion_sg_table->nents; + for_each_sg(ion_sg_table->sgl, sg, ion_sg_table->nents, i) { + page = sg_page(sg); + tmp_sglist->page_info[i].phys_addr = page_to_phys(page); + tmp_sglist->page_info[i].npages = sg->length / PAGE_SIZE; + } + mem_item->memory.ion_phys_addr = mailbox_virt_to_phys((uintptr_t)(void *)tmp_sglist); + mem_item->memory.len = sglist_size; + return 0; +} + +static int send_dyn_ion_cmd(struct dynamic_mem_item *mem_item, unsigned int cmd_id, int32_t *ret_origin) +{ + struct tc_ns_smc_cmd smc_cmd = {{0}, 0}; + int ret; + struct mb_cmd_pack *mb_pack = NULL; + + if (!mem_item) { + tloge("mem_item is null\n"); + return -EINVAL; + } + + ret = get_ion_sglist(mem_item); + if (ret != 0) + return ret; + + mb_pack = mailbox_alloc_cmd_pack(); + if (!mb_pack) { + mailbox_free(phys_to_virt(mem_item->memory.ion_phys_addr)); + tloge("alloc cmd pack failed\n"); + return -ENOMEM; + } + smc_cmd.cmd_type = CMD_TYPE_GLOBAL; + smc_cmd.cmd_id = cmd_id; + smc_cmd.err_origin = TEEC_ORIGIN_COMMS; + mb_pack->operation.paramtypes = teec_param_types( + TEE_PARAM_TYPE_ION_SGLIST_INPUT, + TEE_PARAM_TYPE_VALUE_INPUT, + TEE_PARAM_TYPE_VALUE_INPUT, + TEE_PARAM_TYPE_NONE); + + mb_pack->operation.params[0].memref.size = (uint32_t)mem_item->memory.len; + mb_pack->operation.params[0].memref.buffer = + (uint32_t)(mem_item->memory.ion_phys_addr & 0xFFFFFFFF); + mb_pack->operation.buffer_h_addr[0] = + 
(uint64_t)(mem_item->memory.ion_phys_addr) >> ADDR_TRANS_NUM; + mb_pack->operation.params[1].value.a = (uint32_t)mem_item->size; + mb_pack->operation.params[2].value.a = mem_item->configid; + smc_cmd.operation_phys = (unsigned int)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation); + smc_cmd.operation_h_phys = (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM; + + if (tc_ns_smc(&smc_cmd)) { + if (ret_origin) + *ret_origin = smc_cmd.err_origin; + ret = -EPERM; + tlogd("send loadapp ion failed\n"); + } + mailbox_free(phys_to_virt(mem_item->memory.ion_phys_addr)); + mailbox_free(mb_pack); + return ret; +} + +static struct dynamic_mem_item *find_memitem_by_configid_locked(uint32_t configid) +{ + struct dynamic_mem_item *item = NULL; + list_for_each_entry(item, &g_dynamic_mem_list.list, head) { + if (item->configid == configid) + return item; + } + return NULL; +} + +static struct dynamic_mem_item *find_memitem_by_uuid_locked(const struct tc_uuid *uuid) +{ + struct dynamic_mem_item *item = NULL; + list_for_each_entry(item, &g_dynamic_mem_list.list, head) { + if (!memcmp(&item->uuid, uuid, sizeof(*uuid))) + return item; + } + return NULL; +} + +#define BLOCK_64KB_SIZE (64 * 1024) /* 64 */ +#define BLOCK_64KB_MASK 0xFFFFFFFFFFFF0000 +/* size should be aligned with 64KB */ +#define BLOCK_64KB_SIZE_MASK (BLOCK_64KB_SIZE -1) +static int proc_alloc_dyn_mem(struct dynamic_mem_item *mem_item) +{ + struct sg_table *ion_sg_table = NULL; + + if (mem_item->size + BLOCK_64KB_SIZE_MASK < mem_item->size) { + tloge("ion size is error, size = %x\n", mem_item->size); + return -EINVAL; + } + mem_item->memory.len = (mem_item ->size + BLOCK_64KB_SIZE_MASK) & BLOCK_64KB_MASK; + + ion_sg_table = mm_secmem_alloc(mem_item->addr_sec_region, + mem_item->memory.len); + if (!ion_sg_table) { + tloge("failed to get ion page, configid = %d\n", + mem_item->configid); + return -ENOMEM; + } + mem_item->memory.dyn_sg_table = ion_sg_table; + return 0; +} + +static void 
proc_free_dyn_mem(struct dynamic_mem_item *mem_item) +{ + if (!mem_item->memory.dyn_sg_table) { + tloge("ion_phys_addr is NULL\n"); + return; + } + mm_secmem_free(mem_item->ddr_sec_region, + mem_item->memory.dyn_sg_table); + mem_item->memory.dyn_sg_table = NULL; + return; +} + +int init_dynamic_mem(void) +{ + INIT_LIST_HEAD(&(g_dynamic_mem_list.list)); + return 0; +} + +static int32_t find_ddr_sec_region_by_uuid(const struct tc_uuid *uuid, + uint32_t *ddr_sec_region) +{ + uint32_t i; + for (i = 0; i < g_dyn_mem_config_num; i++) { + if (!memcmp(&(g_dyn_mem_config[i].uuid), uuid, + sizeof(*uuid))) { + *ddr_sec_region = g_dyn_mem_config[i].ddr_sec_region; + return 0; + } + } + return -EINVAL; +} + +static struct dynamic_mem_item *alloc_dyn_mem_item(uint32_t configid, + uint32_t cafd, const struct tc_uuid *uuid, uint32_t size) +{ + uint32_t ddr_sec_region; + struct dynamic_mem_item *mem_item = NULL; + int32_t result; + + result = find_ddr_sec_region_by_uuid(uuid, &ddr_sec_region); + if (result != 0) { + tloge("find ddr sec region failed\n"); + return NULL; + } + + mem_item = kzalloc(sizeof(*mem_item), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)mem_item)) { + tloge("alloc mem item failed\n"); + return NULL; + } + + mem_item->ddr_sec_region = ddr_sec_region; + mem_item->configid = configid; + mem_item->size = size; + mem_item->cafd = cafd; + result = memcpy_s(&mem_item->uuid, sizeof(mem_item->uuid), uuid, + sizeof(*uuid)); + if(result != EOK) { + tloge("memcpy uuid failed\n"); + kfree(mem_item); + return NULL; + } + return mem_item; +} + + +static int trans_configid2memid(uint32_t configid, uint32_t cafd, + const struct tc_uuid *uuid, uint32_t size, int32_t *ret_origin) +{ + int result; + + if (!uuid) + return -EINVAL; + mutex_lock(&dynamic_mem_lock); + do { + struct dynamic_mem_item *mem_item = + find_memitem_by_configid_locked(configid); + if (mem_item) { + result = -EINVAL; + break; + } + + mem_item = alloc_dyn_mem_item(configid, cafd, uuid, size); 
+ if (!mem_item) { + tloge("alloc dyn mem item failed\n"); + result = -ENOMEM; + break; + } + + result = proc_alloc_dyn_mem(mem_item); + if (result != 0) { + tloge("alloc dyn mem failed , ret = %d\n", result); + kfree(mem_item); + break; + } + /* register to tee */ + result = send_dyn_ion_cmd(mem_item, GLOBAL_CMD_ID_ADD_DYNAMIC_ION, ret_origin); + if (result != 0) { + tloge("register to tee failed, result = %d\n", result); + proc_free_dyn_mem(mem_item); + kfree(mem_item); + break; + } + list_add_tail(&mem_item->head, &g_dynamic_mem_list.list); + tloge("log import:alloc ion configid=%d\n", + mem_item->configid); + } while (0); + + mutex_unlock(&dynamic_mem_lock); + return result; +} + +static void release_configid_mem_locked(uint32_t configid) +{ + int result; + /* if config id is memid map, and can reuse */ + do { + struct dynamic_mem_item *mem_item = + find_memitem_by_configid_locked(configid); + if (!mem_item) { + tloge("fail to find memitem by configid\n"); + break; + } + + result = send_dyn_ion_cmd(mem_item, GLOBAL_CMD_ID_DEL_DYNAMIC_ION, NULL); + if (result != 0) { + tloge("unregister_from_tee configid=%d, result =%d\n", + mem_item->configid, result); + break; + } + proc_free_dyn_mem(mem_item); + list_del(&mem_item->head); + kfree(mem_item); + tloge("log import: free ion\n"); + } while (0); + + return; +} + + +int load_app_use_configid(uint32_t configid, uint32_t cafd, + const struct tc_uuid *uuid, uint32_t size, int32_t *ret_origin) +{ + int result; + + if (!uuid) + return -EINVAL; + + result = trans_configid2memid(configid, cafd, uuid, size, ret_origin); + if (result != 0) { + tloge("trans_configid2memid failed ret = %d\n", result); + if (release_ion_srv(uuid) != 0) + tloge("release ion srv failed\n"); + } + return result; +} + + +void kill_ion_by_uuid(const struct tc_uuid *uuid) +{ + if (!uuid) { + tloge("uuid is null\n"); + return; + } + mutex_lock(&dynamic_mem_lock); + do { + struct dynamic_mem_item *mem_item = + find_memitem_by_uuid_locked(uuid); + if 
(!mem_item) + break; + tlogd("kill ION by UUID\n"); + release_configid_mem_locked(mem_item->configid); + } while (0); + mutex_unlock(&dynamic_mem_lock); +} + +void kill_ion_by_cafd(unsigned int cafd) +{ + struct dynamic_mem_item *item = NULL; + struct dynamic_mem_item *temp = NULL; + tlogd("kill_ion_by_cafd:\n"); + mutex_lock(&dynamic_mem_lock); + list_for_each_entry_safe(item, temp, &g_dynamic_mem_list.list, head) { + if (item->cafd == cafd) + release_configid_mem_locked(item->configid); + } + mutex_unlock(&dynamic_mem_lock); +} + +int load_image_for_ion(const struct load_img_params *params, int32_t *ret_origin) +{ + int ret = 0; + + if (!params) + return -EFAULT; + /* check need to add ionmem */ + uint32_t configid = params->mb_pack->operation.params[1].value.a; + uint32_t ion_size = params->mb_pack->operation.params[1].value.b; + int32_t check_result = (configid != 0 && ion_size != 0); + + tloge("check load result=%d, cfgid=%d, ion_size=%d, uuid=%x\n", + check_result, configid, ion_size, params->uuid_return->time_low); + if (check_result) { + ret = load_app_use_configid(configid, params->dev_file->dev_file_id, + params->uuid_return, ion_size, ret_origin); + if (ret != 0) { + tloge("load app use configid failed ret=%d\n", ret); + return -EFAULT; + } + } + return ret; +} + +bool is_ion_param(uint32_t param_type) +{ + if (param_type == TEEC_ION_INPUT || + param_type == TEEC_ION_SGLIST_INPUT) + return true; + return false; +} + +static void fill_sg_list(struct sg_table *ion_table, + uint32_t ion_list_num, struct sglist *tmp_sglist) +{ + uint32_t i; + struct page *page = NULL; + struct scatterlist *sg = NULL; + + for_each_sg(ion_table->sgl, sg, ion_list_num, i) { + page = sg_page(sg); + tmp_sglist->page_info[i].phys_addr = page_to_phys(page); + tmp_sglist->page_info[i].npages = sg->length / PAGE_SIZE; + } +} + +static int check_sg_list(const struct sg_table *ion_table, uint32_t ion_list_num) +{ + struct scatterlist *sg = NULL; + uint32_t i; + 
for_each_sg(ion_table->sgl, sg, ion_list_num, i) { + if (!sg) { + tloge("an error sg when get ion sglist \n"); + return -EFAULT; + } + } + return 0; +} + +static int get_ion_sg_list_from_fd(uint32_t ion_shared_fd, + uint32_t ion_alloc_size, phys_addr_t *sglist_table, + size_t *ion_sglist_size) +{ + struct sg_table *ion_table = NULL; + struct sglist *tmp_sglist = NULL; + uint64_t ion_id = 0; + enum SEC_SVC ion_type = 0; + uint32_t ion_list_num = 0; + uint32_t sglist_size; +#ifdef CONFIG_DMABUF_MM + if (mm_dma_heap_secmem_get_buffer(ion_shared_fd, &ion_table, &ion_id, &ion_type)) { +#else + if (secmem_get_buffer(ion_shared_fd, &ion_table, &ion_id, &ion_type)) { +#endif + tloge("get ion table failed. \n"); + return -EFAULT; + } + + if (ion_type != SEC_DRM_TEE) { + if (ion_table->nents <= 0 || ion_table->nents > MAX_ION_NENTS) + return -EFAULT; + ion_list_num = (uint32_t)(ion_table->nents & INT_MAX); + if (check_sg_list(ion_table, ion_list_num) != 0) + return -EFAULT; + } + /* ion_list_num is less than 1024, so sglist_size won't flow */ + sglist_size = sizeof(struct ion_page_info) * ion_list_num + sizeof(*tmp_sglist); + tmp_sglist = (struct sglist *)mailbox_alloc(sglist_size, MB_FLAG_ZERO); + if (!tmp_sglist) { + tloge("sglist mem alloc failed\n"); + return -ENOMEM; + } + tmp_sglist->sglist_size = (uint64_t)sglist_size; + tmp_sglist->ion_size = (uint64_t)ion_alloc_size; + tmp_sglist->info_length = (uint64_t)ion_list_num; + if (ion_type != SEC_DRM_TEE) + fill_sg_list(ion_table, ion_list_num, tmp_sglist); + else + tmp_sglist->ion_id = ion_id; + + *sglist_table = mailbox_virt_to_phys((uintptr_t)tmp_sglist); + *ion_sglist_size = sglist_size; + return 0; +} + +int alloc_for_ion_sglist(const struct tc_call_params *call_params, + struct tc_op_params *op_params, uint8_t kernel_params, + uint32_t param_type, unsigned int index) +{ + struct tc_ns_operation *operation = NULL; + size_t ion_sglist_size = 0; + phys_addr_t ion_sglist_addr = 0x0; + union tc_ns_client_param 
*client_param = NULL; + unsigned int ion_shared_fd = 0; + unsigned int ion_alloc_size; + uint64_t a_addr, b_addr; + + /* this never happens */ + if (index >= TEE_PARAM_NUM || !call_params || !op_params) + return -EINVAL; + + operation = &op_params->mb_pack->operation; + client_param = &(call_params->context->params[index]); + a_addr = client_param->value.a_addr | + ((uint64_t)client_param->value.a_h_addr << ADDR_TRANS_NUM); + b_addr = client_param->value.b_addr | + ((uint64_t)client_param->value.b_h_addr << ADDR_TRANS_NUM); + + if (read_from_client(&operation->params[index].value.a, + sizeof(operation->params[index].value.a), + (void *)(uintptr_t)a_addr, + sizeof(operation->params[index].value.a), kernel_params)) { + tloge("valuea copy failed\n"); + return -EFAULT; + } + if (read_from_client(&operation->params[index].value.b, + sizeof(operation->params[index].value.b), + (void *)(uintptr_t)b_addr, + sizeof(operation->params[index].value.b), kernel_params)) { + tloge("valueb copy failed\n"); + return -EFAULT; + } + ion_shared_fd = operation->params[index].value.a; + ion_alloc_size = operation->params[index].value.b; + + if(get_ion_sg_list_from_fd(ion_shared_fd, ion_alloc_size, + &ion_sglist_addr, &ion_sglist_size)) { + tloge("get ion sglist failed, fd=%u\n", ion_shared_fd); + return -EFAULT; + } + op_params->local_tmpbuf[index].temp_buffer = phys_to_virt(ion_sglist_addr); + op_params->local_tmpbuf[index].size = ion_sglist_size; + + operation->params[index].memref.buffer = (unsigned int)ion_sglist_addr; + operation->buffer_h_addr[index] = + (uint64_t)ion_sglist_addr >> ADDR_TRANS_NUM; + operation->params[index].memref.size = (unsigned int)ion_sglist_size; + op_params->trans_paramtype[index] = param_type; + + return 0; +} + +static int transfer_ion_params(struct tc_ns_operation *operation, + union tc_ns_client_param *client_param, uint8_t kernel_params, + unsigned int index) +{ + uint64_t a_addr = client_param->value.a_addr | + ((uint64_t)client_param->value.a_h_addr 
<< ADDR_TRANS_NUM); + uint64_t b_addr = client_param->value.b_addr | + ((uint64_t)client_param->value.b_h_addr << ADDR_TRANS_NUM); + + if (read_from_client(&operation->params[index].value.a, + sizeof(operation->params[index].value.a), + (void *)(uintptr_t)a_addr, + sizeof(operation->params[index].value.a), kernel_params)) { + tloge("value.a_addr copy failed\n"); + return -EFAULT; + } + + if (read_from_client(&operation->params[index].value.b, + sizeof(operation->params[index].value.b), + (void *)(uintptr_t)b_addr, + sizeof(operation->params[index].value.b), kernel_params)) { + tloge("value.b_addr copy failed\n"); + return -EFAULT; + } + + return 0; +} + +int alloc_for_ion(const struct tc_call_params *call_params, + struct tc_op_params *op_params, uint8_t kernel_params, + uint32_t param_type, unsigned int index) +{ + struct tc_ns_operation *operation = NULL; + size_t drm_ion_size = 0; + phys_addr_t drm_ion_phys = 0x0; + struct dma_buf *drm_dma_buf = NULL; + union tc_ns_client_param *client_param = NULL; + unsigned int ion_shared_fd = 0; + int ret = 0; + + /* this never happens */ + if (index >= TEE_PARAM_NUM || !call_params || !op_params) + return -EINVAL; + + operation = &op_params->mb_pack->operation; + client_param = &(call_params->context->params[index]); + if (transfer_ion_params(operation, client_param, kernel_params, index)) + return -EFAULT; + + ion_shared_fd = operation->params[index].value.a; + drm_dma_buf = dma_buf_get(ion_shared_fd); + if (IS_ERR_OR_NULL(drm_dma_buf)) { + tloge("drm dma buf is err, ret = %d fd = %u\n", ret, ion_shared_fd); + return -EFAULT; + } +#ifdef CONFIG_DMABUF_MM + ret = mm_dma_heap_secmem_get_phys(drm_dma_buf, &drm_ion_phys, &drm_ion_size); +#else + ret = ion_secmem_get_phys(drm_dma_buf, &drm_ion_phys, &drm_ion_size); +#endif + if (ret != 0) { + tloge("in %s err:ret=%d fd=%u\n", __func__, ret, ion_shared_fd); + dma_buf_put(drm_dma_buf); + return -EFAULT; + } + + if (drm_ion_size > operation->params[index].value.b) + drm_ion_size = 
operation->params[index].value.b; + operation->params[index].value.a = (unsigned int)drm_ion_phys; + operation->params[index].value.b = (unsigned int)drm_ion_size; + op_params->trans_paramtype[index] = param_type; + dma_buf_put(drm_dma_buf); + + return ret; +} \ No newline at end of file diff --git a/tzdriver/ion/dynamic_ion_mem.h b/tzdriver/ion/dynamic_ion_mem.h new file mode 100644 index 0000000000000000000000000000000000000000..8ae69300a552172fbe0c8a310810a5e1d5556751 --- /dev/null +++ b/tzdriver/ion/dynamic_ion_mem.h @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2012-2022 Huawei Technologies Co., Ltd. + * Description: dynamic ion memory function declaration. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef DYNAMIC_MMEM_H +#define DYNAMIC_MMEM_H + +#include +#include +#include "teek_ns_client.h" + +#ifdef CONFIG_DYNAMIC_ION +#ifdef CONFIG_DMABUF_MM +#include +#else +#include +#endif +#endif + +struct sg_memory { + int dyn_shared_fd; + struct sg_table *dyn_sg_table; + struct dma_buf *dyn_dma_buf; + phys_addr_t ion_phys_addr; + size_t len; + void *ion_virt_addr; +}; + +struct dynamic_mem_item { + struct list_head head; + uint32_t configid; + uint32_t size; + struct sg_memory memory; + uint32_t cafd; + struct tc_uuid uuid; + uint32_t ddr_sec_region; +}; + +struct dynamic_mem_config { + struct tc_uuid uuid; + uint32_t ddr_sec_region; +}; + +#define MAX_ION_NENTS 1024 +typedef struct ion_page_info { + phys_addr_t phys_addr; + uint32_t npages; +}tz_page_info; + +typedef struct sglist { + uint64_t sglist_size; + uint64_t ion_size; + uint64_t ion_id; + uint64_t info_length; + struct ion_page_info page_info[0]; +}tz_sg_list; + +#ifdef CONFIG_DYNAMIC_ION + +bool is_ion_param(uint32_t param_type); +int init_dynamic_mem(void); +int load_app_use_configid(uint32_t configid, uint32_t cafd, + const struct tc_uuid *uuid, uint32_t size, int32_t *ret_origin); +void kill_ion_by_cafd(unsigned int cafd); +void kill_ion_by_uuid(const struct tc_uuid *uuid); +int load_image_for_ion(const struct load_img_params *params, int32_t *ret_origin); +int alloc_for_ion_sglist(const struct tc_call_params *call_params, + struct tc_op_params *op_params, uint8_t kernel_params, + uint32_t param_type, unsigned int index); +int alloc_for_ion(const struct tc_call_params *call_params, + struct tc_op_params *op_params, uint8_t kernel_params, + uint32_t param_type, unsigned int index); +#else +static inline bool is_ion_param(uint32_t param_type) +{ + (void)param_type; + return false; +} + +static inline int load_image_for_ion(const struct load_img_params *params, int32_t *ret_origin) +{ + (void)params; + (void)ret_origin; + return 0; +} + +static inline int init_dynamic_mem(void) +{ + return 0; +} 
/*
 * No-op stub for kernels built without CONFIG_DYNAMIC_ION.
 *
 * fix: the stub previously took only four parameters, but the real
 * interface declared under CONFIG_DYNAMIC_ION takes a fifth argument
 * (int32_t *ret_origin). Callers written against the prototype would
 * fail to compile (or silently mismatch) when the feature is disabled;
 * the signatures must agree. Always succeeds and touches nothing.
 */
static inline int load_app_use_configid(uint32_t configid, uint32_t cafd,
	const struct tc_uuid *uuid, uint32_t size, int32_t *ret_origin)
{
	(void)configid;
	(void)cafd;
	(void)uuid;
	(void)size;
	(void)ret_origin;
	return 0;
}
+ */ + +#ifndef DYNAMIC_ION_UUID_H +#define DYNAMIC_ION_UUID_H + +#ifdef DEF_ENG +#define TEE_SERVICE_UT \ +{ \ + 0x03030303, \ + 0x0303, \ + 0x0303, \ + { \ + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 \ + }\ +} + +#define TEE_SERVICE_TEST_DYNION \ +{ \ + 0x7f313b2a, \ + 0x68b9, \ + 0x4e92, \ + { \ + 0xac, 0xf9, 0x13, 0x3e, 0xbb, 0x54, 0xeb, 0x56 \ + } \ +} +#endif + +#define TEE_SECIDENTIFICATION1 \ +{ \ + 0x8780dda1, \ + 0xa49e, \ + 0x45f4, \ + { \ + 0x96, 0x97, 0xc7, 0xed, 0x9e, 0x38, 0x5e, 0x83 \ + } \ +} + +#define TEE_SECIDENTIFICATION3 \ +{ \ + 0x335129cd, \ + 0x41fa, \ + 0x4b53, \ + { \ + 0x97, 0x97, 0x5c, 0xcb, 0x20, 0x2a, 0x52, 0xd4 \ + } \ +} + +#define TEE_SERVICE_AI \ +{ \ + 0xf4a8816d, \ + 0xb6fb, \ + 0x4d4f, \ + { \ + 0xa2, 0xb9, 0x7d, 0xae, 0x57, 0x33, 0x13, 0xc0 \ + } \ +} + +#define TEE_SERVICE_AI_TINY \ +{ \ + 0xc123c643, \ + 0x5b5b, \ + 0x4c9f, \ + { \ + 0x90, 0x98, 0xbb, 0x09, 0x56, 0x4d, 0x6e, 0xda \ + } \ +} + +#define TEE_SERVICE_VCODEC \ +{ \ + 0x528822b7, \ + 0xfc78, \ + 0x466b, \ + { \ + 0xb5, 0x7e, 0x62, 0x09, 0x3d, 0x60, 0x34, 0xa7 \ + } \ +} +#endif \ No newline at end of file diff --git a/tzdriver/ion/generic/declare_static_ion.c b/tzdriver/ion/generic/declare_static_ion.c new file mode 100644 index 0000000000000000000000000000000000000000..3a9f4cdfbc6cf672dc98b808d6f6777561cb1d9a --- /dev/null +++ b/tzdriver/ion/generic/declare_static_ion.c @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2012-2022 Huawei Technologies Co., Ltd. + * Description: get and set static mem info. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "declare_static_ion.h" +#include +#include +#include "tc_ns_log.h" + +static u64 g_ion_mem_addr; +static u64 g_ion_mem_size; + +static int supersonic_reserve_tee_mem(const struct reserved_mem *rmem) +{ + if (rmem) { + g_ion_mem_addr = rmem->base; + g_ion_mem_size = rmem->size; + } else { + tloge("rmem is NULL\n"); + } + + return 0; +} + +RESERVEDMEM_OF_DECLARE(supersonic, "platform-supersonic", + supersonic_reserve_tee_mem); + +static u64 g_secfacedetect_mem_addr; +static u64 g_secfacedetect_mem_size; + +static int secfacedetect_reserve_tee_mem(const struct reserved_mem *rmem) +{ + if (rmem) { + g_secfacedetect_mem_addr = rmem->base; + g_secfacedetect_mem_size = rmem->size; + } else { + tloge("secfacedetect_reserve_tee_mem mem is NULL\n"); + } + return 0; +} +RESERVEDMEM_OF_DECLARE(secfacedetect, "platform-secfacedetect", + secfacedetect_reserve_tee_mem); + +static u64 g_pt_addr = 0; +static u64 g_pt_size = 0; + +static int reserve_pt_mem(const struct reserved_mem *rmem) +{ + if (rmem) { + g_pt_size = rmem->size; + g_pt_addr = rmem->base; + tloge("reserve pt mem is not NULL\n"); + } else { + tloge("reserve pt mem is NULL\n"); + } + return 0; +} + +RESERVEDMEM_OF_DECLARE(pagetable, "platform-ai-pagetable", + reserve_pt_mem); + +static u64 g_pp_addr = 0; +static u64 g_pp_size = 0; + +static int reserve_pp_mem(const struct reserved_mem *rmem) +{ + if (rmem) { + g_pp_addr = rmem->base; + g_pp_size = rmem->size; + } else { + tloge("reserve pp mem is NULL\n"); + } + return 0; +} + +RESERVEDMEM_OF_DECLARE(ai_running, "platform-ai-running", + reserve_pp_mem); + +static u64 g_voiceid_addr = 0; +static u64 g_voiceid_size = 0; +static int voiceid_reserve_tee_mem(const struct reserved_mem *rmem) +{ + if (rmem) { + g_voiceid_addr = rmem->base; + g_voiceid_size = rmem->size; + } else { + tloge("voiceid reserve tee mem is NULL\n"); + } + return 0; +} +RESERVEDMEM_OF_DECLARE(voiceid, "platform-voiceid", + voiceid_reserve_tee_mem); + +static u64 g_secos_ex_addr; 
+static u64 g_secos_ex_size; +static int secos_reserve_tee_mem(const struct reserved_mem *rmem) +{ + if (rmem) { + g_secos_ex_addr = rmem->base; + g_secos_ex_size = rmem->size; + } else { + tloge("secos reserve tee mem is NULL\n"); + } + return 0; +} +RESERVEDMEM_OF_DECLARE(secos_ex, "platform-secos-ex", + secos_reserve_tee_mem); + +static u64 g_ion_ex_mem_addr; +static u64 g_ion_ex_mem_size; +static int supersonic_ex_reserve_tee_mem(const struct reserved_mem *rmem) +{ + if (rmem) { + g_ion_ex_mem_addr = rmem->base; + g_ion_ex_mem_size = rmem->size; + } else { + tloge("rmem is NULL\n"); + } + return 0; +} +RESERVEDMEM_OF_DECLARE(supersonic_ex, "platform-supersonic-ex", + supersonic_ex_reserve_tee_mem); + +static void set_mem_tag(struct register_ion_mem_tag *memtag, + u64 addr, u64 size, uint32_t tag, uint32_t *pos) +{ + memtag->memaddr[*pos] = addr; + memtag->memsize[*pos] = size; + memtag->memtag[*pos] = tag; + (*pos)++; +} + +void set_ion_mem_info(struct register_ion_mem_tag *memtag) +{ + uint32_t pos = 0; + if(!memtag) { + tloge("invalid memtag\n"); + return; + } + + tlogi("ion mem static reserved for tee face=%d, finger=%d,voiceid=%d," + "secos=%d,finger-ex=%d, pt_size= %d,pp_size=%d\n", + (uint32_t)g_secfacedetect_mem_size, (uint32_t)g_ion_mem_size, + (uint32_t)g_voiceid_size, (uint32_t)g_secos_ex_size, + (uint32_t)g_ion_ex_mem_size, (uint32_t)g_pt_size, + (uint32_t)g_pp_size); + + if (g_ion_mem_addr != (u64)0 && g_ion_mem_size != (u64)0) + set_mem_tag(memtag,g_ion_mem_addr, g_ion_mem_size, PP_MEM_TAG, &pos); + if (g_secfacedetect_mem_addr != (u64)0 && g_secfacedetect_mem_size != (u64)0) + set_mem_tag(memtag,g_secfacedetect_mem_addr, g_secfacedetect_mem_size, PP_MEM_TAG, &pos); + if (g_voiceid_addr != (u64)0 && g_voiceid_size != (u64)0) + set_mem_tag(memtag, g_voiceid_addr, g_voiceid_size, PP_MEM_TAG, &pos); + if (g_secos_ex_addr != (u64)0 && g_secos_ex_size != (u64)0) + set_mem_tag(memtag, g_secos_ex_addr, g_secos_ex_size, PP_MEM_TAG, &pos); + if (g_pt_addr 
!= (u64)0 && g_pt_size != (u64)0) + set_mem_tag(memtag, g_pt_addr, g_pt_size, PT_MEM_TAG, &pos); + if (g_pp_addr != (u64)0 && g_pp_size != (u64)0) + set_mem_tag(memtag, g_pp_addr, g_pp_size, PRI_PP_MEM_TAG, &pos); + if (g_ion_ex_mem_addr != (u64)0 && g_ion_ex_mem_size != (u64)0) + set_mem_tag(memtag, g_ion_ex_mem_addr, g_ion_ex_mem_size, PP_MEM_TAG, &pos); + /* here pos max is 7, memaddr[] has 10 positions, just 3 free */ + memtag->size = pos; + return; +} \ No newline at end of file diff --git a/tzdriver/ion/mplat/declare_static_ion.c b/tzdriver/ion/mplat/declare_static_ion.c new file mode 100644 index 0000000000000000000000000000000000000000..1239c8be9189ff6b3106d30cb98c470438849fa3 --- /dev/null +++ b/tzdriver/ion/mplat/declare_static_ion.c @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2012-2022 Huawei Technologies Co., Ltd. + * Description: get and set static mem info. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "declare_static_ion.h" +#include +#include +#include "tc_ns_log.h" + +static u64 g_secos_ex_addr; +static u64 g_secos_ex_size; +static int secos_reserve_tee_mem(const struct reserved_mem *rmem) +{ + if (rmem) { + g_secos_ex_addr = rmem->base; + g_secos_ex_size = rmem->size; + } else { + tloge("secos reserve tee mem is NULL\n"); + } + return 0; +} +RESERVEDMEM_OF_DECLARE(secos_ex, "mediatek,tee_os_reserved_memory", + secos_reserve_tee_mem); + +void set_ion_mem_info(struct register_ion_mem_tag *memtag) +{ + uint32_t pos = 0; + if(!memtag) { + tloge("invalid memtag\n"); + return; + } + + tlogi("ion mem static reserved for tee secos=0x%x\n", (uint32_t)g_secos_ex_size); + + if (g_secos_ex_addr != (u64)0 && g_secos_ex_size != (u64)0) { + memtag->memaddr[pos] = g_secos_ex_addr; + memtag->memsize[pos] = g_secos_ex_size; + memtag->memtag[pos] = PP_MEM_TAG; + pos++; + } + memtag->size = pos; + return; +} \ No newline at end of file diff --git a/tzdriver/ion/static_ion_mem.c b/tzdriver/ion/static_ion_mem.c new file mode 100644 index 0000000000000000000000000000000000000000..d9e8f141f541106b6ff5f2b604655e34e0cfe06a --- /dev/null +++ b/tzdriver/ion/static_ion_mem.c @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2012-2022 Huawei Technologies Co., Ltd. + * Description: memory init, register for mailbox pool. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "static_ion_mem.h" +#include +#include +#include +#include +#include +#ifdef DEF_ENG +#include +#include +#endif +#include "smc_smp.h" +#include "teek_ns_client.h" +#include "mailbox_mempool.h" +#include "tc_ns_log.h" +#include "declare_static_ion.h" + +/* send the ion static memory to tee */ +int tc_ns_register_ion_mem(void) +{ + struct tc_ns_smc_cmd smc_cmd = {{0}, 0}; + int ret = 0; + struct mb_cmd_pack *mb_pack = NULL; + struct register_ion_mem_tag *memtag = NULL; + + mb_pack = mailbox_alloc_cmd_pack(); + if (!mb_pack) { + tloge("mailbox alloc failed\n"); + return -ENOMEM; + } + memtag = mailbox_alloc(sizeof(*memtag), 0); + if (!memtag) { + mailbox_free(mb_pack); + return -ENOMEM; + } + set_ion_mem_info(memtag); + smc_cmd.cmd_type = CMD_TYPE_GLOBAL; + smc_cmd.cmd_id = GLOBAL_CMD_ID_REGISTER_ION_MEM; + + mb_pack->operation.paramtypes = TEE_PARAM_TYPE_MEMREF_INPUT; + mb_pack->operation.params[0].memref.buffer = + mailbox_virt_to_phys((uintptr_t)(void *)memtag); + mb_pack->operation.buffer_h_addr[0] = + (uint64_t)mailbox_virt_to_phys((uintptr_t)(void *)memtag) >> ADDR_TRANS_NUM; + mb_pack->operation.params[0].memref.size = sizeof(*memtag); + + smc_cmd.operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation); + smc_cmd.operation_h_phys = + (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM; + + if (tc_ns_smc(&smc_cmd)) { + ret = -EPERM; + tloge("send ion mem info failed\n"); + } + mailbox_free(mb_pack); + mailbox_free(memtag); + + return ret; +} \ No newline at end of file diff --git a/tzdriver/ion/static_ion_mem.h b/tzdriver/ion/static_ion_mem.h new file mode 100644 index 0000000000000000000000000000000000000000..a8d0690c339208b045d08d137d44af39567bc6e0 --- /dev/null +++ b/tzdriver/ion/static_ion_mem.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2012-2022 Huawei Technologies Co., Ltd. + * Description: memory init, register for mailbox pool. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef STATIC_ION_MEM_H +#define STATIC_ION_MEM_H +#include + +#define ION_MEM_MAX_SIZE 10 + +struct register_ion_mem_tag { + uint32_t size; + uint64_t memaddr[ION_MEM_MAX_SIZE]; + uint32_t memsize[ION_MEM_MAX_SIZE]; + uint32_t memtag[ION_MEM_MAX_SIZE]; +}; + +enum static_mem_tag { + MEM_TAG_MIN = 0, + PP_MEM_TAG = 1, + PRI_PP_MEM_TAG = 2, + PT_MEM_TAG = 3, + MEM_TAG_MAX, +}; + +#ifdef CONFIG_STATIC_ION +int tc_ns_register_ion_mem(void); +#else +static inline int tc_ns_register_ion_mem(void) +{ + return 0; +} +#endif + +#endif \ No newline at end of file diff --git a/tzdriver/ko_adapt.c b/tzdriver/ko_adapt.c new file mode 100644 index 0000000000000000000000000000000000000000..5800394f7da0ecffab07ee094b4a44c17370fb04 --- /dev/null +++ b/tzdriver/ko_adapt.c @@ -0,0 +1,165 @@ +/* + * ko_adapt.c + * + * function for find symbols not exported + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
/* Function-pointer types for kernel symbols resolved at runtime. */
typedef const struct cred *(get_task_cred_func)(struct task_struct *);
typedef void (kthread_bind_mask_func)(struct task_struct *, const struct cpumask *);
typedef struct page *(alloc_pages_func)(gfp_t gfp_mask, unsigned int order);
typedef struct workqueue_attrs *(alloc_workqueue_attrs_func)(gfp_t gfp_mask);
typedef void (free_workqueue_attrs_func)(struct workqueue_attrs *attrs);

/*
 * Wrapper around get_task_cred().
 * On >= 5.10 the symbol is called directly; on older kernels it is not
 * exported to modules, so it is looked up once via kallsyms_lookup_name()
 * and cached in a function-local static.
 * Returns NULL when task is NULL or the symbol cannot be resolved.
 * NOTE(review): kallsyms_lookup_name() is itself unexported since 5.7 —
 * presumably the #else arm only builds on kernels older than that; confirm
 * against the supported kernel range.
 */
const struct cred *koadpt_get_task_cred(struct task_struct *task)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
	return get_task_cred(task);
#else
	static get_task_cred_func *get_task_cred_pt = NULL;

	if (!task)
		return NULL;

	/* first call resolves and caches the symbol; later calls reuse it */
	if (!get_task_cred_pt) {
		get_task_cred_pt = (get_task_cred_func *)
			(uintptr_t)kallsyms_lookup_name("get_task_cred");
		if (IS_ERR_OR_NULL(get_task_cred_pt)) {
			tloge("fail to find symbol get task cred\n");
			return NULL;
		}
	}
	return get_task_cred_pt(task);
#endif
}

/*
 * Wrapper around kthread_bind_mask().
 * On >= 5.10 it falls back to set_cpus_allowed_ptr() (kthread_bind_mask is
 * unavailable to modules there); on older kernels the unexported symbol is
 * resolved through kallsyms as above. Silently returns on NULL arguments
 * or lookup failure.
 */
void koadpt_kthread_bind_mask(struct task_struct *task,
	const struct cpumask *mask)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
	(void)set_cpus_allowed_ptr(task, mask);
#else
	static kthread_bind_mask_func *kthread_bind_mask_pt = NULL;

	if (!task || !mask)
		return;

	if (!kthread_bind_mask_pt) {
		kthread_bind_mask_pt = (kthread_bind_mask_func *)
			(uintptr_t)kallsyms_lookup_name("kthread_bind_mask");
		if (IS_ERR_OR_NULL(kthread_bind_mask_pt)) {
			tloge("fail to find symbol kthread bind mask\n");
			return;
		}
	}
	kthread_bind_mask_pt(task, mask);
#endif
}

/*
 * Wrapper around alloc_pages().
 * Only the CONFIG_NUMA pre-5.10 path needs the kallsyms lookup (there
 * alloc_pages expands to the unexported alloc_pages_current()); every other
 * configuration calls alloc_pages() directly.
 * Returns NULL on lookup failure (kernel allocators report failure the
 * same way).
 */
struct page *koadpt_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
#ifdef CONFIG_NUMA
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
	return alloc_pages(gfp_mask, order);
#else
	static alloc_pages_func *alloc_pages_pt = NULL;

	if (!alloc_pages_pt) {
		alloc_pages_pt = (alloc_pages_func *)
			(uintptr_t)kallsyms_lookup_name("alloc_pages_current");
		if (IS_ERR_OR_NULL(alloc_pages_pt)) {
			tloge("fail to find symbol alloc pages current\n");
			return NULL;
		}
	}
	return alloc_pages_pt(gfp_mask, order);
#endif
#else
	return alloc_pages(gfp_mask, order);
#endif
}

/*
 * Wrapper around alloc_workqueue_attrs().
 * On >= 5.10 the attrs struct is built by hand (kzalloc + cpumask set to
 * all possible CPUs) because the real allocator is unexported; gfp_mask is
 * ignored there. On older kernels the symbol is resolved via kallsyms.
 * Returns NULL on any allocation or lookup failure.
 */
struct workqueue_attrs *koadpt_alloc_workqueue_attrs(gfp_t gfp_mask)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
	struct workqueue_attrs *attrs;
	(void)gfp_mask;

	attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
	if (!attrs) {
		tloge("alloc workqueue attr fail\n");
		return NULL;
	}

	if (alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL) == false) {
		tloge("alloc cpumask var fail\n");
		kfree(attrs);
		return NULL;
	}

	cpumask_copy(attrs->cpumask, cpu_possible_mask);

	return attrs;
#else
	static alloc_workqueue_attrs_func *alloc_workqueue_attrs_pt = NULL;

	if (!alloc_workqueue_attrs_pt) {
		alloc_workqueue_attrs_pt = (alloc_workqueue_attrs_func *)
			(uintptr_t)kallsyms_lookup_name("alloc_workqueue_attrs");
		if (IS_ERR_OR_NULL(alloc_workqueue_attrs_pt)) {
			tloge("fail to find symbol alloc workqueue attrs\n");
			return NULL;
		}
	}
	return alloc_workqueue_attrs_pt(gfp_mask);
#endif
}

/*
 * Counterpart to koadpt_alloc_workqueue_attrs(): releases the attrs struct
 * built (or resolved) by that function, mirroring the same version split.
 * NULL attrs is a no-op.
 */
void koadpt_free_workqueue_attrs(struct workqueue_attrs *attrs)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
	if (!attrs)
		return;

	free_cpumask_var(attrs->cpumask);
	kfree(attrs);
#else
	static free_workqueue_attrs_func *free_workqueue_attrs_pt = NULL;

	if (!attrs)
		return;

	if (!free_workqueue_attrs_pt) {
		free_workqueue_attrs_pt = (free_workqueue_attrs_func *)
			(uintptr_t)kallsyms_lookup_name("free_workqueue_attrs");
		if (IS_ERR_OR_NULL(free_workqueue_attrs_pt)) {
			tloge("fail to find symbol free workqueue attrs\n");
			return;
		}
	}
	free_workqueue_attrs_pt(attrs);
#endif
}
0000000000000000000000000000000000000000..ea1e77b0b753d1dd3e16075ede625e3a43400a26 --- /dev/null +++ b/tzdriver/ko_adapt.h @@ -0,0 +1,78 @@ +/* + * ko_adapt.h + * + * function for find symbols not exported + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef KO_ADAPT_H +#define KO_ADAPT_H + +#include +#include +#include +#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE) +#include +#endif +#include +#include +#include +#include +#include + +#ifdef CONFIG_TZDRIVER_MODULE + +const struct cred *koadpt_get_task_cred(struct task_struct *task); +void koadpt_kthread_bind_mask(struct task_struct *task, + const struct cpumask *mask); +struct page *koadpt_alloc_pages(gfp_t gfp_mask, unsigned int order); +struct workqueue_attrs *koadpt_alloc_workqueue_attrs(gfp_t gfp_mask); +void koadpt_free_workqueue_attrs(struct workqueue_attrs *attrs); + +#else + +static inline const struct cred *koadpt_get_task_cred(struct task_struct *task) +{ + return get_task_cred(task); +} + +static inline void koadpt_kthread_bind_mask(struct task_struct *task, + const struct cpumask *mask) +{ + kthread_bind_mask(task, mask); +} + +static inline struct page *koadpt_alloc_pages(gfp_t gfp_mask, unsigned int order) +{ + return alloc_pages(gfp_mask, order); +} + +static inline struct workqueue_attrs *koadpt_alloc_workqueue_attrs( + gfp_t gfp_mask) +{ +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 19, 0)) + return alloc_workqueue_attrs(gfp_mask); +#else + (void)gfp_mask; + return alloc_workqueue_attrs(); +#endif +} + +static 
inline void koadpt_free_workqueue_attrs(struct workqueue_attrs *attrs) +{ + return free_workqueue_attrs(attrs); +} + +#endif + +#endif diff --git a/tzdriver/tc_ns_client.h b/tzdriver/tc_ns_client.h new file mode 100755 index 0000000000000000000000000000000000000000..9f55c652466f409636eb9aeb6bf1f365111647c6 --- /dev/null +++ b/tzdriver/tc_ns_client.h @@ -0,0 +1,197 @@ +/* + * tc_ns_client.h + * + * data structure declaration for nonsecure world + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef TC_NS_CLIENT_H +#define TC_NS_CLIENT_H + +#include +#include + +#define UUID_LEN 16 +#define PARAM_NUM 4 +#define ADDR_TRANS_NUM 32 + +#define teec_param_types(param0_type, param1_type, param2_type, param3_type) \ + ((param3_type) << 12 | (param2_type) << 8 | \ + (param1_type) << 4 | (param0_type)) + +#define teec_param_type_get(param_types, index) \ + (((param_types) >> ((index) << 2)) & 0x0F) + +#ifndef ZERO_SIZE_PTR +#define ZERO_SIZE_PTR ((void *)16) +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= (unsigned long)ZERO_SIZE_PTR) +#endif + +#if (KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE) +#define mm_sem_lock(mm) (mm)->mmap_lock +#else +#define mm_sem_lock(mm) (mm)->mmap_sem +#endif + +struct tc_ns_client_login { + __u32 method; + __u32 mdata; +}; + +union tc_ns_client_param { + struct { + __u32 buffer; + __u32 buffer_h_addr; + __u32 offset; + __u32 h_offset; + __u32 size_addr; + __u32 size_h_addr; + } memref; + struct { + __u32 a_addr; + __u32 a_h_addr; + __u32 b_addr; + __u32 b_h_addr; + 
} value; +}; + +struct tc_ns_client_return { + int code; + __u32 origin; +}; + +struct tc_ns_client_context { + unsigned char uuid[UUID_LEN]; + __u32 session_id; + __u32 cmd_id; + struct tc_ns_client_return returns; + struct tc_ns_client_login login; + union tc_ns_client_param params[PARAM_NUM]; + __u32 param_types; + __u8 started; + __u32 calling_pid; + unsigned int file_size; + union { + char *file_buffer; + struct { + uint32_t file_addr; + uint32_t file_h_addr; + } memref; + }; +}; + +struct tc_ns_client_time { + uint32_t seconds; + uint32_t millis; +}; + +enum secfile_type_t { + LOAD_TA = 0, + LOAD_SERVICE, + LOAD_LIB, + LOAD_DYNAMIC_DRV, + LOAD_PATCH, + LOAD_TYPE_MAX, +}; + +struct sec_file_info { + enum secfile_type_t secfile_type; + uint32_t file_size; + int32_t sec_load_err; +}; + +struct load_secfile_ioctl_struct { + struct sec_file_info sec_file_info; + unsigned char uuid[UUID_LEN]; + union { + char *file_buffer; + struct { + uint32_t file_addr; + uint32_t file_h_addr; + } memref; + }; +}__attribute__((packed)); + +struct agent_ioctl_args { + uint32_t id; + uint32_t buffer_size; + union { + void *buffer; + unsigned long long addr; + }; +}; + +struct tc_ns_client_crl { + union { + uint8_t *buffer; + struct { + uint32_t buffer_addr; + uint32_t buffer_h_addr; + } memref; + }; + uint32_t size; +}; + +#ifdef CONFIG_LOG_POOL_ENABLE +struct tc_ns_log_pool { + uint64_t addr; + uint64_t size; +}; +#endif + +#define MAX_SHA_256_SZ 32 + +#define TC_NS_CLIENT_IOCTL_SES_OPEN_REQ \ + _IOW(TC_NS_CLIENT_IOC_MAGIC, 1, struct tc_ns_client_context) +#define TC_NS_CLIENT_IOCTL_SES_CLOSE_REQ \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 2, struct tc_ns_client_context) +#define TC_NS_CLIENT_IOCTL_SEND_CMD_REQ \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 3, struct tc_ns_client_context) +#define TC_NS_CLIENT_IOCTL_SHRD_MEM_RELEASE \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 4, unsigned int) +#define TC_NS_CLIENT_IOCTL_WAIT_EVENT \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 5, unsigned int) +#define 
TC_NS_CLIENT_IOCTL_SEND_EVENT_RESPONSE \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 6, unsigned int) +#define TC_NS_CLIENT_IOCTL_REGISTER_AGENT \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 7, struct agent_ioctl_args) +#define TC_NS_CLIENT_IOCTL_UNREGISTER_AGENT \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 8, unsigned int) +#define TC_NS_CLIENT_IOCTL_LOAD_APP_REQ \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 9, struct load_secfile_ioctl_struct) +#define TC_NS_CLIENT_IOCTL_NEED_LOAD_APP \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 10, struct tc_ns_client_context) +#define TC_NS_CLIENT_IOCTL_ALLOC_EXCEPTING_MEM \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 12, unsigned int) +#define TC_NS_CLIENT_IOCTL_CANCEL_CMD_REQ \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 13, struct tc_ns_client_context) +#define TC_NS_CLIENT_IOCTL_LOGIN \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 14, int) +#define TC_NS_CLIENT_IOCTL_TUI_EVENT \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 16, int) +#define TC_NS_CLIENT_IOCTL_SYC_SYS_TIME \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 17, struct tc_ns_client_time) +#define TC_NS_CLIENT_IOCTL_SET_NATIVECA_IDENTITY \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 18, int) +#define TC_NS_CLIENT_IOCTL_LOAD_TTF_FILE_AND_NOTCH_HEIGHT \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 19, unsigned int) +#define TC_NS_CLIENT_IOCTL_LATEINIT \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 20, unsigned int) +#define TC_NS_CLIENT_IOCTL_GET_TEE_VERSION \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 21, unsigned int) +#define TC_NS_CLIENT_IOCTL_UPDATE_TA_CRL \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 22, struct tc_ns_client_crl) +#ifdef CONFIG_LOG_POOL_ENABLE +#define TC_NS_CLIENT_IOCTL_GET_LOG_POOL \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 23, struct tc_ns_log_pool) +#endif +#endif diff --git a/tzdriver/tc_ns_log.h b/tzdriver/tc_ns_log.h new file mode 100755 index 0000000000000000000000000000000000000000..b523a793365825debddc3c4e96ba2d338ea759f0 --- /dev/null +++ b/tzdriver/tc_ns_log.h @@ -0,0 +1,67 @@ +/* + * tc_ns_log.h + * + * log func declaration + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. 
/* Log severity levels, lowest (most verbose) to highest. */
enum {
	TZ_DEBUG_VERBOSE = 0,
	TZ_DEBUG_DEBUG,
	TZ_DEBUG_INFO,
	TZ_DEBUG_WARN,
	TZ_DEBUG_ERROR,
};
/* Tag prepended to every tzdriver log line. */
#define MOD_TEE "tzdriver"

/* Compile-time threshold: levels below this become no-ops at build time. */
#define TEE_LOG_MASK TZ_DEBUG_INFO

/* Verbose log — compiled out unless TEE_LOG_MASK is lowered to VERBOSE. */
#define tlogv(fmt, args...) \
do { \
	if (TZ_DEBUG_VERBOSE >= TEE_LOG_MASK) \
		pr_info("[%s] (%i, %s)%s: " fmt, MOD_TEE, current->pid, current->comm, __func__, ## args); \
} while (0)


/* Debug log — compiled out at the default TEE_LOG_MASK (INFO). */
#define tlogd(fmt, args...) \
do { \
	if (TZ_DEBUG_DEBUG >= TEE_LOG_MASK) \
		pr_info("[%s] (%i, %s)%s: " fmt, MOD_TEE, current->pid, current->comm, __func__, ## args); \
} while (0)


/* Info log — enabled at the default mask; includes pid/comm/function. */
#define tlogi(fmt, args...) \
do { \
	if (TZ_DEBUG_INFO >= TEE_LOG_MASK) \
		pr_info("[%s] (%i, %s)%s: " fmt, MOD_TEE, current->pid, current->comm, __func__, ## args); \
} while (0)


/* Warning log — routed to pr_warn. */
#define tlogw(fmt, args...) \
do { \
	if (TZ_DEBUG_WARN >= TEE_LOG_MASK) \
		pr_warn("[%s] (%i, %s)%s: " fmt, MOD_TEE, current->pid, current->comm, __func__, ## args); \
} while (0)


/* Error log — always emitted, unconditionally routed to pr_err. */
#define tloge(fmt, args...) \
	pr_err("[%s] (%i, %s)%s: " fmt, MOD_TEE, current->pid, current->comm, __func__, ## args)

#endif
teec_uuid *destination, + uint32_t connection_method, + const void *connection_data, + const struct teec_operation *operation, + uint32_t *return_origin); + +void teek_close_session(struct teec_session *session); + +uint32_t teek_send_secfile(struct teec_session *session, + const char *file_buffer, unsigned int file_size); + +TEEC_Result TEEK_SendSecfile(TEEC_Session *session, + const char *file_buffer, unsigned int file_size); + +uint32_t teek_invoke_command(struct teec_session *session, + uint32_t cmd_id, struct teec_operation *operation, + uint32_t *return_origin); + +uint32_t teek_register_shared_memory(struct teec_context *context, + struct teec_sharedmemory *sharedmem); + +uint32_t teek_allocate_shared_memory(struct teec_context *context, + struct teec_sharedmemory *sharedmem); + +void teek_release_shared_memory(struct teec_sharedmemory *sharedmem); + +void teek_request_cancellation(struct teec_operation *operation); + +int TEEK_IsAgentAlive(unsigned int agent_id); + +TEEC_Result TEEK_InitializeContext(const char *name, TEEC_Context *context); + +void TEEK_FinalizeContext(TEEC_Context *context); + +TEEC_Result TEEK_OpenSession(TEEC_Context *context, + TEEC_Session *session, + const TEEC_UUID *destination, + uint32_t connectionMethod, + const void *connectionData, + TEEC_Operation *operation, + uint32_t *returnOrigin); + +void TEEK_CloseSession(TEEC_Session *session); + +TEEC_Result TEEK_InvokeCommand(TEEC_Session *session, + uint32_t commandID, + TEEC_Operation *operation, + uint32_t *returnOrigin); + +#else + +static inline int teek_is_agent_alive(unsigned int agent_id) +{ + return TEEC_SUCCESS; +} + +static inline int TEEK_IsAgentAlive(unsigned int agent_id) +{ + return TEEC_SUCCESS; +} + +static inline uint32_t teek_initialize_context(const char *name, + struct teec_context *context) +{ + return TEEC_SUCCESS; +} + +static inline TEEC_Result TEEK_InitializeContext(const char *name, + TEEC_Context *context) +{ + return TEEC_SUCCESS; +} + +static inline void 
teek_finalize_context(struct teec_context *context) +{ + (void)context; +} + +static inline void TEEK_FinalizeContext(TEEC_Context *context) +{ + (void)context; +} + +static inline uint32_t teek_open_session(struct teec_context *context, + struct teec_session *session, + const struct teec_uuid *destination, + uint32_t connection_method, + const void *connection_data, + const struct teec_operation *operation, + uint32_t *return_origin) +{ + return TEEC_SUCCESS; +} + +static inline TEEC_Result TEEK_OpenSession(TEEC_Context *context, + TEEC_Session *session, const TEEC_UUID *destination, + uint32_t connectionMethod, const void *connectionData, + TEEC_Operation *operation, uint32_t *returnOrigin) +{ + return TEEC_SUCCESS; +} + +static inline void teek_close_session(struct teec_session *session) +{ + (void)session; +} + +static inline void TEEK_CloseSession(TEEC_Session *session) +{ + (void)session; +} + +static inline uint32_t teek_invoke_command(struct teec_session *session, + uint32_t cmd_id, struct teec_operation *operation, + uint32_t *return_origin) +{ + return TEEC_SUCCESS; +} + +static inline TEEC_Result TEEK_InvokeCommand(TEEC_Session *session, + uint32_t commandID, TEEC_Operation *operation, uint32_t *returnOrigin) +{ + return TEEC_SUCCESS; +} + +static inline uint32_t teek_send_secfile(struct teec_session *session, + const char *file_buffer, unsigned int file_size) +{ + return TEEC_SUCCESS; +} + +#endif + +#endif diff --git a/tzdriver/teek_client_constants.h b/tzdriver/teek_client_constants.h new file mode 100755 index 0000000000000000000000000000000000000000..fdeffbf4a263667a46b55bec40bc44950eea87cc --- /dev/null +++ b/tzdriver/teek_client_constants.h @@ -0,0 +1,214 @@ +/* + * teek_client_constants.h + * + * macro declaration for libteec interface for kernel CA. + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef TEEK_CLIENT_CONSTANTS_H +#define TEEK_CLIENT_CONSTANTS_H + +enum global_service_cmd_id { + GLOBAL_CMD_ID_INVALID = 0x0, + GLOBAL_CMD_ID_BOOT_ACK = 0x1, + GLOBAL_CMD_ID_OPEN_SESSION = 0x2, + GLOBAL_CMD_ID_CLOSE_SESSION = 0x3, + GLOBAL_CMD_ID_LOAD_SECURE_APP = 0x4, + GLOBAL_CMD_ID_NEED_LOAD_APP = 0x5, + GLOBAL_CMD_ID_REGISTER_AGENT = 0x6, + GLOBAL_CMD_ID_UNREGISTER_AGENT = 0x7, + GLOBAL_CMD_ID_REGISTER_NOTIFY_MEMORY = 0x8, + GLOBAL_CMD_ID_UNREGISTER_NOTIFY_MEMORY = 0x9, + GLOBAL_CMD_ID_INIT_CONTENT_PATH = 0xa, + GLOBAL_CMD_ID_TERMINATE_CONTENT_PATH = 0xb, + GLOBAL_CMD_ID_ALLOC_EXCEPTION_MEM = 0xc, + GLOBAL_CMD_ID_TEE_TIME = 0xd, + GLOBAL_CMD_ID_TEE_INFO = 0xe, + GLOBAL_CMD_ID_REGISTER_LOG_MEM = 0xf, + GLOBAL_CMD_ID_KILL_TASK = 0x10, + GLOBAL_CMD_ID_TUI_EXCEPTION = 0x11, + GLOBAL_CMD_ID_ADJUST_TIME = 0x12, + GLOBAL_CMD_ID_SET_CA_HASH = 0x13, + /* set the Android's build version */ + GLOBAL_CMD_ID_SET_BUILD_VERSION = 0x14, + GLOBAL_CMD_ID_REGISTER_TTF_MEM = 0x15, + /* get session key for encrypting dialog */ + GLOBAL_CMD_ID_GET_SESSION_SECURE_PARAMS = 0x16, + GLOBAL_CMD_ID_REGISTER_MAILBOX = 0x17, + GLOBAL_CMD_ID_REGISTER_UNUSUAL_TTF_MEM = 0x18, + GLOBAL_CMD_ID_REGISTER_ION_MEM = 0x19, + GLOBAL_CMD_ID_DUMP_MEMINFO = 0x1a, + /* this cmd will be used to service no ca handle cmd */ + GLOBAL_CMD_ID_SET_SERVE_CMD = 0x1b, + GLOBAL_CMD_ID_ADD_DYNAMIC_ION = 0x1c, + GLOBAL_CMD_ID_DEL_DYNAMIC_ION = 0x1d, + GLOBAL_CMD_ID_RELEASE_ION_SRV = 0x1e, + /* this cmd for tui to get notch_size */ + 
GLOBAL_CMD_ID_TUI_NOTCH = 0x1f, + GLOBAL_CMD_ID_LATE_INIT = 0x20, + /* this cmd for tui to get information of foldable screen */ + GLOBAL_CMD_ID_TUI_FOLD = 0x21, + GLOBAL_CMD_ID_GET_TEE_VERSION = 0x22, +#ifdef CONFIG_CMS_SIGNATURE + GLOBAL_CMD_ID_UPDATE_TA_CRL = 0x23, +#endif + GLOBAL_CMD_ID_REGISTER_RESMEM = 0x24, + GLOBAL_CMD_ID_DUMP_SRV_SESS = 0x25, + GLOBAL_CMD_ID_TRACE_ENABLE = 0x26, +#ifdef CONFIG_LIVEPATCH_ENABLE + GLOBAL_CMD_ID_LIVEPATCH_UNLOAD = 0x27, + GLOBAL_CMD_ID_LIVEPATCH_ENABLE = 0x28, + GLOBAL_CMD_ID_LIVEPATCH_DISABLE = 0x29, + GLOBAL_CMD_ID_LIVEPATCH_QUERY = 0x2a, +#endif + GLOBAL_CMD_ID_UNKNOWN = 0x7FFFFFFE, + GLOBAL_CMD_ID_MAX = 0x7FFFFFFF +}; + +enum teec_result { + TEEC_SUCCESS = 0x0, + TEEC_ERROR_INVALID_CMD = 0x1, + TEEC_ERROR_SERVICE_NOT_EXIST = 0x2, + TEEC_ERROR_SESSION_NOT_EXIST = 0x3, + TEEC_ERROR_SESSION_MAXIMUM, + TEEC_ERROR_REGISTER_EXIST_SERVICE, + TEEC_ERROR_TAGET_DEAD_FATAL, + TEEC_ERROR_READ_DATA, + TEEC_ERROR_WRITE_DATA, + TEEC_ERROR_TRUNCATE_OBJECT, + TEEC_ERROR_SEEK_DATA, + TEEC_ERROR_RENAME_OBJECT, + TEEC_ERROR_TRUSTED_APP_LOAD_ERROR, + TEEC_ERROR_GENERIC = 0xFFFF0000, + TEEC_ERROR_ACCESS_DENIED = 0xFFFF0001, + TEEC_ERROR_CANCEL = 0xFFFF0002, + TEEC_ERROR_ACCESS_CONFLICT = 0xFFFF0003, + TEEC_ERROR_EXCESS_DATA = 0xFFFF0004, + TEEC_ERROR_BAD_FORMAT = 0xFFFF0005, + TEEC_ERROR_BAD_PARAMETERS = 0xFFFF0006, + TEEC_ERROR_BAD_STATE = 0xFFFF0007, + TEEC_ERROR_ITEM_NOT_FOUND = 0xFFFF0008, + TEEC_ERROR_NOT_IMPLEMENTED = 0xFFFF0009, + TEEC_ERROR_NOT_SUPPORTED = 0xFFFF000A, + TEEC_ERROR_NO_DATA = 0xFFFF000B, + TEEC_ERROR_OUT_OF_MEMORY = 0xFFFF000C, + TEEC_ERROR_BUSY = 0xFFFF000D, + TEEC_ERROR_COMMUNICATION = 0xFFFF000E, + TEEC_ERROR_SECURITY = 0xFFFF000F, + TEEC_ERROR_SHORT_BUFFER = 0xFFFF0010, + TEEC_PENDING = 0xFFFF2000, + TEEC_PENDING2 = 0xFFFF2001, + TEE_ERROR_TAGET_DEAD = 0xFFFF3024, + TEE_ERROR_GT_DEAD = 0xFFFF3124, + TEEC_ERROR_MAC_INVALID = 0xFFFF3071, + TEEC_CLIENT_INTR = 0xFFFF4000, + TEEC_ERROR_TUI_IN_USE = 0xFFFF7110, + 
TEEC_ERROR_TUI_SWITCH_CHANNAL, + TEEC_ERROR_TUI_CFG_DRIVER, + TEEC_ERROR_TUI_INVALID_EVENT, + TEEC_ERROR_TUI_POLL_EVENT, + TEEC_ERROR_TUI_CANCELED, + TEEC_ERROR_TUI_EXIT, + TEEC_ERROR_TUI_NOT_AVAILABLE, + TEEC_ERROR_SEC_FLASH_NOT_AVAILABLE, + TEEC_ERROR_CA_AUTH_FAIL = 0xFFFFCFE5, + TEE_ERROR_AUDIT_FAIL = 0xFFFF9112, + TEE_ERROR_IS_DEAD = 0xFFFFABAB, +}; + +enum TEEC_ReturnCodeOrigin { + TEEC_ORIGIN_API = 0x1, + TEEC_ORIGIN_COMMS = 0x2, + TEEC_ORIGIN_TEE = 0x3, + TEEC_ORIGIN_TRUSTED_APP = 0x4, +}; + +enum TEEC_SharedMemCtl { + TEEC_MEM_INPUT = 0x1, + TEEC_MEM_OUTPUT = 0x2, + TEEC_MEM_INOUT = 0x3, +}; + +enum TEEC_ParamType { + TEEC_NONE = 0x0, + TEEC_VALUE_INPUT = 0x01, + TEEC_VALUE_OUTPUT = 0x02, + TEEC_VALUE_INOUT = 0x03, + TEEC_MEMREF_TEMP_INPUT = 0x05, + TEEC_MEMREF_TEMP_OUTPUT = 0x06, + TEEC_MEMREF_TEMP_INOUT = 0x07, + TEEC_ION_INPUT = 0x08, + TEEC_ION_SGLIST_INPUT = 0x09, + TEEC_MEMREF_SHARED_INOUT = 0x0a, + TEEC_MEMREF_WHOLE = 0xc, + TEEC_MEMREF_PARTIAL_INPUT = 0xd, + TEEC_MEMREF_PARTIAL_OUTPUT = 0xe, + TEEC_MEMREF_PARTIAL_INOUT = 0xf +}; + +enum TEE_ParamType { + TEE_PARAM_TYPE_NONE = 0x0, + TEE_PARAM_TYPE_VALUE_INPUT = 0x1, + TEE_PARAM_TYPE_VALUE_OUTPUT = 0x2, + TEE_PARAM_TYPE_VALUE_INOUT = 0x3, + TEE_PARAM_TYPE_MEMREF_INPUT = 0x5, + TEE_PARAM_TYPE_MEMREF_OUTPUT = 0x6, + TEE_PARAM_TYPE_MEMREF_INOUT = 0x7, + TEE_PARAM_TYPE_ION_INPUT = 0x8, + TEE_PARAM_TYPE_ION_SGLIST_INPUT = 0x9, + TEE_PARAM_TYPE_MEMREF_SHARED_INOUT = 0x0a, + TEE_PARAM_TYPE_RESMEM_INPUT = 0xc, + TEE_PARAM_TYPE_RESMEM_OUTPUT = 0xd, + TEE_PARAM_TYPE_RESMEM_INOUT = 0xe +}; + +enum TEEC_LoginMethod { + TEEC_LOGIN_PUBLIC = 0x0, + TEEC_LOGIN_USER, + TEEC_LOGIN_GROUP, + TEEC_LOGIN_APPLICATION = 0x4, + TEEC_LOGIN_USER_APPLICATION = 0x5, + TEEC_LOGIN_GROUP_APPLICATION = 0x6, + TEEC_LOGIN_IDENTIFY = 0x7, + TEEK_LOGIN_IDENTIFY = 0x80000001, +}; + +/* Add event id's name in 'view_state[]' in same order */ +enum tee_event_id { + INVOKE_CMD_START, + INVOKE_CMD_END, + SMC_SEND, + SMC_DONE, + SMC_IN, + 
SMC_OUT, + SMC_SLEEP, + SMC_PREEMPT, + GTASK_GET_CMD, + GTASK_PUT_CMD, + GTASK_REQ_TA, + GTASK_RESP_TA, + SPI_WAKEUP, + SCHED_IN, + SCHED_OUT, + INTERRUPT_HANDLE_SPI_START, + INTERRUPT_HANDLE_SPI_REE_RESPONSE, + INTERRUPT_HANDLE_SPI_REE_MISS, + INTERRUPT_HANDLE_SPI_REE_SCHEDULED, + INTERRUPT_HANDLE_SPI_END, + INTERRUPT_HANDLE_START, + INTERRUPT_HANDLE_END, + TEE_EVENT_MAX +}; + +#define TZ_WQ_MAX_ACTIVE 1 +#endif diff --git a/tzdriver/teek_client_ext.h b/tzdriver/teek_client_ext.h new file mode 100755 index 0000000000000000000000000000000000000000..a43b594c887fdf2246b603338d1528e1bf1252ae --- /dev/null +++ b/tzdriver/teek_client_ext.h @@ -0,0 +1,27 @@ +/* + * teek_client_ext.h + * + * ext api for teek + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef TEEK_CLIENT_EXT_H +#define TEEK_CLIENT_EXT_H + +#include + +/* update crl */ +#ifdef CONFIG_CMS_SIGNATURE +uint32_t teek_update_crl(uint8_t *crl, uint32_t crl_len); +#endif + +#endif \ No newline at end of file diff --git a/tzdriver/teek_client_id.h b/tzdriver/teek_client_id.h new file mode 100755 index 0000000000000000000000000000000000000000..a9d5fccef1e369ed3f5df753e6b6aa210545cc64 --- /dev/null +++ b/tzdriver/teek_client_id.h @@ -0,0 +1,139 @@ +/* + * teek_client_id.h + * + * define exported data for secboot CA + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef TEE_CLIENT_ID_H +#define TEE_CLIENT_ID_H + +#define TEE_SERVICE_SECBOOT \ +{ \ + 0x08080808, \ + 0x0808, \ + 0x0808, \ + { \ + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08 \ + } \ +} + +/* e7ed1f64-4687-41da-96dc-cbe4f27c838f */ +#define TEE_SERVICE_ANTIROOT \ +{ \ + 0xE7ED1F64, \ + 0x4687, \ + 0x41DA, \ + { \ + 0x96, 0xDC, 0xCB, 0xE4, 0xF2, 0x7C, 0x83, 0x8F \ + } \ +} +/* dca5ae8a-769e-4e24-896b-7d06442c1c0e */ +#define TEE_SERVICE_SECISP \ +{ \ + 0xDCA5AE8A, \ + 0x769E, \ + 0x4E24, \ + { \ + 0x89, 0x6B, 0x7D, 0x06, 0x44, 0x2C, 0x1C, 0x0E \ + } \ +} +/* 5700f837-8b8e-4661-800b-42bb3fc3141f */ +#define TEE_SERVICE_DRM_GRALLOC \ +{ \ + 0x5700F837, \ + 0x8B8E, \ + 0x4661, \ + { \ + 0x80, 0x0B, 0x42, 0xBB, 0x3F, 0xC3, 0x14, 0x1F \ + } \ +} + +enum SVC_SECBOOT_CMD_ID { + SECBOOT_CMD_ID_INVALID = 0x0, + SECBOOT_CMD_ID_COPY_VRL, + SECBOOT_CMD_ID_COPY_DATA, + SECBOOT_CMD_ID_VERIFY_DATA, + SECBOOT_CMD_ID_RESET_IMAGE, + SECBOOT_CMD_ID_COPY_VRL_TYPE, + SECBOOT_CMD_ID_COPY_DATA_TYPE, + SECBOOT_CMD_ID_VERIFY_DATA_TYPE, + SECBOOT_CMD_ID_VERIFY_DATA_TYPE_LOCAL, + SECBOOT_CMD_ID_COPY_IMG_TYPE, + SECBOOT_CMD_ID_BSP_MODEM_CALL, + SECBOOT_CMD_ID_BSP_MODULE_VERIFY, + SECBOOT_CMD_ID_BSP_MODEM_CALL_EXT = SECBOOT_CMD_ID_BSP_MODULE_VERIFY, + SECBOOT_CMD_ID_GET_RNG_NUM, + SECBOOT_CMD_ID_BSP_LOAD_MODEM_TEEOS, + SECBOOT_CMD_ID_BSP_UNLOAD_MODEM_TEEOS, + SECBOOT_CMD_VERIFY_BYPASS_NET_CERT, + SECBOOT_CMD_ID_GET_SOCID, +}; + +#ifdef CONFIG_SECBOOT_IMG + +#define CAS 0xff +enum SVC_SECBOOT_IMG_TYPE { + MODEM, + DSP, + 
XDSP, + TAS, + WAS, + MODEM_COMM_IMG, + MODEM_DTB, + NVM, + NVM_S, + MBN_R, + MBN_A, + MODEM_COLD_PATCH, + DSP_COLD_PATCH, + MODEM_CERT, + MAX_SOC_MODEM, + HIFI, + ISP, + IVP, + SOC_MAX +}; +#elif defined(CONFIG_SECBOOT_IMG_V2) +enum SVC_SECBOOT_IMG_TYPE { + HIFI, + ISP, + IVP, + MAX_AP_SOC, + MODEM_START = 0x100, + MODEM_END = 0x1FF, + MAX_SOC, +}; +#else +enum SVC_SECBOOT_IMG_TYPE { + MODEM, + HIFI, + DSP, + XDSP, + TAS, + WAS, + CAS, + MODEM_DTB, + ISP, + +#ifdef CONFIG_COLD_PATCH + MODEM_COLD_PATCH, + DSP_COLD_PATCH, +#endif +#ifdef CONFIG_RFIC_LOAD + RFIC, +#endif + SOC_MAX +}; +#endif + +#endif diff --git a/tzdriver/teek_client_type.h b/tzdriver/teek_client_type.h new file mode 100755 index 0000000000000000000000000000000000000000..e6271b623766218d98687bd8f0274d934f731d76 --- /dev/null +++ b/tzdriver/teek_client_type.h @@ -0,0 +1,138 @@ +/* + * teek_client_type.h + * + * define exported structures + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef TEE_CLIENT_TYPE_H +#define TEE_CLIENT_TYPE_H + +#include +#include "teek_client_constants.h" + +#ifndef NULL +#define NULL 0 +#endif + +struct teec_uuid { + uint32_t time_low; + uint16_t time_mid; + uint16_t timehi_and_version; + uint8_t clockseq_and_node[8]; +}; + +struct teec_context { + void *dev; + uint8_t *ta_path; + struct list_head shrd_mem_list; +}; + +struct teec_session { + uint32_t session_id; + struct teec_uuid service_id; + uint32_t ops_cnt; + struct teec_context *context; +}; + +struct teec_sharedmemory { + void *buffer; + uint32_t size; + uint32_t flags; + uint32_t ops_cnt; + bool is_allocated; + struct list_head head; + struct teec_context *context; +}; + +struct teec_tempmemory_reference { + void *buffer; + uint32_t size; +}; + +struct teec_registeredmemory_reference { + struct teec_sharedmemory *parent; + uint32_t size; + uint32_t offset; +}; + + +struct teec_value { + uint32_t a; + uint32_t b; +}; + +struct teec_ion_reference { + int ion_share_fd; + uint32_t ion_size; +}; + +union teec_parameter { + struct teec_tempmemory_reference tmpref; + struct teec_registeredmemory_reference memref; + struct teec_value value; + struct teec_ion_reference ionref; +}; + +struct teec_tui_parameter { + uint32_t event_type; + /* tui event type */ + uint32_t value; + /* return value, is keycode if tui event is getkeycode */ + uint32_t notch; /* notch size of phone */ + uint32_t width; /* width of foldable screen */ + uint32_t height; /* height of foldable screen */ + uint32_t fold_state; /* state of foldable screen */ + uint32_t display_state; /* one state of folded state */ + uint32_t phy_width; /* real width of the mobile */ + uint32_t phy_height; /* real height of the mobile */ +}; + +struct teec_operation { + uint32_t started; + uint32_t paramtypes; + union teec_parameter params[4]; /* GP has four params */ + struct teec_session *session; + bool cancel_flag; +}; + +typedef uint32_t TEEC_Result; + +typedef struct teec_uuid TEEC_UUID; + +typedef 
struct teec_context TEEC_Context; + +typedef struct teec_session TEEC_Session; + +typedef struct teec_sharedmemory TEEC_SharedMemory; + +typedef struct teec_tempmemory_reference TEEC_TempMemoryReference; + +typedef struct teec_registeredmemory_reference TEEC_RegisteredMemoryReference; + +typedef struct teec_value TEEC_Value; + +typedef struct teec_ion_reference TEEC_IonReference; + +typedef union teec_parameter TEEC_Parameter; + +typedef struct teec_tui_parameter TEEC_TUI_Parameter; + +typedef struct { + uint32_t started; + uint32_t paramTypes; + TEEC_Parameter params[4]; + TEEC_Session *session; + bool cancel_flag; +} TEEC_Operation; + +#endif diff --git a/tzdriver/teek_ns_client.h b/tzdriver/teek_ns_client.h new file mode 100755 index 0000000000000000000000000000000000000000..ae2df2f0b3b13e3875e0b34d75ed2a1a7df7940d --- /dev/null +++ b/tzdriver/teek_ns_client.h @@ -0,0 +1,240 @@ +/* + * teek_ns_client.h + * + * define structures and IOCTLs. + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef TEEK_NS_CLIENT_H +#define TEEK_NS_CLIENT_H + +#include +#include +#include +#include +#include "tc_ns_client.h" +#include "tc_ns_log.h" + +#define TC_NS_CLIENT_IOC_MAGIC 't' +#define TC_NS_CLIENT_DEV "tc_ns_client" +#define TC_PRIV_DEV "tc_private" +#define TC_NS_CLIENT_DEV_NAME "/dev/tc_ns_client" + +#define EXCEPTION_MEM_SIZE (8*1024) /* mem for exception handling */ +#ifdef CONFIG_THIRDPARTY_COMPATIBLE +#define TSP_REQUEST 0x32000008 +#define TSP_RESPONSE 0xBE000005 +#else +#define TSP_REQUEST 0xB2000008 +#define TSP_RESPONSE 0xB2000009 +#endif +#define TSP_REE_SIQ 0xB200000A +#define TSP_CRASH 0xB200000B +#define TSP_REBOOT 0xB200000E +#define TSP_CPU_ON 0xB200000F +#define TSP_REBOOT_DONE 0xB2000010 +#define TSP_PREEMPTED 0xB2000005 +#define TC_CALL_GLOBAL 0x01 +#define TC_CALL_SYNC 0x02 +#define TC_CALL_LOGIN 0x04 +#define TEE_REQ_FROM_USER_MODE 0U +#define TEE_REQ_FROM_KERNEL_MODE 1U +#define TEE_PARAM_NUM 4 +#define VMALLOC_TYPE 0 +#define RESERVED_TYPE 1 + +/* Max sizes for login info buffer comming from teecd */ +#define MAX_PACKAGE_NAME_LEN 255 +/* + * The apk certificate format is as follows: + * modulus_size(4 bytes) + modulus buffer(512 bytes) + * + exponent size(4 bytes) + exponent buffer(1 bytes) + */ +#define MAX_PUBKEY_LEN 1024 + +struct tc_ns_dev_list { + struct mutex dev_lock; /* for dev_file_list */ + struct list_head dev_file_list; +}; + +struct tc_uuid { + uint32_t time_low; + uint16_t time_mid; + uint16_t timehi_and_version; + uint8_t clockseq_and_node[8]; /* clock len is 8 */ +}; + +#define INVALID_MAP_ADDR ((void*)-1) +struct tc_ns_shared_mem { + void *kernel_addr; + void *user_addr; + void *user_addr_ca; /* for ca alloc share mem */ + unsigned int len; + int mem_type; + struct list_head head; + atomic_t usage; + atomic_t offset; +}; + +struct tc_ns_service { + unsigned char uuid[UUID_LEN]; + struct mutex session_lock; /* for session_list */ + struct list_head session_list; + struct list_head head; + struct mutex 
operation_lock; /* for session's open/close */ + atomic_t usage; +}; + +#define SERVICES_MAX_COUNT 32 /* service limit can opened on 1 fd */ +struct tc_ns_dev_file { + unsigned int dev_file_id; + struct mutex service_lock; /* for service_ref[], services[] */ + uint8_t service_ref[SERVICES_MAX_COUNT]; /* a judge if set services[i]=NULL */ + struct tc_ns_service *services[SERVICES_MAX_COUNT]; + struct mutex shared_mem_lock; /* for shared_mem_list */ + struct list_head shared_mem_list; + struct list_head head; + /* Device is linked to call from kernel */ + uint8_t kernel_api; + /* client login info provided by teecd, can be either package name and public + * key or uid(for non android services/daemons) + * login information can only be set once, dont' allow subsequent calls + */ + bool login_setup; + struct mutex login_setup_lock; /* for login_setup */ +#ifdef CONFIG_AUTH_HASH + bool cainfo_hash_setup; + struct mutex cainfo_hash_setup_lock; +#endif + uint32_t pkg_name_len; + uint8_t pkg_name[MAX_PACKAGE_NAME_LEN]; + uint32_t pub_key_len; + uint8_t pub_key[MAX_PUBKEY_LEN]; + int load_app_flag; + struct completion close_comp; /* for kthread close unclosed session */ +}; + +union tc_ns_parameter { + struct { + unsigned int buffer; + unsigned int size; + } memref; + struct { + unsigned int a; + unsigned int b; + } value; +}; + +struct tc_ns_login { + unsigned int method; + unsigned int mdata; +}; + +struct tc_ns_operation { + unsigned int paramtypes; + union tc_ns_parameter params[TEE_PARAM_NUM]; + unsigned int buffer_h_addr[TEE_PARAM_NUM]; + struct tc_ns_shared_mem *sharemem[TEE_PARAM_NUM]; + void *mb_buffer[TEE_PARAM_NUM]; +}; + +struct tc_ns_temp_buf { + void *temp_buffer; + unsigned int size; +}; + +enum smc_cmd_type { + CMD_TYPE_GLOBAL, + CMD_TYPE_TA, + CMD_TYPE_TA_AGENT, + CMD_TYPE_TA2TA_AGENT, /* compatible with TA2TA2TA->AGENT etc. 
*/ + CMD_TYPE_BUILDIN_AGENT, +}; + +struct tc_ns_smc_cmd { + uint8_t uuid[sizeof(struct tc_uuid)]; + unsigned int cmd_type; + unsigned int cmd_id; + unsigned int dev_file_id; + unsigned int context_id; + unsigned int agent_id; + unsigned int operation_phys; + unsigned int operation_h_phys; + unsigned int login_method; + unsigned int login_data_phy; + unsigned int login_data_h_addr; + unsigned int login_data_len; + unsigned int err_origin; + int ret_val; + unsigned int event_nr; + unsigned int uid; + unsigned int ca_pid; /* pid */ + unsigned int pid; /* tgid */ + unsigned int eventindex; /* tee audit event index for upload */ + bool started; +} __attribute__((__packed__)); + +/* + * @brief + */ +struct tc_wait_data { + wait_queue_head_t send_cmd_wq; + int send_wait_flag; +}; + +#define NUM_OF_SO 1 +#ifdef CONFIG_CMS_CAHASH_AUTH +#define KIND_OF_SO 1 /* the number of libteecxxx.so library on MDC\DC\TI */ +#else +#define KIND_OF_SO 2 /* the number of libteecxxx.so library on OH\HO */ +#endif +struct tc_ns_session { + unsigned int session_id; + struct list_head head; + struct tc_wait_data wait_data; + struct mutex ta_session_lock; /* for open/close/invoke on 1 session */ + struct tc_ns_dev_file *owner; + uint8_t auth_hash_buf[MAX_SHA_256_SZ * NUM_OF_SO + MAX_SHA_256_SZ]; + atomic_t usage; +}; + +struct mb_cmd_pack { + struct tc_ns_operation operation; + unsigned char login_data[MAX_SHA_256_SZ * NUM_OF_SO + MAX_SHA_256_SZ]; +}; + +struct load_img_params { + struct tc_ns_dev_file *dev_file; + const char *file_buffer; + unsigned int file_size; + struct mb_cmd_pack *mb_pack; + char *mb_load_mem; + struct tc_uuid *uuid_return; + unsigned int mb_load_size; +}; + +struct tc_call_params { + struct tc_ns_dev_file *dev; + struct tc_ns_client_context *context; + struct tc_ns_session *sess; + uint8_t flags; +}; + +struct tc_op_params { + struct mb_cmd_pack *mb_pack; + struct tc_ns_smc_cmd *smc_cmd; + struct tc_ns_temp_buf local_tmpbuf[TEE_PARAM_NUM]; + uint32_t 
trans_paramtype[TEE_PARAM_NUM]; + bool op_inited; +}; + +#endif diff --git a/tzdriver/tlogger/Kconfig b/tzdriver/tlogger/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..7ceff8e72c3e9374387b44490838a3c161b69ee9 --- /dev/null +++ b/tzdriver/tlogger/Kconfig @@ -0,0 +1,46 @@ +config TEELOG + bool "Secure Execution Log Driver" + default n + depends on TZDRIVER + help + TEEOS log + +config TEE_LOG_ACHIVE_PATH + string "Tee log achive path" + default "/data/log/tee/last_teemsg" + depends on TEELOG + help + Last tee msg log path + +config TEE_LOG_EXCEPTION + bool "Log Exception Info to Imonitor" + default n + depends on TEELOG + help + Log exception info to imonitor + +choice + + prompt "Register tee log Mem" + default PAGES_MEM + depends on TEELOG + +config RDR_MEM + bool "Register rdr log mem" + depends on DFX_BB + help + Register rdr log mem + +config BBOX_MEM + bool "Register bbox log mem" + depends on MNTN + help + Register bbox log mem + +config PAGES_MEM + bool "Register pages log mem" + help + Register pages log mem + +endchoice + diff --git a/tzdriver/tlogger/Makefile b/tzdriver/tlogger/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..00c3f02d0e899d840bc309aa68fd7233ed26d40d --- /dev/null +++ b/tzdriver/tlogger/Makefile @@ -0,0 +1,19 @@ +KERNEL_DIR := $(srctree) + +ifneq ($(TARGET_BUILD_VARIANT), user) + ccflags-y += -DDEF_ENG +endif + +EXTRA_CFLAGS += -I$(KERNEL_DIR)/../../../../third_party/bounds_checking_function/include +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/core +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/include + +ifeq ($(CONFIG_TZDRIVER_INTERNAL), y) + include $(KERNEL_DIR)/drivers/tzdriver/tzdriver_internal/internal.mk +endif + +obj-$(CONFIG_TEELOG) += tlogger.o +# For tee log memory type: bbox, rdr, or pages +# If no log mechanism is available, the pages memory can be used. 
+obj-$(CONFIG_PAGES_MEM) += log_pages_cfg.o diff --git a/tzdriver/tlogger/log_cfg_api.h b/tzdriver/tlogger/log_cfg_api.h new file mode 100644 index 0000000000000000000000000000000000000000..5a3797331b75b1b1437759827068e5999f1521f5 --- /dev/null +++ b/tzdriver/tlogger/log_cfg_api.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: for log cfg api define + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef LOG_CFG_API_H +#define LOG_CFG_API_H + +#include + +#if ((defined(CONFIG_BBOX_MEM) || defined(CONFIG_RDR_MEM) || \ + defined(CONFIG_PAGES_MEM)) && defined(CONFIG_TEELOG)) +int register_log_mem(uint64_t *addr, uint32_t *len); +int register_log_exception(void); +void report_log_system_error(void); +void report_log_system_panic(void); +int *map_log_mem(uint64_t mem_addr, uint32_t mem_len); +void unmap_log_mem(int *log_buffer); +void get_log_chown(uid_t *user, gid_t *group); +void unregister_log_exception(void); +void ta_crash_report_log(void); +#else +static inline int register_log_mem(const uint64_t *addr, const uint32_t *len) +{ + (void)addr; + (void)len; + return 0; +} + +static inline int register_log_exception(void) +{ + return 0; +} + +static inline void report_log_system_error(void) +{ +} + +static inline void report_log_system_panic(void) +{ +} + +static inline int *map_log_mem(uint64_t mem_addr, uint32_t mem_len) +{ + (void)mem_addr; + (void)mem_len; + return NULL; +} +static inline void unmap_log_mem(const int *log_buffer) +{ + (void)log_buffer; +} +static inline void get_log_chown(const uid_t *user, 
const gid_t *group) +{ + (void)user; + (void)group; +} +static inline void unregister_log_exception(void) +{ +} + +static inline void ta_crash_report_log(void) +{ +} +#endif +#endif diff --git a/tzdriver/tlogger/log_pages_cfg.c b/tzdriver/tlogger/log_pages_cfg.c new file mode 100644 index 0000000000000000000000000000000000000000..7679f8d2e3be162278dbb29d65171e045e85fcff --- /dev/null +++ b/tzdriver/tlogger/log_pages_cfg.c @@ -0,0 +1,132 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: for pages log cfg api define + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include "log_cfg_api.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "tc_ns_log.h" +#include "tlogger.h" +#include "shared_mem.h" + +void unregister_log_exception(void) +{ +} + +int register_log_exception(void) +{ + return 0; +} + +struct pages_module_result { + uint64_t log_addr; + uint32_t log_len; +}; + +struct pages_module_result g_mem_info = {0}; + +static int tee_pages_register_core(void) +{ + if (g_mem_info.log_addr != 0 || g_mem_info.log_len != 0) { + if (memset_s((void *)g_mem_info.log_addr, g_mem_info.log_len, 0, g_mem_info.log_len) != 0) { + tloge("clean log memory failed\n"); + return -EFAULT; + } + return 0; + } + + g_mem_info.log_addr = get_log_mem_vaddr(); + if (IS_ERR_OR_NULL((void *)(uintptr_t)g_mem_info.log_addr)) { + tloge("get log mem error\n"); + return -1; + } + g_mem_info.log_len = PAGES_LOG_MEM_LEN; + return 0; +} + +/* Register log memory */ +int register_log_mem(uint64_t *addr, uint32_t *len) +{ + int ret; + uint64_t mem_addr; + uint32_t mem_len; + + if (!addr || !len) { + tloge("addr or len is invalid\n"); + return -1; + } + + ret = tee_pages_register_core(); + if (ret != 0) + return ret; + + mem_addr = get_log_mem_paddr(g_mem_info.log_addr); + mem_len = g_mem_info.log_len; + + ret = register_mem_to_teeos(mem_addr, mem_len, true); + if (ret != 0) + return ret; + + *addr = g_mem_info.log_addr; + *len = g_mem_info.log_len; + return ret; +} + +void report_log_system_error(void) +{ +} + +void report_log_system_panic(void) +{ +/* default support trigger ap reset */ +#ifndef NOT_TRIGGER_AP_RESET + panic("TEEOS panic\n"); +#endif +} + +void ta_crash_report_log(void) +{ +} + +int *map_log_mem(uint64_t mem_addr, uint32_t mem_len) +{ + (void)mem_len; + return (int *)(uintptr_t)mem_addr; +} + +void unmap_log_mem(int *log_buffer) +{ + free_log_mem((uint64_t)(uintptr_t)log_buffer); +} + +void get_log_chown(uid_t *user, gid_t *group) +{ + if (!user || 
!group) { + tloge("user or group buffer is null\n"); + return; + } + + *user = ROOT_UID; + *group = FILE_CHOWN_GID; +} diff --git a/tzdriver/tlogger/tlogger.c b/tzdriver/tlogger/tlogger.c new file mode 100644 index 0000000000000000000000000000000000000000..81fd888c0fd62fa67d379a48015ca817ddda02e9 --- /dev/null +++ b/tzdriver/tlogger/tlogger.c @@ -0,0 +1,1509 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: TEE Logging Subsystem, read the tee os log from log memory + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include "tlogger.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "smc_smp.h" +#include "mailbox_mempool.h" +#include "teek_client_constants.h" +#include "tc_ns_client.h" +#include "teek_ns_client.h" +#include "log_cfg_api.h" +#include "tc_ns_log.h" +#include "ko_adapt.h" +#include "internal_functions.h" +#ifdef CONFIG_LOG_POOL +#include "shared_mem.h" +#endif +#ifdef CONFIG_TEE_REBOOT +#include "reboot.h" +#endif + +/* for log item ----------------------------------- */ +#define LOG_ITEM_MAGIC 0x5A5A +#define LOG_ITEM_LEN_ALIGN 64 +#define LOG_ITEM_MAX_LEN 1024 +#define LOG_READ_STATUS_ERROR 0x000FFFF + +/* =================================================== */ +#define LOGGER_LOG_TEEOS "teelog" /* tee os log */ +#define LOGGERIOCTL 0xBE /* for ioctl */ + +#define DUMP_START_MAGIC "Dump SPI notification" +#define DUMP_END_MAGIC "Dump task states END" + +#define GET_VERSION_BASE 5 +#define SET_READERPOS_CUR_BASE 6 +#define SET_TLOGCAT_STAT_BASE 7 
+#define GET_TLOGCAT_STAT_BASE 8 + +/* get tee verison */ +#define MAX_TEE_VERSION_LEN 256 +#define TEELOGGER_GET_VERSION \ + _IOR(LOGGERIOCTL, GET_VERSION_BASE, char[MAX_TEE_VERSION_LEN]) +/* set the log reader pos to current pos */ +#define TEELOGGER_SET_READERPOS_CUR \ + _IO(LOGGERIOCTL, SET_READERPOS_CUR_BASE) +#define TEELOGGER_SET_TLOGCAT_STAT \ + _IO(LOGGERIOCTL, SET_TLOGCAT_STAT_BASE) +#define TEELOGGER_GET_TLOGCAT_STAT \ + _IO(LOGGERIOCTL, GET_TLOGCAT_STAT_BASE) + +#ifdef CONFIG_LOG_POOL +struct teelogger_log_pool { + uint64_t addr; + uint64_t size; +}; + +#define GET_LOG_POOL_BASE 9 +#define LOG_POOL_APPEND_BASE 10 + +#define TEELOGGER_GET_LOG_POOL \ + _IOWR(LOGGERIOCTL, GET_LOG_POOL_BASE, uint64_t) +#define TEELOGGER_LOG_POOL_APPEND \ + _IOWR(LOGGERIOCTL, LOG_POOL_APPEND_BASE, struct teelogger_log_pool) +#endif + +int g_tlogcat_f = 0; + +#ifndef CONFIG_TEE_LOG_ACHIVE_PATH +#define CONFIG_TEE_LOG_ACHIVE_PATH "/data/log/tee/last_teemsg" +#endif +#define TEE_LOG_FILE_NAME_MAX 256 + +uint32_t g_last_read_offset = 0; + +#define NEVER_USED_LEN 32U +#define LOG_ITEM_RESERVED_LEN 1U + +/* 64 byte head + user log */ +struct log_item { + unsigned char never_used[NEVER_USED_LEN]; + unsigned short magic; + unsigned short reserved0; + uint32_t serial_no; + unsigned short real_len; /* log real len */ + unsigned short buffer_len; /* log buffer's len, multiple of 32 bytes */ + unsigned char uuid[UUID_LEN]; + unsigned char log_source_type; + unsigned char reserved[LOG_ITEM_RESERVED_LEN]; + unsigned char log_level; + unsigned char new_line; /* '\n' char, easy viewing log in bbox.bin file */ + unsigned char log_buffer[]; +}; + +/* --- for log mem --------------------------------- */ +#define TEMP_LOG_MEM_SIZE (10 * SZ_1K) + +#define LOG_BUFFER_RESERVED_LEN 11U +#define VERSION_INFO_LEN 156U + +/* + * Log's buffer flag info, size: 64 bytes head + 156 bytes's version info. + * For filed description: + * last_pos : current log's end position, last log's start position. 
+ * write_loops: Write cyclically. Init value is 0, when memory is used + * up, the value add 1. + */ +struct log_buffer_flag { + uint32_t reserved0; + uint32_t last_pos; + uint32_t write_loops; + uint32_t log_level; + /* [0] is magic failed, [1] is serial_no failed, used fior log retention feature */ + uint32_t reserved[LOG_BUFFER_RESERVED_LEN]; + uint32_t max_len; + unsigned char version_info[VERSION_INFO_LEN]; +}; + +struct log_buffer { + struct log_buffer_flag flag; + unsigned char buffer_start[]; +}; + +static struct log_buffer *g_log_buffer = NULL; + +struct tlogger_log { + unsigned char *buffer_info; /* ring buffer info */ + struct mutex mutex_info; /* this mutex protects buffer_info */ + wait_queue_head_t wait_queue_head; /* wait queue head for reader */ + struct list_head logs; /* log channels list */ + struct miscdevice misc_device; /* misc device log */ + struct list_head readers; /* log's readers */ +}; + +static LIST_HEAD(m_log_list); + +struct tlogger_reader { + struct tlogger_log *log; /* tlogger_log info data */ + struct list_head list; /* log entry in tlogger_log's list */ + /* Current reading position, start position of next read again */ + uint32_t r_off; + uint32_t r_loops; + uint32_t r_sn; + uint32_t r_failtimes; + uint32_t r_from_cur; + uint32_t r_is_tlogf; + bool r_all; /* whether this reader can read all entries */ + uint32_t r_ver; +}; + +static uint32_t g_log_mem_len = 0; +static uint32_t g_tlogcat_count = 0; +static struct tlogger_log *g_log; + +static struct tlogger_log *get_reader_log(const struct file *file) +{ + struct tlogger_reader *reader = NULL; + + reader = file->private_data; + if (!reader) + return NULL; + + return reader->log; +} + +static bool check_log_item_validite(const struct log_item *item, + uint32_t item_max_size) +{ + bool con = (item && (item->magic == LOG_ITEM_MAGIC) && + (item->buffer_len > 0) && + (item->real_len > 0) && + (item->buffer_len % LOG_ITEM_LEN_ALIGN == 0) && + (item->real_len <= item->buffer_len) && + 
((item->buffer_len - item->real_len) < LOG_ITEM_LEN_ALIGN) && + (item->buffer_len + sizeof(*item) <= item_max_size)); + + return con; +} + +static struct log_item *get_next_log_item(const unsigned char *buffer_start, + uint32_t max_len, uint32_t read_pos, uint32_t scope_len, uint32_t *pos) +{ + uint32_t i = 0; + struct log_item *item = NULL; + uint32_t max_size; + + if ((read_pos + scope_len) > max_len) + return NULL; + + while ((i + sizeof(*item) + LOG_ITEM_LEN_ALIGN) <= scope_len) { + *pos = read_pos + i; + item = (struct log_item *)(uintptr_t)(buffer_start + read_pos + i); + max_size = (((scope_len - i) > LOG_ITEM_MAX_LEN) ? + LOG_ITEM_MAX_LEN : (scope_len - i)); + if (check_log_item_validite(item, max_size)) + break; + + i += LOG_ITEM_LEN_ALIGN; + item = NULL; + } + + return item; +} + +struct reader_position { + const unsigned char *buffer_start; + uint32_t max_len; + uint32_t start_pos; + uint32_t end_pos; +}; + +static uint32_t parse_log_item(char __user *buf, size_t count, + struct reader_position *position, uint32_t *read_off, + bool *user_buffer_left) +{ + struct log_item *next_item = NULL; + size_t buf_left; + uint32_t buf_written; + uint32_t item_len; + bool con = false; + uint32_t start_pos = position->start_pos; + + buf_written = 0; + buf_left = count; + + con = (!read_off || !position->buffer_start); + if (con) + return buf_written; + + *user_buffer_left = true; + while (start_pos < position->end_pos) { + next_item = get_next_log_item(position->buffer_start, + position->max_len, start_pos, + position->end_pos - start_pos, &start_pos); + if (!next_item) + break; + + /* copy to user */ + item_len = next_item->buffer_len + sizeof(*next_item); + if (buf_left < item_len) { + *user_buffer_left = false; + break; + } + + start_pos += item_len; + if (copy_to_user(buf + buf_written, + (void *)next_item, item_len) != 0) + tloge("copy failed, item len %u\n", item_len); + buf_written += item_len; + buf_left -= item_len; + } + + *read_off = start_pos; + return 
buf_written; +} + +static ssize_t get_buffer_info(struct tlogger_reader *reader, + struct log_buffer_flag *buffer_flag, struct log_buffer **log_buffer) +{ + struct tlogger_log *log = NULL; + errno_t ret; + struct log_buffer *buffer_tmp = NULL; + + log = reader->log; + if (!log) + return -EINVAL; + + buffer_tmp = (struct log_buffer*)log->buffer_info; + if (!buffer_tmp) + return -EINVAL; + + __asm__ volatile ("isb"); + __asm__ volatile ("dsb sy"); + + mutex_lock(&log->mutex_info); + ret = memcpy_s(buffer_flag, sizeof(*buffer_flag), &buffer_tmp->flag, + sizeof(buffer_tmp->flag)); + mutex_unlock(&log->mutex_info); + if (ret != 0) { + tloge("memcpy failed %d\n", ret); + return -EAGAIN; + } + + *log_buffer = buffer_tmp; + return 0; +} + +#define LOG_BUFFER_MAX_LEN 0x100000 + +static ssize_t get_last_read_pos(struct log_buffer_flag *log_flag, + const struct tlogger_reader *reader, uint32_t *log_last_pos, uint32_t *is_read) +{ + uint32_t buffer_max_len = g_log_mem_len - sizeof(*g_log_buffer); + + *is_read = 0; + + if (buffer_max_len > LOG_BUFFER_MAX_LEN) + return -EINVAL; + + *log_last_pos = log_flag->last_pos; + if (*log_last_pos == reader->r_off && + log_flag->write_loops == reader->r_loops) + return 0; + + if (log_flag->max_len < *log_last_pos || + log_flag->max_len > buffer_max_len) { + tloge("invalid data maxlen %x pos %x\n", + log_flag->max_len, *log_last_pos); + return -EFAULT; + } + + if (reader->r_off > log_flag->max_len) { + tloge("invalid data roff %x maxlen %x\n", + reader->r_off, log_flag->max_len); + return -EFAULT; + } + + *is_read = 1; + return 0; +} + +static void set_reader_position(struct reader_position *position, + const unsigned char *buffer_start, uint32_t max_len, uint32_t start_pos, uint32_t end_pos) +{ + position->buffer_start = buffer_start; + position->max_len = max_len; + position->start_pos = start_pos; + position->end_pos = end_pos; +} + +static ssize_t proc_read_ret(uint32_t buf_written, + const struct tlogger_reader *reader) +{ + ssize_t 
ret; + if (buf_written == 0) { + ret = 0; + } else { + ret = buf_written; + tlogd("read length %u\n", buf_written); + g_last_read_offset = reader->r_off; + } + return ret; +} + +static ssize_t check_read_params(const struct file *file, + const char __user *buf, size_t count) +{ + if (count < LOG_ITEM_MAX_LEN) + return -EINVAL; + + if (!file || !buf) + return -EINVAL; + + return 0; +} + +/* + * If the sequence number of the last read position is smaller + * than the current minimum sequence number, the last read + * position is overwritten. And this time read data from + * minimum number, or read data from last position. + */ +static ssize_t trigger_parse_log(char __user *buf, size_t count, + uint32_t log_last_pos, struct log_buffer *log_buffer, + struct tlogger_reader *reader) +{ + bool user_buffer_left = false; + uint32_t buf_written; + struct reader_position position = {0}; + struct log_buffer_flag *buffer_flag = &(log_buffer->flag); + + if (buffer_flag->write_loops == reader->r_loops) { + set_reader_position(&position, log_buffer->buffer_start, + buffer_flag->max_len, reader->r_off, log_last_pos); + + buf_written = parse_log_item(buf, count, &position, + &reader->r_off, &user_buffer_left); + + return proc_read_ret(buf_written, reader); + } + + if (buffer_flag->write_loops > (reader->r_loops +1) || + ((buffer_flag->write_loops == (reader->r_loops + 1)) && + (reader->r_off < log_last_pos))) { + reader->r_off = log_last_pos; + reader->r_loops = buffer_flag->write_loops - 1; + } + + set_reader_position(&position, log_buffer->buffer_start, + buffer_flag->max_len, reader->r_off, buffer_flag->max_len); + + buf_written = parse_log_item(buf, count, &position, + &reader->r_off, &user_buffer_left); + + if (count > buf_written && user_buffer_left) { + set_reader_position(&position, log_buffer->buffer_start, + buffer_flag->max_len, 0, log_last_pos); + + buf_written += parse_log_item(buf + buf_written, + count - buf_written, &position, + &reader->r_off, &user_buffer_left); + 
+ reader->r_loops = buffer_flag->write_loops; + } + + return proc_read_ret(buf_written, reader); +} + +static ssize_t process_tlogger_read(struct file *file, + char __user *buf, size_t count, loff_t *pos) +{ + struct tlogger_reader *reader = NULL; + struct log_buffer *log_buffer = NULL; + ssize_t ret; + uint32_t last_pos; + uint32_t is_read; + struct log_buffer_flag buffer_flag; + + (void)pos; + + ret = check_read_params(file, buf, count); + if (ret != 0) + return ret; + + reader = file->private_data; + if (!reader) + return -EINVAL; + + ret = get_buffer_info(reader, &buffer_flag, &log_buffer); + if (ret != 0) + return ret; + + ret = get_last_read_pos(&buffer_flag, reader, &last_pos, &is_read); + if (is_read == 0) + return ret; + + return trigger_parse_log(buf, count, last_pos, log_buffer, reader); +} + +void tz_log_write(void) +{ + struct log_buffer *log_buffer = NULL; + + if (!g_log) + return; + + log_buffer = (struct log_buffer*)g_log->buffer_info; + if (!log_buffer) + return; + + if (g_last_read_offset != log_buffer->flag.last_pos) { + tlogd("wake up write tz log\n"); + wake_up_interruptible(&g_log->wait_queue_head); + } + + return; +} + +static struct tlogger_log *get_tlogger_log_by_minor(int minor) +{ + struct tlogger_log *log = NULL; + + list_for_each_entry(log, &m_log_list, logs) { + if (log->misc_device.minor == minor) + return log; + } + + return NULL; +} + +static int process_tlogger_open(struct inode *inode, + struct file *file) +{ + struct tlogger_log *log = NULL; + int ret; + struct tlogger_reader *reader = NULL; + + tlogd("open logger open ++\n"); + /* not support seek */ + ret = nonseekable_open(inode, file); + if (ret != 0) + return ret; + + tlogd("Before get log from minor\n"); + log = get_tlogger_log_by_minor(MINOR(inode->i_rdev)); + if (!log) + return -ENODEV; + + reader = kmalloc(sizeof(*reader), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)reader)) + return -ENOMEM; + + reader->log = log; + reader->r_all = true; + 
reader->r_off = 0; + reader->r_loops = 0; + reader->r_sn = 0; + reader->r_failtimes = 0; + reader->r_is_tlogf = 0; + reader ->r_from_cur = 0; + + INIT_LIST_HEAD(&reader->list); + + mutex_lock(&log->mutex_info); + list_add_tail(&reader->list, &log->readers); + g_tlogcat_count++; + mutex_unlock(&log->mutex_info); + + file->private_data = reader; + tlogd("tlogcat count %u\n", g_tlogcat_count); +#ifdef CONFIG_TEE_REBOOT + get_tlogcat_pid(); +#endif + return 0; +} + +static int process_tlogger_release(struct inode *ignored, + struct file *file) +{ + struct tlogger_reader *reader = NULL; + struct tlogger_log *log = NULL; + + (void)ignored; + + tlogd("logger_release ++\n"); + + if (!file) + return -1; + + reader = file->private_data; + if (!reader) { + tloge("reader is null\n"); + return -1; + } + + log = reader->log; + if (!log) { + tloge("log is null\n"); + return -1; + } + + mutex_lock(&log->mutex_info); + list_del(&reader->list); + if (g_tlogcat_count >= 1) + g_tlogcat_count--; + mutex_unlock(&log->mutex_info); + + tlogd("logger_release r_is_tlogf-%u\n", reader->r_is_tlogf); + + if (reader->r_is_tlogf != 0) + g_tlogcat_f = 0; + + kfree(reader); + tlogd("tlogcat count %u\n", g_tlogcat_count); + return 0; +} + +static unsigned int process_tlogger_poll(struct file *file, + poll_table *wait) +{ + struct tlogger_reader *reader = NULL; + struct tlogger_log *log = NULL; + struct log_buffer *buffer = NULL; + uint32_t ret = POLLOUT | POLLWRNORM; + + tlogd("logger_poll ++\n"); + if (!file) { + tloge("file is null\n"); + return ret; + } + + reader = file->private_data; + if (!reader) { + tloge("the private data is null\n"); + return ret; + } + + log = reader->log; + if (!log) { + tloge("log is null\n"); + return ret; + } + + buffer = (struct log_buffer*)log->buffer_info; + if (!buffer) { + tloge("buffer is null\n"); + return ret; + } + + poll_wait(file, &log->wait_queue_head, wait); + + if (buffer->flag.last_pos != reader->r_off) + ret |= POLLIN | POLLRDNORM; + + return ret; +} 
+ +#define SET_READ_POS 1U +static void set_reader_cur_pos(const struct file *file) +{ + struct tlogger_reader *reader = NULL; + struct tlogger_log *log = NULL; + struct log_buffer *buffer = NULL; + + reader = file->private_data; + if (!reader) + return; + + log = reader->log; + if (!log) + return; + + buffer = (struct log_buffer*)log->buffer_info; + if (!buffer) + return; + + reader->r_from_cur = SET_READ_POS; + reader->r_off = buffer->flag.last_pos; + reader->r_loops = buffer->flag.write_loops; +} + +static void set_tlogcat_f_stat(const struct file *file) +{ + struct tlogger_reader *reader = NULL; + + if (!file) + return; + + reader = file->private_data; + if (!reader) + return; + + reader->r_is_tlogf = 1; + g_tlogcat_f = 1; + + tlogi("set tlogcat_f-%d\n", g_tlogcat_f); + return; +} + +static int get_tlogcat_f_stat(void) +{ + tlogi("get tlogcat_f-%d\n", g_tlogcat_f); + return g_tlogcat_f; +} + +static int check_user_arg(unsigned long arg, size_t arg_len) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 18) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4, 19, 71)) + return (int)access_ok(VERIFY_READ, + (void __user *)(uintptr_t)arg, arg_len); +#else + return (int)access_ok((void __user *)(uintptr_t)arg, arg_len); +#endif +} + +static int get_teeos_version(uint32_t cmd, unsigned long arg) +{ + int ret; + + if ((_IOC_DIR(cmd) & _IOC_READ) == 0) { + tloge("check get version cmd failed\n"); + return -1; + } + + ret = check_user_arg(arg, + sizeof(g_log_buffer->flag.version_info)); + if (ret == 0) { + tloge("check version info arg failed\n"); + return -1; + } + + if (copy_to_user((void __user *)(uintptr_t)arg, + (void *)g_log_buffer->flag.version_info, + sizeof(g_log_buffer->flag.version_info)) != 0) { + tloge("version info copy failed\n"); + return -1; + } + + return 0; +} + +#ifdef CONFIG_LOG_POOL +#define LOG_POOL_SIZE 0x80000UL +#define LOG_POOL_ITEM_MAX_LEN 384 +#define HALF_DIV_NUM 2 + +static DEFINE_MUTEX(g_log_pool_lock); +static uint64_t g_log_pool_va = 0; 
+static uint64_t g_log_pool_size = 0; +static uint32_t g_logserialno = 0; + +static int get_log_pool(void *argp) +{ + uint64_t sz = 0; + + if (argp == NULL) { + tloge("invalid params\n"); + return -1; + } + sz = get_log_mem_size() / HALF_DIV_NUM; + if (sz != LOG_POOL_SIZE) { + tloge("log pool size error\n"); + return -1; + } + if (copy_to_user(argp, &sz, sizeof(uint64_t)) != 0) { + tloge("copy to user failed\n"); + return -1; + } + + return 0; +} + +static int log_pool_check(void) +{ + if (g_log_pool_size != LOG_POOL_SIZE) { + tloge("log pool size error\n"); + g_log_pool_size = 0; + return -1; + } + + if (g_log_pool_va == 0 || (UINT64_MAX - g_log_pool_va) < g_log_pool_size) { + tloge("log pool addr error\n"); + g_log_pool_va = 0; + return -1; + } + + return 0; +} + +static int log_pool_item_check(struct log_item *item, struct teelogger_log_pool pool_item) +{ + if (item->magic != LOG_ITEM_MAGIC || item->buffer_len == 0 || item->real_len == 0|| + item->buffer_len % LOG_ITEM_LEN_ALIGN != 0 || item->real_len > item->buffer_len || + (item->buffer_len - item->real_len) >= (uint16_t)LOG_ITEM_LEN_ALIGN || + (uint64_t)(item->buffer_len + sizeof(struct log_item)) > pool_item.size) + return -1; + + return 0; +} + +static int log_pool_append(void *argp) +{ + struct teelogger_log_pool pool_item = {0}; + struct log_buffer *pool_buffer = NULL; + struct log_item *item = NULL; + if (argp == NULL || log_pool_check() != 0) { + tloge("invalid params, g_log_pool_va or g_log_pool_size\n"); + return -1; + } + if (copy_from_user((void *)&pool_item, argp, sizeof(struct teelogger_log_pool)) != 0) { + tloge("pool_item copy from user error\n"); + return -1; + } + if ((uint64_t)LOG_POOL_ITEM_MAX_LEN < pool_item.size || pool_item.size < (uint64_t)sizeof(struct log_item) || + pool_item.addr == 0 || UINT64_MAX - pool_item.addr < pool_item.size) { + tloge("pool_item addr or size error\n"); + return -1; + } + + mutex_lock(&g_log_pool_lock); + pool_buffer = (struct log_buffer 
*)(uintptr_t)g_log_pool_va; + if (pool_buffer == NULL || (uint64_t)(pool_buffer->flag).last_pos > g_log_pool_size || + ((uint64_t)sizeof(struct log_buffer) + (uint64_t)(pool_buffer->flag).last_pos) > g_log_pool_size) { + tloge("pool_buffer error\n"); + mutex_unlock(&g_log_pool_lock); + return -1; + } + /* restart from head */ + if (((uint64_t)sizeof(struct log_buffer) + (uint64_t)(pool_buffer->flag).last_pos + + pool_item.size) > g_log_pool_size) { + pool_buffer->flag.write_loops++; + pool_buffer->flag.last_pos = 0; + } + + item = (struct log_item *)(uintptr_t)((uint64_t)(uintptr_t)pool_buffer->buffer_start + + (uint64_t)pool_buffer->flag.last_pos); + if (copy_from_user((void *)item, (void *)pool_item.addr, pool_item.size) != 0) { + tloge("item copy_from_user error\n"); + mutex_unlock(&g_log_pool_lock); + return -1; + } + if (log_pool_item_check(item, pool_item) != 0) { + tloge("item check error\n"); + mutex_unlock(&g_log_pool_lock); + return -1; + } + + item->serial_no = ++g_logserialno; + /* reset g_logserialno */ + if (item->serial_no == 0) + item->serial_no = ++g_logserialno; + item->new_line = (unsigned char)'\n'; + pool_buffer->flag.reserved[1] = g_logserialno; + pool_buffer->flag.last_pos += (uint32_t)pool_item.size; + mutex_unlock(&g_log_pool_lock); + + return 0; +} + +static int init_log_pool(void) +{ + struct log_buffer *pool_buffer = NULL; + uint64_t paddr = get_log_mem_paddr(0); + + g_log_pool_size = get_log_mem_size(); + if ((UINT64_MAX - paddr) < g_log_pool_size) { + tloge("log pool paddr error\n"); + return -1; + } + g_log_pool_size /= HALF_DIV_NUM; + g_log_pool_va = (uint64_t)(uintptr_t)ioremap_cache(paddr + g_log_pool_size, g_log_pool_size); + if (log_pool_check() != 0) { + tloge("log pool addr or size error\n"); + return -1; + } + pool_buffer = (struct log_buffer *)(uintptr_t)g_log_pool_va; + + /* + * the struct log_buffer magic field use for log retention feature, + * if hit the magic, will retain the old log before reset in log pool, + * or will 
memset log pool. + */ + if (pool_buffer->flag.reserved[0] != LOG_ITEM_MAGIC) { + (void)memset_s((void *)(uintptr_t)g_log_pool_va, g_log_pool_size, 0, g_log_pool_size); + pool_buffer->flag.reserved[0] = LOG_ITEM_MAGIC; + pool_buffer->flag.max_len = g_log_pool_size - sizeof(struct log_buffer); + } else { + g_logserialno = pool_buffer->flag.reserved[1]; + } + + return 0; +} + +static void free_log_pool(void) +{ + if (g_log_pool_va != 0) + iounmap((void __iomem *)(uintptr_t)g_log_pool_va); + g_log_pool_va = 0; + g_log_pool_size = 0; + g_logserialno = 0; +} +#endif + +static long process_tlogger_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct tlogger_log *log = NULL; + long ret = -EINVAL; + + if (!file) + return -1; + + log = get_reader_log(file); + if (!log) { + tloge("log is null\n"); + return -1; + } + + tlogd("logger_ioctl start ++\n"); + mutex_lock(&log->mutex_info); + + switch (cmd) { + case TEELOGGER_GET_VERSION: + if (get_teeos_version(cmd, arg) == 0) + ret = 0; + break; + case TEELOGGER_SET_READERPOS_CUR: + set_reader_cur_pos(file); + ret = 0; + break; + case TEELOGGER_SET_TLOGCAT_STAT: + set_tlogcat_f_stat(file); + ret = 0; + break; + case TEELOGGER_GET_TLOGCAT_STAT: + ret = get_tlogcat_f_stat(); + break; +#ifdef CONFIG_LOG_POOL + case TEELOGGER_GET_LOG_POOL: + ret = get_log_pool(arg); + break; + case TEELOGGER_LOG_POOL_APPEND: + ret = log_pool_append(arg); + break; +#endif + default: + tloge("ioctl error default\n"); + break; + } + + mutex_unlock(&log->mutex_info); + return ret; +} + +#ifdef CONFIG_COMPAT +static long process_tlogger_compat_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + tlogd("logger_compat_ioctl ++\n"); + arg = (unsigned long)(uintptr_t)compat_ptr(arg); + return process_tlogger_ioctl(file, cmd, arg); +} +#endif + +static const struct file_operations g_logger_fops = { + .owner = THIS_MODULE, + .read = process_tlogger_read, + .poll = process_tlogger_poll, + .unlocked_ioctl = 
process_tlogger_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = process_tlogger_compat_ioctl, +#endif + .open = process_tlogger_open, + .release = process_tlogger_release, +}; + +static int __init register_device(const char *log_name, + uintptr_t addr, int size) +{ + int ret; + struct tlogger_log *log = NULL; + unsigned char *buffer = (unsigned char *)addr; + (void)size; + + log = kzalloc(sizeof(*log), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)log)) { + tloge("kzalloc is failed\n"); + return -ENOMEM; + } + log->buffer_info = buffer; + log->misc_device.minor = MISC_DYNAMIC_MINOR; + log->misc_device.name = kstrdup(log_name, GFP_KERNEL); + if (!log->misc_device.name) { + ret = -ENOMEM; + tloge("kstrdup is failed\n"); + goto out_free_log; + } + log->misc_device.fops = &g_logger_fops; + log->misc_device.parent = NULL; + + init_waitqueue_head(&log->wait_queue_head); + INIT_LIST_HEAD(&log->readers); + mutex_init(&log->mutex_info); + INIT_LIST_HEAD(&log->logs); + list_add_tail(&log->logs, &m_log_list); + + /* register misc device for this log */ + ret = misc_register(&log->misc_device); + if (unlikely(ret)) { + tloge("failed to register misc device:%s\n", + log->misc_device.name); + goto out_free_log; + } + g_log = log; + return 0; + +out_free_log: + if (log->misc_device.name) + kfree(log->misc_device.name); + + kfree(log); + return ret; +} + +static struct log_item *msg_get_next(unsigned char *buffer_start, + uint32_t read_pos, uint32_t scope_len, uint32_t max_len) +{ + uint32_t i = 0; + struct log_item *item = NULL; + uint32_t item_max_size; + uint32_t len; + + while (i <= scope_len && + ((read_pos + i + sizeof(*item)) < max_len)) { + len = (uint32_t)(scope_len - i); + item_max_size = + ((len > LOG_ITEM_MAX_LEN) ? 
LOG_ITEM_MAX_LEN : len); + item = (struct log_item *)(buffer_start + read_pos + i); + + if (check_log_item_validite(item, item_max_size)) { + if ((read_pos + i + sizeof(*item) + + item->buffer_len) > max_len) { + tloge("check item len error\n"); + return NULL; + } + + return item; + } + + i += LOG_ITEM_LEN_ALIGN; + item = NULL; + } + + return NULL; +} + +#ifdef CONFIG_TZDRIVER_MODULE +/* there is no way to chown in kernel-5.10 for ko */ +static int tlogger_chown(const char *file_path, uint32_t file_path_len) +{ + (void)file_path; + (void)file_path_len; + + return 0; +} +#else +static int tlogger_chown(const char *file_path, uint32_t file_path_len) +{ + (void)file_path_len; + uid_t user = ROOT_UID; + gid_t group = ROOT_GID; + int ret; + mm_segment_t old_fs; + + get_log_chown(&user, &group); + + /* not need modify chown attr */ + if (group == ROOT_GID && user == ROOT_UID) + return 0; + + old_fs = get_fs(); + set_fs(KERNEL_DS); +#if (KERNEL_VERSION(5, 4, 0) <= LINUX_VERSION_CODE) + ret = (int)ksys_chown((const char __user *)file_path, user, group); +#else + ret = (int)sys_chown((const char __user *)file_path, user, group); +#endif + if (ret != 0) { + tloge("sys chown for last teemsg file error\n"); + set_fs(old_fs); + return -1; + } + + set_fs(old_fs); + return 0; +} +#endif + +static int write_version_to_msg(struct file *filep, + loff_t *pos) +{ + ssize_t write_len; + + /* first write tee versino info */ + write_len = kernel_write(filep, g_log_buffer->flag.version_info, + strlen(g_log_buffer->flag.version_info), pos); + if (write_len < 0) { + tloge("Failed to write to last teemsg version\n"); + return -1; + } + + tlogd("Succeed to Write to last teemsg version, len=%zd\n", write_len); + return 0; +} + +static int write_part_log_to_msg(struct file *filep, + unsigned char *buffer, uint32_t buffer_max_len, loff_t *pos, + uint32_t read_off, uint32_t read_off_end) +{ + struct log_item *next_item = NULL; + uint32_t item_len; + uint32_t total_len = 0; + ssize_t write_len; + 
	/* continuation of write_part_log_to_msg() — function head is above this chunk */
	next_item = msg_get_next(buffer, read_off,
		LOG_ITEM_MAX_LEN, buffer_max_len);

	while (next_item && read_off <= read_off_end) {
		item_len = next_item->buffer_len + sizeof(*next_item);
		write_len = kernel_write(filep, next_item->log_buffer,
			next_item->real_len, pos);
		if (write_len < 0) {
			tloge("Failed to write last teemsg %zd\n", write_len);
			return -1;
		}

		tlogd("Succeed to Write last teemsg, len=%zd\n", write_len);
		total_len += item_len;
		/* advance to the item that follows the one just written */
		read_off = (unsigned char *)next_item - buffer + item_len;
		if (total_len >= buffer_max_len)
			break;

		next_item = msg_get_next(buffer, read_off,
			LOG_ITEM_MAX_LEN, buffer_max_len);
	}

	return 0;
}

/*
 * Write the log items in [read_off, read_off_end) from the snapshot buffer
 * to filep. The log area is a ring: when the window wraps
 * (read_off >= read_off_end) the dump is issued in two parts,
 * [read_off, buffer_max_len) followed by [0, read_off_end).
 * Returns 0 on success, -1 on write failure.
 */
static int write_log_to_msg(struct file *filep,
	unsigned char *buffer, uint32_t buffer_max_len, loff_t *pos,
	uint32_t read_off, uint32_t read_off_end)
{
	if (read_off < read_off_end) {
		return write_part_log_to_msg(filep, buffer, buffer_max_len, pos,
			read_off, read_off_end);
	} else {
		if (write_part_log_to_msg(filep, buffer, buffer_max_len, pos,
			read_off, buffer_max_len) != 0)
			return -1;
		return write_part_log_to_msg(filep, buffer, buffer_max_len, pos,
			0, read_off_end);
	}
}

#ifdef CONFIG_TEE_LOG_DUMP_PATH
/*
 * Scan the log items in [read_off, read_off_end) of the shared log buffer and
 * record the offsets of the items containing DUMP_START_MAGIC/DUMP_END_MAGIC
 * in *read_start/*read_end; the matching *dump_*_flag is set to 1 when found.
 */
static void update_dumpmsg_offset(uint32_t *read_start, uint32_t *read_end,
	uint32_t read_off, uint32_t read_off_end, uint32_t *dump_start_flag, uint32_t *dump_end_flag)
{
	struct log_item *next_item = NULL;
	unsigned char *buffer = g_log_buffer->buffer_start;
	uint32_t buffer_max_len = g_log_mem_len - sizeof(*g_log_buffer);
	ssize_t item_len;
	ssize_t total_len = 0;

	next_item = msg_get_next(buffer, read_off,
		LOG_ITEM_MAX_LEN, buffer_max_len);

	while (next_item && read_off <= read_off_end) {
		item_len = next_item->buffer_len + sizeof(*next_item);
		if (strstr(next_item->log_buffer, DUMP_START_MAGIC)) {
			*read_start = read_off;
			*dump_start_flag = 1;
		} else if (strstr(next_item->log_buffer, DUMP_END_MAGIC)) {
			*read_end = read_off;
			*dump_end_flag = 1;
		}
		read_off = (unsigned char *)next_item - buffer + item_len;
		total_len += item_len;
		if (total_len >= buffer_max_len)
			break;

		next_item = msg_get_next(buffer, read_off,
			LOG_ITEM_MAX_LEN, buffer_max_len);
	}
}
#endif

#ifdef CONFIG_TEE_LOG_DUMP_PATH
/*
 * Narrow [*read_start, *read_end) to the span delimited by the dump
 * start/end magic markers, handling ring wrap-around (two-pass scan).
 * Returns 0 when both markers were found, -1 otherwise.
 */
static int get_dumpmsg_offset(uint32_t *read_start, uint32_t *read_end)
{
	uint32_t read_off = *read_start;
	uint32_t read_off_end = *read_end;
	uint32_t buffer_max_len = g_log_mem_len - sizeof(*g_log_buffer);
	uint32_t dump_start_flag = 0;
	uint32_t dump_end_flag = 0;

	if (read_off < read_off_end) {
		update_dumpmsg_offset(read_start, read_end, read_off, read_off_end,
			&dump_start_flag, &dump_end_flag);
	} else {
		update_dumpmsg_offset(read_start, read_end, read_off, buffer_max_len,
			&dump_start_flag, &dump_end_flag);
		update_dumpmsg_offset(read_start, read_end, 0, read_off_end,
			&dump_start_flag, &dump_end_flag);
	}

	if (dump_start_flag == 0 || dump_end_flag == 0) {
		tloge("can't find dump start or end\n");
		return -1;
	} else {
		return 0;
	}
}
#endif

/*
 * Snapshot the shared TEE log memory into a fresh kmalloc'ed buffer and
 * compute the [*read_start, *read_end) window to be dumped for file_path.
 * On success the caller owns *buffer and must kfree it.
 * NOTE(review): when the log area exceeds LOG_BUFFER_MAX_LEN this returns 0
 * WITHOUT allocating, leaving *buffer NULL — confirm the caller path
 * tolerates a NULL buffer in that case.
 */
static int get_msg_buffer(unsigned char **buffer, uint32_t *buffer_max_len,
	uint32_t *read_start, uint32_t *read_end,
	const char *file_path, uint32_t file_path_len)
{
	errno_t rc;
	int ret;
	unsigned char *addr = NULL;
	(void)file_path_len;

	if (!g_log_buffer)
		return -1;

	*buffer_max_len = g_log_mem_len - sizeof(*g_log_buffer);

	if (*buffer_max_len > LOG_BUFFER_MAX_LEN)
		return 0;

	*read_start = 0;
	*read_end = *buffer_max_len;
#ifdef CONFIG_TEE_LOG_DUMP_PATH
	/* dump file: restrict the window to the marked dump region */
	if (strcmp(file_path, CONFIG_TEE_LOG_DUMP_PATH) == 0) {
		*read_start = g_last_read_offset;
		*read_end = ((struct log_buffer*)g_log->buffer_info)->flag.last_pos;
		if (get_dumpmsg_offset(read_start, read_end) != 0) {
			tloge("get dump offset failed\n");
			return -1;
		}
	}
#else
	(void)file_path;
#endif
	addr = kmalloc(*buffer_max_len, GFP_KERNEL);
	if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)addr)) {
		ret = -ENOMEM;
		goto free_res;
	}

	rc = memcpy_s(addr, *buffer_max_len, g_log_buffer->buffer_start,
		*buffer_max_len);
	if (rc) {
		tloge("memcpy failed %d\n", rc);
		ret = -EAGAIN;
		goto free_res;
	}

	*buffer = addr;
	return 0;

free_res:
	if (addr)
		kfree(addr);

	return ret;
}

/*
 * Create/truncate the last-teemsg file at file_path (mode OPEN_FILE_MODE).
 * Returns 0 and stores the handle in *file on success, -1 on failure.
 */
static int open_msg_file(struct file **file,
	const char *file_path, uint32_t file_path_len)
{
	struct file *filep = NULL;
	(void)file_path_len;

	filep = filp_open(file_path, O_CREAT | O_RDWR | O_TRUNC, OPEN_FILE_MODE);
	if (!filep || IS_ERR(filep)) {
		tloge("open last teemsg file err %ld\n", PTR_ERR(filep));
		return -1;
	}

	*file = filep;
	return 0;
}

/*
 * Dump the TEE log memory to file_path: snapshot the log area, open/chown
 * the target file, write the version header then the log window, fsync and
 * close. Skipped (returns 0) when no tlogcat reader is registered.
 */
int tlogger_store_msg(const char *file_path, uint32_t file_path_len)
{
	struct file *filep = NULL;
	loff_t pos = 0;
	int ret;
	uint32_t buffer_max_len = 0;
	unsigned char *buffer = NULL;
	uint32_t read_start = 0;
	uint32_t read_end = 0;

	if (!file_path || file_path_len > TEE_LOG_FILE_NAME_MAX) {
		tloge("file path is invalid\n");
		return -1;
	}

	if (!g_tlogcat_count) {
		tlogd("tlogcat count %u\n", g_tlogcat_count);
		return 0;
	}

	/* copy logs from log memory, then parse the logs */
	ret = get_msg_buffer(&buffer, &buffer_max_len,
		&read_start, &read_end, file_path, file_path_len);
	if (ret != 0)
		return ret;

	/* exception handling, store trustedcore exception info to file */
	ret = open_msg_file(&filep, file_path, file_path_len);
	if (ret != 0)
		goto free_res;

	ret = tlogger_chown(file_path, file_path_len);
	if (ret != 0)
		goto free_res;

	ret = write_version_to_msg(filep, &pos);
	if (ret != 0)
		goto free_res;

	ret = write_log_to_msg(filep, buffer, buffer_max_len,
		&pos, read_start, read_end);

free_res:
	if (buffer) {
		kfree(buffer);
		buffer = NULL;
	}

	if (filep != NULL) {
		vfs_fsync(filep, 0);
		filp_close(filep, 0);
	}

	/* trigger write teeos log */
	tz_log_write();
	return ret;
}

#ifdef DEF_ENG
#define KERNEL_IMG_IS_ENG 1
#endif
int
register_mem_to_teeos(uint64_t mem_addr, uint32_t mem_len, bool is_cache_mem) +{ + struct tc_ns_smc_cmd smc_cmd = { {0}, 0 }; + struct mb_cmd_pack *mb_pack = NULL; + int ret; + + mb_pack = mailbox_alloc_cmd_pack(); + if (!mb_pack) { + tloge("mailbox alloc failed\n"); + return -ENOMEM; + } + + smc_cmd.cmd_type = CMD_TYPE_GLOBAL; + smc_cmd.cmd_id = GLOBAL_CMD_ID_REGISTER_LOG_MEM; + mb_pack->operation.paramtypes = teec_param_types( + TEE_PARAM_TYPE_VALUE_INPUT, + TEE_PARAM_TYPE_VALUE_INPUT, + TEE_PARAM_TYPE_VALUE_INPUT, + TEE_PARAM_TYPE_NONE); + mb_pack->operation.params[0].value.a = mem_addr; + mb_pack->operation.params[0].value.b = mem_addr >> ADDR_TRANS_NUM; + mb_pack->operation.params[1].value.a = mem_len; +#ifdef DEF_ENG + mb_pack->operation.params[1].value.b = KERNEL_IMG_IS_ENG; +#endif + /* + * is_cache_mem: true, teeos map this memory for cache + * style; or else map to no cache style + */ + mb_pack->operation.params[2].value.a = is_cache_mem; + + smc_cmd.operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation); + smc_cmd.operation_h_phys = + (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM; + + if (is_tee_rebooting()) + ret = send_smc_cmd_rebooting(TSP_REQUEST, 0, 0, &smc_cmd); + else + ret = tc_ns_smc(&smc_cmd); + + mailbox_free(mb_pack); + if (ret != 0) + tloge("Send log mem info failed\n"); + + return ret; +} + +static int register_mem_cfg(uint64_t *addr, uint32_t *len) +{ + int ret; + ret = register_log_mem(addr, len); + if (ret != 0) + tloge("register log mem failed %x\n", ret); + + ret = register_log_exception(); + if (ret != 0) + tloge("teeos register exception to log module failed\n"); + + return ret; +} + +static int check_log_mem(uint64_t mem_addr, uint32_t mem_len) +{ + if (mem_len < TEMP_LOG_MEM_SIZE) { + tloge("log mem init error, too small len:0x%x\n", mem_len); + return -1; + } + if (!mem_addr) { + tloge("mem init failed!!! 
addr is 0\n"); + return -1; + } + return 0; +} + +int register_tloger_mem(void) +{ + int ret; + uint64_t mem_addr = 0; + + ret = register_mem_cfg(&mem_addr, &g_log_mem_len); + if (ret != 0) + return ret; + + ret = check_log_mem(mem_addr, g_log_mem_len); + if (ret != 0) + return ret; + + g_log_buffer = + (struct log_buffer *)map_log_mem(mem_addr, g_log_mem_len); + if (!g_log_buffer) + return -ENOMEM; + + g_log_buffer->flag.max_len = g_log_mem_len - sizeof(*g_log_buffer); + + return ret; +} + +static int register_tloger_device(void) +{ + int ret; + + tlogi("tlogcat version 1.0.0\n"); + ret = register_device(LOGGER_LOG_TEEOS, (uintptr_t)g_log_buffer, + sizeof(*g_log_buffer) + g_log_buffer->flag.max_len); + if (ret != 0) { + unmap_log_mem((int *)g_log_buffer); + g_log_buffer = NULL; + g_log_mem_len = 0; + } + + return ret; +} + +static int register_tloger(void) +{ + int ret; + + ret = register_tloger_mem(); + if (ret != 0) + return ret; + +#ifdef CONFIG_LOG_POOL + ret = init_log_pool(); + if (ret != 0) + tloge("init_log_pool init failed\n"); +#endif + + ret = register_tloger_device(); + + return ret; +} + +static void unregister_mem_cfg(void) +{ + if (g_log_buffer) + unmap_log_mem((int *)g_log_buffer); + + unregister_log_exception(); +} + +static void unregister_tlogger(void) +{ + struct tlogger_log *current_log = NULL; + struct tlogger_log *next_log = NULL; + + list_for_each_entry_safe(current_log, next_log, &m_log_list, logs) { + /* we have to delete all the entry inside m_log_list */ + misc_deregister(¤t_log->misc_device); + kfree(current_log->misc_device.name); + list_del(¤t_log->logs); + kfree(current_log); + } + +#ifdef CONFIG_LOG_POOL + free_log_pool(); +#endif + unregister_mem_cfg(); + g_log_buffer = NULL; + g_log_mem_len = 0; +} + +#ifdef CONFIG_TZDRIVER_MODULE +int init_tlogger_service(void) +{ + return register_tloger(); +} + +void free_tlogger_service(void) +{ + unregister_tlogger(); +} +#else +static int __init init_tlogger_service(void) +{ + return 
register_tloger(); +} + +static void __exit free_tlogger_service(void) +{ + unregister_tlogger(); +} +#endif + +#ifdef CONFIG_TZDRIVER +device_initcall(init_tlogger_service); +module_exit(free_tlogger_service); + +MODULE_AUTHOR("iTrustee"); +MODULE_DESCRIPTION("TrustCore Logger"); +MODULE_VERSION("1.00"); +#endif diff --git a/tzdriver/tlogger/tlogger.h b/tzdriver/tlogger/tlogger.h new file mode 100644 index 0000000000000000000000000000000000000000..b5dc91a193f1ece0f662b346d137b685dda625d6 --- /dev/null +++ b/tzdriver/tlogger/tlogger.h @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: TEE Logging Subsystem, read the tee os log from rdr memory + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef TLOGGER_H +#define TLOGGER_H + +#include + +#define OPEN_FILE_MODE 0640U +#define ROOT_UID 0 +#define ROOT_GID 0 +#define SYSTEM_GID 1000 +#ifdef LAST_TEE_MSG_ROOT_GID +#define FILE_CHOWN_GID 0 +#else +/* system gid for last_teemsg file sys chown */ +#define FILE_CHOWN_GID 1000 +#endif + +#define UINT64_MAX (uint64_t)(~((uint64_t)0)) /* 0xFFFFFFFFFFFFFFFF */ + +#ifdef CONFIG_TEELOG +void tz_log_write(void); +int tlogger_store_msg(const char *file_path, uint32_t file_path_len); +int register_mem_to_teeos(uint64_t mem_addr, uint32_t mem_len, bool is_cache_mem); + +#ifdef CONFIG_TZDRIVER_MODULE +int init_tlogger_service(void); +void free_tlogger_service(void); +int register_tloger_mem(void); +#endif + +#else +static inline void tz_log_write(void) +{ + return; +} + +static inline int tlogger_store_msg(const char *file_path, uint32_t file_path_len) +{ + (void)file_path; + (void)file_path_len; + return 0; +} +static inline int register_mem_to_teeos(uint64_t mem_addr, uint32_t mem_len, + bool is_cache_mem) +{ + (void)mem_addr; + (void)mem_len; + return 0; +} +static inline int init_tlogger_service(void) +{ + return 0; +} +static inline void free_tlogger_service(void) +{ +} +#endif +#endif diff --git a/tzdriver/tui.h b/tzdriver/tui.h new file mode 120000 index 0000000000000000000000000000000000000000..cbc3001df7d17d1d362ab750491f90e26bb585ec --- /dev/null +++ b/tzdriver/tui.h @@ -0,0 +1 @@ +tui/tui.h \ No newline at end of file diff --git a/tzdriver/tui/Kconfig b/tzdriver/tui/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..1c50679d82f6ff72d53c83281e105fbedc2090c5 --- /dev/null +++ b/tzdriver/tui/Kconfig @@ -0,0 +1,20 @@ +config TEE_TUI + bool "Trusted User Interface Driver" + default n + depends on TZDRIVER + help + Trusted user interface driver + +config TEE_TUI_FP + bool "Trusted User Interface Driver for FP" + default n + depends on TZDRIVER + help + Trusted user interface driver + +config TEE_TUI_DISPLAY_3_0 + bool "Trusted 
User Interface Driver for DSS3.0" + default n + depends on TEE_TUI + help + Trusted user interface driver diff --git a/tzdriver/tui/Makefile b/tzdriver/tui/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..ad0d6d9571d53309f86072591ff5959a5f040aa4 --- /dev/null +++ b/tzdriver/tui/Makefile @@ -0,0 +1,48 @@ +KERNEL_DIR :=$(srctree) + +ifneq ($(TARGET_BUILD_VARIANT), user) + ccflags-y += -DDEBUG_TUI +endif + +ifeq ($(CONFIG_TZDRIVER_OHOS),y) +EXTRA_CFLAGS += -DTUI_DAEMON_UID_IN_OH=6668 +endif + +ifeq ($(CONFIG_TEE_TUI_MTK), y) +EXTRA_CFLAGS += -Idrivers/tzdriver +EXTRA_CFLAGS += -Idrivers/tzdriver/core + +EXTRA_CFLAGS += -I$(KERNEL_DIR)/../../../../third_party/bounds_checking_function/include +EXTRA_CFLAGS += -Idrivers/gpu/drm/mediatek +EXTRA_CFLAGS += -Idrivers/misc/mediatek/memory-ssmr +EXTRA_CFLAGS += -Idrivers/devkit/lcdkit/lcdkit3.0/kernel/mtk/include +EXTRA_CFLAGS += -Idrivers/devkit/lcdkit/lcdkit3.0/kernel/common/include +EXTRA_CFLAGS += -Idrivers/devkit/lcdkit/lcdkit3.0/kernel/mtk/adapt +else +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/platform_drivers/tzdriver +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/platform_drivers/tzdriver/core + +ifeq ($(CONFIG_TEE_TUI_DISPLAY_3_0), y) +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/video/dkmd/dksm +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/video/fbdev/dkmd/dksm +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/video/dkmd/dpu/begonia +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/video/fbdev/dkmd/dpu/begonia +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/video/dkmd/dpu/begonia/composer +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/video/fbdev/dkmd/dpu/begonia/composer +EXTRA_CFLAGS += -I$(KERNEL_DIR)/include/platform_include/display/dkmd +else +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/video/dkmd/dpu/azalea +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/video/fbdev/dkmd/dpu/azalea +endif + +endif + +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/platform_drivers/tzdriver/include +EXTRA_CFLAGS += 
-I$(KERNEL_DIR)/drivers/platform_drivers/tzdriver/ion +EXTRA_CFLAGS += -include internal_functions.h + +ifeq ($(CONFIG_HW_SECMEM), y) +EXTRA_CFLAGS += -Idrivers/uni_drivers/secmem +endif + +obj-$(CONFIG_TEE_TUI) += tui.o \ No newline at end of file diff --git a/tzdriver/tui/tui.c b/tzdriver/tui/tui.c new file mode 100644 index 0000000000000000000000000000000000000000..d4a20515151f889d332a3b1deeda2d1b47b96fb3 --- /dev/null +++ b/tzdriver/tui/tui.c @@ -0,0 +1,2015 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * Decription: tui agent for tui display and interact + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include "tui.h" +#include +#include +#if (KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE) +#include +#include +#else +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef CONFIG_DMABUF_MM +#include +#endif +#include +#include +#include +#include +#include +#ifdef CONFIG_TEE_TUI_MTK +#include +#include +#include +#endif +/* add for CMA malloc framebuffer */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "teek_client_constants.h" +#include "agent.h" +#include "mem.h" +#include "teek_ns_client.h" +#include "smc_smp.h" +#include "tc_ns_client.h" +#include "tc_ns_log.h" +#include "mailbox_mempool.h" +#ifndef CONFIG_TEE_TUI_MTK +#include +#ifdef CONFIG_DMABUF_MM +#include +#else +#include +#endif +#ifdef CONFIG_TEE_TUI_DISPLAY_3_0 +#include "dpu_comp_mgr.h" +#else +#include +#endif +#endif +#include "dynamic_ion_mem.h" +#ifdef CONFIG_TEE_TUI_MTK +#include "teek_client_type.h" +#include "teek_client_api.h" +#include +#include +#ifdef CONFIG_HW_SECMEM +#include "secmem_api.h" +#endif +#ifdef CONFIG_ITRUSTEE_TRUSTED_UI +#include +#endif + +#ifdef CONFIG_HW_COMB_KEY +#include +#endif + +#ifndef CONFIG_ITRUSTEE_TRUSTED_UI +#include +struct mtk_fb_data_type { + bool panel_power_on; + struct mtk_panel_info panel_info; +}; +#endif +#endif +#include "internal_functions.h" + +static void tui_poweroff_work_func(struct work_struct *work); +static ssize_t tui_status_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf); +static void tui_msg_del(const char *name); +static DECLARE_DELAYED_WORK(tui_poweroff_work, tui_poweroff_work_func); +static struct kobject *g_tui_kobj = NULL; +static struct kobj_attribute tui_attribute = + __ATTR(c_state, 0440, tui_status_show, NULL); +static struct attribute *attrs[] = { + &tui_attribute.attr, + NULL, +}; + +static struct attribute_group g_tui_attr_group = { + .attrs = attrs, +}; + 
DEFINE_MUTEX(g_tui_drv_lock);
static struct task_struct *g_tui_task = NULL;
static struct tui_ctl_shm *g_tui_ctl = NULL;
static atomic_t g_tui_usage = ATOMIC_INIT(0);
static atomic_t g_tui_state = ATOMIC_INIT(TUI_STATE_UNUSED);
static struct list_head g_tui_drv_head = LIST_HEAD_INIT(g_tui_drv_head);
static atomic_t g_tui_attached_device = ATOMIC_INIT(TUI_PID_CLEAR);
static atomic_t g_tui_pid = ATOMIC_INIT(TUI_PID_CLEAR);
static bool g_normal_load_flag = false;

static spinlock_t g_tui_msg_lock;
static struct list_head g_tui_msg_head;
static wait_queue_head_t g_tui_state_wq;
static int g_tui_state_flag;
static wait_queue_head_t g_tui_msg_wq;
static int32_t g_tui_msg_flag;
/* per-platform framebuffer device handle, set by register_tui_driver("DSS") */
#ifdef CONFIG_TEE_TUI_MTK
static struct mtk_fb_data_type *g_dss_fd;
#elif defined CONFIG_TEE_TUI_DISPLAY_3_0
static struct dpu_composer *g_dss_fd;
#else
static struct hisi_fb_data_type *g_dss_fd;
#endif
#define TUI_DSS_NAME "DSS"
#define TUI_GPIO_NAME "fff0d000.gpio"
#define TUI_TP_NAME "tp"
#define TUI_FP_NAME "fp"

/* EMUI 11.1 need use the ttf of HarmonyOSHans.ttf */
#define TTF_NORMAL_BUFF_SIZE (20 * 1024 * 1024)

#ifdef TUI_DAEMON_UID_IN_OH
#define TTF_NORMAL_FILE_PATH "/system/fonts/HarmonyOS_Sans_SC_Regular.ttf"
#else
#define TTF_NORMAL_FILE_PATH "/system/fonts/HarmonyOS_Sans_SC.ttf"
#endif

/* 2M memory size is 2^21 */
#define ALIGN_SIZE 21
#define ALIGN_M (1 << 21)
#define MAX_SCREEN_RESOLUTION 8192
#define TP_BASE_VALUE 10

/* dss and tp couple mode: 0 is init dss and tp; 1 is only init dss; 2 is only init tp */
#define DSS_TP_COUPLE_MODE 0
#define NORMAL_MODE 0 /* init all driver */
#define ONLY_INIT_DSS 1 /* only init dss */
#define ONLY_INIT_TP 2 /* only init tp */

/*
 * do fp init(disable fp irq) before gpio init in order not response
 * sensor in normal world(when gpio secure status is set)
 */
#if ONLY_INIT_DSS == DSS_TP_COUPLE_MODE
#define DRIVER_NUM 1
static char *g_init_driver[DRIVER_NUM] = {TUI_DSS_NAME};
static char *g_deinit_driver[DRIVER_NUM] = {TUI_DSS_NAME};
#endif

#if ONLY_INIT_TP == DSS_TP_COUPLE_MODE
#define DRIVER_NUM 3
static char *g_init_driver[DRIVER_NUM] = {TUI_TP_NAME, TUI_FP_NAME, TUI_GPIO_NAME};
static char *g_deinit_driver[DRIVER_NUM] = {TUI_TP_NAME, TUI_FP_NAME, TUI_GPIO_NAME};
#endif

#if NORMAL_MODE == DSS_TP_COUPLE_MODE
#define DRIVER_NUM 4
static char *g_init_driver[DRIVER_NUM] = {TUI_DSS_NAME, TUI_TP_NAME, TUI_FP_NAME, TUI_GPIO_NAME};
static char *g_deinit_driver[DRIVER_NUM] = {TUI_DSS_NAME, TUI_TP_NAME, TUI_FP_NAME, TUI_GPIO_NAME};
#endif

#define TIME_OUT_FOWER_ON 100
#define DOWN_VAL 22 /* 4M */
#define UP_VAL 27 /* 64M */
#define COLOR_TYPE 4 /* ARGB */
#define BUFFER_NUM 2
#define UID_MAX_VAL 1000
#define HIGH_VALUES 32
#define ION_NENTS_FLAG 1
#define INVALID_CFG_NAME (-2)

static tui_ion_mem g_tui_display_mem;
static tui_ion_mem g_normal_font_mem;

/* Round num up to the next 2MB (ALIGN_M) boundary; aligned values pass through. */
unsigned int get_frame_size(unsigned int num)
{
	if (num % ALIGN_M != 0)
		return (((num >> ALIGN_SIZE) + 1) << ALIGN_SIZE);
	else
		return num;
}

/* Round num up to the smallest power of two between 2^DOWN_VAL (4M) and
 * 2^UP_VAL (64M) that is larger than num. */
unsigned int get_tui_size(unsigned int num)
{
	unsigned int i;
	for (i = DOWN_VAL; i < UP_VAL; i++)
		if ((num >> i) == 0)
			break;
	return (unsigned int)1 << i;
}

/*
 * alloc order: 4M-font, framebuffer, 20M-unusualfont
 * 1.4M alloc when boot so from ION_TUI_HEAP_ID
 * 2.20M and frambuffer alloc when tui init so from ION_MISC_HEAP_ID
 */
/* Return the size in bytes of the normal font ttf file, or 0 if stat fails. */
static size_t get_tui_font_file_size(void)
{
	int ret;
	struct kstat ttf_file_stat;
	mm_segment_t old_fs;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	/* get ttf file size */
	ret = vfs_stat(TTF_NORMAL_FILE_PATH, &ttf_file_stat);
	if (ret < 0) {
		tloge("Failed to get ttf extend file size, ret is %d\n", ret);
		set_fs(old_fs);
		return 0;
	}
	set_fs(old_fs);

	return ttf_file_stat.size;
}

/* Validate the ion sg_table: present and physically contiguous (exactly one
 * scatterlist entry). Returns 0 if usable, -1 otherwise. */
static int check_ion_sg_table(const struct sg_table *sg_table)
{
	if (sg_table == NULL) {
		tloge("invalid sgtable\n");
		return -1;
	}

	/* nent must be 1, because ion
addr for tui is continuous */ + if (sg_table->nents != ION_NENTS_FLAG) { + tloge("nent is invalid\n"); + return -1; + } + return 0; +} + +static int get_tui_ion_sglist(tui_ion_mem *tui_mem) +{ + struct sglist *tmp_tui_sglist = NULL; + struct scatterlist *tui_sg = NULL; + struct page *tui_page = NULL; + uint32_t tui_sglist_size; + uint32_t i = 0; + + struct sg_table *tui_ion_sg_table = tui_mem->tui_sg_table; + if (check_ion_sg_table(tui_ion_sg_table) != 0) + return -1; + + tui_sglist_size = sizeof(struct ion_page_info) * tui_ion_sg_table->nents + + sizeof(struct sglist); + tmp_tui_sglist = (struct sglist *)mailbox_alloc(tui_sglist_size, MB_FLAG_ZERO); + if (tmp_tui_sglist == NULL) { + tloge("in %s err: mailbox_alloc failed\n", __func__); + return -1; + } + + tmp_tui_sglist->sglist_size = (uint64_t)tui_sglist_size; + tmp_tui_sglist->ion_size = (uint64_t)tui_mem->len; + tmp_tui_sglist->info_length = (uint64_t)tui_ion_sg_table->nents; + tui_mem->info_length = (uint64_t)tui_ion_sg_table->nents; + + /* get tui_sg to fetch ion for tui */ + for_each_sg(tui_ion_sg_table->sgl, tui_sg, tui_ion_sg_table->nents, i) { + if (tui_sg == NULL) { + tloge("tui sg is NULL"); + mailbox_free(tmp_tui_sglist); + return -1; + } + tui_page = sg_page(tui_sg); + tmp_tui_sglist->page_info[0].phys_addr = page_to_phys(tui_page); + tmp_tui_sglist->page_info[0].npages = tui_sg->length / PAGE_SIZE; + tui_mem->npages = tmp_tui_sglist->page_info[0].npages; + tui_mem->tui_ion_virt_addr = phys_to_virt(tmp_tui_sglist->page_info[0].phys_addr); + tui_mem->fb_phys_addr = tmp_tui_sglist->page_info[0].phys_addr; + } + + tui_mem->tui_ion_phys_addr = mailbox_virt_to_phys((uintptr_t)(void *)tmp_tui_sglist); // sglist phys-addr + if (tui_mem->tui_ion_phys_addr == 0) { + tloge("Failed to get tmp_tui_sglist physaddr, configid=%d\n", + tui_mem->configid); + mailbox_free(tmp_tui_sglist); + return -1; + } + tui_mem->size = tui_sglist_size; + return 0; +} + +static int alloc_ion_mem(tui_ion_mem *tui_mem) +{ + struct 
sg_table *tui_ion_sg_table = NULL; + if (tui_mem == NULL) { + tloge("bad input params\n"); + return -1; + } +#ifdef CONFIG_HW_SECMEM + tui_ion_sg_table = cma_secmem_alloc(SEC_TUI, tui_mem->len); +#endif +#ifndef CONFIG_TEE_TUI_MTK + tui_ion_sg_table = mm_secmem_alloc(SEC_TUI, tui_mem->len); +#endif + if (tui_ion_sg_table == NULL) { + tloge("failed to get ion page for tui, configid is %d\n", tui_mem->configid); + return -1; + } + tui_mem->tui_sg_table = tui_ion_sg_table; + return 0; +} + +static void free_ion_mem(tui_ion_mem *tui_mem) +{ + if (tui_mem->tui_sg_table == NULL || tui_mem->tui_ion_phys_addr == 0) { + tloge("bad input params\n"); + return; + } +#ifdef CONFIG_HW_SECMEM + cma_secmem_free(SEC_TUI, tui_mem->tui_sg_table); +#endif +#ifndef CONFIG_TEE_TUI_MTK + mm_secmem_free(SEC_TUI, tui_mem->tui_sg_table); +#endif + tui_mem->tui_ion_phys_addr = 0; + return; +} + +static void free_tui_font_mem(void) +{ + free_ion_mem(&g_normal_font_mem); + g_normal_load_flag = false; + tloge("normal tui font file freed\n"); +} + +static int get_tui_font_mem(tui_ion_mem *tui_font_mem) +{ + int ret; + + ret = alloc_ion_mem(tui_font_mem); + if (ret < 0) { + tloge("Failed to alloc cma mem for tui font lib\n"); + return -ENOMEM; + } + + return 0; +} + +/* size is calculated dynamically according to the screen resolution */ +#ifdef CONFIG_TEE_TUI_DISPLAY_3_0 +static phys_addr_t get_frame_addr(void) +{ + int screen_r; + int ret; + bool check_params = false; + if (g_dss_fd == NULL) + return 0; + + check_params = (g_dss_fd->comp.base.xres > MAX_SCREEN_RESOLUTION) || + (g_dss_fd->comp.base.yres > MAX_SCREEN_RESOLUTION); + if (check_params) { + tloge("Horizontal resolution or Vertical resolution is too large\n"); + return 0; + } + screen_r = g_dss_fd->comp.base.xres * g_dss_fd->comp.base.yres * COLOR_TYPE * BUFFER_NUM; + g_tui_display_mem.len = get_frame_size(screen_r); + ret = alloc_ion_mem(&g_tui_display_mem); + if (ret) { + tloge("Failed to alloc mem for tui display\n"); + return 0; + 
} + + if (get_tui_ion_sglist(&g_tui_display_mem)) { + tloge("get sglist failed\n"); + free_ion_mem(&g_tui_display_mem); + return 0; + } + + return g_tui_display_mem.fb_phys_addr; +} +#else +static phys_addr_t get_frame_addr(void) +{ + int screen_r; + int ret; + bool check_params = false; + if (g_dss_fd == NULL) + return 0; + + check_params = (g_dss_fd->panel_info.xres > MAX_SCREEN_RESOLUTION) || + (g_dss_fd->panel_info.yres > MAX_SCREEN_RESOLUTION); + if (check_params) { + tloge("Horizontal resolution or Vertical resolution is too large\n"); + return 0; + } + screen_r = g_dss_fd->panel_info.xres * g_dss_fd->panel_info.yres * COLOR_TYPE * BUFFER_NUM; + g_tui_display_mem.len = get_frame_size(screen_r); + ret = alloc_ion_mem(&g_tui_display_mem); + if (ret != 0) { + tloge("Failed to alloc mem for tui display\n"); + return 0; + } + + if (get_tui_ion_sglist(&g_tui_display_mem) != 0) { + tloge("get sglist failed\n"); + free_ion_mem(&g_tui_display_mem); + return 0; + } + + return g_tui_display_mem.fb_phys_addr; +} +#endif + +void free_frame_addr(void) +{ + mailbox_free(phys_to_virt(g_tui_display_mem.tui_ion_phys_addr)); + free_ion_mem(&g_tui_display_mem); + return; +} + +static int32_t tc_ns_register_tui_font_mem(tui_ion_mem *tui_font_mem, + size_t font_file_size) +{ + struct tc_ns_smc_cmd smc_cmd = { {0}, 0}; + int ret = 0; + struct mb_cmd_pack *mb_pack = NULL; + + mb_pack = mailbox_alloc_cmd_pack(); + if (!mb_pack) { + tloge("alloc cmd pack failed\n"); + return -ENOMEM; + } + + smc_cmd.cmd_type = CMD_TYPE_GLOBAL; + smc_cmd.cmd_id = GLOBAL_CMD_ID_REGISTER_TTF_MEM; + + mb_pack->operation.paramtypes = teec_param_types( + TEEC_MEMREF_TEMP_INPUT, + TEEC_VALUE_INPUT, + TEEC_NONE, + TEEC_NONE + ); + + mb_pack->operation.params[0].memref.size = (uint32_t)(tui_font_mem->size); + mb_pack->operation.params[0].memref.buffer = (uint32_t)(tui_font_mem->tui_ion_phys_addr & 0xFFFFFFFF); + mb_pack->operation.buffer_h_addr[0] = tui_font_mem->tui_ion_phys_addr >> HIGH_VALUES; + 
mb_pack->operation.params[1].value.a = font_file_size; + + smc_cmd.operation_phys = (unsigned int)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation); + smc_cmd.operation_h_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> HIGH_VALUES; + if (tc_ns_smc(&smc_cmd)) { + ret = -EPERM; + tloge("send ttf mem info failed. ret = 0x%x\n", smc_cmd.ret_val); + } + mailbox_free(mb_pack); + + return ret; +} + +static int32_t copy_tui_font_file(size_t font_file_size, const void *font_virt_addr) +{ + struct file *filep = NULL; + mm_segment_t old_fs; + loff_t pos = 0; + unsigned int count; + int ret = 0; + + if (font_virt_addr == NULL) + return -1; + + filep = filp_open(TTF_NORMAL_FILE_PATH, O_RDONLY, 0); + if (IS_ERR(filep) || filep == NULL) { + tloge("Failed to open ttf file\n"); + return -1; + } + + old_fs = get_fs(); + set_fs(KERNEL_DS); + + count = (unsigned int)vfs_read(filep, (char __user *)font_virt_addr, font_file_size, &pos); + + if (font_file_size != count) { + tloge("read ttf file failed\n"); + ret = -1; + } + + set_fs(old_fs); + filp_close(filep, 0); + filep = NULL; + return ret; +} + +static int32_t send_ttf_mem(tui_ion_mem *tui_ttf_mem) +{ + int ret; + size_t tui_font_file_size; + bool check_params = false; + + tui_font_file_size = get_tui_font_file_size(); + check_params = (tui_font_file_size == 0) || (tui_font_file_size > tui_ttf_mem->len); + if (check_params) { + tloge("Failed to get the tui font file size or the tui_font_file_size is too big\n"); + return -1; + } + + __dma_map_area(tui_ttf_mem->tui_ion_virt_addr, tui_ttf_mem->len, DMA_BIDIRECTIONAL); + ret = copy_tui_font_file(tui_font_file_size, tui_ttf_mem->tui_ion_virt_addr); + if (ret < 0) { + tloge("Failed to do ttf file copy\n"); + return -1; + } + + __dma_map_area(tui_ttf_mem->tui_ion_virt_addr, tui_ttf_mem->len, DMA_BIDIRECTIONAL); + __dma_map_area(tui_ttf_mem->tui_ion_virt_addr, tui_ttf_mem->len, DMA_FROM_DEVICE); + + ret = tc_ns_register_tui_font_mem(tui_ttf_mem, tui_font_file_size); + if 
(ret != 0) { + tloge("Failed to do ttf file register ret is 0x%x\n", ret); + return -1; + } + + return 0; +} + +static int32_t load_tui_font_file(void) +{ + int ret = 0; + tui_ion_mem *tui_ttf_mem = NULL; + + tloge("====load ttf start =====\n"); + + mutex_lock(&g_tui_drv_lock); + if (g_normal_load_flag) { + tloge("normal tui font file has been loaded\n"); + mutex_unlock(&g_tui_drv_lock); + return 0; + } + + g_normal_font_mem.len = TTF_NORMAL_BUFF_SIZE; + ret = get_tui_font_mem(&g_normal_font_mem); + tui_ttf_mem = &g_normal_font_mem; + if (ret != 0) { + tloge("Failed to get tui font memory\n"); + mutex_unlock(&g_tui_drv_lock); + return -1; + } + + if (get_tui_ion_sglist(tui_ttf_mem) != 0) { + tloge("get tui sglist failed\n"); + free_tui_font_mem(); + mutex_unlock(&g_tui_drv_lock); + return -1; + } + + ret = send_ttf_mem(tui_ttf_mem); + if (ret != 0) { + mailbox_free(phys_to_virt(tui_ttf_mem->tui_ion_phys_addr)); + free_tui_font_mem(); + mutex_unlock(&g_tui_drv_lock); + return -1; + } + + tloge("normal tui font file loaded\n"); + g_normal_load_flag = true; + mutex_unlock(&g_tui_drv_lock); + + mailbox_free(phys_to_virt(tui_ttf_mem->tui_ion_phys_addr)); + tloge("=====load ttf end=====\n"); + return ret; +} + +int register_tui_driver(tui_drv_init fun, const char *name, + void *pdata) +{ + struct tui_drv_node *tui_drv = NULL; + struct tui_drv_node *pos = NULL; + + /* Return error if name is invalid */ + if (name == NULL || fun == NULL) { + tloge("name or func is null"); + return -EINVAL; + } + + if (strncmp(name, TUI_DSS_NAME, (size_t)TUI_DRV_NAME_MAX) == 0) { + if (pdata == NULL) + return -1; + else +#ifdef CONFIG_TEE_TUI_MTK + g_dss_fd = (struct mtk_fb_data_type *)pdata; +#elif defined CONFIG_TEE_TUI_DISPLAY_3_0 + g_dss_fd = (struct dpu_composer *)pdata; +#else + g_dss_fd = (struct hisi_fb_data_type *)pdata; +#endif + } + + if ((strncmp(name, TUI_TP_NAME, (size_t)TUI_DRV_NAME_MAX) == 0) && pdata == NULL) + return -1; + + mutex_lock(&g_tui_drv_lock); + + /* name should 
not have been registered */ + list_for_each_entry(pos, &g_tui_drv_head, list) { + if (!strncmp(pos->name, name, TUI_DRV_NAME_MAX - 1)) { + tloge("this drv(%s) have registered\n", name); + mutex_unlock(&g_tui_drv_lock); + return -EINVAL; + } + } + mutex_unlock(&g_tui_drv_lock); + + /* Alllovate memory for tui_drv */ + tui_drv = kzalloc(sizeof(struct tui_drv_node), GFP_KERNEL); + if (tui_drv == NULL) + return -ENOMEM; + + if (memset_s(tui_drv, sizeof(struct tui_drv_node), 0, sizeof(struct tui_drv_node)) != 0) { + tloge("tui_drv memset failed"); + kfree(tui_drv); + return -1; + } + /* Assign content for tui_drv */ + tui_drv->init_func = fun; + tui_drv->pdata = pdata; + + if (strncpy_s(tui_drv->name, TUI_DRV_NAME_MAX, name, TUI_DRV_NAME_MAX - 1) != 0) { + tloge("strncpy_s error\n"); + kfree(tui_drv); + return -1; + } + + INIT_LIST_HEAD(&tui_drv->list); + + /* link the new tui_drv to the list */ + mutex_lock(&g_tui_drv_lock); + list_add_tail(&tui_drv->list, &g_tui_drv_head); + mutex_unlock(&g_tui_drv_lock); + + return 0; +} +EXPORT_SYMBOL(register_tui_driver); + +void unregister_tui_driver(const char *name) +{ + struct tui_drv_node *pos = NULL, *tmp = NULL; + + /* Return error if name is invalid */ + if (name == NULL) { + tloge("name is null"); + return; + } + + mutex_lock(&g_tui_drv_lock); + list_for_each_entry_safe(pos, tmp, &g_tui_drv_head, list) { + if (!strncmp(pos->name, name, TUI_DRV_NAME_MAX)) { + list_del(&pos->list); + kfree(pos); + break; + } + } + mutex_unlock(&g_tui_drv_lock); +} +EXPORT_SYMBOL(unregister_tui_driver); + +static int add_tui_msg(int type, int val, void *data) +{ + struct tui_msg_node *tui_msg = NULL; + unsigned long flags; + + /* Return error if pdata is invalid */ + if (data == NULL) { + tloge("data is null"); + return -EINVAL; + } + + /* Allocate memory for tui_msg */ + tui_msg = kzalloc(sizeof(*tui_msg), GFP_KERNEL); + if (tui_msg == NULL) + return -ENOMEM; + + if (memset_s(tui_msg, sizeof(*tui_msg), 0, sizeof(*tui_msg)) != 0) { + 
tloge("tui_msg memset failed"); + kfree(tui_msg); + return -1; + } + + /* Assign the content of tui_msg */ + tui_msg->type = type; + tui_msg->val = val; + tui_msg->data = data; + INIT_LIST_HEAD(&tui_msg->list); + + /* Link the new tui_msg to the list */ + spin_lock_irqsave(&g_tui_msg_lock, flags); + list_add_tail(&tui_msg->list, &g_tui_msg_head); + g_tui_msg_flag = 1; + spin_unlock_irqrestore(&g_tui_msg_lock, flags); + return 0; +} + +static int32_t init_each_tui_driver(struct tui_drv_node *pos, int32_t secure) +{ + if (secure == 0) { + tlogi("drv(%s) state=%d,%d\n", pos->name, secure, pos->state); + if (pos->state == 0) + return 0; + if (pos->init_func(pos->pdata, secure) != 0) + pos->state = -1; /* Process init_func() fail */ + + /* set secure state will be proceed in tui msg */ + pos->state = 0; + } else { + tlogi("init tui drv(%s) state=%d\n", pos->name, secure); + /* when init, tp and dss should be async */ + if (pos->init_func(pos->pdata, secure) != 0) { + pos->state = -1; + return -1; + } else { +#ifndef CONFIG_TEE_TUI_MTK + if (strncmp(TUI_DSS_NAME, pos->name, TUI_DRV_NAME_MAX) != 0) +#endif + pos->state = 1; + } + } + return 0; +} + +enum tui_driver_env { + UNSECURE_ENV = 0, + SECURE_ENV = 1, +}; + +#define WAIT_POWER_ON_SLEEP_SPAN 20 +static int init_tui_dss_msg(const struct tui_drv_node *pos, int secure, int *count) +{ + if ((strncmp(TUI_DSS_NAME, pos->name, TUI_DRV_NAME_MAX) == 0) && (secure != 0)) { + tloge("init_tui_driver wait power on status---\n"); +#ifdef CONFIG_TEE_TUI_DISPLAY_3_0 + while (!g_dss_fd->comp.power_on && (*count) < TIME_OUT_FOWER_ON) { +#else + while (!g_dss_fd->panel_power_on && (*count) < TIME_OUT_FOWER_ON) { +#endif + (*count)++; + msleep(WAIT_POWER_ON_SLEEP_SPAN); + } + if ((*count) == TIME_OUT_FOWER_ON) { + /* Time out. So return error. 
*/ + tloge("wait status time out\n"); + return -1; + } + spin_lock(&g_tui_msg_lock); + tui_msg_del(TUI_DSS_NAME); + spin_unlock(&g_tui_msg_lock); + } + return 0; +} + +static bool is_dss_registered(void) +{ + struct tui_drv_node *pos = NULL; +#if ONLY_INIT_TP == DSS_TP_COUPLE_MODE + return true; +#endif + list_for_each_entry(pos, &g_tui_drv_head, list) { + if (strncmp(TUI_DSS_NAME, pos->name, TUI_DRV_NAME_MAX) == 0) + return true; + } + return false; +} + +/* WARNING: Too many leading tabs - consider code refactoring */ +/* secure : 0-unsecure, 1-secure */ +static int init_tui_driver(int secure) +{ + struct tui_drv_node *pos = NULL; + char *drv_name = NULL; + char **drv_array = g_deinit_driver; + int count = 0; + int i = 0; + int ret = 0; + if (g_dss_fd == NULL) + return -1; + + if (secure != 0) + drv_array = g_init_driver; + + while (i < DRIVER_NUM) { + drv_name = drv_array[i]; + i++; + mutex_lock(&g_tui_drv_lock); + + if (!is_dss_registered()) { + tloge("dss not registered\n"); + mutex_unlock(&g_tui_drv_lock); + return -1; + } + + /* Search all the tui_drv in their list */ + list_for_each_entry(pos, &g_tui_drv_head, list) { + if (strncmp(drv_name, pos->name, TUI_DRV_NAME_MAX) != 0) + continue; + + if (!strncmp(TUI_TP_NAME, pos->name, TUI_DRV_NAME_MAX)) { + /* If the name is "tp", assign pos->pdata to g_tui_ctl */ + g_tui_ctl->n2s.tp_info = (int)virt_to_phys(pos->pdata); + g_tui_ctl->n2s.tp_info_h_addr = virt_to_phys(pos->pdata) >> HIGH_VALUES; + } + if (pos->init_func == 0) + continue; + + ret = init_tui_dss_msg(pos, secure, &count); + if (ret != 0) { + mutex_unlock(&g_tui_drv_lock); + return ret; + } + + if (init_each_tui_driver(pos, secure) != 0) { + mutex_unlock(&g_tui_drv_lock); + return -1; + } + } + mutex_unlock(&g_tui_drv_lock); + } + + return 0; +} + +/* Only after all drivers cfg ok or some one failed, it need + * to add_tui_msg. 
+ * ret val: 1 - all cfg ok + * 0 - cfg is not complete, or have done + * -1 - cfg failed + * -2 - invalid name + */ +static int tui_cfg_filter(const char *name, bool ok) +{ + struct tui_drv_node *pos = NULL; + int find = 0; + int lock_flag = 0; + + /* Return error if name is invalid */ + if (name == NULL) { + tloge("name is null"); + return INVALID_CFG_NAME; + } + + /* some drivers may call send_tui_msg_config at the end + * of drv_init_func which had got the lock. + */ + if (mutex_is_locked(&g_tui_drv_lock)) + lock_flag = 1; + if (!lock_flag) + mutex_lock(&g_tui_drv_lock); + list_for_each_entry(pos, &g_tui_drv_head, list) { + if (strncmp(pos->name, name, TUI_DRV_NAME_MAX) != 0) + continue; + + find = 1; + if (ok) { + pos->state = 1; + } else { + if (!lock_flag) + mutex_unlock(&g_tui_drv_lock); + return -1; + } + } + if (!lock_flag) + mutex_unlock(&g_tui_drv_lock); + + if (find == 0) + return INVALID_CFG_NAME; + + return 1; +} + +enum poll_class { + CLASS_POLL_CONFIG, + CLASS_POLL_RUNNING, + CLASS_POLL_COMMON, + CLASS_POLL_INVALID +}; + +static enum poll_class tui_poll_class(int event_type) +{ + enum poll_class class = CLASS_POLL_INVALID; + + switch (event_type) { + case TUI_POLL_CFG_OK: + case TUI_POLL_CFG_FAIL: + case TUI_POLL_RESUME_TUI: + class = CLASS_POLL_CONFIG; + break; + case TUI_POLL_TP: + case TUI_POLL_TICK: + case TUI_POLL_DELAYED_WORK: + class = CLASS_POLL_RUNNING; + break; + case TUI_POLL_CANCEL: + class = CLASS_POLL_COMMON; + break; + default: + break; + } + return class; +} + +int send_tui_msg_config(int type, int val, void *data) +{ + int ret; + + if (type >= TUI_POLL_MAX || type < 0 || data == NULL) { + tloge("invalid tui event type\n"); + return -EINVAL; + } + + /* The g_tui_state should be CONFIG */ + if (atomic_read(&g_tui_state) != TUI_STATE_CONFIG) { + tloge("failed to send tui msg(%s)\n", poll_event_type_name[type]); + return -EINVAL; + } + + if (tui_poll_class(type) == CLASS_POLL_RUNNING) { + tloge("invalid tui event type(%s) in config 
state\n", poll_event_type_name[type]); + return -EINVAL; + } + + tlogi("send config event type %s(%s)\n", poll_event_type_name[type], (char *)data); + + if (type == TUI_POLL_CFG_OK || type == TUI_POLL_CFG_FAIL) { + int cfg_ret; + + cfg_ret = tui_cfg_filter((const char *)data, TUI_POLL_CFG_OK == type); + tlogd("tui driver(%s) cfg ret = %d\n", (char *)data, cfg_ret); + if (cfg_ret == INVALID_CFG_NAME) { + tloge("tui cfg filter failed, cfg_ret = %d\n", cfg_ret); + return -EINVAL; + } + } + + ret = add_tui_msg(type, val, data); + if (ret != 0) { + tloge("add tui msg ret=%d\n", ret); + return ret; + } + + tlogi("add config msg type %s\n", poll_event_type_name[type]); + + /* wake up tui kthread */ + wake_up(&g_tui_msg_wq); + + return 0; +} + +#define make32(high, low) ((((uint32_t)(high)) << 16) | (uint16_t)(low)) + +static bool package_notch_msg(struct mb_cmd_pack *mb_pack, uint8_t **buf_to_tee, + struct teec_tui_parameter *tui_param) +{ + uint32_t buf_len = sizeof(*tui_param) - sizeof(tui_param->event_type); + *buf_to_tee = mailbox_alloc(buf_len, 0); + if (*buf_to_tee == NULL) { + tloge("failed to alloc memory!\n"); + return false; + } + if (memcpy_s(*buf_to_tee, buf_len, &tui_param->value, + sizeof(*tui_param) - sizeof(tui_param->event_type)) != EOK) { + tloge("copy notch data failed"); + mailbox_free(*buf_to_tee); + return false; + } + mb_pack->operation.paramtypes = + TEE_PARAM_TYPE_VALUE_INPUT | + (TEE_PARAM_TYPE_VALUE_INPUT << TEE_PARAM_NUM); + mb_pack->operation.params[0].value.a = + (uint32_t)mailbox_virt_to_phys((uintptr_t)*buf_to_tee); + mb_pack->operation.params[0].value.b = + (uint64_t)mailbox_virt_to_phys((uintptr_t)*buf_to_tee) >> ADDR_TRANS_NUM; + mb_pack->operation.params[1].value.a = buf_len; + return true; +} + +static void package_fold_msg(struct mb_cmd_pack *mb_pack, + const struct teec_tui_parameter *tui_param) +{ + mb_pack->operation.paramtypes = teec_param_types(TEE_PARAM_TYPE_VALUE_INPUT, + TEE_PARAM_TYPE_VALUE_INPUT, + 
TEE_PARAM_TYPE_VALUE_INPUT, + TEE_PARAM_TYPE_VALUE_INPUT); + mb_pack->operation.params[0].value.a = tui_param->notch; +#ifdef CONFIG_TEE_TUI_DISPLAY_3_0 + mb_pack->operation.params[0].value.b = make32(g_dss_fd->comp.base.xres, g_dss_fd->comp.base.yres); +#else + mb_pack->operation.params[0].value.b = make32(g_dss_fd->panel_info.xres, g_dss_fd->panel_info.yres); +#endif + mb_pack->operation.params[1].value.a = tui_param->phy_width; + mb_pack->operation.params[1].value.b = tui_param->phy_height; + mb_pack->operation.params[2].value.a = tui_param->width; + mb_pack->operation.params[2].value.b = tui_param->height; + mb_pack->operation.params[3].value.a = tui_param->fold_state; + mb_pack->operation.params[3].value.b = tui_param->display_state; +} + +static bool check_uid_valid(uint32_t uid) +{ +#ifdef TUI_DAEMON_UID_IN_OH + return (uid == TUI_DAEMON_UID_IN_OH || uid == 0); +#else + return uid <= UID_MAX_VAL; +#endif +} + +static int32_t tui_send_smc_cmd(int32_t event, struct mb_cmd_pack *mb_pack, struct tc_ns_smc_cmd smc_cmd) +{ + uint32_t uid; + kuid_t kuid; + + kuid = current_uid(); + uid = kuid.val; + + if (check_uid_valid(uid) == false) { + tloge("get invalid uid = %d\n", uid); + return -1; + } + + if ((event != TUI_POLL_CANCEL) && (event != TUI_POLL_NOTCH) && (event != TUI_POLL_FOLD)) { + tloge("no permission to send msg\n"); + return -1; + } + + smc_cmd.cmd_type = CMD_TYPE_GLOBAL; + smc_cmd.operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation); + smc_cmd.operation_h_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> HIGH_VALUES; + smc_cmd.agent_id = event; + smc_cmd.uid = uid; + livepatch_down_read_sem(); + int32_t ret = tc_ns_smc(&smc_cmd); + livepatch_up_read_sem(); + if (ret != 0) { + tloge("tc ns smc fail 0x%x", ret); + return ret; + } + + return 0; +} + +/* Send tui event by smc_cmd */ +int tui_send_event(int event, struct teec_tui_parameter *tui_param) +{ + int status_temp; + bool check_value = false; + uint8_t *buf_to_tee = NULL; 
+ + if (tui_param == NULL) + return -1; + + if (event == TUI_POLL_NOTCH) { + check_value = true; + } else { + if (g_dss_fd == NULL) + return -1; + + status_temp = atomic_read(&g_tui_state); +#ifdef CONFIG_TEE_TUI_DISPLAY_3_0 + check_value = (status_temp != TUI_STATE_UNUSED && g_dss_fd->comp.power_on) || event == TUI_POLL_FOLD; +#else + check_value = (status_temp != TUI_STATE_UNUSED && g_dss_fd->panel_power_on) || event == TUI_POLL_FOLD; +#endif + } + + if (check_value) { + struct tc_ns_smc_cmd smc_cmd = { {0}, 0 }; + struct mb_cmd_pack *mb_pack = NULL; + int ret = 0; + + mb_pack = mailbox_alloc_cmd_pack(); + if (mb_pack == NULL) { + tloge("alloc cmd pack failed\n"); + return -1; + } + + switch (event) { + case TUI_POLL_CANCEL: + smc_cmd.cmd_id = GLOBAL_CMD_ID_TUI_EXCEPTION; + break; + case TUI_POLL_NOTCH: + if (!package_notch_msg(mb_pack, &buf_to_tee, + tui_param)) { + mailbox_free(mb_pack); + tloge("package notch msg failed\n"); + return -1; + } + smc_cmd.cmd_id = GLOBAL_CMD_ID_TUI_NOTCH; + break; + case TUI_POLL_FOLD: + package_fold_msg(mb_pack, tui_param); + smc_cmd.cmd_id = GLOBAL_CMD_ID_TUI_FOLD; + break; + default: + tloge("invalid event type : %d\n", event); + break; + } + + ret = tui_send_smc_cmd(event, mb_pack, smc_cmd); + if (ret != 0) + tloge("tui_send_smc_cmd error 0x%x", ret); + + mailbox_free(mb_pack); + if (buf_to_tee != NULL) + mailbox_free(buf_to_tee); + return ret; + } else { + tlogi("tui unused no need send tui event!\n"); + return 0; + } +} + +static void tui_poweroff_work_func(struct work_struct *work) +{ + struct teec_tui_parameter tui_param = {0}; + tui_send_event(TUI_POLL_CANCEL, &tui_param); +} + +void tui_poweroff_work_start(void) +{ + tlogi("tui_poweroff_work_start----------\n"); + if (g_dss_fd == NULL) + return; + +#ifdef CONFIG_TEE_TUI_DISPLAY_3_0 + if (atomic_read(&g_tui_state) != TUI_STATE_UNUSED && g_dss_fd->comp.power_on) { +#else + if (atomic_read(&g_tui_state) != TUI_STATE_UNUSED && g_dss_fd->panel_power_on) { +#endif + 
tlogi("come in tui_poweroff_work_start state=%d--\n", + atomic_read(&g_tui_state)); + queue_work(system_wq, &tui_poweroff_work.work); + } +} + +static void wait_tui_msg(void) +{ +#ifndef CONFIG_TEE_TUI_MTK + if (wait_event_interruptible(g_tui_msg_wq, g_tui_msg_flag)) + tloge("get tui state is interrupted\n"); +#endif + /* mtk is sync mess, don't need wait */ +} + +static int valid_msg(int msg_type) +{ + switch (msg_type) { + case TUI_POLL_RESUME_TUI: + if (atomic_read(&g_tui_state) == TUI_STATE_RUNNING) + return 0; + break; + case TUI_POLL_CANCEL: + if (atomic_read(&g_tui_state) == TUI_STATE_UNUSED) + return 0; + break; + default: + break; + } + + return 1; +} + +/* + * 1: init ok + * 0: still do init + * -1: init failed + */ +static int get_cfg_state(const char *name) +{ + const struct tui_msg_node *tui_msg = NULL; + + /* Return error if name is invalid */ + if (name == NULL) { + tloge("name is null"); + return -1; + } + + list_for_each_entry(tui_msg, &g_tui_msg_head, list) { + /* Names match */ + if (!strncmp(tui_msg->data, name, TUI_DRV_NAME_MAX)) { + if (TUI_POLL_CFG_OK == tui_msg->type) + return 1; + else if (TUI_POLL_CFG_FAIL == tui_msg->type) + return -1; + else + tloge("other state\n"); + } + } + + return 0; +} + +static void tui_msg_del(const char *name) +{ + struct tui_msg_node *tui_msg = NULL, *tmp = NULL; + + /* Return error if name is invalid */ + if (name == NULL) { + tloge("name is null"); + return; + } + + list_for_each_entry_safe(tui_msg, tmp, &g_tui_msg_head, list) { + /* Names match */ + if (!strncmp(tui_msg->data, name, TUI_DRV_NAME_MAX)) { + list_del(&tui_msg->list); + kfree(tui_msg); + } + } +} +#define DSS_CONFIG_INDEX 1 +#define TP_CONFIG_INDEX 2 + +static int32_t process_tui_poll_cfg(int32_t type) +{ + /* pre-process tui poll event if needed */ + switch(type) { + case TUI_POLL_CFG_OK: + if (DSS_CONFIG_INDEX == g_tui_ctl->s2n.value) { + phys_addr_t tui_addr_t; + tui_addr_t = get_frame_addr(); + if (tui_addr_t == 0) + tloge("get frame addr 
error\n"); + + g_tui_ctl->n2s.addr = (unsigned int)tui_addr_t; + g_tui_ctl->n2s.addr_h = tui_addr_t >> HIGH_VALUES; + g_tui_ctl->n2s.npages = g_tui_display_mem.npages; + g_tui_ctl->n2s.info_length = g_tui_display_mem.info_length; + g_tui_ctl->n2s.phy_size = g_tui_display_mem.len; + if (g_tui_ctl->n2s.addr == 0) + return -1; + } + break; + default: + break; + } + + return 0; +} + +static int32_t process_tui_msg_dss(void) +{ + int32_t type = TUI_POLL_CFG_OK; + +#if ONLY_INIT_TP != DSS_TP_COUPLE_MODE + /* Wait, until DSS init finishs */ + spin_lock(&g_tui_msg_lock); +#ifdef CONFIG_TEE_TUI_MTK + if (get_cfg_state(TUI_DSS_NAME) == 0) { +#else + while (get_cfg_state(TUI_DSS_NAME) == 0) { +#endif + tlogi("waiting for dss tui msg\n"); + g_tui_msg_flag = 0; + spin_unlock(&g_tui_msg_lock); + wait_tui_msg(); + tlogi("get dss init ok tui msg\n"); + spin_lock(&g_tui_msg_lock); + } + if (get_cfg_state(TUI_DSS_NAME) == -1) { + tloge("dss init failed\n"); + type = TUI_POLL_CFG_FAIL; + } + /* Delete DSS msg from g_tui_msg_head */ + tui_msg_del(TUI_DSS_NAME); + spin_unlock(&g_tui_msg_lock); +#endif + + return type; +} + +static int32_t process_tui_msg_tp(void) +{ + int32_t type = 0; + + spin_lock(&g_tui_msg_lock); +#if ONLY_INIT_DSS != DSS_TP_COUPLE_MODE + while (get_cfg_state(TUI_TP_NAME) == 0) { + tlogi("waiting for tp tui msg\n"); + g_tui_msg_flag = 0; + spin_unlock(&g_tui_msg_lock); + wait_tui_msg(); + tlogi("get tp init ok tui msg\n"); + spin_lock(&g_tui_msg_lock); + } + if (get_cfg_state(TUI_TP_NAME) == -1) { + tloge("tp failed to do init\n"); + tui_msg_del(TUI_TP_NAME); + spin_unlock(&g_tui_msg_lock); + return TUI_POLL_CFG_FAIL; + } + tui_msg_del(TUI_TP_NAME); +#if defined CONFIG_TEE_TUI_FP + if (init_tui_driver(1) == 0) { + while (get_cfg_state(TUI_GPIO_NAME) == 0 || + get_cfg_state(TUI_FP_NAME) == 0) { + tlogd("waiting for gpio/fp tui msg\n"); + g_tui_msg_flag = 0; + spin_unlock(&g_tui_msg_lock); + wait_tui_msg(); + tlogd("get gpio/fp init ok tui msg\n"); + 
spin_lock(&g_tui_msg_lock); + } + if (get_cfg_state(TUI_GPIO_NAME) == -1 || + get_cfg_state(TUI_FP_NAME) == -1) { + tloge("one of gpio/fp failed to do init\n"); + type = TUI_POLL_CFG_FAIL; + } + } + tui_msg_del(TUI_GPIO_NAME); + tui_msg_del(TUI_FP_NAME); +#endif + tlogd("tp/gpio/fp is config result:type = 0x%x\n", type); +#endif + spin_unlock(&g_tui_msg_lock); + return type; +} + +static void process_tui_msg(void) +{ + int32_t val = 0; + int32_t type = TUI_POLL_CFG_OK; + +fetch_msg: + if (g_tui_ctl->s2n.value == DSS_CONFIG_INDEX) + type = process_tui_msg_dss(); + else if (g_tui_ctl->s2n.value == TP_CONFIG_INDEX) + type = process_tui_msg_tp(); + else + tloge("wait others dev\n"); + + val = process_tui_poll_cfg(type); + + g_tui_ctl->n2s.event_type = type; + g_tui_ctl->n2s.value = val; + + if (!valid_msg(g_tui_ctl->n2s.event_type)) { + tlogi("refetch tui msg\n"); + goto fetch_msg; + } +} + +static int init_tui_agent(void) +{ + int ret; + + ret = tc_ns_register_agent(NULL, TEE_TUI_AGENT_ID, SZ_4K, (void **)(&g_tui_ctl), false); + if (ret != 0) { + tloge("register tui agent failed, ret = 0x%x\n", ret); + g_tui_ctl = NULL; + return -EFAULT; + } + + return 0; +} + +static void exit_tui_agent(void) +{ + if (tc_ns_unregister_agent(TEE_TUI_AGENT_ID) != 0) + tloge("unregister tui agent failed\n"); + + g_tui_ctl = NULL; +} + +static void set_tui_state(int state) +{ + if (state < TUI_STATE_UNUSED || state > TUI_STATE_ERROR) { + tloge("state=%d is invalid\n", state); + return; + } + if (atomic_read(&g_tui_state) != state) { + atomic_set(&g_tui_state, state); + tloge("set ree tui state is %d, 0: unused, 1:config, 2:running\n", state); + g_tui_state_flag = 1; + wake_up(&g_tui_state_wq); + } +} + +int is_tui_in_use(int pid_value) +{ + if (pid_value == atomic_read(&g_tui_pid)) + return 1; + return 0; +} + +void free_tui_caller_info(void) +{ + atomic_set(&g_tui_attached_device, TUI_PID_CLEAR); + atomic_set(&g_tui_pid, TUI_PID_CLEAR); +} + +static int agent_process_work_tui(void) +{ + 
struct smc_event_data *event_data = NULL; + + event_data = find_event_control(TEE_TUI_AGENT_ID); + if (event_data == NULL || atomic_read(&event_data->agent_ready) == AGENT_CRASHED) { + /* if return, the pending task in S can't be resumed!! */ + tloge("tui agent is not exist\n"); + put_agent_event(event_data); + return TEEC_ERROR_GENERIC; + } + + isb(); + wmb(); + event_data->ret_flag = 1; + /* Wake up tui agent that will process the command */ + wake_up(&event_data->wait_event_wq); + + tlogi("agent 0x%x request, goto sleep, pe->run=%d\n", + TEE_TUI_AGENT_ID, atomic_read(&event_data->ca_run)); + wait_event(event_data->ca_pending_wq, atomic_read(&event_data->ca_run)); + atomic_set(&event_data->ca_run, 0); + put_agent_event(event_data); + + return TEEC_SUCCESS; +} + +void do_ns_tui_release(void) +{ + if (atomic_read(&g_tui_state) != TUI_STATE_UNUSED) { + g_tui_ctl->s2n.command = TUI_CMD_EXIT; + g_tui_ctl->s2n.ret = -1; + tloge("exec tui do_ns_tui_release\n"); + if (agent_process_work_tui() != 0) + tloge("wake up tui agent error\n"); + } +} + +static int32_t do_tui_ttf_work(void) +{ + int ret = 0; + switch (g_tui_ctl->s2n.command) { + case TUI_CMD_LOAD_TTF: + ret = load_tui_font_file(); + if (ret == 0) { + tlogi("=======succeed to load ttf\n"); + g_tui_ctl->n2s.event_type = TUI_POLL_CFG_OK; + } else { + tloge("Failed to load normal ttf ret is 0x%x\n", ret); + g_tui_ctl->n2s.event_type = TUI_POLL_CFG_FAIL; + } + break; + case TUI_CMD_EXIT: + if (atomic_read(&g_tui_state) != TUI_STATE_UNUSED && + atomic_dec_and_test(&g_tui_usage)) { + tlogi("tui disable\n"); + (void)init_tui_driver(UNSECURE_ENV); + free_frame_addr(); + free_tui_font_mem(); + free_tui_caller_info(); + set_tui_state(TUI_STATE_UNUSED); + } + break; + case TUI_CMD_FREE_TTF_MEM: + free_tui_font_mem(); + ret = 0; + break; + default: + ret = -EINVAL; + tloge("get error ttf tui command(0x%x)\n", g_tui_ctl->s2n.command); + break; + } + return ret; +} + +static void process_tui_enable(void) +{ + if 
(atomic_read(&g_tui_state) == TUI_STATE_CONFIG) + return; + + tlogi("tui enable\n"); + set_tui_state(TUI_STATE_CONFIG); + /* do dss and tp init */ + if (init_tui_driver(SECURE_ENV) != 0) { + g_tui_ctl->s2n.ret = -1; + set_tui_state(TUI_STATE_ERROR); + (void)init_tui_driver(UNSECURE_ENV); + free_tui_caller_info(); + set_tui_state(TUI_STATE_UNUSED); + return; + } + atomic_inc(&g_tui_usage); +} + +static void process_tui_disable(void) +{ + if (atomic_read(&g_tui_state) == TUI_STATE_UNUSED || + !atomic_dec_and_test(&g_tui_usage)) + return; + + tlogi("tui disable\n"); + (void)init_tui_driver(UNSECURE_ENV); + free_frame_addr(); + free_tui_caller_info(); + set_tui_state(TUI_STATE_UNUSED); +} + +static void process_tui_pause(void) +{ + if (atomic_read(&g_tui_state) == TUI_STATE_UNUSED) + return; + + tlogi("tui pause\n"); + (void)init_tui_driver(UNSECURE_ENV); + set_tui_state(TUI_STATE_CONFIG); +} + +static int do_tui_config_work(void) +{ + int ret = 0; + + switch (g_tui_ctl->s2n.command) { + case TUI_CMD_ENABLE: + process_tui_enable(); + break; + case TUI_CMD_DISABLE: + process_tui_disable(); + break; + case TUI_CMD_PAUSE: + process_tui_pause(); + break; + case TUI_CMD_POLL: + process_tui_msg(); + break; + case TUI_CMD_DO_SYNC: + tlogd("enable tp irq cmd\n"); + break; + case TUI_CMD_SET_STATE: + tlogi("tui set state %d\n", g_tui_ctl->s2n.value); + set_tui_state(g_tui_ctl->s2n.value); + break; + case TUI_CMD_START_DELAY_WORK: + tlogd("start delay work\n"); + break; + case TUI_CMD_CANCEL_DELAY_WORK: + tlogd("cancel delay work\n"); + break; + default: + ret = -EINVAL; + tloge("get error config tui command(0x%x)\n", g_tui_ctl->s2n.command); + break; + } + return ret; +} + +static int do_tui_work(void) +{ + int ret = 0; + + /* clear s2n cmd ret */ + g_tui_ctl->s2n.ret = 0; + switch (g_tui_ctl->s2n.command) { + case TUI_CMD_ENABLE: + case TUI_CMD_DISABLE: + case TUI_CMD_PAUSE: + case TUI_CMD_POLL: + case TUI_CMD_DO_SYNC: + case TUI_CMD_SET_STATE: + case TUI_CMD_START_DELAY_WORK: 
+ case TUI_CMD_CANCEL_DELAY_WORK: + ret = do_tui_config_work(); + break; + case TUI_CMD_LOAD_TTF: + case TUI_CMD_EXIT: + case TUI_CMD_FREE_TTF_MEM: + ret = do_tui_ttf_work(); + break; + default: + ret = -EINVAL; + tloge("get error tui command\n"); + break; + } + return ret; +} + +void set_tui_caller_info(unsigned int devid, int pid) +{ + atomic_set(&g_tui_attached_device, (int)devid); + atomic_set(&g_tui_pid, pid); +} + +unsigned int tui_attach_device(void) +{ + return (unsigned int)atomic_read(&g_tui_attached_device); +} + +static int tui_kthread_work_fn(void *data) +{ + int ret; + ret = init_tui_agent(); + if (ret != 0) { + tloge("init tui agent error, ret = %d\n", ret); + return ret; + } + + while (1) { + tc_ns_wait_event(TEE_TUI_AGENT_ID); + + if (kthread_should_stop()) + break; + + do_tui_work(); + + if (tc_ns_send_event_response(TEE_TUI_AGENT_ID) != 0) + tloge("send event response error\n"); + } + + exit_tui_agent(); + + return 0; +} + +#define READ_BUF 128 +static ssize_t tui_dbg_state_read(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char buf[READ_BUF] = {0}; + unsigned int r; + int ret; + struct tui_drv_node *pos = NULL; + + if (filp == NULL || ubuf == NULL || ppos == NULL) + return -EINVAL; + + ret = snprintf_s(buf, READ_BUF, READ_BUF - 1, "tui state:%s\n", + state_name[atomic_read(&g_tui_state)]); + if (ret < 0) { + tloge("tui dbg state read 1 snprintf is failed, ret = 0x%x\n", ret); + return -EINVAL; + } + r = (unsigned int)ret; + + ret = snprintf_s(buf + r, READ_BUF - r, READ_BUF - r - 1, "%s", "drv config state:"); + if (ret < 0) { + tloge("tui dbg state read 2 snprintf is failed, ret = 0x%x\n", ret); + return -EINVAL; + } + r += (unsigned int)ret; + + mutex_lock(&g_tui_drv_lock); + list_for_each_entry(pos, &g_tui_drv_head, list) { + ret = snprintf_s(buf + r, READ_BUF - r, READ_BUF - r - 1, "%s-%s,", pos->name, 1 == pos->state ? 
"ok" : "no ok"); + if (ret < 0) { + tloge("tui dbg state read 3 snprintf is failed, ret = 0x%x\n", ret); + mutex_unlock(&g_tui_drv_lock); + return -EINVAL; + } + r += (unsigned int)ret; + } + mutex_unlock(&g_tui_drv_lock); + if (r < READ_BUF) + buf[r - 1] = '\n'; + + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); +} + +static const struct file_operations tui_dbg_state_fops = { + .owner = THIS_MODULE, + .read = tui_dbg_state_read, +}; + +#define MAX_SHOW_BUFF_LEN 32 +static ssize_t tui_status_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + int r; + size_t buf_len = 0; + if (kobj == NULL || attr == NULL || buf == NULL) + return -EINVAL; + + g_tui_state_flag = 0; + r = wait_event_interruptible(g_tui_state_wq, g_tui_state_flag); + if (r != 0) { + tloge("get tui state is interrupted\n"); + return r; + } + buf_len = MAX_SHOW_BUFF_LEN; + r = snprintf_s(buf, buf_len, buf_len - 1, "%s", state_name[atomic_read(&g_tui_state)]); + if (r < 0) { + tloge("tui status show snprintf is failed, ret = 0x%x\n", r); + return -1; + } + + return r; +} + +#define MSG_BUF 512 +static ssize_t tui_dbg_msg_read(struct file *filp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char buf[MSG_BUF] = {0}; + int ret; + int i; + struct tui_drv_node *pos = NULL; + + if (filp == NULL || ubuf == NULL || ppos == NULL) + return -EINVAL; + + ret = snprintf_s(buf, MSG_BUF, MSG_BUF - 1, "%s", "event format: event_type:val\n" + "event type:\n"); + if (ret < 0) + return -EINVAL; + + unsigned int r = (unsigned int)ret; + + /* event type list */ + for (i = 0; i < TUI_POLL_MAX - 1; i++) { + ret = snprintf_s(buf + r, MSG_BUF - r, MSG_BUF - r - 1, "%s, ", + poll_event_type_name[i]); + if (ret < 0) { + tloge("tui db msg read 2 snprint is error, ret = 0x%x\n", ret); + return -EINVAL; + } + r += (unsigned int)ret; + } + ret = snprintf_s(buf + r, MSG_BUF - r, MSG_BUF - r - 1, "%s\n", poll_event_type_name[i]); + if (ret < 0) { + tloge("tui db msg read 3 snprint is error, ret = 
0x%x\n", ret); + return -EINVAL; + } + r += (unsigned int)ret; + + /* cfg drv type list */ + ret = snprintf_s(buf + r, MSG_BUF - r, MSG_BUF - r - 1, "val type for %s or %s:\n", + poll_event_type_name[TUI_POLL_CFG_OK], poll_event_type_name[TUI_POLL_CFG_FAIL]); + if (ret < 0) { + tloge("tui db msg read 4 snprint is error, ret = 0x%x\n", ret); + return -EINVAL; + } + r += (unsigned int)ret; + + mutex_lock(&g_tui_drv_lock); + list_for_each_entry(pos, &g_tui_drv_head, list) { + ret = snprintf_s(buf + r, MSG_BUF - r, MSG_BUF - r - 1, "%s,", pos->name); + if (ret < 0) { + tloge("tui db msg read 5 snprint is error, ret = 0x%x\n", ret); + mutex_unlock(&g_tui_drv_lock); + return -EINVAL; + } + r += (unsigned int)ret; + } + mutex_unlock(&g_tui_drv_lock); + if (r < MSG_BUF) + buf[r - 1] = '\n'; + + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); +} + +static ssize_t tui_dbg_process_tp(const char *tokens, char **begins) +{ + long value = 0; + int base = TP_BASE_VALUE; + + /* simple_strtol is obsolete, use kstrtol instead */ + int32_t ret = kstrtol(tokens, base, &value); + if (ret != 0) + return -EFAULT; + g_tui_ctl->n2s.status = (int)value; + + tokens = strsep(begins, ":"); + if (tokens == NULL) + return -EFAULT; + + ret = kstrtol(tokens, base, &value); + if (ret != 0) + return -EFAULT; + g_tui_ctl->n2s.x = (int)value; + + tokens = strsep(begins, ":"); + if (tokens == NULL) + return -EFAULT; + + int32_t ret = kstrtol(tokens, base, &value); + if (ret != 0) + return -EINVAL; + g_tui_ctl->n2s.y = (int)value; + return 0; +} + +static ssize_t tui_dbg_msg_write(struct file *filp, + const char __user *ubuf, size_t cnt, + loff_t *ppos) +{ + char buf[64]; + int i; + int event_type = -1; + char *tokens = NULL, *begins = NULL; + struct teec_tui_parameter tui_param = {0}; + + if (ubuf == NULL || filp == NULL || ppos == NULL) + return -EINVAL; + + if (cnt >= sizeof(buf)/sizeof(char)) + return -EINVAL; + + if (copy_from_user(buf, ubuf, cnt) != 0) + return -EFAULT; + + buf[cnt] = 0; 
+ begins = buf; + + /* event type */ + tokens = strsep(&begins, ":"); + if (tokens == NULL) + return -EFAULT; + + tlogd("1: tokens:%s\n", tokens); + for (i = 0; i < TUI_POLL_MAX; i++) { + if (strncmp(tokens, poll_event_type_name[i], strlen(poll_event_type_name[i])) == 0) { + event_type = i; + break; + } + } + + /* only for tp and cancel */ + if (event_type != TUI_POLL_TP && event_type != TUI_POLL_CANCEL) + return -EFAULT; + /* drv type */ + tokens = strsep(&begins, ":"); + if (tokens == NULL) + return -EFAULT; + + tlogd("2: tokens:%s\n", tokens); + if (event_type == TUI_POLL_TP) { + if (tui_dbg_process_tp((const char *)tokens, &begins) != 0) + return -EFAULT; + } + tlogd("status=%d x=%d y=%d\n", g_tui_ctl->n2s.status, g_tui_ctl->n2s.x, g_tui_ctl->n2s.y); + + if (tui_send_event(event_type, &tui_param)) + return -EFAULT; + + *ppos += cnt; + + return cnt; +} + +static const struct file_operations tui_dbg_msg_fops = { + .owner = THIS_MODULE, + .read = tui_dbg_msg_read, + .write = tui_dbg_msg_write, +}; + +static struct dentry *g_dbg_dentry = NULL; + +static int tui_powerkey_notifier_call(struct notifier_block *powerkey_nb, unsigned long event, void *data) +{ +#ifndef CONFIG_TEE_TUI_MTK + if (event == PRESS_KEY_DOWN) { + tui_poweroff_work_start(); + } else if (event == PRESS_KEY_UP) { + } else if (event == PRESS_KEY_1S) { + } else if (event == PRESS_KEY_6S) { + } else if (event == PRESS_KEY_8S) { + } else if (event == PRESS_KEY_10S) { + } else { + tloge("[%s]invalid event %ld !\n", __func__, event); + } +#endif +#ifdef CONFIG_HW_COMB_KEY + if (event == POWER_KEY_PRESS_DOWN) { + tui_poweroff_work_start(); + } else { + tloge("[%s]invalid event %ld !\n", __func__, event); + } +#endif + return 0; +} + +static struct notifier_block tui_powerkey_nb; +int register_tui_powerkey_listener(void) +{ + tui_powerkey_nb.notifier_call = tui_powerkey_notifier_call; +#ifdef CONFIG_HW_COMB_KEY + return power_key_register_notifier(&tui_powerkey_nb); +#else + return 
powerkey_register_notifier(&tui_powerkey_nb); +#endif +} + +int unregister_tui_powerkey_listener(void) +{ + tui_powerkey_nb.notifier_call = tui_powerkey_notifier_call; +#ifdef CONFIG_HW_COMB_KEY + return power_key_unregister_notifier(&tui_powerkey_nb); +#else + return powerkey_unregister_notifier(&tui_powerkey_nb); +#endif +} + +int __init init_tui(const struct device *class_dev) +{ + int retval; + struct sched_param param; + param.sched_priority = MAX_RT_PRIO - 1; + + if (class_dev == NULL) + return -1; + + g_tui_task = kthread_create(tui_kthread_work_fn, NULL, "tuid"); + if (IS_ERR_OR_NULL(g_tui_task)) { + tloge("kthread create is error\n"); + return PTR_ERR(g_tui_task); + } + + sched_setscheduler_nocheck(g_tui_task, SCHED_FIFO, ¶m); + get_task_struct(g_tui_task); + + tz_kthread_bind_mask(g_tui_task); + wake_up_process(g_tui_task); + + INIT_LIST_HEAD(&g_tui_msg_head); + spin_lock_init(&g_tui_msg_lock); + + init_waitqueue_head(&g_tui_state_wq); + init_waitqueue_head(&g_tui_msg_wq); + g_dbg_dentry = debugfs_create_dir("tui", NULL); +#ifdef DEBUG_TUI + debugfs_create_file("message", 0440, g_dbg_dentry, NULL, &tui_dbg_msg_fops); +#endif + debugfs_create_file("d_state", 0440, g_dbg_dentry, NULL, &tui_dbg_state_fops); + g_tui_kobj = kobject_create_and_add("tui", kernel_kobj); + if (g_tui_kobj == NULL) { + tloge("tui kobj create error\n"); + retval = -ENOMEM; + goto error2; + } + retval = sysfs_create_group(g_tui_kobj, &g_tui_attr_group); + + if (retval) { + tloge("sysfs_create_group error, retval = 0x%x\n", retval); + goto error1; + } + + retval = register_tui_powerkey_listener(); + if (retval != 0) { + tloge("tui register failed, retval = 0x%x\n", retval); + goto error1; + } + return 0; +error1: + kobject_put(g_tui_kobj); +error2: + kthread_stop(g_tui_task); + return retval; +} + +void free_tui(void) +{ + if (unregister_tui_powerkey_listener() < 0) + tloge("tui power key unregister failed\n"); + kthread_stop(g_tui_task); + put_task_struct(g_tui_task); + 
debugfs_remove(g_dbg_dentry); + sysfs_remove_group(g_tui_kobj, &g_tui_attr_group); + kobject_put(g_tui_kobj); +} + +int tc_ns_tui_event(struct tc_ns_dev_file *dev_file, const void *argp) +{ + struct teec_tui_parameter tui_param = {0}; + int ret; + + if (!dev_file || !argp) { + tloge("argp or dev is NULL\n"); + return -EINVAL; + } + + if (copy_from_user(&tui_param, argp, sizeof(tui_param))) { + tloge("copy from user failed\n"); + return -ENOMEM; + } + + if (tui_param.event_type == TUI_POLL_CANCEL || + tui_param.event_type == TUI_POLL_NOTCH || + tui_param.event_type == TUI_POLL_FOLD) { + ret = tui_send_event(tui_param.event_type, &tui_param); + } else { + tloge("no permission to send event\n"); + ret = -EACCES; + } + + return ret; +} + +bool is_tui_agent(unsigned int agent_id) +{ + return agent_id == TEE_TUI_AGENT_ID; +} \ No newline at end of file diff --git a/tzdriver/tui/tui.h b/tzdriver/tui/tui.h new file mode 100644 index 0000000000000000000000000000000000000000..d2bfe5bf7d1b805f5103dea1797713bdcb2a1270 --- /dev/null +++ b/tzdriver/tui/tui.h @@ -0,0 +1,275 @@ +/* + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef TUI_H +#define TUI_H + +#include "teek_ns_client.h" +#include "teek_client_type.h" + +#define TEE_TUI_AGENT_ID 0x54554944 /* TUID */ + +/* tui states */ +#define TUI_STATE_UNUSED 0 +#define TUI_STATE_CONFIG 1 +#define TUI_STATE_RUNNING 2 +#define TUI_STATE_ERROR 3 + +#define TUI_PID_CLEAR 0 +#define TUI_PID_CONFIG 1 +/* command from secure os */ +#define TUI_CMD_ENABLE 1 +#define TUI_CMD_DISABLE 2 +#define TUI_CMD_POLL 3 +#define TUI_CMD_SET_STATE 4 +#define TUI_CMD_PAUSE 5 +#define TUI_CMD_DO_SYNC 6 +#define TUI_CMD_START_DELAY_WORK 7 +#define TUI_CMD_CANCEL_DELAY_WORK 8 +#define TUI_CMD_LOAD_TTF 9 +#define TUI_CMD_FREE_TTF_MEM 11 +#define TUI_CMD_EXIT 12 + +#define TUI_DRV_NAME_MAX 32 + +/* poll event type from normal to secure */ +enum tui_poll_type { + TUI_POLL_CFG_OK, + TUI_POLL_CFG_FAIL, + TUI_POLL_TP, + TUI_POLL_TICK, + TUI_POLL_DELAYED_WORK, + TUI_POLL_TIMEOUT, + TUI_POLL_RESUME_TUI, +/* For some reasons, we need a method to terminate TUI from no secure + * OS, for example the TUI CA maybe killed. 
+ */ + TUI_POLL_CANCEL, + TUI_POLL_HANDLE_TUI, /* for tui to handle event */ + TUI_POLL_NAVI_H_TO_S, /* for navigator hide and show */ + TUI_POLL_NAVI_S_TO_H, + TUI_POLL_SHS_0_TO_1, /* for single hand mode switch */ + TUI_POLL_SHS_0_TO_2, + TUI_POLL_SHS_1_TO_0, + TUI_POLL_SHS_2_TO_0, + TUI_POLL_ROTATION_0, /* for retation switch */ + TUI_POLL_ROTATION_90, + TUI_POLL_ROTATION_180, + TUI_POLL_ROTATION_270, + TUI_POLL_KEYBOARDTYPE_0, + TUI_POLL_KEYBOARDTYPE_3, + TUI_POLL_SEMITRANS, + TUI_POLL_CURSOR, + TUI_POLL_GETFP, + TUI_POLL_NOTCH, /* for tui to get notch height */ + TUI_POLL_DIALOGTIMEOUT, + TUI_POLL_FOLD, /* for tui to get fold_screen */ + TUI_POLL_MAX /* Do Not add type behind this one */ +}; + +/* tui max should be bigger than TUI_POLL_MAX in tui.h */ +static const char *const poll_event_type_name[] = { + "config-ok", + "config-fail", + "tp", + "tui-tick", + "tui-delaywork", + "tui-pause", + "tui-resume", + "tui-terminate", + "tui-handle", + "tui-hs", + "tui-sh", + "tui-01", + "tui-02", + "tui-10", + "tui-20", + "tui-0", + "tui-90", + "tui-180", + "tui-270", + "tui_key_board_type0", + "tui_key_board_type3", + "tui-SEMI", + "tui-cursor", + "tui-gettp", + "tui-notch", + "tui-dialogtimeout", + "tui-fold", + "tui-max" +}; + +static const char *const state_name[] = { + "unused", + "config", + "running", + "error" +}; + +struct tui_ctl_shm { + struct { + int command; + int value; + int ret; + } s2n; + struct { + int event_type; + int value; + unsigned int addr; + unsigned int addr_h; + int tp_info; + int tp_info_h_addr; + int status; + int x; + int y; + uint32_t npages; + uint64_t info_length; + uint32_t phy_size; + } n2s; +}; + +struct tui_msg_node { + int type; + int val; + void *data; + struct list_head list; +}; + +typedef int (*tui_drv_init) (void *pdata, int secure); + +struct tui_drv_node { + tui_drv_init init_func; + void *pdata; + char name[TUI_DRV_NAME_MAX]; + int state; + int priority; + struct list_head list; +}; + +/* tui need memory is calculated 
dynamically according to the screen resolution */ +struct tui_mem { + unsigned int tui_addr_size; + unsigned int tui_addr; + unsigned int tui_addr_h; + struct device *tui_dev; + char *tui_virt; +}; + +struct ttf_mem { + unsigned int ttf_addr_h; + unsigned int ttf_addr_l; + char *ttf_buff_virt; + unsigned int ttf_file_size; +}; + +typedef struct tui_memory { + phys_addr_t tui_ion_phys_addr; + void *tui_ion_virt_addr; + size_t len; + uint32_t size; + uint32_t configid; + struct sg_table *tui_sg_table; + phys_addr_t fb_phys_addr; + uint32_t npages; + uint64_t info_length; +} tui_ion_mem; + +#ifdef CONFIG_TEE_TUI +extern int ts_tui_report_input(void *finger_data); +extern int tui_fp_notify(void); +int __init init_tui(const struct device *dev); +void free_tui(void); +int tui_send_event(int event, struct teec_tui_parameter *tui_param); +int register_tui_driver(tui_drv_init fun, const char *name, + void *pdata); +void unregister_tui_driver(const char *name); +/* + * TUI has different state that can recieve given types of message, + * there are 3 APIs to send message. + * send_tui_msg_config:send message to TUI in config state only. 
+ */ +int send_tui_msg_config(int type, int val, void *data); +void tui_poweroff_work_start(void); + +void set_tui_caller_info(unsigned int devid, int pid); +void free_tui_caller_info(void); + +unsigned int tui_attach_device(void); +void do_ns_tui_release(void); +int is_tui_in_use(int pid_value); +int tc_ns_tui_event(struct tc_ns_dev_file *dev_file, const void *argp); +bool is_tui_agent(unsigned int agent_id); +#else +static inline bool is_tui_agent(unsigned int agent_id) +{ + (void)agent_id; + return false; +} + +static inline int init_tui(const struct device *dev) +{ + (void)dev; + return 0; +} + +static inline void free_tui(void) +{ +} + +static inline void unregister_tui_driver(const char *name) +{ + (void)name; +} + +static inline int send_tui_msg_config(int type, int val, const void *data) +{ + (void)type; + (void)val; + (void)data; + return 0; +} + +static inline void set_tui_caller_info(unsigned int devid, int pid) +{ + (void)devid; + (void)pid; +} + +static inline void free_tui_caller_info(void) +{ +} + +static inline unsigned int tui_attach_device(void) +{ + return 0; +} + +static inline int is_tui_in_use(int pid_value) +{ + (void)pid_value; + return 0; +} + +static inline void do_ns_tui_release(void) +{ +} + +static inline int tc_ns_tui_event(struct tc_ns_dev_file *dev_file, const void *argp) +{ + (void)dev_file; + (void)argp; + return 0; +} +#endif + +#endif \ No newline at end of file diff --git a/tzdriver/whitelist/Makefile b/tzdriver/whitelist/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..730246c1b86ecfa20837651516d7bb4890981bb5 --- /dev/null +++ b/tzdriver/whitelist/Makefile @@ -0,0 +1,14 @@ +KERNEL_DIR := $(srctree) + +ifneq ($(TARGET_BUILD_VARIANT), user) + ccflags-y += -DDEF_ENG +endif + +EXTRA_CFLAGS += -I$(KERNEL_DIR)/../../../../third_party/bounds_checking_function/include +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/include +EXTRA_CFLAGS += -include internal_functions.h + +EXTRA_CFLAGS += 
-I$(KERNEL_DIR)/drivers/tzdriver +EXTRA_CFLAGS += -I$(KERNEL_DIR)/drivers/tzdriver/core + +obj-y += agent_allowed_ca.o \ No newline at end of file diff --git a/tzdriver/whitelist/agent_allowed_ca.c b/tzdriver/whitelist/agent_allowed_ca.c new file mode 100644 index 0000000000000000000000000000000000000000..fb504672fb561d989a65c3f4506a52a324be6ccc --- /dev/null +++ b/tzdriver/whitelist/agent_allowed_ca.c @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2012-2022 Huawei Technologies Co., Ltd. + * Description: allowed_ext_agent_ca list and functions. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "agent.h" +#include +#include +#include + +static struct ca_info g_allowed_ext_agent_ca[] = { +#ifdef CONFIG_TZDRIVER + { + "/vendor/bin/hiaiserver", + 3094, + TEE_SECE_AGENT_ID, + }, + { + "/vendor/bin/hw/hdf_devhost", + 1114, + TEE_FACE_AGENT1_ID, + }, +#endif +#ifdef DEF_ENG + { + "/vendor/bin/tee_test_agent", + 0, + TEE_SECE_AGENT_ID, + }, +#endif +}; + +int is_allowed_agent_ca(const struct ca_info *ca, + bool check_agent_id) +{ + uint32_t i; + struct ca_info *tmp_ca = g_allowed_ext_agent_ca; + const uint32_t nr = ARRAY_SIZE(g_allowed_ext_agent_ca); + + if (!ca) + return -EFAULT; + + if (!check_agent_id) { + for (i = 0; i < nr; i++) { + if (!strncmp(ca->path, tmp_ca->path, + strlen(tmp_ca->path) + 1) && + ca->uid == tmp_ca->uid) + return 0; + tmp_ca++; + } + } else { + for (i = 0; i < nr; i++) { + if (!strncmp(ca->path, tmp_ca->path, + strlen(tmp_ca->path) + 1) && + ca->uid == tmp_ca->uid && + ca->agent_id == tmp_ca->agent_id) + return 0; + tmp_ca++; + } + } + tlogd("ca-uid is %u, ca_path is %s, agent id is %x\n", ca->uid, + ca->path, ca->agent_id); + + return -EACCES; +} \ No newline at end of file