diff --git a/trustzone-awared-vm/Host/itrustee_tzdriver.patch b/trustzone-awared-vm/Host/itrustee_tzdriver.patch new file mode 100644 index 0000000000000000000000000000000000000000..701864df1b50b3367fd8891c97ba80a5f2270480 --- /dev/null +++ b/trustzone-awared-vm/Host/itrustee_tzdriver.patch @@ -0,0 +1,1230 @@ +diff -Naur '--exclude=.git' itrustee_tzdriver/auth/auth_base_impl.c itrustee_tzdriver_new/auth/auth_base_impl.c +--- itrustee_tzdriver/auth/auth_base_impl.c 2023-11-24 16:18:54.355641440 +0800 ++++ itrustee_tzdriver_new/auth/auth_base_impl.c 2023-11-24 16:29:26.000000000 +0800 +@@ -332,11 +332,26 @@ + + return CHECK_ACCESS_SUCC; + } ++ ++int check_proxy_auth(void) ++{ ++ int ret = check_proc_uid_path(PROXY_PATH_UID_AUTH_CTX); ++ if (ret != 0) { ++ return ret; ++ } ++ ++ return CHECK_ACCESS_SUCC; ++} + #else + int check_teecd_auth(void) + { + return 0; + } ++ ++int check_proxy_auth(void) ++{ ++ return 0; ++} + #endif + + #ifdef CONFIG_TEE_TELEPORT_AUTH +diff -Naur '--exclude=.git' itrustee_tzdriver/auth/auth_base_impl.h itrustee_tzdriver_new/auth/auth_base_impl.h +--- itrustee_tzdriver/auth/auth_base_impl.h 2023-11-24 16:18:54.355641440 +0800 ++++ itrustee_tzdriver_new/auth/auth_base_impl.h 2023-11-24 16:30:00.000000000 +0800 +@@ -84,6 +84,7 @@ + void mutex_crypto_hash_unlock(void); + int check_hidl_auth(void); + int check_teecd_auth(void); ++int check_proxy_auth(void); + #else + + static inline void free_shash_handle(void) +@@ -100,6 +101,11 @@ + { + return 0; + } ++ ++int check_proxy_auth(void) ++{ ++ return 0; ++} + + #endif /* CLIENT_AUTH || TEECD_AUTH */ + +diff -Naur '--exclude=.git' itrustee_tzdriver/core/agent.c itrustee_tzdriver_new/core/agent.c +--- itrustee_tzdriver/core/agent.c 2023-11-24 16:18:54.459641440 +0800 ++++ itrustee_tzdriver_new/core/agent.c 2023-11-25 20:40:44.109797330 +0800 +@@ -296,7 +296,8 @@ + return ret; + } + +-int tc_ns_late_init(unsigned long arg) ++int tc_ns_late_init(const struct tc_ns_dev_file *dev_file, ++ unsigned long 
arg) + { + int ret = 0; + struct tc_ns_smc_cmd smc_cmd = { {0}, 0 }; +@@ -318,6 +319,8 @@ + smc_cmd.operation_h_phys = + (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM; + ++ if (dev_file->isVM) ++ smc_cmd.nsid = dev_file->nsid; + if (tc_ns_smc(&smc_cmd)) { + ret = -EPERM; + tloge("late int failed\n"); +@@ -595,7 +598,8 @@ + return ret; + } + +-int tc_ns_sync_sys_time(const struct tc_ns_client_time *tc_ns_time) ++int tc_ns_sync_sys_time(const struct tc_ns_dev_file *dev_file, ++ const struct tc_ns_client_time *tc_ns_time) + { + struct tc_ns_smc_cmd smc_cmd = { {0}, 0 }; + int ret = 0; +@@ -621,6 +625,8 @@ + smc_cmd.operation_phys = mailbox_virt_to_phys((uintptr_t)&mb_pack->operation); + smc_cmd.operation_h_phys = + (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM; ++ if (dev_file && dev_file->isVM) ++ smc_cmd.nsid = dev_file->nsid; + if (tc_ns_smc(&smc_cmd)) { + tloge("tee adjust time failed, return error\n"); + ret = -EPERM; +@@ -630,7 +636,8 @@ + return ret; + } + +-int sync_system_time_from_user(const struct tc_ns_client_time *user_time) ++int sync_system_time_from_user(const struct tc_ns_dev_file *dev_file, ++ const struct tc_ns_client_time *user_time) + { + int ret = 0; + struct tc_ns_client_time time = { 0 }; +@@ -645,7 +652,7 @@ + return -EFAULT; + } + +- ret = tc_ns_sync_sys_time(&time); ++ ret = tc_ns_sync_sys_time(dev_file, &time); + if (ret != 0) + tloge("sync system time from user failed, ret = 0x%x\n", ret); + +@@ -663,7 +670,7 @@ + time.seconds = (uint32_t)kernel_time.ts.tv_sec; + time.millis = (uint32_t)(kernel_time.ts.tv_nsec / MS_TO_NS); + +- ret = tc_ns_sync_sys_time(&time); ++ ret = tc_ns_sync_sys_time(NULL, &time); + if (ret != 0) + tloge("sync system time from kernel failed, ret = 0x%x\n", ret); + +@@ -947,6 +954,8 @@ + nsid = task_active_pid_ns(current)->ns.inum; + if (dev_file != NULL && dev_file->nsid == 0) + dev_file->nsid = nsid; ++ if (dev_file->isVM) ++ nsid = 
dev_file->nsid; + #endif + + if (is_agent_already_exist(agent_id, nsid, &event_data, dev_file, &find_flag)) +@@ -1384,3 +1393,4 @@ + put_agent_event(event_data); + } + } ++ +diff -Naur '--exclude=.git' itrustee_tzdriver/core/agent.h itrustee_tzdriver_new/core/agent.h +--- itrustee_tzdriver/core/agent.h 2023-11-24 16:18:54.459641440 +0800 ++++ itrustee_tzdriver_new/core/agent.h 2023-11-24 16:33:18.000000000 +0800 +@@ -118,7 +118,8 @@ + unsigned int agent_id, unsigned int nsid); + int is_agent_alive(unsigned int agent_id, unsigned int nsid); + int tc_ns_set_native_hash(unsigned long arg, unsigned int cmd_id); +-int tc_ns_late_init(unsigned long arg); ++int tc_ns_late_init(const struct tc_ns_dev_file *dev_file, ++ unsigned long arg); + int tc_ns_register_agent(struct tc_ns_dev_file *dev_file, unsigned int agent_id, + unsigned int buffer_size, void **buffer, bool user_agent); + int tc_ns_unregister_agent(unsigned int agent_id, unsigned int nsid); +@@ -126,7 +127,8 @@ + int tc_ns_wait_event(unsigned int agent_id, unsigned int nsid); + int tc_ns_send_event_response(unsigned int agent_id, unsigned int nsid); + void send_crashed_event_response_single(const struct tc_ns_dev_file *dev_file); +-int sync_system_time_from_user(const struct tc_ns_client_time *user_time); ++int sync_system_time_from_user(const struct tc_ns_dev_file *dev_file, ++ const struct tc_ns_client_time *user_time); + void sync_system_time_from_kernel(void); + int tee_agent_clear_work(struct tc_ns_client_context *context, + unsigned int dev_file_id); +diff -Naur '--exclude=.git' itrustee_tzdriver/core/gp_ops.c itrustee_tzdriver_new/core/gp_ops.c +--- itrustee_tzdriver/core/gp_ops.c 2023-11-24 16:18:54.459641440 +0800 ++++ itrustee_tzdriver_new/core/gp_ops.c 2023-11-24 16:40:48.000000000 +0800 +@@ -312,6 +312,84 @@ + return 0; + } + ++int read_from_VMclient(void *dest, size_t dest_size, ++ const void __user *src, size_t size, pid_t vm_pid) ++{ ++ struct task_struct *vmp_task; ++ int i_rdlen; ++ int i_index; 
++ int ret; ++ ++ if (!dest || !src) { ++ tloge("src or dest is NULL input buffer\n"); ++ return -EINVAL; ++ } ++ ++ if (size > dest_size) { ++ tloge("size is larger than dest_size or size is 0\n"); ++ return -EINVAL; ++ } ++ if (!size) ++ return 0; ++ ++ tlogv("django verbose, execute access_process_vm"); ++ vmp_task = get_pid_task(find_get_pid(vm_pid), PIDTYPE_PID); ++ if (vmp_task == NULL) { ++ tloge("no task for pid %d \n", vm_pid); ++ return -EFAULT; ++ } ++ tlogv("django verbose, task_struct * for pid %d is 0x%px", vm_pid, vmp_task); ++ ++ i_rdlen = access_process_vm(vmp_task, (unsigned long)(src), dest, size, FOLL_FORCE); ++ if (i_rdlen != size) { ++ tloge("only read %d of %ld bytes by access_process_vm \n", i_rdlen, size); ++ return -EFAULT; ++ } ++ tlogv("django verbose, read %d byes by access_process_vm succeed", ++ i_rdlen); ++ for (i_index = 0; i_index < 32 && i_index < size; i_index ++) { ++ tlogv("django verbose, *(dest + i_index) + %d) = %2.2x", ++ i_index, *((char*)dest + i_index)); ++ } ++ return 0; ++} ++ ++int write_to_VMclient(void __user *dest, size_t dest_size, ++ const void *src, size_t size, pid_t vm_pid) ++{ ++ struct task_struct *vmp_task; ++ int i_wtlen; ++ int i_index; ++ int ret; ++ ++ if (!dest || !src) { ++ tloge("src or dest is NULL input buffer\n"); ++ return -EINVAL; ++ } ++ ++ if (size > dest_size) { ++ tloge("size is larger than dest_size or size is 0\n"); ++ return -EINVAL; ++ } ++ if (!size) ++ return 0; ++ ++ vmp_task = get_pid_task(find_get_pid(vm_pid), PIDTYPE_PID); ++ if (vmp_task == NULL) { ++ tloge("no task for pid %d \n", vm_pid); ++ return -EFAULT; ++ } ++ ++ i_wtlen = access_process_vm(vmp_task, (unsigned long)(dest), src, size, FOLL_FORCE | FOLL_WRITE); ++ if (i_wtlen != size) { ++ tloge("only write %d of %ld bytes by access_process_vm \n", i_wtlen, size); ++ return -EFAULT; ++ } ++ tlogv("django verbose, write %d byes by access_process_vm succeed", ++ i_wtlen); ++ return 0; ++} ++ + static bool 
is_input_tempmem(unsigned int param_type) + { + if (param_type == TEEC_MEMREF_TEMP_INPUT || +@@ -321,7 +399,8 @@ + return false; + } + +-static int update_input_data(const union tc_ns_client_param *client_param, ++static int update_input_data(const struct tc_call_params *call_params, ++ const union tc_ns_client_param *client_param, + uint32_t buffer_size, void *temp_buf, + unsigned int param_type, uint8_t kernel_params) + { +@@ -331,11 +410,22 @@ + + buffer_addr = client_param->memref.buffer | + ((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM); +- if (read_from_client(temp_buf, buffer_size, +- (void *)(uintptr_t)buffer_addr, +- buffer_size, kernel_params) != 0) { +- tloge("copy memref buffer failed\n"); +- return -EFAULT; ++ if (call_params->dev->isVM && !kernel_params) { ++ tlogd("is VM\n"); ++ if (read_from_VMclient(temp_buf, buffer_size, ++ (void *)(uintptr_t)buffer_addr, ++ buffer_size, call_params->dev->vmpid) != 0) { ++ tloge("copy memref buffer failed\n"); ++ return -EFAULT; ++ } ++ } else { ++ tlogd("is not VM\n"); ++ if (read_from_client(temp_buf, buffer_size, ++ (void *)(uintptr_t)buffer_addr, ++ buffer_size, kernel_params) != 0) { ++ tloge("copy memref buffer failed\n"); ++ return -EFAULT; ++ } + } + return 0; + } +@@ -393,7 +483,7 @@ + op_params->local_tmpbuf[index].temp_buffer = temp_buf; + op_params->local_tmpbuf[index].size = buffer_size; + +- if (update_input_data(client_param, buffer_size, temp_buf, ++ if (update_input_data(call_params, client_param, buffer_size, temp_buf, + param_type, kernel_params) != 0) + return -EFAULT; + +@@ -405,8 +495,9 @@ + return 0; + } + +-static int check_buffer_for_ref(uint32_t *buffer_size, +- const union tc_ns_client_param *client_param, uint8_t kernel_params) ++static int check_buffer_for_ref(const struct tc_call_params *call_params, ++ uint32_t *buffer_size, const union tc_ns_client_param *client_param, ++ uint8_t kernel_params) + { + uint64_t size_addr = client_param->memref.size_addr | + 
((uint64_t)client_param->memref.size_h_addr << ADDR_TRANS_NUM); +@@ -497,7 +588,7 @@ + return -EINVAL; + + client_param = &(call_params->context->params[index]); +- if (check_buffer_for_ref(&buffer_size, client_param, kernel_params) != 0) ++ if (check_buffer_for_ref(call_params, &buffer_size, client_param, kernel_params) != 0) + return -EINVAL; + + op_params->mb_pack->operation.params[index].memref.buffer = 0; +@@ -572,6 +663,134 @@ + return 0; + } + ++typedef union { ++ struct{ ++ uint64_t user_addr; ++ uint64_t page_num; ++ }block; ++ struct{ ++ uint64_t vm_page_size; ++ uint64_t shared_mem_size; ++ }share; ++}struct_page_block; ++ ++int fill_vm_shared_mem_info_block(uint64_t block_buf, uint32_t block_nums, ++ uint32_t offset, uint32_t buffer_size, uint64_t info_addr, uint32_t vm_page_size,pid_t vm_pid) ++{ ++ struct pagelist_info *page_info = NULL; ++ struct page **host_pages = NULL; ++ uint64_t *phys_addr = NULL; ++ uint32_t host_page_num; ++ uint32_t i; ++ uint32_t j; ++ uint32_t k; ++ uint32_t block_page_total_no = 0; ++ struct task_struct *vmp_task; ++ uint32_t vm_pages_no = 0; ++ uint32_t host_pages_no = 0; ++ uint32_t host_offset = 0; ++ uint64_t vm_start_vaddr; ++ void *host_start_vaddr; ++ uint32_t page_total_no = 0; ++ uint32_t vm_pages_total_size = 0; ++ vmp_task = get_pid_task(find_get_pid(vm_pid), PIDTYPE_PID); ++ if (vmp_task == NULL) { ++ tloge("no task for pid %d", vm_pid); ++ return -EFAULT; ++ } ++ uint32_t expect_page_num = PAGE_ALIGN(buffer_size + (offset & (~PAGE_MASK))) / PAGE_SIZE; ++ struct_page_block *page_block = (struct_page_block *)(uintptr_t)block_buf; ++ for (i = 0; i < block_nums; i++){ ++ vm_start_vaddr = page_block[i].block.user_addr; ++ vm_pages_no = page_block[i].block.page_num; ++ ++ if (i==0 && vm_page_size > PAGE_SIZE) { ++ vm_start_vaddr += (offset & PAGE_MASK); ++ vm_pages_total_size = vm_pages_no * vm_page_size - (offset & PAGE_MASK); ++ } else { ++ vm_pages_total_size = vm_pages_no * vm_page_size; ++ } ++ ++ host_offset = 
((uint32_t)(uintptr_t)vm_start_vaddr) & (~PAGE_MASK); ++ host_start_vaddr = (void *)(((uint64_t)vm_start_vaddr) & PAGE_MASK); ++ host_pages_no = PAGE_ALIGN(host_offset + vm_pages_total_size) / PAGE_SIZE; ++ if (i== block_nums -1 && vm_page_size > PAGE_SIZE) ++ host_pages_no = expect_page_num - page_total_no; ++ ++ host_pages = (struct page **)vmalloc(host_pages_no * sizeof(uint64_t)); ++ if (host_pages == NULL) ++ return -EFAULT; ++ tlogd("page_block[%u].block.user_addr = %llx, page_block[%u].block.page_num = %llx\n", i, vm_start_vaddr, i, vm_pages_no); ++ ++ #if (KERNEL_VERSION(6, 5, 0) <= LINUX_VERSION_CODE) ++ host_page_num = get_user_pages_remote(vmp_task->mm, host_start_vaddr, ++ (unsigned long)host_pages_no, ++ FOLL_FORCE, host_pages, NULL); ++ #elif (KERNEL_VERSION(5, 9, 0) <= LINUX_VERSION_CODE) ++ host_page_num = get_user_pages_remote(vmp_task->mm, host_start_vaddr, ++ (unsigned long)host_pages_no, ++ FOLL_FORCE, host_pages, ++ NULL, NULL); ++ #elif (KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE) ++ host_page_num = get_user_pages_remote(vmp_task, vmp_task->mm, ++ host_start_vaddr, (unsigned long)host_pages_no, FOLL_FORCE, ++ host_pages, NULL, NULL); ++ #elif (KERNEL_VERSION(4, 9, 0) <= LINUX_VERSION_CODE) ++ host_page_num = get_user_pages_remote(vmp_task, vmp_task->mm, ++ host_start_vaddr, (unsigned long)host_pages_no, ++ FOLL_FORCE, host_pages, NULL); ++ #else ++ host_page_num = get_user_pages_remote(vmp_task, vmp_task->mm, ++ host_start_vaddr, (unsigned long)host_pages_no, ++ 1, 1, host_pages, NULL); ++ #endif ++ if (host_page_num != host_pages_no) { ++ tloge("get pages failed, page_num = %u, expect %u\n", host_page_num, host_pages_no); ++ if (host_page_num > 0) { ++ release_pages(host_pages, host_page_num); ++ } ++ vfree(host_pages); ++ return -EFAULT; ++ } ++ ++ phys_addr = (uint64_t *)(uintptr_t)info_addr + (sizeof(*page_info) / sizeof(uint64_t)); ++ phys_addr = (uint64_t *)((char *)phys_addr + page_total_no * sizeof(uint64_t)); ++ 
block_page_total_no = 0; ++ for (j = 0; j < host_pages_no; j++) { ++ struct page *page = NULL; ++ page = host_pages[j]; ++ if (page == NULL) { ++ release_pages(host_pages, host_page_num); ++ vfree(host_pages); ++ tloge("page == NULL \n"); ++ return -EFAULT; ++ } ++ void *host_page_phy = (uintptr_t)page_to_phys(page); ++ if (vm_page_size < PAGE_SIZE) { ++ if (j !=0) ++ host_offset = 0; ++ uint32_t litil_page_num = (PAGE_SIZE - host_offset) / vm_page_size; ++ uint64_t host_page_start_addr = (uint64_t)host_page_phy + host_offset; ++ for (k = 0; k < litil_page_num && block_page_total_no < vm_pages_no;k++) { ++ phys_addr[block_page_total_no++] = host_page_start_addr + k * vm_page_size; ++ } ++ } else if (vm_page_size >= PAGE_SIZE){ ++ phys_addr[j] = (uintptr_t)page_to_phys(page); ++ } ++ } ++ page_total_no += (vm_page_size >= PAGE_SIZE ? host_pages_no : vm_pages_no); ++ vfree(host_pages); ++ } ++ ++ page_info = (struct pagelist_info *)(uintptr_t)info_addr; ++ page_info->page_num = page_total_no; ++ page_info->page_size = (vm_page_size > PAGE_SIZE ? 
PAGE_SIZE : vm_page_size); ++ page_info->sharedmem_offset = offset & (~PAGE_MASK); ++ page_info->sharedmem_size = buffer_size; ++ ++ return 0; ++} ++ + static int transfer_shared_mem(const struct tc_call_params *call_params, + struct tc_op_params *op_params, uint8_t kernel_params, + uint32_t param_type, unsigned int index) +@@ -580,10 +799,11 @@ + void *start_vaddr = NULL; + union tc_ns_client_param *client_param = NULL; + uint32_t buffer_size; +- uint32_t pages_no; ++ uint32_t pages_no = 0; + uint32_t offset; + uint32_t buff_len; + uint64_t buffer_addr; ++ uint32_t i; + + if (index >= TEE_PARAM_NUM) + return -EINVAL; +@@ -591,22 +811,59 @@ + client_param = &(call_params->context->params[index]); + if (check_buffer_for_sharedmem(&buffer_size, client_param, kernel_params)) + return -EINVAL; +- + buffer_addr = client_param->memref.buffer | + ((uint64_t)client_param->memref.buffer_h_addr << ADDR_TRANS_NUM); +- buff = (void *)(uint64_t)(buffer_addr + client_param->memref.offset); +- start_vaddr = (void *)(((uint64_t)buff) & PAGE_MASK); +- offset = ((uint32_t)(uintptr_t)buff) & (~PAGE_MASK); +- pages_no = PAGE_ALIGN(offset + buffer_size) / PAGE_SIZE; +- +- buff_len = sizeof(struct pagelist_info) + (sizeof(uint64_t) * pages_no); +- buff = mailbox_alloc(buff_len, MB_FLAG_ZERO); +- if (buff == NULL) +- return -EFAULT; + +- if (fill_shared_mem_info((uint64_t)start_vaddr, pages_no, offset, buffer_size, (uint64_t)buff)) { +- mailbox_free(buff); +- return -EFAULT; ++ if (call_params->dev->isVM) { ++ uint32_t block_buf_size = buffer_size - sizeof(struct_page_block); ++ void *tmp_buf = kzalloc(buffer_size, GFP_KERNEL); ++ if (read_from_client(tmp_buf, buffer_size, buffer_addr, buffer_size, 0)) { ++ tloge("copy blocks failed\n"); ++ return -EFAULT; ++ } ++ struct_page_block *block_buf = (struct_page_block *)((char *)tmp_buf + sizeof(struct_page_block)); ++ uint32_t block_nums = block_buf_size / sizeof(struct_page_block); ++ uint32_t share_mem_size = ((struct_page_block 
*)tmp_buf)->share.shared_mem_size; ++ uint32_t vm_page_size = ((struct_page_block *)tmp_buf)->share.vm_page_size; ++ ++ call_params->dev->vm_page_size = vm_page_size; ++ offset = (uint64_t)(client_param->memref.h_offset + client_param->memref.offset); ++ for(i = 0;i < block_nums; i++){ ++ pages_no += block_buf[i].block.page_num; ++ } ++ if (vm_page_size > PAGE_SIZE){ ++ buff = (void *)(uint64_t)(client_param->memref.h_offset + client_param->memref.offset); ++ pages_no = PAGE_ALIGN((((uint32_t)(uintptr_t)buff) & (~PAGE_MASK)) + share_mem_size) / PAGE_SIZE; ++ tlogd("page_no = %u \n", pages_no); ++ } ++ ++ buff_len = sizeof(struct pagelist_info) + (sizeof(uint64_t) * pages_no); ++ buff = mailbox_alloc(buff_len, MB_FLAG_ZERO); ++ if (buff == NULL) { ++ kfree(tmp_buf); ++ return -EFAULT; ++ } ++ if (fill_vm_shared_mem_info_block((uint64_t)block_buf, block_nums, offset, ++ share_mem_size, (uint64_t)buff, vm_page_size, call_params->dev->vmpid)) { ++ kfree(tmp_buf); ++ mailbox_free(buff); ++ return -EFAULT; ++ } ++ kfree(tmp_buf); ++ } else { ++ buff = (void *)(uint64_t)(buffer_addr + client_param->memref.offset); ++ start_vaddr = (void *)(((uint64_t)buff) & PAGE_MASK); ++ offset = ((uint32_t)(uintptr_t)buff) & (~PAGE_MASK); ++ pages_no = PAGE_ALIGN(offset + buffer_size) / PAGE_SIZE; ++ ++ buff_len = sizeof(struct pagelist_info) + (sizeof(uint64_t) * pages_no); ++ buff = mailbox_alloc(buff_len, MB_FLAG_ZERO); ++ if (buff == NULL) ++ return -EFAULT; ++ if (fill_shared_mem_info((uint64_t)start_vaddr, pages_no, offset, buffer_size, (uint64_t)buff)) { ++ mailbox_free(buff); ++ return -EFAULT; ++ } + } + + op_params->local_tmpbuf[index].temp_buffer = buff; +@@ -775,13 +1032,24 @@ + if (buffer_size == 0) + return 0; + /* Only update the buffer when the buffer size is valid in complete case */ +- if (write_to_client((void *)(uintptr_t)buffer_addr, +- operation->params[index].memref.size, +- op_params->local_tmpbuf[index].temp_buffer, +- operation->params[index].memref.size, +- 
call_params->dev->kernel_api) != 0) { +- tloge("copy tempbuf failed\n"); +- return -ENOMEM; ++ if (call_params->dev->isVM && !call_params->dev->kernel_api) { ++ if (write_to_VMclient((void *)(uintptr_t)buffer_addr, ++ operation->params[index].memref.size, ++ op_params->local_tmpbuf[index].temp_buffer, ++ operation->params[index].memref.size, ++ call_params->dev->vmpid) != 0) { ++ tloge("copy tempbuf failed\n"); ++ return -ENOMEM; ++ } ++ } else { ++ if (write_to_client((void *)(uintptr_t)buffer_addr, ++ operation->params[index].memref.size, ++ op_params->local_tmpbuf[index].temp_buffer, ++ operation->params[index].memref.size, ++ call_params->dev->kernel_api) != 0) { ++ tloge("copy tempbuf failed\n"); ++ return -ENOMEM; ++ } + } + return 0; + } +@@ -958,7 +1226,10 @@ + } else if (param_type == TEEC_MEMREF_SHARED_INOUT) { + #ifdef CONFIG_NOCOPY_SHAREDMEM + temp_buf = local_tmpbuf[index].temp_buffer; +- if (temp_buf != NULL) { ++ if (temp_buf != NULL && call_params->dev->isVM) { ++ release_vm_shared_mem_page(temp_buf, local_tmpbuf[index].size, call_params->dev->vm_page_size); ++ mailbox_free(temp_buf); ++ } else if (temp_buf != NULL && !call_params->dev->isVM) { + release_shared_mem_page(temp_buf, local_tmpbuf[index].size); + mailbox_free(temp_buf); + } +diff -Naur '--exclude=.git' itrustee_tzdriver/core/gp_ops.h itrustee_tzdriver_new/core/gp_ops.h +--- itrustee_tzdriver/core/gp_ops.h 2023-11-24 16:18:54.459641440 +0800 ++++ itrustee_tzdriver_new/core/gp_ops.h 2023-11-24 16:41:14.000000000 +0800 +@@ -30,5 +30,9 @@ + bool is_tmp_mem(uint32_t param_type); + bool is_ref_mem(uint32_t param_type); + bool is_val_param(uint32_t param_type); ++int write_to_VMclient(void __user *dest, size_t dest_size, ++ const void *src, size_t size, pid_t vm_pid); ++int read_from_VMclient(void *dest, size_t dest_size, ++ const void __user *src, size_t size, pid_t vm_pid); + + #endif +diff -Naur '--exclude=.git' itrustee_tzdriver/core/session_manager.c 
itrustee_tzdriver_new/core/session_manager.c +--- itrustee_tzdriver/core/session_manager.c 2023-11-24 16:18:54.459641440 +0800 ++++ itrustee_tzdriver_new/core/session_manager.c 2023-11-24 16:45:46.000000000 +0800 +@@ -595,7 +595,7 @@ + } + + static int tc_ns_service_init(const unsigned char *uuid, uint32_t uuid_len, +- struct tc_ns_service **new_service) ++ struct tc_ns_service **new_service, uint32_t nsid) + { + int ret = 0; + struct tc_ns_service *service = NULL; +@@ -616,7 +616,7 @@ + } + + #ifdef CONFIG_CONFIDENTIAL_CONTAINER +- service->nsid = task_active_pid_ns(current)->ns.inum; ++ service->nsid = nsid; + #else + service->nsid = PROC_PID_INIT_INO; + #endif +@@ -654,7 +654,11 @@ + struct tc_ns_service *service = NULL; + bool is_full = false; + #ifdef CONFIG_CONFIDENTIAL_CONTAINER +- unsigned int nsid = task_active_pid_ns(current)->ns.inum; ++ unsigned int nsid; ++ if (dev_file->isVM) ++ nsid = dev_file->nsid; ++ else ++ nsid = task_active_pid_ns(current)->ns.inum; + #else + unsigned int nsid = PROC_PID_INIT_INO; + #endif +@@ -683,7 +687,7 @@ + goto add_service; + } + /* Create a new service if we couldn't find it in list */ +- ret = tc_ns_service_init(context->uuid, UUID_LEN, &service); ++ ret = tc_ns_service_init(context->uuid, UUID_LEN, &service, nsid); + /* unlock after init to make sure find service from all is correct */ + mutex_unlock(&g_service_list_lock); + if (ret != 0) { +@@ -797,10 +801,19 @@ + } + return 0; + } +- if (copy_from_user(params->mb_load_mem + sizeof(load_flag), +- (const void __user *)params->file_buffer + loaded_size, load_size)) { +- tloge("file buf get fail\n"); +- return -EFAULT; ++ if (params->dev_file->isVM) { ++ if (read_from_VMclient(params->mb_load_mem + sizeof(load_flag), ++ load_size, (const void __user *)(params->file_buffer + loaded_size), ++ load_size, (pid_t)params->dev_file->vmpid)) { ++ tloge("file buf get failed \n"); ++ return -EFAULT; ++ } ++ } else { ++ if (copy_from_user(params->mb_load_mem + sizeof(load_flag), ++ 
(const void __user *)(params->file_buffer + loaded_size), load_size)) { ++ tloge("file buf get failed \n"); ++ return -EFAULT; ++ } + } + return 0; + } +@@ -1379,10 +1392,109 @@ + return ret; + } + ++static int process_vm_ref(struct tc_ns_dev_file *dev_file, ++ struct tc_ns_client_context *context, unsigned long long *vm_buffers) ++{ ++ struct tc_ns_shared_mem *shared_mem = NULL; ++ int index = 0; ++ uint32_t buffer_size; ++ unsigned int offset = 0; ++ void *buffer_addr = NULL; ++ void *size_addr = NULL; ++ unsigned long long vm_hvas[TEE_PARAM_NUM]={0}; ++ ++ if (!dev_file->isVM || !context->file_buffer) ++ return 0; ++ ++ if (copy_from_user(vm_hvas, context->file_buffer, context->file_size) != 0) { ++ tloge("copy from user failed\n"); ++ return -EFAULT; ++ } ++ ++ mutex_lock(&dev_file->shared_mem_lock); ++ list_for_each_entry(shared_mem, &dev_file->shared_mem_list, head) { ++ for (index = 0; index < TEE_PARAM_NUM; index++) { ++ buffer_addr = (void *)(uintptr_t)(context->params[index].memref.buffer | ++ ((uint64_t)context->params[index].memref.buffer_h_addr << ADDR_TRANS_NUM)); ++ if (shared_mem->user_addr == buffer_addr) { ++ buffer_addr = (void *)(uintptr_t)(shared_mem->kernel_addr); ++ size_addr = (void *)(uintptr_t)(context->params[index].memref.size_addr | ++ ((uint64_t)context->params[index].memref.size_h_addr << ADDR_TRANS_NUM)); ++ offset = context->params[index].memref.offset; ++ ++ if (copy_from_user(&buffer_size, size_addr, sizeof(uint32_t))) { ++ tloge("copy memref.size_addr failed\n"); ++ return -EFAULT; ++ } ++ ++ if (read_from_VMclient(buffer_addr + offset, buffer_size, ++ (uint32_t __user *)(uintptr_t)(vm_hvas[index] + offset), ++ buffer_size, dev_file->vmpid)) { ++ tloge("copy memref.buffer failed\n"); ++ return -EFAULT; ++ } ++ vm_buffers[index] = vm_hvas[index]; ++ } ++ } ++ } ++ mutex_unlock(&dev_file->shared_mem_lock); ++ return 0; ++} ++ ++static int process_vm_ref_end(struct tc_ns_dev_file *dev_file, ++ struct tc_ns_client_context *context, 
unsigned long long *vm_buffers) ++{ ++ int ret = 0; ++ struct tc_ns_shared_mem *shared_mem = NULL; ++ int index = 0; ++ uint32_t buffer_size; ++ unsigned int offset = 0; ++ void *buffer_addr = NULL; ++ void *size_addr = NULL; ++ ++ if (!dev_file->isVM) ++ return 0; ++ ++ mutex_lock(&dev_file->shared_mem_lock); ++ list_for_each_entry(shared_mem, &dev_file->shared_mem_list, head) { ++ for (index = 0; index < TEE_PARAM_NUM; index++) { ++ buffer_addr = (void *)(uintptr_t)(context->params[index].memref.buffer | ++ ((uint64_t)context->params[index].memref.buffer_h_addr << ADDR_TRANS_NUM)); ++ if (shared_mem->user_addr == buffer_addr) { ++ buffer_addr = (void *)(uintptr_t)(shared_mem->kernel_addr); ++ size_addr = (void *)(uintptr_t)(context->params[index].memref.size_addr | ++ ((uint64_t)context->params[index].memref.size_h_addr << ADDR_TRANS_NUM)); ++ offset = context->params[index].memref.offset; ++ ++ if (copy_from_user(&buffer_size, size_addr, sizeof(uint32_t))) { ++ tloge("copy memref.size_addr failed\n"); ++ return -EFAULT; ++ } ++ ++ if (write_to_VMclient((void *)(uintptr_t)(vm_buffers[index] + offset), ++ buffer_size, (void *)(uintptr_t)(buffer_addr + offset), ++ buffer_size, dev_file->vmpid)) { ++ tloge("copy buf size failed\n"); ++ return -EFAULT; ++ } ++ } ++ } ++ } ++ mutex_unlock(&dev_file->shared_mem_lock); ++ return ret; ++} ++ + static int ioctl_session_send_cmd(struct tc_ns_dev_file *dev_file, + struct tc_ns_client_context *context, void *argp) + { + int ret; ++ unsigned long long vm_buffers[TEE_PARAM_NUM]={0}; ++ ++ if (dev_file->isVM && ++ process_vm_ref(dev_file, context, vm_buffers)) { ++ tloge("copy from VM memref failed\n"); ++ return -EFAULT; ++ } + + ret = tc_ns_send_cmd(dev_file, context); + if (ret != 0) +@@ -1391,6 +1503,11 @@ + if (ret == 0) + ret = -EFAULT; + } ++ if (ret ==0 && dev_file->isVM && ++ process_vm_ref_end(dev_file, context, vm_buffers)) { ++ tloge("copy to VM memref failed\n"); ++ return -EFAULT; ++ } + return ret; + } + +diff 
-Naur '--exclude=.git' itrustee_tzdriver/core/shared_mem.c itrustee_tzdriver_new/core/shared_mem.c +--- itrustee_tzdriver/core/shared_mem.c 2023-11-24 16:18:54.463641440 +0800 ++++ itrustee_tzdriver_new/core/shared_mem.c 2023-11-24 16:46:18.000000000 +0800 +@@ -116,6 +116,35 @@ + put_page(page); + } + } ++ ++ ++void release_vm_shared_mem_page(uint64_t buf, uint32_t buf_size, uint32_t vm_page_size) ++{ ++ uint32_t i; ++ uint64_t *phys_addr = NULL; ++ struct pagelist_info *page_info = NULL; ++ struct page *page = NULL; ++ struct page *last_page = NULL; ++ ++ page_info = (struct pagelist_info *)(uintptr_t)buf; ++ phys_addr = (uint64_t *)(uintptr_t)buf + (sizeof(*page_info) / sizeof(uint64_t)); ++ ++ if (buf_size != sizeof(*page_info) + sizeof(uint64_t) * page_info->page_num) { ++ tloge("bad size, cannot release page\n"); ++ return; ++ } ++ ++ for (i = 0; i < page_info->page_num; i++) { ++ page = (struct page *)(uintptr_t)phys_to_page(phys_addr[i]); ++ if (page == NULL) ++ continue; ++ if (last_page != page) { ++ set_bit(PG_dirty, &page->flags); ++ put_page(page); ++ } ++ last_page = page; ++ } ++} + #endif + + #ifdef CONFIG_SHARED_MEM_RESERVED +diff -Naur '--exclude=.git' itrustee_tzdriver/core/shared_mem.h itrustee_tzdriver_new/core/shared_mem.h +--- itrustee_tzdriver/core/shared_mem.h 2023-11-24 16:18:54.463641440 +0800 ++++ itrustee_tzdriver_new/core/shared_mem.h 2023-11-24 16:46:46.000000000 +0800 +@@ -65,5 +65,6 @@ + int fill_shared_mem_info(uint64_t start_vaddr, uint32_t pages_no, + uint32_t offset, uint32_t buffer_size, uint64_t info_addr); + void release_shared_mem_page(uint64_t buf, uint32_t buf_size); ++void release_vm_shared_mem_page(uint64_t buf, uint32_t buf_size, uint32_t vm_page_size); + #endif + #endif +diff -Naur '--exclude=.git' itrustee_tzdriver/core/tc_client_driver.c itrustee_tzdriver_new/core/tc_client_driver.c +--- itrustee_tzdriver/core/tc_client_driver.c 2023-11-24 16:18:54.463641440 +0800 ++++ itrustee_tzdriver_new/core/tc_client_driver.c 
2023-11-24 16:53:12.000000000 +0800 +@@ -181,6 +181,8 @@ + smc_cmd.operation_h_phys = + (uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM; + ++ if (dev_file->isVM) ++ smc_cmd.nsid = dev_file->nsid; + if (tc_ns_smc(&smc_cmd) != 0) { + ret = -EPERM; + tloge("smc call returns error ret 0x%x\n", smc_cmd.ret_val); +@@ -307,10 +309,14 @@ + + static int tc_login_check(const struct tc_ns_dev_file *dev_file) + { +- int ret = check_teecd_auth(); +- if (ret != 0) { +- tloge("teec auth failed, ret %d\n", ret); +- return -EACCES; ++ int ret; ++ ret =check_proxy_auth(); ++ if (ret) { ++ ret = check_teecd_auth(); ++ if (ret != 0) { ++ tloge("teec auth failed, ret %d\n", ret); ++ return -EACCES; ++ } + } + + if (!dev_file) +@@ -713,12 +719,55 @@ + return ret; + } + ++static int copy_buf_to_VM(unsigned int agent_id, unsigned int nsid, ++ unsigned long buffer_addr, unsigned int vmpid) ++{ ++ int ret = 0; ++ struct smc_event_data *event_data = NULL; ++ ++ event_data = find_event_control(agent_id, nsid); ++ if (!event_data) ++ return -EINVAL; ++ ++ if (write_to_VMclient((void *)(uintptr_t)buffer_addr, ++ event_data->agent_buff_size, ++ event_data->agent_buff_kernel, ++ event_data->agent_buff_size, ++ vmpid) != 0) { ++ tloge("copy agent buffer failed\n"); ++ return -ENOMEM; ++ } ++ return ret; ++} ++ ++static int copy_buf_from_VM(unsigned int agent_id, unsigned int nsid, ++ unsigned long buffer_addr, unsigned int vmpid) ++{ ++ int ret = 0; ++ struct smc_event_data *event_data = NULL; ++ ++ event_data = find_event_control(agent_id, nsid); ++ if (!event_data) ++ return -EINVAL; ++ ++ if (read_from_VMclient(event_data->agent_buff_kernel, ++ event_data->agent_buff_size, ++ (void *)(uintptr_t)buffer_addr, ++ event_data->agent_buff_size, ++ vmpid) != 0) { ++ tloge("copy agent buffer failed\n"); ++ return -EFAULT; ++ } ++ return ret; ++} ++ + /* ioctls for the secure storage daemon */ + int public_ioctl(const struct file *file, unsigned int cmd, unsigned long 
arg, bool is_from_client_node) + { + int ret = -EINVAL; + struct tc_ns_dev_file *dev_file = NULL; + uint32_t nsid = get_nsid(); ++ unsigned long tmp[2]; + void *argp = (void __user *)(uintptr_t)arg; + if (file == NULL || file->private_data == NULL) { + tloge("invalid params\n"); +@@ -726,18 +775,34 @@ + } + dev_file = file->private_data; + #ifdef CONFIG_CONFIDENTIAL_CONTAINER +- dev_file->nsid = nsid; ++ if (dev_file != NULL && dev_file->nsid == 0) ++ dev_file->nsid = nsid; ++ if (dev_file->isVM) ++ nsid = dev_file->nsid; + #endif + ++ if (dev_file->isVM) { ++ if (copy_from_user(tmp, (void *)(uintptr_t)arg, sizeof(tmp)) != 0) { ++ tloge("copy agent args failed\n"); ++ return -EFAULT; ++ } ++ arg = tmp[0]; ++ } + switch (cmd) { + case TC_NS_CLIENT_IOCTL_WAIT_EVENT: + if (ioctl_check_agent_owner(dev_file, (unsigned int)arg, nsid) != 0) + return -EINVAL; + ret = tc_ns_wait_event((unsigned int)arg, nsid); ++ if (!ret && dev_file->isVM) { ++ ret = copy_buf_to_VM(tmp[0], nsid, tmp[1], dev_file->vmpid); ++ } + break; + case TC_NS_CLIENT_IOCTL_SEND_EVENT_RESPONSE: + if (ioctl_check_agent_owner(dev_file, (unsigned int)arg, nsid) != 0) + return -EINVAL; ++ if (dev_file->isVM) { ++ ret = copy_buf_from_VM(tmp[0], nsid, tmp[1], dev_file->vmpid); ++ } + ret = tc_ns_send_event_response((unsigned int)arg, nsid); + break; + case TC_NS_CLIENT_IOCTL_REGISTER_AGENT: +@@ -834,6 +899,14 @@ + return ret; + } + ++int set_vm_flag(struct tc_ns_dev_file *dev_file, int vmid) ++{ ++ dev_file->nsid = vmid; ++ dev_file->vmpid = vmid; ++ tlogd(" dev_file->vmpid %d\n", (int)dev_file->vmpid); ++ return 0; ++} ++ + void handle_cmd_prepare(unsigned int cmd) + { + if (cmd != TC_NS_CLIENT_IOCTL_WAIT_EVENT && +@@ -853,6 +926,10 @@ + { + int ret = -EFAULT; + void *argp = (void __user *)(uintptr_t)arg; ++ if (cmd == TC_NS_CLIENT_IOCTL_SET_VM_FLAG) { ++ tlogd(" before set_vm_flag \n"); ++ return set_vm_flag(file->private_data, (int)arg); ++ } + handle_cmd_prepare(cmd); + switch (cmd) { + case 
TC_NS_CLIENT_IOCTL_GET_TEE_VERSION: +@@ -867,10 +944,10 @@ + mutex_unlock(&g_set_ca_hash_lock); + break; + case TC_NS_CLIENT_IOCTL_LATEINIT: +- ret = tc_ns_late_init(arg); ++ ret = tc_ns_late_init(file->private_data, arg); + break; + case TC_NS_CLIENT_IOCTL_SYC_SYS_TIME: +- ret = sync_system_time_from_user( ++ ret = sync_system_time_from_user(file->private_data, + (struct tc_ns_client_time *)(uintptr_t)arg); + break; + default: +@@ -889,6 +966,10 @@ + int ret = -EFAULT; + void *argp = (void __user *)(uintptr_t)arg; + ++ if (cmd == TC_NS_CLIENT_IOCTL_SET_VM_FLAG) { ++ tlogd(" before set_vm_flag \n"); ++ return set_vm_flag(file->private_data, (int)arg); ++ } + handle_cmd_prepare(cmd); + switch (cmd) { + case TC_NS_CLIENT_IOCTL_SES_OPEN_REQ: +@@ -920,20 +1001,30 @@ + { + int ret; + struct tc_ns_dev_file *dev = NULL; ++ int vm = 0; + (void)inode; + +- ret = check_teecd_auth(); +- if (ret != 0) { +- tloge("teec auth failed, ret %d\n", ret); +- return -EACCES; ++ ret =check_proxy_auth(); ++ if (ret) { ++ ret = check_teecd_auth(); ++ if (ret != 0) { ++ tloge("teec auth failed, ret %d\n", ret); ++ return -EACCES; ++ } ++ } else { ++ vm = 1; + } + + file->private_data = NULL; + ret = tc_ns_client_open(&dev, TEE_REQ_FROM_USER_MODE); +- if (ret == 0) ++ if (ret == 0) { + file->private_data = dev; ++ if (vm) ++ dev->isVM = true; ++ } + #ifdef CONFIG_TEE_REBOOT +- get_teecd_pid(); ++ if (!vm && check_teecd_auth() == 0) ++ get_teecd_pid(); + #endif + return ret; + } +diff -Naur '--exclude=.git' itrustee_tzdriver/core/tc_client_driver.h itrustee_tzdriver_new/core/tc_client_driver.h +--- itrustee_tzdriver/core/tc_client_driver.h 2023-11-24 16:18:54.463641440 +0800 ++++ itrustee_tzdriver_new/core/tc_client_driver.h 2023-11-24 16:53:36.000000000 +0800 +@@ -38,6 +38,7 @@ + int tc_ns_client_close(struct tc_ns_dev_file *dev); + int is_agent_alive(unsigned int agent_id, unsigned int nsid); + int tc_ns_register_host_nsid(void); ++int set_vm_flag(struct tc_ns_dev_file *dev_file, int 
vmid); + + #if defined(CONFIG_CONFIDENTIAL_CONTAINER) || defined(CONFIG_TEE_TELEPORT_SUPPORT) + const struct file_operations *get_cvm_fops(void); +diff -Naur '--exclude=.git' itrustee_tzdriver/core/tc_cvm_driver.c itrustee_tzdriver_new/core/tc_cvm_driver.c +--- itrustee_tzdriver/core/tc_cvm_driver.c 2023-11-24 16:18:54.463641440 +0800 ++++ itrustee_tzdriver_new/core/tc_cvm_driver.c 2023-11-24 16:56:40.000000000 +0800 +@@ -57,6 +57,10 @@ + { + int ret = -EFAULT; + void *argp = (void __user *)(uintptr_t)arg; ++ if (cmd == TC_NS_CLIENT_IOCTL_SET_VM_FLAG) { ++ tlogd(" before set_vm_flag \n"); ++ return set_vm_flag(file->private_data, (int)arg); ++ } + handle_cmd_prepare(cmd); + + switch (cmd) { +diff -Naur '--exclude=.git' itrustee_tzdriver/Makefile itrustee_tzdriver_new/Makefile +--- itrustee_tzdriver/Makefile 2023-11-24 16:18:54.007641440 +0800 ++++ itrustee_tzdriver_new/Makefile 2023-11-24 16:58:48.000000000 +0800 +@@ -38,8 +38,11 @@ + + # you should config right path according to your run-time environment + KPATH := /usr/src/kernels +-KDIR := $(KPATH)/$(shell ls $(KPATH)) +-KDIR = /usr/src/linux-5.10.0-60.114.0.141.oe2203.aarch64 ++#KDIR := $(KPATH)/$(shell ls $(KPATH)) ++#KDIR = /usr/src/linux-5.10.0-60.114.0.141.oe2203.aarch64 ++KERN_VER = $(shell uname -r) ++KDIR = /lib/modules/$(KERN_VER)/build ++ + EXTRA_CFLAGS += -isystem /usr/lib/gcc/aarch64-linux-gnu/10.3.1/include + EXTRA_CFLAGS += -isystem /usr/lib/gcc/aarch64-openEuler-linux-gnu/12/include + EXTRA_CFLAGS += -fstack-protector-strong -DCONFIG_TEELOG -DCONFIG_TZDRIVER_MODULE -DCONFIG_TEECD_AUTH -DCONFIG_PAGES_MEM=y -DCONFIG_CLOUDSERVER_TEECD_AUTH +@@ -49,6 +52,7 @@ + EXTRA_CFLAGS += -DCONFIG_TEE_LOG_ACHIVE_PATH=\"/var/log/tee/last_teemsg\" + EXTRA_CFLAGS += -DNOT_TRIGGER_AP_RESET -DLAST_TEE_MSG_ROOT_GID -DCONFIG_NOCOPY_SHAREDMEM -DCONFIG_TA_AFFINITY=y -DCONFIG_TA_AFFINITY_CPU_NUMS=128 + EXTRA_CFLAGS += -DTEECD_PATH_UID_AUTH_CTX=\"/usr/bin/teecd:0\" ++EXTRA_CFLAGS += 
-DPROXY_PATH_UID_AUTH_CTX=\"/usr/bin/vtz_proxy:0\" + EXTRA_CFLAGS += -DCONFIG_AUTH_SUPPORT_UNAME -DCONFIG_AUTH_HASH -std=gnu99 + EXTRA_CFLAGS += -DCONFIG_TEE_UPGRADE -DCONFIG_TEE_REBOOT -DCONFIG_CONFIDENTIAL_TEE + EXTRA_CFLAGS += -I$(PWD)/tzdriver_internal/tee_reboot +diff -Naur '--exclude=.git' itrustee_tzdriver/tc_ns_client.h itrustee_tzdriver_new/tc_ns_client.h +--- itrustee_tzdriver/tc_ns_client.h 2023-11-24 16:18:53.627641440 +0800 ++++ itrustee_tzdriver_new/tc_ns_client.h 2023-11-24 16:59:38.000000000 +0800 +@@ -210,6 +210,9 @@ + #define TC_NS_CLIENT_IOCTL_GET_TEE_INFO \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 26, struct tc_ns_tee_info) + ++#define TC_NS_CLIENT_IOCTL_SET_VM_FLAG \ ++ _IOWR(TC_NS_CLIENT_IOC_MAGIC, 27, int) ++ + #define TC_NS_CLIENT_IOCTL_CHECK_CCOS \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 32, unsigned int) + +diff -Naur '--exclude=.git' itrustee_tzdriver/teek_ns_client.h itrustee_tzdriver_new/teek_ns_client.h +--- itrustee_tzdriver/teek_ns_client.h 2023-11-24 16:18:53.719641440 +0800 ++++ itrustee_tzdriver_new/teek_ns_client.h 2023-11-24 17:00:18.000000000 +0800 +@@ -129,6 +129,9 @@ + int load_app_flag; + #ifdef CONFIG_CONFIDENTIAL_CONTAINER + uint32_t nsid; ++ uint32_t vmpid; ++ bool isVM; ++ uint32_t vm_page_size; + #endif + struct completion close_comp; /* for kthread close unclosed session */ + #ifdef CONFIG_TEE_TELEPORT_SUPPORT +diff -Naur '--exclude=.git' itrustee_tzdriver/tlogger/tlogger.c itrustee_tzdriver_new/tlogger/tlogger.c +--- itrustee_tzdriver/tlogger/tlogger.c 2023-11-24 16:18:54.927641440 +0800 ++++ itrustee_tzdriver_new/tlogger/tlogger.c 2023-11-24 17:03:54.000000000 +0800 +@@ -61,6 +61,7 @@ + #define SET_TLOGCAT_STAT_BASE 7 + #define GET_TLOGCAT_STAT_BASE 8 + #define GET_TEE_INFO_BASE 9 ++#define SET_VM_FLAG 10 + + /* get tee verison */ + #define MAX_TEE_VERSION_LEN 256 +@@ -75,6 +76,8 @@ + _IO(LOGGERIOCTL, GET_TLOGCAT_STAT_BASE) + #define TEELOGGER_GET_TEE_INFO \ + _IOR(LOGGERIOCTL, GET_TEE_INFO_BASE, struct tc_ns_tee_info) ++#define 
TEELOGGER_SET_VM_FLAG \ ++ _IOR(LOGGERIOCTL, SET_VM_FLAG, int) + + int g_tlogcat_f = 0; + +@@ -515,7 +518,7 @@ + } + #endif + +-static struct tlogger_group *get_tlogger_group(void) ++static struct tlogger_group *get_tlogger_group(uint32_t vmpid) + { + struct tlogger_group *group = NULL; + #ifdef CONFIG_CONFIDENTIAL_CONTAINER +@@ -524,6 +527,9 @@ + uint32_t nsid = PROC_PID_INIT_INO; + #endif + ++ if (vmpid) ++ nsid = vmpid; ++ + list_for_each_entry(group, &g_reader_group_list, node) { + if (group->nsid == nsid) + return group; +@@ -596,7 +602,7 @@ + return -ENODEV; + + mutex_lock(&g_reader_group_mutex); +- group = get_tlogger_group(); ++ group = get_tlogger_group(0); + if (group == NULL) { + group = kzalloc(sizeof(*group), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)group)) { +@@ -828,6 +834,35 @@ + return 0; + } + ++int set_tlog_vm_flag(struct file *file, uint32_t vmpid) ++{ ++ struct tlogger_reader *reader = NULL; ++ struct tlogger_group *group = NULL; ++ ++ if (!file || !file->private_data) { ++ return -1; ++ } ++ ++ reader = file->private_data; ++ mutex_lock(&g_reader_group_mutex); ++ group = get_tlogger_group(vmpid); ++ if (group == NULL) { ++ group = kzalloc(sizeof(*group), GFP_KERNEL); ++ if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)group)) { ++ mutex_unlock(&g_reader_group_mutex); ++ return -ENOMEM; ++ } ++ init_tlogger_group(group); ++ group->nsid = vmpid; ++ list_add_tail(&group->node, &g_reader_group_list); ++ } else { ++ group->reader_cnt++; ++ } ++ mutex_unlock(&g_reader_group_mutex); ++ reader->group = group; ++ return 0; ++} ++ + static long process_tlogger_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) + { +@@ -865,6 +900,9 @@ + case TEELOGGER_GET_TEE_INFO: + ret = tc_ns_get_tee_info(file, (void *)(uintptr_t)arg); + break; ++ case TEELOGGER_SET_VM_FLAG: ++ ret = set_tlog_vm_flag(file, (int)arg); ++ break; + default: + tloge("ioctl error default\n"); + break; +@@ -1043,11 +1081,13 @@ + + while (next_item && 
read_off <= read_off_end) { + item_len = next_item->buffer_len + sizeof(*next_item); +- write_len = kernel_write(filep, next_item->log_buffer, +- next_item->real_len, pos); +- if (write_len < 0) { +- tloge("Failed to write last teemsg %zd\n", write_len); +- return -1; ++ if (next_item->nsid == 0) { ++ write_len = kernel_write(filep, next_item->log_buffer, ++ next_item->real_len, pos); ++ if (write_len < 0) { ++ tloge("Failed to write last teemsg %zd\n", write_len); ++ return -1; ++ } + } + + tlogd("Succeed to Write last teemsg, len=%zd\n", write_len); diff --git a/trustzone-awared-vm/Host/itrustee_tzdriver_new.zip b/trustzone-awared-vm/Host/itrustee_tzdriver_new.zip new file mode 100644 index 0000000000000000000000000000000000000000..0d44284fc870dd7918355d89e0dad91b4f396ada Binary files /dev/null and b/trustzone-awared-vm/Host/itrustee_tzdriver_new.zip differ diff --git a/trustzone-awared-vm/Host/qemu.patch b/trustzone-awared-vm/Host/qemu.patch new file mode 100644 index 0000000000000000000000000000000000000000..50735c87a332f2b03a64c1da315d116f50f7deb6 --- /dev/null +++ b/trustzone-awared-vm/Host/qemu.patch @@ -0,0 +1,934 @@ +diff -Naur '--exclude=.git' qemu/hw/char/tc_ns_client.h qemu_after/hw/char/tc_ns_client.h +--- qemu/hw/char/tc_ns_client.h 1970-01-01 08:00:00.000000000 +0800 ++++ qemu_after/hw/char/tc_ns_client.h 2023-10-23 15:09:10.840630820 +0800 +@@ -0,0 +1,162 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2012-2023. All rights reserved. ++ * Licensed under the Mulan PSL v2. ++ * You can use this software according to the terms and conditions of the Mulan PSL v2. ++ * You may obtain a copy of Mulan PSL v2 at: ++ * http://license.coscl.org.cn/MulanPSL2 ++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR ++ * PURPOSE. ++ * See the Mulan PSL v2 for more details. 
++ */ ++ ++#ifndef _TC_NS_CLIENT_H_ ++#define _TC_NS_CLIENT_H_ ++#include "tee_client_type.h" ++#define TC_DEBUG ++ ++#define INVALID_TYPE 0x00 ++#define TEECD_CONNECT 0x01 ++#ifndef ZERO_SIZE_PTR ++#define ZERO_SIZE_PTR ((void *)16) ++#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= (unsigned long)ZERO_SIZE_PTR) ++#endif ++ ++#define UUID_SIZE 16 ++ ++#define TC_NS_CLIENT_IOC_MAGIC 't' ++#define TC_NS_CLIENT_DEV "tc_ns_client" ++#define TC_NS_CLIENT_DEV_NAME "/dev/tc_ns_client" ++#define TC_TEECD_PRIVATE_DEV_NAME "/dev/tc_private" ++#define TC_NS_CVM_DEV_NAME "/dev/tc_ns_cvm" ++ ++enum ConnectCmd { ++ GET_FD, ++ GET_TEEVERSION, ++ SET_SYS_XML, ++ GET_TEECD_VERSION, ++}; ++ ++typedef struct { ++ unsigned int method; ++ unsigned int mdata; ++} TC_NS_ClientLogin; ++ ++typedef union { ++ struct { ++ unsigned long long buffer; ++ unsigned long long offset; ++ unsigned long long size_addr; ++ } memref; ++ struct { ++ unsigned long long a_addr; ++ unsigned long long b_addr; ++ } value; ++} TC_NS_ClientParam; ++ ++typedef struct { ++ unsigned int code; ++ unsigned int origin; ++} TC_NS_ClientReturn; ++ ++typedef struct { ++ unsigned char uuid[UUID_SIZE]; ++ unsigned int session_id; ++ unsigned int cmd_id; ++ TC_NS_ClientReturn returns; ++ TC_NS_ClientLogin login; ++ TC_NS_ClientParam params[TEEC_PARAM_NUM]; ++ unsigned int paramTypes; ++ bool started; ++ unsigned int callingPid; ++ unsigned int file_size; ++ union { ++ char *file_buffer; ++ struct { ++ uint32_t file_addr; ++ uint32_t file_h_addr; ++ } memref; ++ }; ++} TC_NS_ClientContext; ++ ++typedef struct { ++ uint32_t seconds; ++ uint32_t millis; ++} TC_NS_Time; ++ ++typedef struct { ++ uint16_t tzdriver_version_major; ++ uint16_t tzdriver_version_minor; ++ uint32_t reserved[15]; ++} TC_NS_TEE_Info; ++ ++enum SecFileType { ++ LOAD_TA = 0, ++ LOAD_SERVICE, ++ LOAD_LIB, ++ LOAD_DYNAMIC_DRV, ++ LOAD_PATCH, ++ LOAD_TYPE_MAX ++}; ++ ++struct SecFileInfo { ++ enum SecFileType fileType; ++ uint32_t fileSize; ++ int32_t 
secLoadErr; ++}; ++ ++struct SecLoadIoctlStruct { ++ struct SecFileInfo secFileInfo; ++ TEEC_UUID uuid; ++ union { ++ char *fileBuffer; ++ struct { ++ uint32_t file_addr; ++ uint32_t file_h_addr; ++ } memref; ++ }; ++}__attribute__((packed)); ++ ++struct AgentIoctlArgs { ++ uint32_t id; ++ uint32_t bufferSize; ++ union { ++ void *buffer; ++ unsigned long long addr; ++ }; ++}; ++ ++#define TC_NS_CLIENT_IOCTL_SES_OPEN_REQ _IOW(TC_NS_CLIENT_IOC_MAGIC, 1, TC_NS_ClientContext) ++#define TC_NS_CLIENT_IOCTL_SES_CLOSE_REQ _IOWR(TC_NS_CLIENT_IOC_MAGIC, 2, TC_NS_ClientContext) ++#define TC_NS_CLIENT_IOCTL_SEND_CMD_REQ _IOWR(TC_NS_CLIENT_IOC_MAGIC, 3, TC_NS_ClientContext) ++#define TC_NS_CLIENT_IOCTL_SHRD_MEM_RELEASE _IOWR(TC_NS_CLIENT_IOC_MAGIC, 4, unsigned int) ++#define TC_NS_CLIENT_IOCTL_WAIT_EVENT _IOWR(TC_NS_CLIENT_IOC_MAGIC, 5, unsigned int) ++#define TC_NS_CLIENT_IOCTL_SEND_EVENT_RESPONSE _IOWR(TC_NS_CLIENT_IOC_MAGIC, 6, unsigned int) ++#define TC_NS_CLIENT_IOCTL_REGISTER_AGENT _IOWR(TC_NS_CLIENT_IOC_MAGIC, 7, struct AgentIoctlArgs) ++#define TC_NS_CLIENT_IOCTL_UNREGISTER_AGENT _IOWR(TC_NS_CLIENT_IOC_MAGIC, 8, unsigned int) ++#define TC_NS_CLIENT_IOCTL_LOAD_APP_REQ _IOWR(TC_NS_CLIENT_IOC_MAGIC, 9, struct SecLoadIoctlStruct) ++#define TC_NS_CLIENT_IOCTL_NEED_LOAD_APP _IOWR(TC_NS_CLIENT_IOC_MAGIC, 10, TC_NS_ClientContext) ++#define TC_NS_CLIENT_IOCTL_LOAD_APP_EXCEPT _IOWR(TC_NS_CLIENT_IOC_MAGIC, 11, unsigned int) ++#define TC_NS_CLIENT_IOCTL_CANCEL_CMD_REQ _IOWR(TC_NS_CLIENT_IOC_MAGIC, 13, TC_NS_ClientContext) ++#define TC_NS_CLIENT_IOCTL_LOGIN _IOWR(TC_NS_CLIENT_IOC_MAGIC, 14, int) ++#define TC_NS_CLIENT_IOCTL_TST_CMD_REQ _IOWR(TC_NS_CLIENT_IOC_MAGIC, 15, int) ++#define TC_NS_CLIENT_IOCTL_TUI_EVENT _IOWR(TC_NS_CLIENT_IOC_MAGIC, 16, int) ++#define TC_NS_CLIENT_IOCTL_SYC_SYS_TIME _IOWR(TC_NS_CLIENT_IOC_MAGIC, 17, TC_NS_Time) ++#define TC_NS_CLIENT_IOCTL_SET_NATIVE_IDENTITY _IOWR(TC_NS_CLIENT_IOC_MAGIC, 18, int) ++#define TC_NS_CLIENT_IOCTL_LOAD_TTF_FILE_AND_NOTCH_HEIGHT 
_IOWR(TC_NS_CLIENT_IOC_MAGIC, 19, unsigned int) ++#define TC_NS_CLIENT_IOCTL_LATEINIT _IOWR(TC_NS_CLIENT_IOC_MAGIC, 20, unsigned int) ++#define TC_NS_CLIENT_IOCTL_GET_TEE_VERSION _IOWR(TC_NS_CLIENT_IOC_MAGIC, 21, unsigned int) ++#ifdef CONFIG_CMS_SIGNATURE ++#define TC_NS_CLIENT_IOCTL_UPDATE_TA_CRL _IOWR(TC_NS_CLIENT_IOC_MAGIC, 22, struct TC_NS_ClientCrl) ++#endif ++#ifdef CONFIG_TEE_TELEPORT_SUPPORT ++#define TC_NS_CLIENT_IOCTL_PORTAL_REGISTER _IOWR(TC_NS_CLIENT_IOC_MAGIC, 24, struct AgentIoctlArgs) ++#define TC_NS_CLIENT_IOCTL_PORTAL_WORK _IOWR(TC_NS_CLIENT_IOC_MAGIC, 25, struct AgentIoctlArgs) ++#endif ++#define TC_NS_CLIENT_IOCTL_GET_TEE_INFO _IOWR(TC_NS_CLIENT_IOC_MAGIC, 26, TC_NS_TEE_Info) ++#define TC_NS_CLIENT_IOCTL_SET_VM_FLAG _IOWR(TC_NS_CLIENT_IOC_MAGIC, 27, int) ++ ++TEEC_Result TEEC_CheckOperation(const TEEC_Operation *operation); ++#endif ++ ++ +diff -Naur '--exclude=.git' qemu/hw/char/tee_client_constants.h qemu_after/hw/char/tee_client_constants.h +--- qemu/hw/char/tee_client_constants.h 1970-01-01 08:00:00.000000000 +0800 ++++ qemu_after/hw/char/tee_client_constants.h 2023-10-23 15:09:10.840630820 +0800 +@@ -0,0 +1,126 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2013-2022. All rights reserved. ++ * Licensed under the Mulan PSL v2. ++ * You can use this software according to the terms and conditions of the Mulan PSL v2. ++ * You may obtain a copy of Mulan PSL v2 at: ++ * http://license.coscl.org.cn/MulanPSL2 ++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR ++ * PURPOSE. ++ * See the Mulan PSL v2 for more details. 
++ */ ++ ++#ifndef _TEE_CLIENT_CONSTANTS_H_ ++#define _TEE_CLIENT_CONSTANTS_H_ ++ ++enum TEEC_ReturnCode { ++ TEEC_SUCCESS = 0x0, /* success */ ++ TEEC_ERROR_INVALID_CMD, /* invalid command */ ++ TEEC_ERROR_SERVICE_NOT_EXIST, /* target service is not exist */ ++ TEEC_ERROR_SESSION_NOT_EXIST, /* session between client and service is not exist */ ++ TEEC_ERROR_SESSION_MAXIMUM, /* exceed max num of sessions */ ++ TEEC_ERROR_REGISTER_EXIST_SERVICE, /* cannot register the service which already exist */ ++ TEEC_ERROR_TAGET_DEAD_FATAL, /* system error occurs in TEE */ ++ TEEC_ERROR_READ_DATA, /* failed to read data in file */ ++ TEEC_ERROR_WRITE_DATA, /* failed to write data to file */ ++ TEEC_ERROR_TRUNCATE_OBJECT, /* data is truncated */ ++ TEEC_ERROR_SEEK_DATA, /* failed to seek data in file */ ++ TEEC_ERROR_FSYNC_DATA, /* failed to sync data in file */ ++ TEEC_ERROR_RENAME_OBJECT, /* failed to rename file */ ++ TEEC_ERROR_TRUSTED_APP_LOAD_ERROR, /* failed to load Trusted Application */ ++ TEEC_ERROR_GENERIC = 0xFFFF0000, /* generic error occurs */ ++ TEEC_ERROR_ACCESS_DENIED = 0xFFFF0001, /* permission check failed, in initilize context or ++ open session or invoke commnad */ ++ TEEC_ERROR_CANCEL = 0xFFFF0002, /* operation is already canceled */ ++ TEEC_ERROR_ACCESS_CONFLICT = 0xFFFF0003, /* confilct occurs in concurrent access to data, ++ error occurs in file operaions generally */ ++ TEEC_ERROR_EXCESS_DATA = 0xFFFF0004, /* exceed max data to be handled by system */ ++ TEEC_ERROR_BAD_FORMAT = 0xFFFF0005, /* data format is invalid, Trusted Application cannot ++ handle it */ ++ TEEC_ERROR_BAD_PARAMETERS = 0xFFFF0006, /* invalid parameters */ ++ TEEC_ERROR_BAD_STATE = 0xFFFF0007, /* operation failed in current state, when try to access ++ storage without initilize storage service */ ++ TEEC_ERROR_ITEM_NOT_FOUND = 0xFFFF0008, /* cannot find target item */ ++ TEEC_ERROR_NOT_IMPLEMENTED = 0xFFFF0009, /* request operation is not implemented */ ++ TEEC_ERROR_NOT_SUPPORTED = 
0xFFFF000A, /* request operation is not supported */ ++ TEEC_ERROR_NO_DATA = 0xFFFF000B, /* no data present for current operation */ ++ TEEC_ERROR_OUT_OF_MEMORY = 0xFFFF000C, /* system resource if out of use */ ++ TEEC_ERROR_BUSY = 0xFFFF000D, /* system is too busy to handle current operation */ ++ TEEC_ERROR_COMMUNICATION = 0xFFFF000E, /* error occurs when client try to communicate ++ with Trusted Application */ ++ TEEC_ERROR_SECURITY = 0xFFFF000F, /* security error occurs */ ++ TEEC_ERROR_SHORT_BUFFER = 0xFFFF0010, /* out buffer is not enough for current request */ ++ TEEC_ERROR_MAC_INVALID = 0xFFFF3071, /* MAC value check failed */ ++ TEEC_ERROR_TARGET_DEAD = 0xFFFF3024, /* Trusted Application is crashed */ ++ TEEC_FAIL = 0xFFFF5002, /* common error */ ++ TEEC_ERROR_EXTERNAL_CANCEL = 0xFFFF0011, /* used by adapt only, event caused User Interface operation aborted */ ++ TEEC_ERROR_OVERFLOW = 0xFFFF300F, /* used by adapt only */ ++ TEEC_ERROR_STORAGE_NO_SPACE = 0xFFFF3041, /* used by adapt only */ ++ TEEC_ERROR_SIGNATURE_INVALID = 0xFFFF3072, /* used by adapt only */ ++ TEEC_ERROR_TIME_NOT_SET = 0xFFFF5000, /* used by adapt only */ ++ TEEC_ERROR_TIME_NEEDS_RESET = 0xFFFF5001, /* used by adapt only */ ++ TEEC_ERROR_IPC_OVERFLOW = 0xFFFF9114 /* ipc overflow */ ++}; ++ ++enum TEEC_ReturnCodeOrigin { ++ TEEC_ORIGIN_API = 0x1, /* error occurs in handling client API */ ++ TEEC_ORIGIN_COMMS = 0x2, /* error occurs in communicating between REE and TEE */ ++ TEEC_ORIGIN_TEE = 0x3, /* error occurs in TEE */ ++ TEEC_ORIGIN_TRUSTED_APP = 0x4, /* error occurs in Trusted Application */ ++}; ++ ++enum TEEC_SharedMemCtl { ++ TEEC_MEM_INPUT = 0x1, /* input type of memroy */ ++ TEEC_MEM_OUTPUT = 0x2, /* output type of memory */ ++ TEEC_MEM_INOUT = 0x3, /* memory is used as both input and output */ ++ TEEC_MEM_SHARED_INOUT = 0x4, /* no copy shared memory */ ++}; ++ ++enum TEEC_ParamType { ++ TEEC_NONE = 0x0, /* unused parameter */ ++ TEEC_VALUE_INPUT = 0x01, /* input type of value, 
refer TEEC_Value */ ++ TEEC_VALUE_OUTPUT = 0x02, /* output type of value, refer TEEC_Value */ ++ TEEC_VALUE_INOUT = 0x03, /* value is used as both input and output, refer TEEC_Value */ ++ TEEC_MEMREF_TEMP_INPUT = 0x05, /* input type of temp memory reference, refer TEEC_TempMemoryReference */ ++ TEEC_MEMREF_TEMP_OUTPUT = 0x06, /* output type of temp memory reference, refer TEEC_TempMemoryReference */ ++ TEEC_MEMREF_TEMP_INOUT = 0x07, /* temp memory reference used as both input and output, ++ refer TEEC_TempMemoryReference */ ++ TEEC_ION_INPUT = 0x08, /* input type of icon memory reference, refer TEEC_IonReference */ ++ TEEC_ION_SGLIST_INPUT = 0x09, /* input type of ion memory block reference, refer TEEC_IonSglistReference */ ++ TEEC_MEMREF_SHARED_INOUT = 0x0a, /* no copy mem */ ++ TEEC_MEMREF_WHOLE = 0xc, /* use whole memory block, refer TEEC_RegisteredMemoryReference */ ++ TEEC_MEMREF_PARTIAL_INPUT = 0xd, /* input type of memory reference, refer TEEC_RegisteredMemoryReference */ ++ TEEC_MEMREF_PARTIAL_OUTPUT = 0xe, /* output type of memory reference, refer TEEC_RegisteredMemoryReference */ ++ TEEC_MEMREF_PARTIAL_INOUT = 0xf /* memory reference used as both input and output, ++ refer TEEC_RegisteredMemoryReference */ ++}; ++ ++/**************************************************** ++ * Session Login Methods ++ ****************************************************/ ++enum TEEC_LoginMethod { ++ TEEC_LOGIN_PUBLIC = 0x0, /* no Login data is provided */ ++ TEEC_LOGIN_USER, /* Login data about the user running the ++ Client Application process is provided */ ++ TEEC_LOGIN_GROUP, /* Login data about the group running ++ the Client Application process is provided */ ++ TEEC_LOGIN_APPLICATION = 0x4, /* Login data about the running Client ++ Application itself is provided */ ++ TEEC_LOGIN_USER_APPLICATION = 0x5, /* Login data about the user running the ++ Client Application and about the ++ Client Application itself is provided */ ++ TEEC_LOGIN_GROUP_APPLICATION = 0x6, /* Login 
data about the group running ++ the Client Application and about the ++ Client Application itself is provided */ ++ TEEC_LOGIN_IDENTIFY = 0x7, /* Login data is provided by REE system */ ++}; ++enum TST_CMD_ID { ++ TST_CMD_ID_01 = 1, ++ TST_CMD_ID_02, ++ TST_CMD_ID_03, ++ TST_CMD_ID_04, ++ TST_CMD_ID_05 ++}; ++ ++#define TEEC_PARAM_NUM 4 /* teec param max number */ ++#endif +diff -Naur '--exclude=.git' qemu/hw/char/tee_client_list.h qemu_after/hw/char/tee_client_list.h +--- qemu/hw/char/tee_client_list.h 1970-01-01 08:00:00.000000000 +0800 ++++ qemu_after/hw/char/tee_client_list.h 2023-10-23 15:09:10.840630820 +0800 +@@ -0,0 +1,101 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2013-2021. All rights reserved. ++ * iTrustee licensed under the Mulan PSL v2. ++ * You can use this software according to the terms and conditions of the Mulan PSL v2. ++ * You may obtain a copy of Mulan PSL v2 at: ++ * http://license.coscl.org.cn/MulanPSL2 ++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR ++ * PURPOSE. ++ * See the Mulan PSL v2 for more details. 
++ */ ++ ++#ifndef TEE_CLIENT_LIST_H ++#define TEE_CLIENT_LIST_H ++ ++struct ListNode { ++ struct ListNode *next; /* point to next node */ ++ struct ListNode *prev; /* point to prev node */ ++}; ++ ++#define OFFSET_OF(type, member) (unsigned long)(&(((type *)0)->member)) ++#define CONTAINER_OF(pos, type, member) (type *)(((char *)(pos)) - OFFSET_OF(type, member)) ++ ++#define LIST_DECLARE(name) \ ++ struct ListNode name = { \ ++ .next = &name, \ ++ .prev = &name, \ ++ } ++ ++static inline void ListInit(struct ListNode *list) ++{ ++ list->next = list; ++ list->prev = list; ++} ++ ++#define LIST_HEAD(list) ((list)->next) ++#define LIST_TAIL(list) ((list)->prev) ++#define LIST_EMPTY(list) ((list) == (list)->next) ++ ++static inline void ListInsertHead(struct ListNode *list, struct ListNode *entry) ++{ ++ list->next->prev = entry; ++ entry->next = list->next; ++ entry->prev = list; ++ list->next = entry; ++} ++ ++static inline void ListInsertTail(struct ListNode *list, struct ListNode *entry) ++{ ++ entry->next = list; ++ entry->prev = list->prev; ++ list->prev->next = entry; ++ list->prev = entry; ++} ++ ++static inline void ListRemoveEntry(struct ListNode *entry) ++{ ++ entry->prev->next = entry->next; ++ entry->next->prev = entry->prev; ++} ++ ++static inline struct ListNode *ListRemoveHead(struct ListNode *list) ++{ ++ struct ListNode *entry = NULL; ++ if (!LIST_EMPTY(list)) { ++ entry = list->next; ++ ListRemoveEntry(entry); ++ } ++ return entry; ++} ++ ++static inline struct ListNode *ListRemoveTail(struct ListNode *list) ++{ ++ struct ListNode *entry = NULL; ++ if (!LIST_EMPTY(list)) { ++ entry = list->prev; ++ ListRemoveEntry(entry); ++ } ++ return entry; ++} ++ ++#define LIST_ENTRY(ptr, type, member) \ ++ ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member))) ++ ++#define LIST_FOR_EACH(pos, list) \ ++ for (pos = (list)->next; pos != (list); pos = pos->next) ++ ++#define LIST_FOR_EACH_SAFE(pos, n, list) \ ++ for ((pos) = (list)->next, (n) = 
(pos)->next; (pos) != (list); (pos) = (n), (n) = (pos)->next) ++ ++#define LIST_FOR_EACH_ENTRY(pos, list, member) \ ++ for (pos = LIST_ENTRY((list)->next, typeof(*pos), member); &pos->member != (list); \ ++ pos = LIST_ENTRY(pos->member.next, typeof(*pos), member)) ++ ++#define LIST_FOR_EACH_ENTRY_SAFE(pos, n, list, member) \ ++ for (pos = LIST_ENTRY((list)->next, typeof(*pos), member), n = LIST_ENTRY(pos->member.next, typeof(*pos), \ ++ member); &pos->member != (list); pos = n, n = LIST_ENTRY(n->member.next, typeof(*n), member)) ++ ++#endif ++ ++ +diff -Naur '--exclude=.git' qemu/hw/char/tee_client_type.h qemu_after/hw/char/tee_client_type.h +--- qemu/hw/char/tee_client_type.h 1970-01-01 08:00:00.000000000 +0800 ++++ qemu_after/hw/char/tee_client_type.h 2023-10-23 15:09:10.840630820 +0800 +@@ -0,0 +1,134 @@ ++/* ++ * Copyright (c) Huawei Technologies Co., Ltd. 2013-2022. All rights reserved. ++ * Licensed under the Mulan PSL v2. ++ * You can use this software according to the terms and conditions of the Mulan PSL v2. ++ * You may obtain a copy of Mulan PSL v2 at: ++ * http://license.coscl.org.cn/MulanPSL2 ++ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR ++ * PURPOSE. ++ * See the Mulan PSL v2 for more details. 
++ */ ++ ++#ifndef _TEE_CLIENT_TYPE_H_ ++#define _TEE_CLIENT_TYPE_H_ ++ ++#include ++#include ++#include ++#include ++#include ++#include "tee_client_list.h" ++#include "tee_client_constants.h" ++ ++typedef enum TEEC_ReturnCode TEEC_Result; ++ ++typedef struct { ++ uint32_t timeLow; ++ uint16_t timeMid; ++ uint16_t timeHiAndVersion; ++ uint8_t clockSeqAndNode[8]; ++} TEEC_UUID; ++ ++typedef struct { ++ int32_t fd; ++ uint8_t *ta_path; ++ struct ListNode session_list; ++ struct ListNode shrd_mem_list; ++ union { ++ struct { ++ void *buffer; ++ sem_t buffer_barrier; ++ } share_buffer; ++ uint64_t imp; /* for adapt */ ++ }; ++} TEEC_Context; ++ ++typedef struct { ++ uint32_t session_id; ++ TEEC_UUID service_id; ++ uint32_t ops_cnt; ++ union { ++ struct ListNode head; ++ uint64_t imp; /* for adapt */ ++ }; ++ TEEC_Context *context; ++} TEEC_Session; ++ ++typedef struct { ++ void *buffer; ++ uint32_t size; ++ uint32_t flags; /* reference to TEEC_SharedMemCtl */ ++ uint32_t ops_cnt; ++ bool is_allocated; /* identify whether the memory is registered or allocated */ ++ union { ++ struct ListNode head; ++ void* imp; /* for adapt, imp is not used by system CA, only for vendor CA */ ++ }; ++ TEEC_Context *context; ++} TEEC_SharedMemory; ++ ++/* ++ * the corresponding param types are ++ * TEEC_MEMREF_TEMP_INPUT/TEEC_MEMREF_TEMP_OUTPUT/TEEC_MEMREF_TEMP_INOUT ++ */ ++typedef struct { ++ void *buffer; ++ uint32_t size; ++} TEEC_TempMemoryReference; ++ ++/* ++ * the corresponding param types are ++ * TEEC_MEMREF_WHOLE/TEEC_MEMREF_PARTIAL_INPUT ++ * TEEC_MEMREF_PARTIAL_OUTPUT/TEEC_MEMREF_PARTIAL_INOUT ++ */ ++typedef struct { ++ TEEC_SharedMemory *parent; ++ uint32_t size; ++ uint32_t offset; ++} TEEC_RegisteredMemoryReference; ++ ++/* ++ * the corresponding param types are ++ * TEEC_VALUE_INPUT/TEEC_VALUE_OUTPUT/TEEC_VALUE_INOUT ++ */ ++typedef struct { ++ uint32_t a; ++ uint32_t b; ++} TEEC_Value; ++ ++typedef struct { ++ int ion_share_fd; ++ uint32_t ion_size; ++} 
TEEC_IonReference; ++ ++typedef union { ++ TEEC_TempMemoryReference tmpref; ++ TEEC_RegisteredMemoryReference memref; ++ TEEC_Value value; ++ TEEC_IonReference ionref; ++} TEEC_Parameter; ++ ++typedef struct { ++ uint32_t event_type; /* Tui event type */ ++ uint32_t value; /* return value, is keycode if tui event is getKeycode */ ++ uint32_t notch; /* notch size of the screen for tui */ ++ uint32_t width; /* width of foldable screen */ ++ uint32_t height; /* height of foldable screen */ ++ uint32_t fold_state; /* state of foldable screen */ ++ uint32_t display_state; /* one state of folded state */ ++ uint32_t phy_width; /* real width of the mobile */ ++ uint32_t phy_height; /* real height of the mobile */ ++} TEEC_TUI_Parameter; ++ ++typedef struct { ++ uint32_t started; /* 0 means cancel this operation, others mean to perform this operation */ ++ uint32_t paramTypes; /* use TEEC_PARAM_TYPES to construct this value */ ++ TEEC_Parameter params[TEEC_PARAM_NUM]; ++ TEEC_Session *session; ++ bool cancel_flag; ++} TEEC_Operation; ++ ++#endif ++ ++ +diff -Naur '--exclude=.git' qemu/hw/char/virtio-console.c qemu_after/hw/char/virtio-console.c +--- qemu/hw/char/virtio-console.c 2023-10-15 17:28:44.746034090 +0800 ++++ qemu_after/hw/char/virtio-console.c 2023-10-23 15:11:13.312630820 +0800 +@@ -20,6 +20,14 @@ + #include "qapi/error.h" + #include "qapi/qapi-events-char.h" + ++#include "qom/object.h" ++#include "hw/core/cpu.h" ++#include "sysemu/hw_accel.h" ++#include "monitor/monitor.h" ++#include ++#include ++#include "tc_ns_client.h" ++ + #define TYPE_VIRTIO_CONSOLE_SERIAL_PORT "virtserialport" + #define VIRTIO_CONSOLE(obj) \ + OBJECT_CHECK(VirtConsole, (obj), TYPE_VIRTIO_CONSOLE_SERIAL_PORT) +@@ -44,6 +52,133 @@ + virtio_serial_throttle_port(VIRTIO_SERIAL_PORT(vcon), false); + return FALSE; + } ++//#define DEBUG 1 ++ ++#ifdef DEBUG ++static void debug(const char *fmt, ...) 
++{ ++ va_list args; ++ ++ va_start(args, fmt); ++ vfprintf(stderr, fmt, args); ++ va_end(args); ++} ++ ++#define PRINTF_SIZE 16 ++static void dump_buff(const char *buffer, size_t bufLen) ++{ ++ size_t i; ++ if (buffer == NULL || bufLen == 0) { ++ return; ++ } ++ ++ // printf("\n--------------------------------------------------\n"); ++ printf("--------------------------------------------------\n"); ++ printf("bufLen = %d\n", (int)bufLen); ++ for (i = 0; i < bufLen; i++) { ++ if (i % PRINTF_SIZE == 0 && i != 0) { ++ printf("\n"); ++ } ++ printf("%02x ", *(buffer + i)); ++ } ++ printf("\n--------------------------------------------------\n"); ++ return; ++} ++#else ++#define debug(fmt, ...) do { } while (0) ++ ++#define dump_buff(buffer, bufLen) do { } while (0) ++#endif ++ ++#define VTZF_OPEN_TZD 15 ++#define VTZF_OPEN_SESSION 31 ++#define VTZF_SEND_CMD 33 ++#define VTZF_FS_REGISTER_AGENT 45 ++#define VTZF_LOAD_SEC 53 ++ ++#define TEEC_PARAM_NUM 4 /* teec param max number */ ++ ++#define IS_TEMP_MEM(paramType) \ ++ (((paramType) == TEEC_MEMREF_TEMP_INPUT) || ((paramType) == TEEC_MEMREF_TEMP_OUTPUT) || \ ++ ((paramType) == TEEC_MEMREF_TEMP_INOUT)) ++ ++#define IS_PARTIAL_MEM(paramType) \ ++ (((paramType) == TEEC_MEMREF_WHOLE) || ((paramType) == TEEC_MEMREF_PARTIAL_INPUT) || \ ++ ((paramType) == TEEC_MEMREF_PARTIAL_OUTPUT) || ((paramType) == TEEC_MEMREF_PARTIAL_INOUT)) ++ ++#define IS_VALUE_MEM(paramType) \ ++ (((paramType) == TEEC_VALUE_INPUT) || ((paramType) == TEEC_VALUE_OUTPUT) || ((paramType) == TEEC_VALUE_INOUT)) ++ ++#define TEEC_PARAM_TYPE_GET(paramTypes, index) \ ++ (((paramTypes) >> (4*(index))) & 0x0F) ++ ++typedef struct { ++ uint32_t packet_size; ++ uint32_t cmd; ++ uint32_t seq_num; ++ uint32_t vmid; ++ uint32_t flag; ++} struct_packet_cmd_open_tzd; ++ ++typedef struct { ++ uint32_t packet_size; ++ uint32_t cmd; ++ uint32_t seq_num; ++ int32_t ptzfd; ++ void *vmaddr; ++ struct AgentIoctlArgs args; ++} struct_packet_cmd_regagent; ++ ++typedef struct { ++ 
uint32_t packet_size; ++ uint32_t cmd; ++ uint32_t seq_num; ++ int32_t ptzfd; ++ int32_t cpu_index; ++ struct SecLoadIoctlStruct ioctlArg; ++} struct_packet_cmd_load_sec; ++ ++typedef struct { ++ uint32_t packet_size; ++ uint32_t cmd; ++ uint32_t seq_num; ++ int32_t ptzfd; ++ int32_t cpu_index; ++ TC_NS_ClientContext cliContext; ++} struct_packet_cmd_session; ++ ++typedef struct { ++ uint32_t packet_size; ++ uint32_t cmd; ++ uint32_t seq_num; ++ int32_t ptzfd; ++ int32_t err_flag; ++ int32_t is_fragment; ++ uint32_t fragment_block_num; ++ uint32_t vm_page_size;; ++ uint64_t block_addrs[TEEC_PARAM_NUM];//qemu and proxy don't use ++ uint32_t block_size[TEEC_PARAM_NUM]; ++ unsigned long long addrs[TEEC_PARAM_NUM]; //used by ref mem mmap ++ TC_NS_ClientContext cliContext; ++} struct_packet_cmd_send_cmd; ++ ++typedef struct { ++ uint32_t packet_size; ++ uint32_t cmd; ++ uint32_t seq_num; ++ int32_t ptzfd; ++ uint64_t buffer; ++ uint32_t size; ++ uint32_t offset; ++} struct_packet_cmd_mmap; ++ ++#define FRAG_FLAG 0xAEAE ++ ++typedef struct { ++ uint64_t phy_addr; ++ uint32_t page_num; ++ uint32_t frag_flag; ++}struct_page_block; + + /* Callback function that's called when the guest sends us data */ + static ssize_t flush_buf(VirtIOSerialPort *port, +@@ -51,12 +186,193 @@ + { + VirtConsole *vcon = VIRTIO_CONSOLE(port); + ssize_t ret; +- ++ int i = 0; ++ uint32_t j = 0; ++ uint32_t fragment_block_num = 0; ++ struct_page_block *page_block; ++ hwaddr gpa_param; ++ Error *local_err = NULL; ++ MemoryRegion *mr = NULL; ++ void *ptr_hva = NULL; ++ uint32_t offset = sizeof(struct_packet_cmd_send_cmd); + if (!qemu_chr_fe_backend_connected(&vcon->chr)) { + /* If there's no backend, we can just say we consumed all data. 
*/ + return len; + } + ++ debug("\n"); ++ debug("debug, %s, %s, %d \n", __FILE__, __func__, __LINE__); ++ debug(" virtio-console virtserialport name = %s, id = %d \n", port->name, (int)port->id); ++ debug(" have_data flush_buf, buflen = %d \n", len); ++ dump_buff((char *)buf, 0); ++ ++ if ( len >= 8 ) { ++ uint32_t ui32_cmd = 0; ++ ui32_cmd = *(uint32_t *)((char *)buf + sizeof(uint32_t)); ++ switch( ui32_cmd ) { ++ case VTZF_OPEN_TZD: ++ debug(" command is VTZF_OPEN_TZD \n"); ++ if ( len >= sizeof(struct_packet_cmd_open_tzd)) { ++ struct_packet_cmd_open_tzd* vtzf_packet_cmd = (struct_packet_cmd_open_tzd *)buf; ++ pid_t qemu_pid = getpid(); ++ debug(" qemu_pid = 0x%016lx, %d \n",qemu_pid, qemu_pid); ++ vtzf_packet_cmd->vmid = qemu_pid; ++ } ++ break; ++ case VTZF_LOAD_SEC: ++ debug(" command is VTZF_LOAD_SEC \n"); ++ if (len >= sizeof(struct_packet_cmd_load_sec)) { ++ struct_packet_cmd_load_sec* vtzf_packet_cmd = (struct_packet_cmd_load_sec *)buf; ++ debug(" vtzf_packet_cmd->cliContext.file_buffer = 0x%016lx \n", vtzf_packet_cmd->ioctlArg.fileBuffer); ++ hwaddr gpa = (uint64_t)vtzf_packet_cmd->ioctlArg.fileBuffer; ++ ptr_hva = gpa2hva(&mr, gpa, &local_err); ++ if (local_err) { ++ debug(" gpa2hva failed \n"); ++ } else { ++ debug(" host virtual address of file_buffer = 0x%016lx, %p \n", (uint64_t)ptr_hva, ptr_hva); ++ memory_region_unref(mr); ++ uint64_t ui64_hva; ++ ui64_hva = (uint64_t)ptr_hva; ++ vtzf_packet_cmd->ioctlArg.fileBuffer = (void *)ui64_hva; ++ } ++ } ++ break; ++ case VTZF_FS_REGISTER_AGENT: ++ debug(" command is VTZF_FS_REGISTER_AGENT \n"); ++ if (len >= sizeof(struct_packet_cmd_regagent)) { ++ struct_packet_cmd_regagent* vtzf_packet_cmd = (struct_packet_cmd_regagent *)buf; ++ debug(" vtzf_packet_cmd->cliContext.file_buffer = 0x%016lx \n", vtzf_packet_cmd->vmaddr); ++ hwaddr gpa = (uint64_t)vtzf_packet_cmd->vmaddr; ++ ptr_hva = gpa2hva(&mr, gpa, &local_err); ++ if (local_err) { ++ debug(" gpa2hva failed \n"); ++ } else { ++ debug(" host virtual 
address of vmaddr = 0x%016lx, %p \n", (uint64_t)ptr_hva, ptr_hva); ++ memory_region_unref(mr); ++ uint64_t ui64_hva; ++ ui64_hva = (uint64_t)ptr_hva; ++ vtzf_packet_cmd->vmaddr = (void *)ui64_hva; ++ } ++ } ++ break; ++ case VTZF_OPEN_SESSION: ++ debug(" command is VTZF_OPEN_SESSION \n"); ++ debug("sizeof(struct_packet_cmd_session) =%d \n", sizeof(struct_packet_cmd_session)); ++ debug("sizeof(TC_NS_ClientContext) =%d \n", sizeof(TC_NS_ClientContext)); ++ if ( len >= sizeof(struct_packet_cmd_session) ) { ++ struct_packet_cmd_session* vtzf_packet_cmd = (struct_packet_cmd_session *)buf; ++ debug(" vtzf_packet_cmd->cliContext.file_size = 0x%08x, %d \n", vtzf_packet_cmd->cliContext.file_size, ++ vtzf_packet_cmd->cliContext.file_size); ++ debug(" vtzf_packet_cmd->cliContext.file_buffer = 0x%016lx \n", vtzf_packet_cmd->cliContext.file_buffer); ++ hwaddr gpa = (uint64_t)vtzf_packet_cmd->cliContext.file_buffer; ++ ptr_hva = gpa2hva(&mr, gpa, &local_err); ++ if (local_err) { ++ debug(" gpa2hva failed \n"); ++ } else { ++ debug(" host virtual address of file_buffer = 0x%016lx, %p \n", (uint64_t)ptr_hva, ptr_hva); ++ memory_region_unref(mr); ++ uint64_t ui64_hva; ++ ui64_hva = (uint64_t)ptr_hva; ++ vtzf_packet_cmd->cliContext.file_buffer = (void *)ui64_hva; ++ } ++ } ++ break; ++ case VTZF_SEND_CMD: ++ debug(" command is VTZF_SEND_CMD \n"); ++ if ( len >= sizeof(struct_packet_cmd_send_cmd) ) { ++ struct_packet_cmd_send_cmd* vtzf_packet_cmd = (struct_packet_cmd_send_cmd *)buf; ++ uint32_t packet_size =vtzf_packet_cmd->packet_size; ++ if (len != packet_size && !vtzf_packet_cmd->fragment_block_num) { ++ debug("err ,len != packet_size \n"); ++ vtzf_packet_cmd->err_flag = 1; ++ break; ++ } ++ uint32_t param_type; ++ bool check_value; ++ ++ for (i = 0; i < TEEC_PARAM_NUM; i++) { ++ param_type = TEEC_PARAM_TYPE_GET(vtzf_packet_cmd->cliContext.paramTypes, i); ++ check_value = (param_type == TEEC_ION_INPUT || param_type == TEEC_ION_SGLIST_INPUT); ++ if (IS_TEMP_MEM(param_type)) { ++ 
gpa_param = (uint64_t)vtzf_packet_cmd->cliContext.params[i].memref.buffer; ++ ptr_hva = gpa2hva(&mr, gpa_param, &local_err); ++ if (local_err) { ++ debug(" gpa2hva params[%d].memref.buffer failed \n", i); ++ } else { ++ debug(" host virtual address of memref.buffer = 0x%016lx, %p \n", (uint64_t)ptr_hva, ptr_hva); ++ memory_region_unref(mr); ++ uint64_t ui64_hva; ++ ui64_hva = (uint64_t)ptr_hva; ++ vtzf_packet_cmd->cliContext.params[i].memref.buffer = ui64_hva; ++ } ++ } else if (IS_PARTIAL_MEM(param_type)) { ++ gpa_param = (uint64_t)vtzf_packet_cmd->cliContext.params[i].memref.buffer; ++ ptr_hva = gpa2hva(&mr, gpa_param, &local_err); ++ if (local_err) { ++ debug(" gpa2hva params[%d].memref.buffer failed \n", i); ++ } else { ++ debug(" host virtual address of memref.buffer = 0x%016lx, %p \n", (uint64_t)ptr_hva, ptr_hva); ++ memory_region_unref(mr); ++ uint64_t ui64_hva; ++ ui64_hva = (uint64_t)ptr_hva; ++ vtzf_packet_cmd->cliContext.params[i].memref.buffer = ui64_hva; ++ } ++ } else if (param_type == TEEC_MEMREF_SHARED_INOUT) { ++ /* do nothing */ ++ } else if (IS_VALUE_MEM(param_type) || check_value) { ++ /* do nothing */ ++ } else { ++ /* if type is none, ignore it */ ++ } ++ }// end for ++ ++ fragment_block_num = vtzf_packet_cmd->fragment_block_num; ++ if (fragment_block_num != 0) { ++ page_block = (struct_page_block *)((char *)vtzf_packet_cmd + offset); ++ for(j = 0; j < fragment_block_num; j++){ ++ gpa_param = page_block[j].phy_addr; ++ debug("page_block[%d].phy_addr = %llx\n", j, page_block[j].phy_addr); ++ ptr_hva = gpa2hva(&mr, gpa_param, &local_err); ++ if (local_err) { ++ debug(" gpa2hva params[%d].memref.buffer failed \n", i); ++ } else { ++ debug(" host virtual address of memref.buffer = 0x%016lx, %p \n", (uint64_t)ptr_hva, ptr_hva); ++ memory_region_unref(mr); ++ uint64_t ui64_hva; ++ ui64_hva = (uint64_t)ptr_hva; ++ page_block[j].phy_addr = ui64_hva; ++ } ++ } ++ } ++ }//end if ++ break; ++ default: ++ if (len >= sizeof(struct_page_block)) { ++ 
page_block = (struct_page_block *)buf; ++ if (page_block->frag_flag == FRAG_FLAG && len % sizeof(struct_page_block) == 0) { ++ fragment_block_num = len / sizeof(struct_page_block); ++ page_block->frag_flag = 0; ++ for(j = 0; j < fragment_block_num; j++){ ++ gpa_param = page_block[j].phy_addr; ++ debug("page_block[%d].phy_addr = %llx\n", j, page_block[j].phy_addr); ++ ptr_hva = gpa2hva(&mr, gpa_param, &local_err); ++ if (local_err) { ++ debug(" gpa2hva params[%d].memref.buffer failed \n", i); ++ } else { ++ debug(" host virtual address of memref.buffer = 0x%016lx, %p \n", (uint64_t)ptr_hva, ptr_hva); ++ memory_region_unref(mr); ++ uint64_t ui64_hva; ++ ui64_hva = (uint64_t)ptr_hva; ++ page_block[j].phy_addr = ui64_hva; ++ } ++ } ++ } ++ } ++ debug(" other command \n"); ++ } ++ ++ } // end of if ( len >= 4 ) ++ + ret = qemu_chr_fe_write(&vcon->chr, buf, len); + trace_virtio_console_flush_buf(port->id, len, ret); + +@@ -304,3 +620,10 @@ + } + + type_init(virtconsole_register_types) ++ ++ ++ ++ ++ ++ ++ +diff -Naur '--exclude=.git' qemu/include/monitor/monitor.h qemu_after/include/monitor/monitor.h +--- qemu/include/monitor/monitor.h 2023-10-15 17:28:44.802034090 +0800 ++++ qemu_after/include/monitor/monitor.h 2023-10-23 15:09:10.840630820 +0800 +@@ -4,6 +4,7 @@ + #include "block/block.h" + #include "qapi/qapi-types-misc.h" + #include "qemu/readline.h" ++#include "exec/hwaddr.h" + + extern __thread Monitor *cur_mon; + typedef struct MonitorHMP MonitorHMP; +@@ -36,6 +37,8 @@ + int monitor_set_cpu(int cpu_index); + int monitor_get_cpu_index(void); + ++void *gpa2hva(MemoryRegion **p_mr, hwaddr addr, Error **errp); ++ + void monitor_read_command(MonitorHMP *mon, int show_prompt); + int monitor_read_password(MonitorHMP *mon, ReadLineFunc *readline_func, + void *opaque); +@@ -49,3 +52,4 @@ + int64_t monitor_fdset_dup_fd_find(int dup_fd); + + #endif /* MONITOR_H */ ++ +diff -Naur '--exclude=.git' qemu/monitor/misc.c qemu_after/monitor/misc.c +--- qemu/monitor/misc.c 
2023-10-15 17:28:44.826034090 +0800 ++++ qemu_after/monitor/misc.c 2023-10-23 15:09:10.840630820 +0800 +@@ -674,7 +674,7 @@ + memory_dump(mon, count, format, size, addr, 1); + } + +-static void *gpa2hva(MemoryRegion **p_mr, hwaddr addr, Error **errp) ++void *gpa2hva(MemoryRegion **p_mr, hwaddr addr, Error **errp) + { + MemoryRegionSection mrs = memory_region_find(get_system_memory(), + addr, 1); diff --git a/trustzone-awared-vm/Host/vtzb_proxy/Makefile b/trustzone-awared-vm/Host/vtzb_proxy/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..2ad103e68fb315bac8c7122ccd92823ae649f4af --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/Makefile @@ -0,0 +1,40 @@ +TARGET_APP := vtz_proxy +LIBC_SEC := libboundscheck +TARGET_LIBSEC := libboundscheck.so + +all: $(TARGET_LIBSEC) $(TARGET_APP) + @cd $(LIBC_SEC) && $(MAKE) clean + +$(TARGET_LIBSEC): + @echo "compile libboundscheck ..." + @$(MAKE) -C $(LIBC_SEC) + sudo cp -rf $(LIBC_SEC)/lib/libboundscheck.so /usr/lib64 + @echo "compile libboundscheck done" + +APP_CFLAGS += -DSECURITY_AUTH_ENHANCE +APP_CFLAGS += -Ilibboundscheck/include +APP_CFLAGS += -Iinclude -Iinclude/cloud +APP_CFLAGS += -Werror -Wall -Wextra -fstack-protector-all -Wl,-z,relro,-z,now,-z,noexecstack -s -fPIE -pie -D_FORTIFY_SOURCE=2 -O2 +APP_LDFLAGS += -lboundscheck -Llibboundscheck/lib -lpthread + +APP_SOURCES := ./vtzb_proxy.c \ + ./thread_pool.c \ + ./virt.c \ + ./serial_port.c \ + ./vm.c \ + ./debug.c \ + ./agent.c \ + ./process_data.c \ + ./tlogcat.c \ + +APP_OBJECTS := $(APP_SOURCES:.c=.o) + +$(TARGET_APP): $(TARGET_LIBSEC) $(APP_SOURCES) + @echo "compile vtz_proxy ..." 
+ @$(CC) $(APP_CFLAGS) -o $@ $(APP_SOURCES) $(APP_LDFLAGS) + @echo "compile vtz_proxy done" + +clean: + @cd $(LIBC_SEC) && $(MAKE) clean + @rm -rf vtz_proxy + diff --git a/trustzone-awared-vm/Host/vtzb_proxy/agent.c b/trustzone-awared-vm/Host/vtzb_proxy/agent.c new file mode 100644 index 0000000000000000000000000000000000000000..019d3cc4e0493888d4e21f0351a065d0fe61fe47 --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/agent.c @@ -0,0 +1,170 @@ +#include "agent.h" +#include "comm_structs.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "serial_port.h" + +#include "tee_client_log.h" +#include "tee_sys_log.h" +#include "debug.h" +#include "thread_pool.h" +#include "vtzb_proxy.h" +#include "vm.h" + +extern ThreadPool g_pool; + +void free_agent_buf(int ptzfd, struct vm_file *vm_fp) +{ + int ret; + struct ListNode *ptr = NULL; + struct ListNode *n = NULL; + struct_agent_args *agent_args = NULL; + unsigned long buf[2]; + if (!vm_fp) + return; + pthread_mutex_lock(&vm_fp->agents_lock); + if (LIST_EMPTY(&vm_fp->agents_head)) + goto END; + + LIST_FOR_EACH_SAFE(ptr, n, &vm_fp->agents_head) { + struct_agent_args *tmp = + CONTAINER_OF(ptr, struct_agent_args, node); + if (tmp->dev_fd == ptzfd) { + ListRemoveEntry(&(tmp->node)); + agent_args = tmp; + if (agent_args) { + buf[0] = agent_args->args.id; + ret = ioctl(ptzfd, TC_NS_CLIENT_IOCTL_UNREGISTER_AGENT, buf); + if (ret) { + tloge("ioctl failed\n"); + } + if (agent_args->thd!= 0) { + thread_pool_submit(&g_pool, Kill_useless_thread, (void *)(agent_args->thd)); + } + pthread_spin_destroy(&agent_args->spinlock); + free(agent_args); + } + } + } +END: + pthread_mutex_unlock(&vm_fp->agents_lock); +} + +void register_agent(struct_packet_cmd_regagent *packet_cmd, + struct serial_port_file *serial_port) +{ + int ret; + struct_packet_rsp_regagent packet_rsp; + unsigned long buf[2]; + buf[0] = (unsigned long)(&packet_cmd->args); + packet_rsp.seq_num = packet_cmd->seq_num + 1; + 
ret = ioctl(packet_cmd->ptzfd, TC_NS_CLIENT_IOCTL_REGISTER_AGENT, buf); + if (!ret) { + /* Add the agent buffer to the linked list. */ + struct_agent_args *tmp = (struct_agent_args *)malloc(sizeof(struct_agent_args)); + if (!tmp) { + tloge("Failed to allocate memory for agent buffer\n"); + ret = -ENOMEM; + goto END; + } + pthread_spin_init(&tmp->spinlock, PTHREAD_PROCESS_PRIVATE); + ListInit(&tmp->node); + tmp->dev_fd = packet_cmd->ptzfd; + tmp->args = packet_cmd->args; + tmp->vmaddr = packet_cmd->vmaddr; + pthread_mutex_lock(&serial_port->vm_file->agents_lock); + ListInsertTail(&serial_port->vm_file->agents_head, &tmp->node); + pthread_mutex_unlock(&serial_port->vm_file->agents_lock); + } +END: + packet_rsp.packet_size = sizeof(packet_rsp); + packet_rsp.ret = ret; + packet_rsp.args = packet_cmd->args; + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) { + tloge("send to VM failed \n"); + } +} + +void wait_event(struct_packet_cmd_event *packet_cmd, + struct serial_port_file *serial_port) +{ + int ret = -EFAULT; + struct_packet_rsp_general packet_rsp; + unsigned long buf[2]; + struct ListNode *ptr = NULL; + bool bfind = false; + struct_agent_args *agent_args; + buf[0] = packet_cmd->agent_id; + + pthread_mutex_lock(&serial_port->vm_file->agents_lock); + if (!LIST_EMPTY(&serial_port->vm_file->agents_head)) { + LIST_FOR_EACH(ptr, &serial_port->vm_file->agents_head) { + agent_args = + CONTAINER_OF(ptr, struct_agent_args, node); + if (agent_args->args.id == packet_cmd->agent_id) { + buf[1] = (unsigned long)agent_args->vmaddr; + bfind = true; + break; + } + } + } + pthread_mutex_unlock(&serial_port->vm_file->agents_lock); + if (bfind) { + agent_args->thd = pthread_self(); + ret = ioctl(packet_cmd->ptzfd, TC_NS_CLIENT_IOCTL_WAIT_EVENT, buf); + agent_args->thd = 0; + } + packet_rsp.packet_size = sizeof(packet_rsp); + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.ret = ret; + + ret = send_to_vm(serial_port, 
&packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) { + tloge("send to VM failed \n"); + } +} + +void sent_event_response(struct_packet_cmd_event *packet_cmd, + struct serial_port_file *serial_port) +{ + int ret = -EFAULT; + struct_packet_rsp_general packet_rsp; + unsigned long buf[2]; + bool bfind = false; + struct ListNode *ptr = NULL; + buf[0] = packet_cmd->agent_id; + pthread_mutex_lock(&serial_port->vm_file->agents_lock); + if (!LIST_EMPTY(&serial_port->vm_file->agents_head)) { + LIST_FOR_EACH(ptr, &serial_port->vm_file->agents_head) { + struct_agent_args *agent_args = + CONTAINER_OF(ptr, struct_agent_args, node); + if (agent_args->args.id == packet_cmd->agent_id) { + buf[1] = (unsigned long)agent_args->vmaddr; + bfind = true; + break; + } + } + } + pthread_mutex_unlock(&serial_port->vm_file->agents_lock); + + if (bfind) { + ret = ioctl(packet_cmd->ptzfd, TC_NS_CLIENT_IOCTL_SEND_EVENT_RESPONSE, buf); + } + + packet_rsp.packet_size = sizeof(packet_rsp); + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.ret = ret; + + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) { + tloge("send to VM failed \n"); + } +} \ No newline at end of file diff --git a/trustzone-awared-vm/Host/vtzb_proxy/agent.h b/trustzone-awared-vm/Host/vtzb_proxy/agent.h new file mode 100644 index 0000000000000000000000000000000000000000..f7ee9b676c51a75155c5eab1961df5bce1a11a37 --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/agent.h @@ -0,0 +1,24 @@ +#ifndef __AGENT_H__ +#define __AGENT_H__ + +#include "tc_ns_client.h" +#include "tee_client_list.h" +#include "comm_structs.h" +#include "serial_port.h" +#include "vm.h" + +typedef struct { + struct AgentIoctlArgs args; + int32_t dev_fd; + void *vmaddr; + struct ListNode node; + pthread_spinlock_t spinlock; + pthread_t thd; +} struct_agent_args; + +void free_agent_buf(int ptzfd, struct vm_file *vm_fp); +void register_agent(struct_packet_cmd_regagent *packet_cmd, struct 
serial_port_file *serial_port); +void wait_event(struct_packet_cmd_event *packet_cmd, struct serial_port_file *serial_port); +void sent_event_response(struct_packet_cmd_event *packet_cmd, struct serial_port_file *serial_port); + +#endif \ No newline at end of file diff --git a/trustzone-awared-vm/Host/vtzb_proxy/comm_structs.h b/trustzone-awared-vm/Host/vtzb_proxy/comm_structs.h new file mode 100644 index 0000000000000000000000000000000000000000..f75141b8c7e1974482c0f958f24abeac6d033c06 --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/comm_structs.h @@ -0,0 +1,285 @@ +#ifndef COMM_STRUCTS_H +#define COMM_STRUCTS_H + +#include +#include "tc_ns_client.h" + +#define CERT_BUF_MAX_SIZE 2048 + +#define TC_NS_CLIENT_DEV_FLAG 3 +#define TC_PRIVATE_DEV_FLAG 4 +#define TC_CVM_DEV_FLAG 5 +#define TLOG_DEV_FLAG 6 +#define TLOG_DEV_THD_FLAG 7 + +#define VTZ_OPEN_TZD 15 +#define VTZ_CLOSE_TZD 17 +#define VTZ_LOG_IN_NHIDL 19 +#define VTZ_GET_TEE_VERSION 21 +#define VTZ_GET_TEE_INFO 23 +#define VTZ_LATE_INIT 25 +#define VTZ_SYNC_TIME 27 +#define VTZ_LOG_IN 29 +#define VTZ_OPEN_SESSION 31 +#define VTZ_SEND_CMD 33 +#define VTZ_CANCEL_CMD 35 +#define VTZ_MMAP 37 +#define VTZ_MUNMAP 39 +#define VTZ_CLOSE_SESSION 41 +#define VTZ_CLOSE_PTZDEV 43 +#define VTZ_FS_REGISTER_AGENT 45 +#define VTZ_WAIT_EVENT 49 +#define VTZ_SEND_EVENT_RESPONSE 51 +#define VTZ_LOAD_SEC 53 +#define VTZ_TEST 47 + +#define VTZ_GET_TEEOS_VER 55 +#define VTZ_SET_READER_CUR 57 +#define VTZ_SET_TLOGCAT_STAT 59 +#define VTZ_GET_TLOGCAT_STAT 61 +#define VTZ_GET_LOG 63 +#define VTZ_NOTHING 67 + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; +} struct_packet_cmd_general; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_general; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + uint32_t vmid; + uint32_t flag; +} struct_packet_cmd_open_tzd; + +typedef struct { + uint32_t packet_size; + 
uint32_t seq_num; + uint32_t ret; + int32_t ptzfd; + int32_t vmid; +} struct_packet_rsp_open_tzd; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; +} struct_packet_cmd_close_tzd; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_close_tzd; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; +} struct_packet_cmd_getteever; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; + uint32_t tee_ver; +} struct_packet_rsp_getteever; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + bool istlog; +} struct_packet_cmd_getteeinfo; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; + TC_NS_TEE_Info info; +} struct_packet_rsp_getteeinfo; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + void *vmaddr; + struct AgentIoctlArgs args; +} struct_packet_cmd_regagent; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; + struct AgentIoctlArgs args; +} struct_packet_rsp_regagent; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + uint32_t agent_id; +} struct_packet_cmd_event; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + uint32_t index; +} struct_packet_cmd_lateinit; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_lateinit; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + TC_NS_Time tcNsTime; +} struct_packet_cmd_synctime; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_synctime; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + uint8_t cert_buffer[CERT_BUF_MAX_SIZE]; +} 
struct_packet_cmd_login; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; +} struct_packet_cmd_login_non; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_login; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + __s32 cpu_index; + struct SecLoadIoctlStruct ioctlArg; +} struct_packet_cmd_load_sec; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; + struct SecLoadIoctlStruct ioctlArg; +} struct_packet_rsp_load_sec; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + __s32 cpu_index; + TC_NS_ClientContext cliContext; +} struct_packet_cmd_session; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; + TC_NS_ClientContext cliContext; +} struct_packet_rsp_session; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + int32_t err_flag; + int32_t is_fragment; + uint32_t fragment_block_num; + uint32_t vm_page_size; + uint64_t block_addrs[4];//qemu and proxy don't use + uint32_t block_size[4]; + unsigned long long addrs[4]; //used by ref mem mmap + TC_NS_ClientContext cliContext; +} struct_packet_cmd_send_cmd; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; + TC_NS_ClientContext cliContext; +} struct_packet_rsp_send_cmd; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + __s32 cpu_index; + TC_NS_ClientContext cliContext; + pid_t pid; +} struct_packet_cmd_cancel_cmd; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; + TC_NS_ClientContext cliContext; +} struct_packet_rsp_cancel_cmd; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + uint64_t buffer; + uint32_t size; + uint32_t offset; +} struct_packet_cmd_mmap; + +typedef struct 
{ + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_mmap; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; +} struct_packet_cmd_nothing; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_nothing; + +#endif \ No newline at end of file diff --git a/trustzone-awared-vm/Host/vtzb_proxy/debug.c b/trustzone-awared-vm/Host/vtzb_proxy/debug.c new file mode 100644 index 0000000000000000000000000000000000000000..1df4f5b8de963fc5b3f21e4a2a542e9375d562be --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/debug.c @@ -0,0 +1,40 @@ +#include "debug.h" +#include +#include +#include +#include + +double __get_us(struct timeval t) +{ + return (t.tv_sec * 1000000 + t.tv_usec); +} + +#ifdef DEBUG +void debug(const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + vfprintf(stderr, fmt, args); + va_end(args); +} + +#define PRINTF_SIZE 16 +void dump_buff(const char *buffer, size_t bufLen) +{ + size_t i; + if (buffer == NULL || bufLen == 0) { + return; + } + printf("--------------------------------------------------\n"); + printf("bufLen = %d\n", (int)bufLen); + for (i = 0; i < bufLen; i++) { + if (i % PRINTF_SIZE == 0 && i != 0) { + printf("\n"); + } + printf("%02x ", *(buffer + i)); + } + printf("\n--------------------------------------------------\n"); + return; +} +#endif \ No newline at end of file diff --git a/trustzone-awared-vm/Host/vtzb_proxy/debug.h b/trustzone-awared-vm/Host/vtzb_proxy/debug.h new file mode 100644 index 0000000000000000000000000000000000000000..84377e9e214b76b019c41c41fad989fa76d73c31 --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/debug.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2012-2023. All rights reserved. + * Licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef __DEBUG_H__ +#define __DEBUG_H__ + +#include +#include + +double __get_us(struct timeval t); +#ifdef DEBUG +void debug(const char *fmt, ...); +void dump_buff(const char *buffer, size_t bufLen); +#else +#define debug(fmt, ...) \ + do { \ + } while (0) + +#define dump_buff(buffer, bufLen) \ + do { \ + } while (0) +#endif + +#endif diff --git a/trustzone-awared-vm/Host/vtzb_proxy/include/cloud/tee_client_log.h b/trustzone-awared-vm/Host/vtzb_proxy/include/cloud/tee_client_log.h new file mode 100644 index 0000000000000000000000000000000000000000..59681ecdb2109e3f6001b800e5c7acb436abf6ce --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/include/cloud/tee_client_log.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2021. All rights reserved. + * iTrustee licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef TEE_CLIENT_LOG_H +#define TEE_CLIENT_LOG_H + +#include + +#ifdef TEEC_DEBUG +#define TEEC_Debug(...) syslog(LOG_USER | LOG_INFO, __VA_ARGS__); +#else +#define TEEC_Debug(...) +#endif + +#define TEEC_Error(...) 
syslog(LOG_USER | LOG_INFO, __VA_ARGS__); + +#endif diff --git a/trustzone-awared-vm/Host/vtzb_proxy/include/cloud/tee_session_pool.h b/trustzone-awared-vm/Host/vtzb_proxy/include/cloud/tee_session_pool.h new file mode 100644 index 0000000000000000000000000000000000000000..5812698b70756c576d5da81f314d742ddb3e9400 --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/include/cloud/tee_session_pool.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2021. All rights reserved. + * iTrustee licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef _TEE_SESSION_POOL_H_ +#define _TEE_SESSION_POOL_H_ + +#include +#include "tee_client_api.h" + +struct SessionInfo { + TEEC_Session session; + bool isDead; +}; + +struct SessionPool { + TEEC_Context *context; /* context owner */ + TEEC_UUID uuid; + uint32_t poolSize; /* expected count of sessions to open */ + struct SessionInfo *sessionsInfo; + uint32_t opened; /* counf of sessions opend successfully */ + uint32_t inuse; /* count of sessions in using */ + sem_t keys; /* keys value equal opend - inuse */ + uint8_t *usage; /* a bitmap mark session in-use */ + uint32_t usageSize; /* bitmap size in bytes */ + pthread_mutex_t usageLock; +}; + +TEEC_Result TEEC_SessionPoolCreate(TEEC_Context *context, const TEEC_UUID *destination, + struct SessionPool **sessionPool, uint32_t poolSize); +TEEC_Result TEEC_SessionPoolInvoke(struct SessionPool *sessionPool, uint32_t commandID, + TEEC_Operation *operation, uint32_t *returnOrigin); +void TEEC_SessionPoolDestroy(struct SessionPool *sessionPool); 
+void TEEC_SessionPoolQuery(struct SessionPool *sessionPool, uint32_t *size, + uint32_t *opened, uint32_t *inuse, bool showBitmap); + +#endif diff --git a/trustzone-awared-vm/Host/vtzb_proxy/include/tc_ns_client.h b/trustzone-awared-vm/Host/vtzb_proxy/include/tc_ns_client.h new file mode 100644 index 0000000000000000000000000000000000000000..24e2cd6a582deab75332e477c8ee87ee328c4ac0 --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/include/tc_ns_client.h @@ -0,0 +1,165 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2012-2023. All rights reserved. + * Licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef _TC_NS_CLIENT_H_ +#define _TC_NS_CLIENT_H_ +#include "tee_client_type.h" +#define TC_DEBUG + +#define INVALID_TYPE 0x00 +#define TEECD_CONNECT 0x01 +#ifndef ZERO_SIZE_PTR +#define ZERO_SIZE_PTR ((void *)16) +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= (unsigned long)ZERO_SIZE_PTR) +#endif + +#define UUID_SIZE 16 + +#define TC_NS_CLIENT_IOC_MAGIC 't' +#define TC_NS_CLIENT_DEV "tc_ns_client" +#define TC_NS_CLIENT_DEV_NAME "/dev/tc_ns_client" +#define TC_TEECD_PRIVATE_DEV_NAME "/dev/tc_private" +#define TC_NS_CVM_DEV_NAME "/dev/tc_ns_cvm" + +enum ConnectCmd { + GET_FD, + GET_TEEVERSION, + SET_SYS_XML, + GET_TEECD_VERSION, +}; + +typedef struct { + unsigned int method; + unsigned int mdata; +} TC_NS_ClientLogin; + +typedef union { + struct { + unsigned int buffer; + unsigned int buffer_h_addr; + unsigned int offset; + unsigned int h_offset; + unsigned int size_addr; + unsigned int size_h_addr; + } memref; + struct { + unsigned int a_addr; + unsigned int a_h_addr; + unsigned int b_addr; + unsigned int b_h_addr; + } value; +} TC_NS_ClientParam; + +typedef struct { + unsigned int code; + unsigned int origin; +} TC_NS_ClientReturn; + +typedef struct { + unsigned char uuid[UUID_SIZE]; + unsigned int session_id; + unsigned int cmd_id; + TC_NS_ClientReturn returns; + TC_NS_ClientLogin login; + TC_NS_ClientParam params[TEEC_PARAM_NUM]; + unsigned int paramTypes; + bool started; + unsigned int callingPid; + unsigned int file_size; + union { + char *file_buffer; + struct { + uint32_t file_addr; + uint32_t file_h_addr; + } memref; + }; +} TC_NS_ClientContext; + +typedef struct { + uint32_t seconds; + uint32_t millis; +} TC_NS_Time; + +typedef struct { + uint16_t tzdriver_version_major; + uint16_t tzdriver_version_minor; + uint32_t reserved[15]; +} TC_NS_TEE_Info; + +enum SecFileType { + LOAD_TA = 0, + LOAD_SERVICE, + LOAD_LIB, + LOAD_DYNAMIC_DRV, + LOAD_PATCH, + LOAD_TYPE_MAX +}; + +struct SecFileInfo { + enum SecFileType fileType; + uint32_t fileSize; + 
int32_t secLoadErr; +}; + +struct SecLoadIoctlStruct { + struct SecFileInfo secFileInfo; + TEEC_UUID uuid; + union { + char *fileBuffer; + struct { + uint32_t file_addr; + uint32_t file_h_addr; + } memref; + }; +}__attribute__((packed)); + +struct AgentIoctlArgs { + uint32_t id; + uint32_t bufferSize; + union { + void *buffer; + unsigned long long addr; + }; +}; + +#define TC_NS_CLIENT_IOCTL_SES_OPEN_REQ _IOW(TC_NS_CLIENT_IOC_MAGIC, 1, TC_NS_ClientContext) +#define TC_NS_CLIENT_IOCTL_SES_CLOSE_REQ _IOWR(TC_NS_CLIENT_IOC_MAGIC, 2, TC_NS_ClientContext) +#define TC_NS_CLIENT_IOCTL_SEND_CMD_REQ _IOWR(TC_NS_CLIENT_IOC_MAGIC, 3, TC_NS_ClientContext) +#define TC_NS_CLIENT_IOCTL_SHRD_MEM_RELEASE _IOWR(TC_NS_CLIENT_IOC_MAGIC, 4, unsigned int) +#define TC_NS_CLIENT_IOCTL_WAIT_EVENT _IOWR(TC_NS_CLIENT_IOC_MAGIC, 5, unsigned int) +#define TC_NS_CLIENT_IOCTL_SEND_EVENT_RESPONSE _IOWR(TC_NS_CLIENT_IOC_MAGIC, 6, unsigned int) +#define TC_NS_CLIENT_IOCTL_REGISTER_AGENT _IOWR(TC_NS_CLIENT_IOC_MAGIC, 7, struct AgentIoctlArgs) +#define TC_NS_CLIENT_IOCTL_UNREGISTER_AGENT _IOWR(TC_NS_CLIENT_IOC_MAGIC, 8, unsigned int) +#define TC_NS_CLIENT_IOCTL_LOAD_APP_REQ _IOWR(TC_NS_CLIENT_IOC_MAGIC, 9, struct SecLoadIoctlStruct) +#define TC_NS_CLIENT_IOCTL_NEED_LOAD_APP _IOWR(TC_NS_CLIENT_IOC_MAGIC, 10, TC_NS_ClientContext) +#define TC_NS_CLIENT_IOCTL_LOAD_APP_EXCEPT _IOWR(TC_NS_CLIENT_IOC_MAGIC, 11, unsigned int) +#define TC_NS_CLIENT_IOCTL_CANCEL_CMD_REQ _IOWR(TC_NS_CLIENT_IOC_MAGIC, 13, TC_NS_ClientContext) +#define TC_NS_CLIENT_IOCTL_LOGIN _IOWR(TC_NS_CLIENT_IOC_MAGIC, 14, int) +#define TC_NS_CLIENT_IOCTL_TST_CMD_REQ _IOWR(TC_NS_CLIENT_IOC_MAGIC, 15, int) +#define TC_NS_CLIENT_IOCTL_TUI_EVENT _IOWR(TC_NS_CLIENT_IOC_MAGIC, 16, int) +#define TC_NS_CLIENT_IOCTL_SYC_SYS_TIME _IOWR(TC_NS_CLIENT_IOC_MAGIC, 17, TC_NS_Time) +#define TC_NS_CLIENT_IOCTL_SET_NATIVE_IDENTITY _IOWR(TC_NS_CLIENT_IOC_MAGIC, 18, int) +#define TC_NS_CLIENT_IOCTL_LOAD_TTF_FILE_AND_NOTCH_HEIGHT _IOWR(TC_NS_CLIENT_IOC_MAGIC, 19, 
unsigned int) +#define TC_NS_CLIENT_IOCTL_LATEINIT _IOWR(TC_NS_CLIENT_IOC_MAGIC, 20, unsigned int) +#define TC_NS_CLIENT_IOCTL_GET_TEE_VERSION _IOWR(TC_NS_CLIENT_IOC_MAGIC, 21, unsigned int) +#ifdef CONFIG_CMS_SIGNATURE +#define TC_NS_CLIENT_IOCTL_UPDATE_TA_CRL _IOWR(TC_NS_CLIENT_IOC_MAGIC, 22, struct TC_NS_ClientCrl) +#endif +#ifdef CONFIG_TEE_TELEPORT_SUPPORT +#define TC_NS_CLIENT_IOCTL_PORTAL_REGISTER _IOWR(TC_NS_CLIENT_IOC_MAGIC, 24, struct AgentIoctlArgs) +#define TC_NS_CLIENT_IOCTL_PORTAL_WORK _IOWR(TC_NS_CLIENT_IOC_MAGIC, 25, struct AgentIoctlArgs) +#endif +#define TC_NS_CLIENT_IOCTL_GET_TEE_INFO _IOWR(TC_NS_CLIENT_IOC_MAGIC, 26, TC_NS_TEE_Info) +#define TC_NS_CLIENT_IOCTL_SET_VM_FLAG _IOWR(TC_NS_CLIENT_IOC_MAGIC, 27, int) +TEEC_Result TEEC_CheckOperation(const TEEC_Operation *operation); +#endif + diff --git a/trustzone-awared-vm/Host/vtzb_proxy/include/tee_client_api.h b/trustzone-awared-vm/Host/vtzb_proxy/include/tee_client_api.h new file mode 100644 index 0000000000000000000000000000000000000000..d68997907464646b715802c560da1962b9139867 --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/include/tee_client_api.h @@ -0,0 +1,179 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2013-2021. All rights reserved. + * iTrustee licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef _TEE_CLIENT_API_H_ +#define _TEE_CLIENT_API_H_ + +#ifndef LOG_TAG +#define LOG_TAG NULL +#endif + +#include +#include "tee_client_type.h" +#include "tee_client_ext_api.h" +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define S_VAR_NOT_USED(variable) do { (void)(variable); } while (0) + +#define TEEC_PARAM_TYPES(param0Type, param1Type, param2Type, param3Type) \ + ((param3Type) << 12 | (param2Type) << 8 | (param1Type) << 4 | (param0Type)) + +#define TEEC_PARAM_TYPE_GET(paramTypes, index) \ + (((paramTypes) >> (4*(index))) & 0x0F) + +#define TEEC_VALUE_UNDEF 0xFFFFFFFF + +/* + * initializes a new TEE Context, forming a connection between this Client Application and the TEE + * + * @param name [IN] TEE name (unused) + * @param context [OUT] pointer to TEEC_Context to be initialized + * + * @return TEEC_SUCCESS operation success + * @return TEEC_ERROR_BAD_PARAMETERS invalid parameter + * @return TEEC_ERROR_GENERIC system error unhandled + */ +TEEC_Result TEEC_InitializeContext( + const char *name, + TEEC_Context *context); + +/* + * finalizes an initialized TEE Context, closing the connection between the Client Application and the TEE + * + * @param context [IN/OUT] pointer to TEEC_Context initialized by TEEC_InitializeContext + * + * @return void + */ +void TEEC_FinalizeContext( + TEEC_Context *context); + +/* + * opens a new Session between the Client Application and the specified Trusted Application + * + * @param context [IN/OUT] a pointer to an initialized TEE Context + * @param session [OUT] a pointer to a Session structure to be opened + * @param destination [IN] a pointer to a structure containing the UUID of the destination Trusted Application + * @param connectionMethod [IN] the method of connection to use + * @param connectionData [IN] any necessary data required to support the connection method + * @param operation [IN/OUT] a pointer to an Operation containing a set of Parameters to exchange with the + * Trusted Application + * 
@param returnOrigin [OUT] a pointer to a variable which will contain the return origin, This field may be NULL + * if the return origin is not needed + * + * @return TEEC_SUCCESS operation success + * @return TEEC_ERROR_BAD_PARAMETERS invalid parameter, context or session or destination is NULL + * @return TEEC_ERROR_ACCESS_DENIED client Application's connection request is denied + * @return TEEC_ERROR_OUT_OF_MEMORY system resource is out of use + * @return TEEC_ERROR_TRUSTED_APP_LOAD_ERROR load Trusted Application failed + * @return others refer TEEC_ReturnCode + */ +TEEC_Result TEEC_OpenSession( + TEEC_Context *context, + TEEC_Session *session, + const TEEC_UUID *destination, + uint32_t connectionMethod, + const void *connectionData, + TEEC_Operation *operation, + uint32_t *returnOrigin); + +/* + * closes a Session which has been opened with a Trusted Application + * + * @param session [IN/OUT] pointer to a session to be closed + * + * @return void + */ +void TEEC_CloseSession( + TEEC_Session *session); + +/* + * invokes a Command within the specified Session + * + * @param session [IN/OUT] the open Session in which the command will be invoked + * @param commandID [IN] the identifier of the Command within the Trusted Application to invoke + * @param operation [IN/OUT] a pointer to a Client Application initialized TEEC_Operation structure + * @param returnOrigin [OUT] a pointer to a variable which will contain the return origin + * + * @return TEEC_SUCCESS operation success + * @return TEEC_ERROR_BAD_PARAMETERS invalid parameter, session is NULL or operation data invalid + * @return TEEC_ERROR_ACCESS_DENIED invoke command operation is denied + * @return TEEC_ERROR_OUT_OF_MEMORY system resource is out of use + * @return others refer TEEC_ReturnCode + */ +TEEC_Result TEEC_InvokeCommand( + TEEC_Session *session, + uint32_t commandID, + TEEC_Operation *operation, + uint32_t *returnOrigin); + +/* + * registers a block of existing Client Application memory as a block of 
Shared Memory within + * the scope of the specified TEE Context, in accordance with the parameters which have been set by the + * Client Application inside the sharedMem structure (don't support 0 size data) + * + * @param context [IN/OUT] a pointer to an initialized TEE Context + * @param sharedMem [IN/OUT] a pointer to a Shared Memory structure to register + * + * @return TEEC_SUCCESS operation success + * @return TEEC_ERROR_BAD_PARAMETERS invalid parameter, context or sharedMem is NULL + */ +TEEC_Result TEEC_RegisterSharedMemory( + TEEC_Context *context, + TEEC_SharedMemory *sharedMem); + +/* + * allocates a new block of memory as a block of Shared Memory within the scope of the specified TEE Context + * size of sharedMem should not be 0 + * + * @param context [IN/OUT] a pointer to an initialized TEE Context + * @param sharedMem [IN/OUT] a pointer to a Shared Memory structure to allocate + * + * @return TEEC_SUCCESS operation success + * @return TEEC_ERROR_BAD_PARAMETERS invalid parameter, context or sharedMem is NULL + * @return TEEC_ERROR_OUT_OF_MEMORY system resource is out of use + */ +TEEC_Result TEEC_AllocateSharedMemory( + TEEC_Context *context, + TEEC_SharedMemory *sharedMem); + +/* + * deregisters or deallocates a previously initialized block of Shared Memory + * if memory is allocated by TEEC_AllocateSharedMemory, system will free this memory + * if memory is registered by TEEC_RegisterSharedMemory, system will not free this memory + * + * @param sharedMem [IN/OUT] a pointer to a valid Shared Memory structure + * + * @return void + */ +void TEEC_ReleaseSharedMemory( + TEEC_SharedMemory *sharedMem); + +/* + * requests the cancellation of a pending open Session operation or a Command invocation operation + * this operation is not supported currently + * + * @param operation [IN/OUT] a pointer to a Client Application instantiated Operation structure + * + * @return void + */ +void TEEC_RequestCancellation( + TEEC_Operation *operation); + +#ifdef 
__cplusplus +} +#endif + +#endif diff --git a/trustzone-awared-vm/Host/vtzb_proxy/include/tee_client_constants.h b/trustzone-awared-vm/Host/vtzb_proxy/include/tee_client_constants.h new file mode 100644 index 0000000000000000000000000000000000000000..2a7e31f678d2c9ad31a2ac44d9b4e7cc58b6700a --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/include/tee_client_constants.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2013-2022. All rights reserved. + * Licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef _TEE_CLIENT_CONSTANTS_H_ +#define _TEE_CLIENT_CONSTANTS_H_ + +enum TEEC_ReturnCode { + TEEC_SUCCESS = 0x0, /* success */ + TEEC_ERROR_INVALID_CMD, /* invalid command */ + TEEC_ERROR_SERVICE_NOT_EXIST, /* target service is not exist */ + TEEC_ERROR_SESSION_NOT_EXIST, /* session between client and service is not exist */ + TEEC_ERROR_SESSION_MAXIMUM, /* exceed max num of sessions */ + TEEC_ERROR_REGISTER_EXIST_SERVICE, /* cannot register the service which already exist */ + TEEC_ERROR_TAGET_DEAD_FATAL, /* system error occurs in TEE */ + TEEC_ERROR_READ_DATA, /* failed to read data in file */ + TEEC_ERROR_WRITE_DATA, /* failed to write data to file */ + TEEC_ERROR_TRUNCATE_OBJECT, /* data is truncated */ + TEEC_ERROR_SEEK_DATA, /* failed to seek data in file */ + TEEC_ERROR_FSYNC_DATA, /* failed to sync data in file */ + TEEC_ERROR_RENAME_OBJECT, /* failed to rename file */ + TEEC_ERROR_TRUSTED_APP_LOAD_ERROR, /* failed to load Trusted Application */ + TEEC_ERROR_GENERIC = 0xFFFF0000, /* generic error occurs */ + 
TEEC_ERROR_ACCESS_DENIED = 0xFFFF0001, /* permission check failed, in initilize context or + open session or invoke commnad */ + TEEC_ERROR_CANCEL = 0xFFFF0002, /* operation is already canceled */ + TEEC_ERROR_ACCESS_CONFLICT = 0xFFFF0003, /* confilct occurs in concurrent access to data, + error occurs in file operaions generally */ + TEEC_ERROR_EXCESS_DATA = 0xFFFF0004, /* exceed max data to be handled by system */ + TEEC_ERROR_BAD_FORMAT = 0xFFFF0005, /* data format is invalid, Trusted Application cannot + handle it */ + TEEC_ERROR_BAD_PARAMETERS = 0xFFFF0006, /* invalid parameters */ + TEEC_ERROR_BAD_STATE = 0xFFFF0007, /* operation failed in current state, when try to access + storage without initilize storage service */ + TEEC_ERROR_ITEM_NOT_FOUND = 0xFFFF0008, /* cannot find target item */ + TEEC_ERROR_NOT_IMPLEMENTED = 0xFFFF0009, /* request operation is not implemented */ + TEEC_ERROR_NOT_SUPPORTED = 0xFFFF000A, /* request operation is not supported */ + TEEC_ERROR_NO_DATA = 0xFFFF000B, /* no data present for current operation */ + TEEC_ERROR_OUT_OF_MEMORY = 0xFFFF000C, /* system resource if out of use */ + TEEC_ERROR_BUSY = 0xFFFF000D, /* system is too busy to handle current operation */ + TEEC_ERROR_COMMUNICATION = 0xFFFF000E, /* error occurs when client try to communicate + with Trusted Application */ + TEEC_ERROR_SECURITY = 0xFFFF000F, /* security error occurs */ + TEEC_ERROR_SHORT_BUFFER = 0xFFFF0010, /* out buffer is not enough for current request */ + TEEC_ERROR_MAC_INVALID = 0xFFFF3071, /* MAC value check failed */ + TEEC_ERROR_TARGET_DEAD = 0xFFFF3024, /* Trusted Application is crashed */ + TEEC_FAIL = 0xFFFF5002, /* common error */ + TEEC_ERROR_EXTERNAL_CANCEL = 0xFFFF0011, /* used by adapt only, event caused User Interface operation aborted */ + TEEC_ERROR_OVERFLOW = 0xFFFF300F, /* used by adapt only */ + TEEC_ERROR_STORAGE_NO_SPACE = 0xFFFF3041, /* used by adapt only */ + TEEC_ERROR_SIGNATURE_INVALID = 0xFFFF3072, /* used by adapt only */ + 
TEEC_ERROR_TIME_NOT_SET = 0xFFFF5000, /* used by adapt only */ + TEEC_ERROR_TIME_NEEDS_RESET = 0xFFFF5001, /* used by adapt only */ + TEEC_ERROR_IPC_OVERFLOW = 0xFFFF9114 /* ipc overflow */ +}; + +enum TEEC_ReturnCodeOrigin { + TEEC_ORIGIN_API = 0x1, /* error occurs in handling client API */ + TEEC_ORIGIN_COMMS = 0x2, /* error occurs in communicating between REE and TEE */ + TEEC_ORIGIN_TEE = 0x3, /* error occurs in TEE */ + TEEC_ORIGIN_TRUSTED_APP = 0x4, /* error occurs in Trusted Application */ +}; + +enum TEEC_SharedMemCtl { + TEEC_MEM_INPUT = 0x1, /* input type of memroy */ + TEEC_MEM_OUTPUT = 0x2, /* output type of memory */ + TEEC_MEM_INOUT = 0x3, /* memory is used as both input and output */ + TEEC_MEM_SHARED_INOUT = 0x4, /* no copy shared memory */ +}; + +enum TEEC_ParamType { + TEEC_NONE = 0x0, /* unused parameter */ + TEEC_VALUE_INPUT = 0x01, /* input type of value, refer TEEC_Value */ + TEEC_VALUE_OUTPUT = 0x02, /* output type of value, refer TEEC_Value */ + TEEC_VALUE_INOUT = 0x03, /* value is used as both input and output, refer TEEC_Value */ + TEEC_MEMREF_TEMP_INPUT = 0x05, /* input type of temp memory reference, refer TEEC_TempMemoryReference */ + TEEC_MEMREF_TEMP_OUTPUT = 0x06, /* output type of temp memory reference, refer TEEC_TempMemoryReference */ + TEEC_MEMREF_TEMP_INOUT = 0x07, /* temp memory reference used as both input and output, + refer TEEC_TempMemoryReference */ + TEEC_ION_INPUT = 0x08, /* input type of icon memory reference, refer TEEC_IonReference */ + TEEC_ION_SGLIST_INPUT = 0x09, /* input type of ion memory block reference, refer TEEC_IonSglistReference */ + TEEC_MEMREF_SHARED_INOUT = 0x0a, /* no copy mem */ + TEEC_MEMREF_WHOLE = 0xc, /* use whole memory block, refer TEEC_RegisteredMemoryReference */ + TEEC_MEMREF_PARTIAL_INPUT = 0xd, /* input type of memory reference, refer TEEC_RegisteredMemoryReference */ + TEEC_MEMREF_PARTIAL_OUTPUT = 0xe, /* output type of memory reference, refer TEEC_RegisteredMemoryReference */ + 
TEEC_MEMREF_PARTIAL_INOUT = 0xf /* memory reference used as both input and output, + refer TEEC_RegisteredMemoryReference */ +}; + +/**************************************************** + * Session Login Methods + ****************************************************/ +enum TEEC_LoginMethod { + TEEC_LOGIN_PUBLIC = 0x0, /* no Login data is provided */ + TEEC_LOGIN_USER, /* Login data about the user running the + Client Application process is provided */ + TEEC_LOGIN_GROUP, /* Login data about the group running + the Client Application process is provided */ + TEEC_LOGIN_APPLICATION = 0x4, /* Login data about the running Client + Application itself is provided */ + TEEC_LOGIN_USER_APPLICATION = 0x5, /* Login data about the user running the + Client Application and about the + Client Application itself is provided */ + TEEC_LOGIN_GROUP_APPLICATION = 0x6, /* Login data about the group running + the Client Application and about the + Client Application itself is provided */ + TEEC_LOGIN_IDENTIFY = 0x7, /* Login data is provided by REE system */ +}; +enum TST_CMD_ID { + TST_CMD_ID_01 = 1, + TST_CMD_ID_02, + TST_CMD_ID_03, + TST_CMD_ID_04, + TST_CMD_ID_05 +}; + +#define TEEC_PARAM_NUM 4 /* teec param max number */ +#endif + diff --git a/trustzone-awared-vm/Host/vtzb_proxy/include/tee_client_list.h b/trustzone-awared-vm/Host/vtzb_proxy/include/tee_client_list.h new file mode 100644 index 0000000000000000000000000000000000000000..d14656c051631001ed5e82140489ae7614d36ee3 --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/include/tee_client_list.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2013-2021. All rights reserved. + * iTrustee licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef TEE_CLIENT_LIST_H +#define TEE_CLIENT_LIST_H + +struct ListNode { + struct ListNode *next; /* point to next node */ + struct ListNode *prev; /* point to prev node */ +}; + +#define OFFSET_OF(type, member) (unsigned long)(&(((type *)0)->member)) +#define CONTAINER_OF(pos, type, member) (type *)(((char *)(pos)) - OFFSET_OF(type, member)) + +#define LIST_DECLARE(name) \ + struct ListNode name = { \ + .next = &name, \ + .prev = &name, \ + } + +static inline void ListInit(struct ListNode *list) +{ + list->next = list; + list->prev = list; +} + +#define LIST_HEAD(list) ((list)->next) +#define LIST_TAIL(list) ((list)->prev) +#define LIST_EMPTY(list) ((list) == (list)->next) + +static inline void ListInsertHead(struct ListNode *list, struct ListNode *entry) +{ + list->next->prev = entry; + entry->next = list->next; + entry->prev = list; + list->next = entry; +} + +static inline void ListInsertTail(struct ListNode *list, struct ListNode *entry) +{ + entry->next = list; + entry->prev = list->prev; + list->prev->next = entry; + list->prev = entry; +} + +static inline void ListRemoveEntry(struct ListNode *entry) +{ + entry->prev->next = entry->next; + entry->next->prev = entry->prev; +} + +static inline struct ListNode *ListRemoveHead(struct ListNode *list) +{ + struct ListNode *entry = NULL; + if (!LIST_EMPTY(list)) { + entry = list->next; + ListRemoveEntry(entry); + } + return entry; +} + +static inline struct ListNode *ListRemoveTail(struct ListNode *list) +{ + struct ListNode *entry = NULL; + if (!LIST_EMPTY(list)) { + entry = list->prev; + ListRemoveEntry(entry); + } + return entry; +} + +#define LIST_ENTRY(ptr, type, 
member) \ + ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member))) + +#define LIST_FOR_EACH(pos, list) \ + for (pos = (list)->next; pos != (list); pos = pos->next) + +#define LIST_FOR_EACH_SAFE(pos, n, list) \ + for ((pos) = (list)->next, (n) = (pos)->next; (pos) != (list); (pos) = (n), (n) = (pos)->next) + +#define LIST_FOR_EACH_ENTRY(pos, list, member) \ + for (pos = LIST_ENTRY((list)->next, typeof(*pos), member); &pos->member != (list); \ + pos = LIST_ENTRY(pos->member.next, typeof(*pos), member)) + +#define LIST_FOR_EACH_ENTRY_SAFE(pos, n, list, member) \ + for (pos = LIST_ENTRY((list)->next, typeof(*pos), member), n = LIST_ENTRY(pos->member.next, typeof(*pos), \ + member); &pos->member != (list); pos = n, n = LIST_ENTRY(n->member.next, typeof(*n), member)) + +#endif diff --git a/trustzone-awared-vm/Host/vtzb_proxy/include/tee_client_type.h b/trustzone-awared-vm/Host/vtzb_proxy/include/tee_client_type.h new file mode 100644 index 0000000000000000000000000000000000000000..84e97974215cf79cfefbc9939cba3700e590f0b3 --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/include/tee_client_type.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2013-2022. All rights reserved. + * Licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef _TEE_CLIENT_TYPE_H_ +#define _TEE_CLIENT_TYPE_H_ + +#include +#include +#include +#include +#include +#include "tee_client_list.h" +#include "tee_client_constants.h" + +typedef enum TEEC_ReturnCode TEEC_Result; + +typedef struct { + uint32_t timeLow; + uint16_t timeMid; + uint16_t timeHiAndVersion; + uint8_t clockSeqAndNode[8]; +} TEEC_UUID; + +typedef struct { + int32_t fd; + uint8_t *ta_path; + struct ListNode session_list; + struct ListNode shrd_mem_list; + union { + struct { + void *buffer; + sem_t buffer_barrier; + } share_buffer; + uint64_t imp; /* for adapt */ + }; +} TEEC_Context; + +typedef struct { + uint32_t session_id; + TEEC_UUID service_id; + uint32_t ops_cnt; + union { + struct ListNode head; + uint64_t imp; /* for adapt */ + }; + TEEC_Context *context; +} TEEC_Session; + +typedef struct { + void *buffer; + uint32_t size; + uint32_t flags; /* reference to TEEC_SharedMemCtl */ + uint32_t ops_cnt; + bool is_allocated; /* identify whether the memory is registered or allocated */ + union { + struct ListNode head; + void *imp; /* for adapt, imp is not used by system CA, only for vendor CA */ + }; + TEEC_Context *context; +} TEEC_SharedMemory; + +/* + * the corresponding param types are + * TEEC_MEMREF_TEMP_INPUT/TEEC_MEMREF_TEMP_OUTPUT/TEEC_MEMREF_TEMP_INOUT + */ +typedef struct { + void *buffer; + uint32_t size; +} TEEC_TempMemoryReference; + +/* + * the corresponding param types are + * TEEC_MEMREF_WHOLE/TEEC_MEMREF_PARTIAL_INPUT + * TEEC_MEMREF_PARTIAL_OUTPUT/TEEC_MEMREF_PARTIAL_INOUT + */ +typedef struct { + TEEC_SharedMemory *parent; + uint32_t size; + uint32_t offset; +} TEEC_RegisteredMemoryReference; + +/* + * the corresponding param types are + * TEEC_VALUE_INPUT/TEEC_VALUE_OUTPUT/TEEC_VALUE_INOUT + */ +typedef struct { + uint32_t a; + uint32_t b; +} TEEC_Value; + +typedef struct { + int ion_share_fd; + uint32_t ion_size; +} TEEC_IonReference; + +typedef union { + TEEC_TempMemoryReference tmpref; + TEEC_RegisteredMemoryReference 
memref; + TEEC_Value value; + TEEC_IonReference ionref; +} TEEC_Parameter; + +typedef struct { + uint32_t event_type; /* Tui event type */ + uint32_t value; /* return value, is keycode if tui event is getKeycode */ + uint32_t notch; /* notch size of the screen for tui */ + uint32_t width; /* width of foldable screen */ + uint32_t height; /* height of foldable screen */ + uint32_t fold_state; /* state of foldable screen */ + uint32_t display_state; /* one state of folded state */ + uint32_t phy_width; /* real width of the mobile */ + uint32_t phy_height; /* real height of the mobile */ +} TEEC_TUI_Parameter; + +typedef struct { + uint32_t started; /* 0 means cancel this operation, others mean to perform this operation */ + uint32_t paramTypes; /* use TEEC_PARAM_TYPES to construct this value */ + TEEC_Parameter params[TEEC_PARAM_NUM]; + TEEC_Session *session; + bool cancel_flag; +} TEEC_Operation; + +#endif + diff --git a/trustzone-awared-vm/Host/vtzb_proxy/include/tee_sys_log.h b/trustzone-awared-vm/Host/vtzb_proxy/include/tee_sys_log.h new file mode 100644 index 0000000000000000000000000000000000000000..1fa0c3deaa85c4f994bb149a742945a59f2684dc --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/include/tee_sys_log.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2021. All rights reserved. + * iTrustee licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef TEEC_SYS_LOG_H +#define TEEC_SYS_LOG_H + +#include + +// #define TEE_LOG_MASK TZ_LOG_ERROR +#define TEE_LOG_MASK TZ_LOG_VERBOSE + +#define TZ_LOG_VERBOSE 0 +#define TZ_LOG_INFO 1 +#define TZ_LOG_WARN 2 +#define TZ_LOG_DEBUG 3 +#define TZ_LOG_ERROR 4 + +#define tlogv(...) \ + do { \ + if (TZ_LOG_VERBOSE == TEE_LOG_MASK) \ + syslog(LOG_USER | LOG_NOTICE, __VA_ARGS__); \ + } while (0) + +#define tlogd(...) \ + do { \ + if (TZ_LOG_DEBUG >= TEE_LOG_MASK) \ + syslog(LOG_USER | LOG_DEBUG, __VA_ARGS__); \ + } while (0) + +#define tlogi(...) \ + do { \ + if (TZ_LOG_INFO >= TEE_LOG_MASK) \ + syslog(LOG_USER | LOG_INFO, __VA_ARGS__); \ + } while (0) + +#define tlogw(...) \ + do { \ + if (TZ_LOG_WARN >= TEE_LOG_MASK) \ + syslog(LOG_USER | LOG_WARNING, __VA_ARGS__); \ + } while (0) + +#define tloge(...) \ + do { \ + if (TZ_LOG_ERROR >= TEE_LOG_MASK) \ + syslog(LOG_USER | LOG_ERR, __VA_ARGS__); \ + } while (0) + +#endif + diff --git a/trustzone-awared-vm/Host/vtzb_proxy/process_data.c b/trustzone-awared-vm/Host/vtzb_proxy/process_data.c new file mode 100644 index 0000000000000000000000000000000000000000..734edd33112a70ac22e5625233063b3ad07a2289 --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/process_data.c @@ -0,0 +1,45 @@ +#include "process_data.h" +#include +#include +#include "debug.h" +#include "tee_sys_log.h" +#include "securec.h" +#include "vm.h" + +static void *malloc_copy(void *buf, int buf_len , int size, int *poffset) +{ + void *res; + int offset = *poffset; + if (buf_len < offset + size || size < 4) { + memmove_s(buf, buf_len, buf + offset, buf_len - offset); + *poffset = buf_len - offset; + return NULL; + } + res = malloc(size + sizeof(uint64_t)); + if (!res) { + tloge("failed malloc\n"); + return NULL; + } + if (memcpy_s(res + sizeof(uint64_t), size, buf + offset, size)) { + tloge("memcpy_s err\n"); + } + *poffset = offset + size; + return res; +} + +void *get_packet_item(void *buf, int buf_len, int *poffset) +{ + uint32_t packet_size; + void 
*res = NULL; + if (buf_len == *poffset) { + *poffset = 0; + return NULL; + } + + if (buf_len < *poffset + (int)sizeof(int)) { + return malloc_copy(buf, buf_len, buf_len - *poffset, poffset); + } + packet_size = *(uint32_t *)(buf + *poffset); + res = malloc_copy(buf, buf_len, packet_size, poffset); + return res; +} \ No newline at end of file diff --git a/trustzone-awared-vm/Host/vtzb_proxy/process_data.h b/trustzone-awared-vm/Host/vtzb_proxy/process_data.h new file mode 100644 index 0000000000000000000000000000000000000000..119eac263699494f0eff7b23a9fd3dd5dda390ab --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/process_data.h @@ -0,0 +1,17 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2012-2023. All rights reserved. + * Licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef PROCESS_DATA_H +#define PROCESS_DATA_H + +void *get_packet_item(void *buf, int buf_len, int *poffset); +#endif \ No newline at end of file diff --git a/trustzone-awared-vm/Host/vtzb_proxy/serial_port.c b/trustzone-awared-vm/Host/vtzb_proxy/serial_port.c new file mode 100644 index 0000000000000000000000000000000000000000..62cf283711efa4197c20934dbee6f685d3fc9f93 --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/serial_port.c @@ -0,0 +1,207 @@ +#include "serial_port.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "securec.h" +#include "tc_ns_client.h" +#include "tee_client_list.h" +#include "tee_client_log.h" +#include "tee_sys_log.h" +#include "comm_structs.h" +#include "vm.h" +#include "debug.h" +#include "virt.h" + +struct serial_port_list g_serial_list; +struct pollfd g_pollfd[SERIAL_PORT_NUM]; +int g_pollfd_len = 0; +struct timeval g_last_time, g_cur_time; +struct serial_port_file *g_serial_array[SERIAL_PORT_NUM]; + +int serial_port_list_init() +{ + int i; + struct serial_port_file *serial_port; + gettimeofday(&g_last_time, NULL); + gettimeofday(&g_cur_time, NULL); + pthread_mutex_init(&g_serial_list.lock, NULL); + ListInit(&g_serial_list.head); + for ( i = 0; i < SERIAL_PORT_NUM; i++) + { + serial_port = (struct serial_port_file *)malloc(sizeof(struct serial_port_file)); + if (!serial_port) { + tloge("Failed to allocate memory for serial_port\n"); + goto ERR; + } + memset_s(serial_port, sizeof(struct serial_port_file), 0, sizeof(struct serial_port_file)); + sprintf(serial_port->path, "%s%d", VTZB_CHAR_DEV, i); + serial_port->opened = false; + serial_port->offset = 0; + serial_port->rd_buf = (char *)malloc(BUF_LEN_MAX_RD); + serial_port->vm_file = NULL; + if (!serial_port->rd_buf) { + tloge("Failed to allocate memory for rd_buf\n"); + free(serial_port); + goto ERR; + } + pthread_mutex_init(&serial_port->lock, NULL); + ListInsertTail(&g_serial_list.head, &serial_port->head); + } + + 
return 0; +ERR: + serial_port_list_destroy(); + return -ENOMEM; +} + +void serial_port_list_destroy() +{ + struct serial_port_file *serial_port = NULL; + struct serial_port_file *tmp = NULL; + (void)pthread_mutex_lock(&g_serial_list.lock); + LIST_FOR_EACH_ENTRY_SAFE(serial_port, tmp, &g_serial_list.head, head) { + if (serial_port->rd_buf) { + free(serial_port->rd_buf); + serial_port->rd_buf = NULL; + } + if (serial_port->opened) { + close(serial_port->sock); + } + ListRemoveEntry(&serial_port->head); + (void)pthread_mutex_destroy(&serial_port->lock); + free(serial_port); + } + (void)pthread_mutex_unlock(&g_serial_list.lock); + (void)pthread_mutex_destroy(&g_serial_list.lock); +} + +int send_to_vm(struct serial_port_file *serial_port, void *packet_rsp, size_t size_rsp) +{ + int ret = 0; + if (!serial_port || serial_port->sock <= 0 || !packet_rsp) + return -1; + pthread_mutex_lock(&serial_port->lock); + ret = write(serial_port->sock, packet_rsp, size_rsp); + pthread_mutex_unlock(&serial_port->lock); + return ret; +} + +static int connect_domsock_chardev(char *dev_path, int *sock) +{ + int ret; + ret = socket(AF_UNIX, SOCK_STREAM, 0); + if (ret == -1) { + tloge("execute socket() failed \n"); + return -1; + } + + *sock = ret; + + struct sockaddr_un sock_addr; + sock_addr.sun_family = AF_UNIX; + if (memcpy_s(&sock_addr.sun_path, sizeof(sock_addr.sun_path), dev_path, + sizeof(sock_addr.sun_path))) { + tloge("memcpy_s err\n"); + } + ret = connect(*sock, (struct sockaddr *)&sock_addr, sizeof(sock_addr)); + if (ret < 0) { + tloge("connect domain socket %s failed \n", dev_path); + } + return ret; +} + +static void do_check_stat_serial_port() +{ + int ret; + int index; + struct serial_port_file *serial_port; + (void)pthread_mutex_lock(&g_serial_list.lock); + LIST_FOR_EACH_ENTRY(serial_port, &g_serial_list.head, head){ + if (serial_port->opened == false) { + ret = access(serial_port->path, R_OK | W_OK); + if (ret == 0) { + ret = connect_domsock_chardev(serial_port->path, 
&(serial_port->sock)); + if (ret < 0) { + tloge("connect_domsock_chardev(%s) failed, ret = %d \n", serial_port->path, ret); + } else { + serial_port->opened = true; + serial_port->offset = 0; + g_serial_array[g_pollfd_len] = serial_port; + g_pollfd[g_pollfd_len].fd = serial_port->sock; + g_pollfd[g_pollfd_len].events = POLLIN; + g_pollfd_len++; + } + } + } else { + ret = access(serial_port->path, R_OK | W_OK); + if (ret) { + close(serial_port->sock); + for (index = 0; index < g_pollfd_len; index++) { + if (g_pollfd[index].fd == serial_port->sock) { + break; + } + } + serial_port->sock = 0; + g_serial_array[index] = g_serial_array[g_pollfd_len - 1]; + g_pollfd[index] = g_pollfd[g_pollfd_len - 1]; + g_pollfd_len--; + serial_port->opened = false; + if (serial_port->vm_file) { + destroy_vm_file(serial_port->vm_file); + } + serial_port->vm_file = NULL; + } + } + } + (void)pthread_mutex_unlock(&g_serial_list.lock); +} + +void check_stat_serial_port() +{ + gettimeofday(&g_cur_time, NULL); + if (g_cur_time.tv_sec - g_last_time.tv_sec > CHECK_TIME_SEC) { + do_check_stat_serial_port(); + gettimeofday(&g_last_time, NULL); + } +} + +static int clean_dirty_data() +{ + int ret = 0; + int i = 0; + struct timeval start, end; + void *tmp_buf; + (void)ret; + if (!g_pollfd_len) + return 0; + tmp_buf = malloc(BUF_LEN_MAX_RD); + if (!tmp_buf) + return -ENOMEM; + gettimeofday(&start, NULL); + gettimeofday(&end, NULL); + while(end.tv_sec - start.tv_sec < 1) { + ret = safepoll(g_pollfd, g_pollfd_len, 0); + for (i = 0; i < g_pollfd_len; i++) { + if (g_pollfd[i].revents & POLLIN) { + ret = read(g_pollfd[i].fd, tmp_buf, BUF_LEN_MAX_RD); + } + } + gettimeofday(&end, NULL); + } + free(tmp_buf); + return 0; +} + +int check_stat_serial_port_first() +{ + gettimeofday(&g_cur_time, NULL); + gettimeofday(&g_last_time, NULL); + do_check_stat_serial_port(); + return clean_dirty_data(); +} \ No newline at end of file diff --git a/trustzone-awared-vm/Host/vtzb_proxy/serial_port.h 
b/trustzone-awared-vm/Host/vtzb_proxy/serial_port.h new file mode 100644 index 0000000000000000000000000000000000000000..9a74d392c1e1457de9a560744b1032caf10fd39e --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/serial_port.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2012-2023. All rights reserved. + * Licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef __SERIAL_PORT_H__ +#define __SERIAL_PORT_H__ +#include +#include +#include +#include +#include "tee_client_list.h" +#include "vm.h" + +#define VTZB_CHAR_DEV "/tmp/vm_vtzb_sock" +#define SERIAL_PORT_NUM 15 +#define BUF_LEN_MAX_RD 1024 *512 +#define UNIX_PATH_MAX 108 +#define CHECK_TIME_SEC 2 + +struct serial_port_list { + pthread_mutex_t lock; + struct ListNode head; +}; + +struct serial_port_file { + pthread_mutex_t lock; + char path[UNIX_PATH_MAX]; + int sock; + bool opened; + int index; + struct ListNode head; + char *rd_buf; + int buf_size; + off_t offset; + struct vm_file *vm_file; +}; + +int serial_port_list_init(); +void serial_port_list_destroy(); +int send_to_vm(struct serial_port_file *serial_port, void *packet_rsp, size_t size_rsp); +void *get_rd_buf(int serial_port_fd); +void *get_serial_port_file(int serial_port_fd); +void check_stat_serial_port(); +int check_stat_serial_port_first(); +#endif \ No newline at end of file diff --git a/trustzone-awared-vm/Host/vtzb_proxy/thread_pool.c b/trustzone-awared-vm/Host/vtzb_proxy/thread_pool.c new file mode 100644 index 0000000000000000000000000000000000000000..9b399d4025e3e276b61e728f8da43c6f541db2f9 --- 
/dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/thread_pool.c @@ -0,0 +1,218 @@ +#include +#include +#include +#include +#include +#include +#include +#include "thread_pool.h" +#include "debug.h" +#include "vm.h" + +ThreadFuncArgs g_thd_args[THREAD_POOL_SIZE]; +TimeOut g_time_out[THREAD_POOL_SIZE]; + +/* Custom signal handler for killing zombie threads. */ +void signal_handler(int signum) { + (void)signum; + pthread_exit(NULL); +} + +/* Initialize the thread pool. */ +int thread_pool_init(ThreadPool *pool) +{ + pool->task_cnt = 0; + pool->busy_cnt = 0; + pool->front = pool->rear = 0; + pool->destroying = 0; + memset(pool->task_queue, 0, sizeof(Task) * TASK_QUEUE_SIZE); + memset(pool->kill_flag, 0, sizeof(bool) * THREAD_POOL_SIZE); + memset(pool->session_ids, 0, sizeof(unsigned int) * THREAD_POOL_SIZE); + pthread_create(&pool->admin_tid, NULL, admin_thread, pool); + pthread_mutex_init(&pool->task_mutex, NULL); + pthread_mutex_init(&pool->session_mutex, NULL); + pthread_mutex_init(&pool->time_mutex, NULL); + pthread_mutex_init(&pool->busy_mutex, NULL); + pthread_cond_init(&pool->queue_not_empty, NULL); + pthread_cond_init(&pool->queue_not_full, NULL); + for (int i = 0; i < THREAD_POOL_SIZE; i++) { + g_thd_args[i].index = i; + g_thd_args[i].pool = pool; + pthread_create(&pool->threads[i], NULL, thread_func, &g_thd_args[i]); + pthread_detach(pool->threads[i]); + } + return 0; +} + +/* Recreate a new thread to fill the gap in the thread pool after killing a thread. 
*/ +void replenish_thread_pool(ThreadPool *pool, pthread_t thd) +{ + for (int i = 0; i < THREAD_POOL_SIZE; i++) { + if (pthread_equal(pool->threads[i], thd)) { + g_thd_args[i].index = i; + g_thd_args[i].pool = pool; + pthread_create(&pool->threads[i], NULL, thread_func, &g_thd_args[i]); + pthread_detach(pool->threads[i]); + pool->kill_flag[i] = false; + break; + } + } +} + +/* Thread function */ +void *thread_func(void *arg) +{ + ThreadFuncArgs *thd_args = (ThreadFuncArgs *)arg; + ThreadPool *pool = thd_args->pool; + int index = thd_args->index; + if (signal(SIGUSR1, signal_handler) == SIG_ERR) { + return NULL; + } + + while (1) { + pthread_mutex_lock(&pool->task_mutex); + + /* Wait for the task queue to become non-empty. */ + while (pool->task_cnt == 0 && !pool->destroying) { + pthread_cond_wait(&pool->queue_not_empty, &pool->task_mutex); + } + + /* If the thread pool is being destroyed, exit the thread. */ + if (pool->destroying) { + pthread_mutex_unlock(&pool->task_mutex); + break; + } + + if (pool->kill_flag[index]) { + pthread_cond_signal(&pool->queue_not_empty); + pthread_mutex_unlock(&pool->task_mutex); + continue; + } + + /* Retrieve the task and execute it. 
*/ + Task task = pool->task_queue[pool->front]; + pool->front = (pool->front + 1) % TASK_QUEUE_SIZE; + pool->task_cnt--; + pthread_cond_broadcast(&pool->queue_not_full); + pthread_mutex_unlock(&pool->task_mutex); + + pthread_mutex_lock(&pool->busy_mutex); + pool->busy_cnt++; + if (pool->task_args[index]) + free(pool->task_args[index]); + pool->task_args[index] = task.arg; + pthread_mutex_unlock(&pool->busy_mutex); + + task.task_func(task.arg); + + pthread_mutex_lock(&pool->busy_mutex); + pool->busy_cnt--; + pool->task_args[index] = NULL; + pthread_mutex_unlock(&pool->busy_mutex); + } + + return NULL; +} + +void *admin_thread(void *arg) +{ + int i; + ThreadPool *pool = (ThreadPool *)arg; + struct timeval cur_time; + long time_sec = 0; + while (!pool->destroying) + { + sleep(DEFAULT_TIME_SEC); + gettimeofday(&cur_time, NULL); + time_sec = cur_time.tv_sec; + pthread_mutex_lock(&pool->time_mutex); + for (i = 0; i < THREAD_POOL_SIZE; i++) { + if (g_time_out[i].flag != 0 && (time_sec - g_time_out[i].start_time) > DEFAULT_TIME_SEC) { + kill_open_session_thd(g_time_out[i]); + g_time_out[i].flag = 0; + } + } + pthread_mutex_unlock(&pool->time_mutex); + } + return NULL; +} + +/* Submit the task to the thread pool. */ +void thread_pool_submit(ThreadPool *pool, void *(*task_func)(void *), void *arg) +{ + pthread_mutex_lock(&pool->task_mutex); + + /* Wait for the task queue to become non-full. */ + while (pool->task_cnt == TASK_QUEUE_SIZE && !pool->destroying) { + pthread_cond_wait(&pool->queue_not_full, &pool->task_mutex); + } + + /* If the thread pool is being destroyed, no longer accept new tasks. */ + if (pool->destroying) { + pthread_mutex_unlock(&pool->task_mutex); + return; + } + + /* Add the task to the queue. */ + pool->task_queue[pool->rear].task_func = task_func; + pool->task_queue[pool->rear].arg = arg; + pool->rear = (pool->rear + 1) % TASK_QUEUE_SIZE; + pool->task_cnt++; + /* Notify waiting threads of a new task. 
*/ + pthread_cond_signal(&pool->queue_not_empty); + + pthread_mutex_unlock(&pool->task_mutex); +} + +/* Destroy the thread pool. */ +void thread_pool_destroy(ThreadPool *pool) +{ + /* Stop accepting new tasks. */ + pthread_mutex_lock(&pool->task_mutex); + pool->destroying = 1; + pthread_mutex_unlock(&pool->task_mutex); + + pthread_cond_broadcast(&pool->queue_not_empty); + pthread_join(pool->admin_tid, NULL); + + pthread_mutex_destroy(&pool->task_mutex); + pthread_cond_destroy(&pool->queue_not_empty); +} + +bool check_if_thd_exist(pthread_t thd) +{ + int kill_rc = pthread_kill(thd, 0); + if(kill_rc != 0) + return false; + return true; +} + +void set_thread_session_id(ThreadPool *pool, pthread_t thd, unsigned int id) +{ + pthread_mutex_lock(&pool->session_mutex); + for (int i = 0; i < THREAD_POOL_SIZE; i++) { + if (pthread_equal(pool->threads[i], thd)) { + pool->session_ids[i] = id; + break; + } + } + pthread_mutex_unlock(&pool->session_mutex); +} + +unsigned int get_thread_session_id(ThreadPool *pool, pthread_t thd, unsigned int session_id) +{ + unsigned int id = 0; + pthread_mutex_lock(&pool->session_mutex); + for (int i = 0; i < THREAD_POOL_SIZE; i++) { + if (pthread_equal(pool->threads[i], thd)) { + if (pool->session_ids[i] == session_id) { + id = pool->session_ids[i]; + pool->kill_flag[i] = true; + pool->session_ids[i] = 0; + } + break; + } + } + pthread_mutex_unlock(&pool->session_mutex); + return id; +} \ No newline at end of file diff --git a/trustzone-awared-vm/Host/vtzb_proxy/thread_pool.h b/trustzone-awared-vm/Host/vtzb_proxy/thread_pool.h new file mode 100644 index 0000000000000000000000000000000000000000..18b7aebf62376b7826d428aa55b0c66791193da1 --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/thread_pool.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2012-2023. All rights reserved. + * Licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef __THREAD_POLL_H__ +#define __THREAD_POLL_H__ + +#include +#include +#include +#include +#include +#include + +#define THREAD_POOL_SIZE 196 +#define TASK_QUEUE_SIZE 32 +#define DEFAULT_TIME_SEC 30 + +/* task structure */ +typedef struct { + void *(*task_func)(void *);// Task function pointer + void *arg; // Task argument +} Task; + +/* the thread pool structure */ +typedef struct { + pthread_t admin_tid; + pthread_t threads[THREAD_POOL_SIZE]; // Thread array + unsigned int session_ids[THREAD_POOL_SIZE]; // Session ID of the ongoing command + bool kill_flag[THREAD_POOL_SIZE]; + void *task_args[THREAD_POOL_SIZE]; + Task task_queue[TASK_QUEUE_SIZE]; // Task queue + int task_cnt; // Number of tasks in the task queue + int busy_cnt; + int front; // Queue head index + int rear; // Queue tail index + int destroying; // Destruction flag + pthread_mutex_t task_mutex; // Mutex + pthread_mutex_t session_mutex; // Mutex + pthread_mutex_t time_mutex; // Mutex + pthread_mutex_t busy_mutex; // Mutex + pthread_cond_t queue_not_empty; // Condition variable + pthread_cond_t queue_not_full; // Condition variable +} ThreadPool; + +typedef struct { + ThreadPool *pool; + int index; +} ThreadFuncArgs; + +int thread_pool_init(ThreadPool *pool); +void thread_pool_destroy(ThreadPool *pool); +void *thread_func(void *arg); +void *admin_thread(void *arg); +void thread_pool_submit(ThreadPool *pool, void *(*task_func)(void *), void *arg); +void replenish_thread_pool(ThreadPool *pool, pthread_t thd); +void set_thread_session_id(ThreadPool *pool, pthread_t thd, unsigned int id); +unsigned int get_thread_session_id(ThreadPool *pool, pthread_t 
thd, unsigned int session_id); +#endif \ No newline at end of file diff --git a/trustzone-awared-vm/Host/vtzb_proxy/tlogcat.c b/trustzone-awared-vm/Host/vtzb_proxy/tlogcat.c new file mode 100644 index 0000000000000000000000000000000000000000..50fd311b50d553c2cac9235f8ea1e05a68fb66db --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/tlogcat.c @@ -0,0 +1,140 @@ +#include "tlogcat.h" +#include +#include +#include +#include +#include "securec.h" +#include "debug.h" +#include "serial_port.h" +#include "comm_structs.h" + +static char g_log_teeVersion[MAX_TEE_VERSION_LEN]; + +static void tlog_get_teever(struct_packet_cmd_get_ver *packet_cmd, + struct serial_port_file *serial_port) +{ + int ret = 0; + struct_packet_rsp_get_ver packet_rsp; + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.packet_size = sizeof(packet_rsp); + ret = ioctl(packet_cmd->ptzfd, TEELOGGER_GET_VERSION, g_log_teeVersion); + packet_rsp.ret = ret; + if (memcpy_s(packet_rsp.version_info, MAX_TEE_VERSION_LEN, + g_log_teeVersion, MAX_TEE_VERSION_LEN)) { + tloge("memcpy_s err \n"); + } + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) { + tloge("send to VM failed \n"); + } +} + +static void tlog_set_reader_cur( + struct_packet_cmd_set_reader_cur *packet_cmd, + struct serial_port_file *serial_port) +{ + int ret = 0; + struct_packet_rsp_set_reader_cur packet_rsp; + + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.packet_size = sizeof(packet_rsp); + ret = ioctl(packet_cmd->ptzfd, TEELOGGER_SET_READERPOS_CUR, 0); + packet_rsp.ret = ret; + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) { + tloge("send to VM failed \n"); + } +} + +static void tlog_set_stat(struct_packet_cmd_set_tlogcat_stat *packet_cmd, + struct serial_port_file *serial_port) +{ + int ret = 0; + struct_packet_rsp_set_tlogcat_stat packet_rsp; + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.packet_size = 
sizeof(packet_rsp); + ret = ioctl(packet_cmd->ptzfd, TEELOGGER_SET_TLOGCAT_STAT, 0); + packet_rsp.ret = ret; + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) { + tloge("send to VM failed \n"); + } +} + +static void tlog_get_stat(struct_packet_cmd_get_tlogcat_stat *packet_cmd, + struct serial_port_file *serial_port) +{ + int ret = 0; + struct_packet_rsp_get_tlogcat_stat packet_rsp; + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.packet_size = sizeof(packet_rsp); + ret = ioctl(packet_cmd->ptzfd, TEELOGGER_GET_TLOGCAT_STAT, 0); + packet_rsp.ret = ret; + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) { + tloge("send to VM failed \n"); + } +} + +static void tlog_get_log(struct_packet_cmd_get_log *packet_cmd, + struct serial_port_file *serial_port) +{ + int32_t result; + int32_t ret = 0; + struct timeval tv; + fd_set readset; + struct_packet_rsp_get_log packet_rsp; + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.packet_size = sizeof(packet_rsp); + while(1){ + do { + tv.tv_sec = 20; + tv.tv_usec = 0; + FD_ZERO(&readset); + FD_SET(packet_cmd->ptzfd, &readset); + tlogd("while select\n"); + result = select((packet_cmd->ptzfd + 1), &readset, NULL, NULL, &tv); + } while (result == -1 && errno == EINTR); + if (result <= 0) { + goto END; + } + ret = read(packet_cmd->ptzfd, packet_rsp.buffer, LOG_BUFFER_LEN); + if (ret == 0) + continue; +END: + packet_rsp.length = ret < 0 ? 
0 : ret; + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) { + tloge("send to VM failed \n"); + } + break; + } +} + +void tlog(uint32_t cmd, void *packet_cmd,struct serial_port_file *serial_port) +{ + switch (cmd) + { + case VTZ_GET_TEEOS_VER: + (void)tlog_get_teever((struct_packet_cmd_get_ver *)packet_cmd, + serial_port); + break; + case VTZ_SET_READER_CUR: + (void)tlog_set_reader_cur((struct_packet_cmd_set_reader_cur *)packet_cmd, + serial_port); + break; + case VTZ_SET_TLOGCAT_STAT: + (void)tlog_set_stat((struct_packet_cmd_set_tlogcat_stat *)packet_cmd, + serial_port); + break; + case VTZ_GET_TLOGCAT_STAT: + (void)tlog_get_stat((struct_packet_cmd_get_tlogcat_stat *)packet_cmd, + serial_port); + break; + case VTZ_GET_LOG: + (void)tlog_get_log((struct_packet_cmd_get_log *)packet_cmd, + serial_port); + default: + break; + } +} \ No newline at end of file diff --git a/trustzone-awared-vm/Host/vtzb_proxy/tlogcat.h b/trustzone-awared-vm/Host/vtzb_proxy/tlogcat.h new file mode 100644 index 0000000000000000000000000000000000000000..f031dc886a9a266df6bbbbe7b97be8b4e78fb75f --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/tlogcat.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2012-2023. All rights reserved. + * Licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef __TLOGCAT_H__ +#define __TLOGCAT_H__ +#include +#include "tc_ns_client.h" +#include "tee_sys_log.h" +#include "tee_client_list.h" +#include "serial_port.h" + +/* for tlog ioctl */ +/* LOG_BUFFER_LEN: The maximum transmission size for one serial communication is 2048 bytes. + * If the data size exceeds this limit, it may need to be sent in multiple segments. + * The receiving end might find it inconvenient to handle these segments individually. + */ +#define LOG_BUFFER_LEN 2000 +#define TEELOGGERIO 0xBE +#define GET_VERSION_BASE 5 +#define SET_READERPOS_CUR_BASE 6 +#define SET_TLOGCAT_STAT_BASE 7 +#define GET_TLOGCAT_STAT_BASE 8 +#define GET_TEE_INFO_BASE 9 +#define SET_VM_FLAG 10 +#define MAX_TEE_VERSION_LEN 256U +#define TEELOGGER_GET_VERSION \ + _IOR(TEELOGGERIO, GET_VERSION_BASE, char[MAX_TEE_VERSION_LEN]) +/* set the log reader pos to current pos */ +#define TEELOGGER_SET_READERPOS_CUR _IO(TEELOGGERIO, SET_READERPOS_CUR_BASE) +#define TEELOGGER_SET_TLOGCAT_STAT _IO(TEELOGGERIO, SET_TLOGCAT_STAT_BASE) +#define TEELOGGER_GET_TLOGCAT_STAT _IO(TEELOGGERIO, GET_TLOGCAT_STAT_BASE) +#define TEELOGGER_GET_TEE_INFO _IOR(TEELOGGERIO, GET_TEE_INFO_BASE, TC_NS_TEE_Info) +#define TEELOGGER_SET_VM_FLAG _IOR(TEELOGGERIO, SET_VM_FLAG, int) + +/* + * Structure related to log + */ +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; +} struct_packet_cmd_get_ver; + +#define VERSION_INFO_LEN 156U +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; + unsigned char version_info[MAX_TEE_VERSION_LEN]; +} struct_packet_rsp_get_ver; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; +} struct_packet_cmd_set_reader_cur; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_set_reader_cur; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; +} 
struct_packet_cmd_set_tlogcat_stat; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_set_tlogcat_stat; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; +} struct_packet_cmd_get_tlogcat_stat; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_get_tlogcat_stat; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; +} struct_packet_cmd_get_log; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; + int length; + char buffer[LOG_BUFFER_LEN]; +} struct_packet_rsp_get_log; + +void tlog(uint32_t cmd, void *packet_cmd, struct serial_port_file *serial_port); + +#endif \ No newline at end of file diff --git a/trustzone-awared-vm/Host/vtzb_proxy/virt.c b/trustzone-awared-vm/Host/vtzb_proxy/virt.c new file mode 100644 index 0000000000000000000000000000000000000000..1fc9c95f965416bac81e028fad769097b06fe08e --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/virt.c @@ -0,0 +1,88 @@ +#include "virt.h" + +int safepoll(struct pollfd *fds, nfds_t nfds, int timeout) +{ + int ret; + + do { + ret = poll(fds, nfds, timeout); + } while (ret == -1 && errno == EINTR); + + if (ret == -1) + ret = -errno; + + return ret; +} + +ssize_t safewrite(int fd, const void *buf, size_t count, bool eagain_ret) +{ + ssize_t ret; + size_t len; + int flags; + bool nonblock; + + nonblock = false; + flags = fcntl(fd, F_GETFL); + if (flags > 0 && flags & O_NONBLOCK) + nonblock = true; + + len = count; + while (len > 0) { + ret = write(fd, buf, len); + if (ret == -1) { + if (errno == EINTR) + continue; + + if (errno == EAGAIN) { + if (nonblock && eagain_ret) { + return -EAGAIN; + } else { + continue; + } + } + return -errno; + } else if (ret == 0) { + break; + } else { + buf += ret; + len -= ret; + } + } + return count - len; +} + +ssize_t saferead(int fd, void *buf, size_t count, bool 
eagain_ret) +{ + size_t ret, len; + int flags; + bool nonblock; + + nonblock = false; + flags = fcntl(fd, F_GETFL); + if (flags > 0 && flags & O_NONBLOCK) + nonblock = true; + + len = count; + while (len > 0) { + ret = read(fd, buf, len); + if ((int)ret == -1) { + if (errno == EINTR) + continue; + + if (errno == EAGAIN) { + if (nonblock && eagain_ret) { + return -EAGAIN; + } else { + continue; + } + } + return -errno; + } else if (ret == 0) { + break; + } else { + buf += ret; + len -= ret; + } + } + return count - len; +} \ No newline at end of file diff --git a/trustzone-awared-vm/Host/vtzb_proxy/virt.h b/trustzone-awared-vm/Host/vtzb_proxy/virt.h new file mode 100644 index 0000000000000000000000000000000000000000..386ee1a51b06bd739b3fba83638f58b77d50e2d2 --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/virt.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2012-2023. All rights reserved. + * Licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef VTZB_VIRT_H +#define VTZB_VIRT_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int safepoll(struct pollfd *fds, nfds_t nfds, int timeout); +ssize_t safewrite(int fd, const void *buf, size_t count, bool eagain_ret); +ssize_t saferead(int fd, void *buf, size_t count, bool eagain_ret); + +#endif // VTZB_VIRT_H \ No newline at end of file diff --git a/trustzone-awared-vm/Host/vtzb_proxy/vm.c b/trustzone-awared-vm/Host/vtzb_proxy/vm.c new file mode 100644 index 0000000000000000000000000000000000000000..41c0ab4dfc7fd72534860699b9a529bdb8558599 --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/vm.c @@ -0,0 +1,261 @@ +#include "errno.h" +#include "vm.h" +#include "thread_pool.h" +#include "comm_structs.h" +#include "serial_port.h" + +extern ThreadPool g_pool; +extern TimeOut g_time_out[THREAD_POOL_SIZE]; + +LIST_DECLARE(g_vm_list); +pthread_mutex_t g_mutex_vm = PTHREAD_MUTEX_INITIALIZER; + +void add_session_list(int ptzfd, struct vm_file *vm_fp, TC_NS_ClientContext *clicontext) +{ + struct fd_file *fd_p = NULL; + struct session *sessionp = NULL; + fd_p = find_fd_file(ptzfd, vm_fp); + if (!fd_p) + return; + sessionp = (struct session *)malloc(sizeof(struct session)); + if (!sessionp) + return; + sessionp->session_id = clicontext->session_id; + ListInit(&sessionp->head); + pthread_mutex_lock(&fd_p->session_lock); + ListInsertTail(&fd_p->session_head, &sessionp->head); + pthread_mutex_unlock(&fd_p->session_lock); +} + +void *Kill_useless_thread(void *args) +{ + pthread_t tid = (pthread_t)args; + int result = pthread_kill(tid, SIGUSR1); + if (result == 0) { + replenish_thread_pool(&g_pool, tid); + } + return NULL; +} + +static void try_kill_thread(struct session *sp) +{ + if (!sp) + return; + if (sp->thread_id != 0 && get_thread_session_id(&g_pool, sp->thread_id, sp->session_id)) { + thread_pool_submit(&g_pool, Kill_useless_thread, (void *)(sp->thread_id)); + } +} + +static void 
do_remove_session(unsigned int session_id, struct fd_file *fd_p) +{ + struct ListNode *ptr = NULL; + struct ListNode *n = NULL; + if (!fd_p) + return ; + pthread_mutex_lock(&fd_p->session_lock); + if (!LIST_EMPTY(&fd_p->session_head)) { + LIST_FOR_EACH_SAFE(ptr, n, &fd_p->session_head) { + struct session *sp = CONTAINER_OF(ptr, struct session, head); + if (sp->session_id == session_id) { + ListRemoveEntry(&(sp->head)); + try_kill_thread(sp); + free(sp); + } + } + } + pthread_mutex_unlock(&fd_p->session_lock); +} + +void remove_session(int ptzfd, int session_id, struct vm_file *vm_fp) +{ + struct fd_file *fd_p = NULL; + if (!vm_fp) + return; + fd_p = find_fd_file(ptzfd, vm_fp); + if (fd_p) { + do_remove_session(session_id, fd_p); + } +} + +struct fd_file *find_fd_file(int ptzfd, struct vm_file *vm_fp) +{ + struct ListNode *ptr = NULL; + struct fd_file *fd_p = NULL; + int bfind = 0; + if (!vm_fp) + return NULL; + pthread_mutex_lock(&vm_fp->fd_lock); + if (!LIST_EMPTY(&vm_fp->fds_head)) { + LIST_FOR_EACH(ptr, &vm_fp->fds_head) { + fd_p = CONTAINER_OF(ptr, struct fd_file, head); + if (fd_p->ptzfd == ptzfd) { + bfind = 1; + break; + } + } + } + pthread_mutex_unlock(&vm_fp->fd_lock); + if (bfind) + return fd_p; + return NULL; +} + +void add_fd_list(int fd, struct vm_file *vm_fp) +{ + struct fd_file *fd_p; + if (!vm_fp) + return; + fd_p = (struct fd_file *)malloc(sizeof(struct fd_file)); + if (!fd_p) + return; + fd_p->ptzfd = fd; + pthread_mutex_init(&fd_p->session_lock, NULL); + ListInit(&fd_p->session_head); + ListInit(&fd_p->head); + + pthread_mutex_lock(&vm_fp->fd_lock); + ListInsertTail(&vm_fp->fds_head, &fd_p->head); + pthread_mutex_unlock(&vm_fp->fd_lock); +} + +static void do_remove_fd(struct fd_file *fd_p) +{ + struct ListNode *ptr = NULL; + struct ListNode *n = NULL; + unsigned int session_id; + (void)session_id; + if (!fd_p) + return; + pthread_mutex_lock(&fd_p->session_lock); + if (!LIST_EMPTY(&fd_p->session_head)) { + LIST_FOR_EACH_SAFE(ptr, n, 
&fd_p->session_head) { + struct session *sp = CONTAINER_OF(ptr, struct session, head); + ListRemoveEntry(&(sp->head)); + try_kill_thread(sp); + free(sp); + } + } + pthread_mutex_unlock(&fd_p->session_lock); +} + +int remove_fd(int ptzfd, struct vm_file *vm_fp) +{ + if (!vm_fp) + return -EINVAL; + struct fd_file *fd_p = find_fd_file(ptzfd, vm_fp); + if (fd_p) { + pthread_mutex_lock(&vm_fp->fd_lock); + ListRemoveEntry(&fd_p->head); + pthread_mutex_unlock(&vm_fp->fd_lock); + do_remove_fd(fd_p); + free(fd_p); + return 0; + } + return -EINVAL; +} + +struct vm_file *create_vm_file(uint32_t vmid) +{ + bool isfind = false; + struct ListNode *ptr = NULL; + struct vm_file *tmp = NULL; + pthread_mutex_lock(&g_mutex_vm); + if (!LIST_EMPTY(&g_vm_list)) { + LIST_FOR_EACH(ptr, &g_vm_list) { + tmp = CONTAINER_OF(ptr, struct vm_file, head); + if (tmp->vmpid == vmid) { + isfind = true; + break; + } + } + } + + if (!isfind) { + tmp = (struct vm_file *)malloc(sizeof(struct vm_file)); + if (!tmp) { + tloge("Failed to allocate memory for vm_file\n"); + goto END; + } + pthread_mutex_init(&tmp->fd_lock, NULL); + pthread_mutex_init(&tmp->agents_lock, NULL); + pthread_mutex_init(&tmp->shrd_mem_lock, NULL); + ListInit(&tmp->head); + ListInit(&tmp->fds_head); + ListInit(&tmp->agents_head); + ListInit(&tmp->shrd_mem_head); + tmp->vmpid = vmid; + ListInsertTail(&g_vm_list, &tmp->head); + } +END: + pthread_mutex_unlock(&g_mutex_vm); + return tmp; +} + +int destroy_vm_file(struct vm_file *vm_file) +{ + int ret = 0; + struct ListNode *ptr = NULL; + struct ListNode *n = NULL; + struct fd_file *fd_p = NULL; + if (!vm_file) + return 0; + pthread_mutex_lock(&vm_file->fd_lock); + if (!LIST_EMPTY(&vm_file->fds_head)) { + LIST_FOR_EACH_SAFE(ptr, n, &vm_file->fds_head) { + fd_p = CONTAINER_OF(ptr, struct fd_file, head); + ListRemoveEntry(&fd_p->head); + do_remove_fd(fd_p); + free(fd_p); + } + } + pthread_mutex_unlock(&vm_file->fd_lock); + pthread_mutex_lock(&g_mutex_vm); + 
ListRemoveEntry(&(vm_file->head)); + free(vm_file); + pthread_mutex_unlock(&g_mutex_vm); + return ret; +} + +void kill_open_session_thd(TimeOut t_out) +{ + struct_packet_rsp_session packet_rsp; + pthread_t tid = t_out.tid; + packet_rsp.packet_size = sizeof(packet_rsp); + packet_rsp.seq_num = t_out.seq_num + 1; + packet_rsp.ret = -1; + thread_pool_submit(&g_pool, Kill_useless_thread, (void *)tid); + (void)send_to_vm(t_out.serial_port, &packet_rsp, sizeof(packet_rsp)); +} + +int set_start_time(pthread_t tid, int seq_num, + struct serial_port_file *serial_port) +{ + int i; + struct timeval cur_time; + gettimeofday(&cur_time, NULL); + pthread_mutex_lock(&g_pool.time_mutex); + for (i = 0; i < THREAD_POOL_SIZE; i++) { + if (g_time_out[i].flag == 0) { + g_time_out[i].flag = 1; + g_time_out[i].seq_num = seq_num; + g_time_out[i].start_time = cur_time.tv_sec; + g_time_out[i].tid = tid; + g_time_out[i].serial_port = serial_port; + break; + } + } + pthread_mutex_unlock(&g_pool.time_mutex); + return i; +} + +void remove_start_time(int i) +{ + if (i >= THREAD_POOL_SIZE) + return; + pthread_mutex_lock(&g_pool.time_mutex); + g_time_out[i].flag =0; + g_time_out[i].seq_num = 0; + g_time_out[i].start_time = 0; + g_time_out[i].tid = 0; + pthread_mutex_unlock(&g_pool.time_mutex); +} \ No newline at end of file diff --git a/trustzone-awared-vm/Host/vtzb_proxy/vm.h b/trustzone-awared-vm/Host/vtzb_proxy/vm.h new file mode 100644 index 0000000000000000000000000000000000000000..77914d0e4a01fa7de0903e0020b4adbe0ca4361a --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/vm.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2012-2023. All rights reserved. + * Licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef __VM_H__ +#define __VM_H__ +#include +#include +#include +#include "tc_ns_client.h" +#include "tee_sys_log.h" +#include "tee_client_list.h" +#include "debug.h" + +struct vm_file { + uint32_t vmpid; + int log_fd; + struct ListNode head; + pthread_mutex_t fd_lock; + struct ListNode fds_head; + pthread_mutex_t agents_lock; + struct ListNode agents_head; + pthread_mutex_t shrd_mem_lock; + struct ListNode shrd_mem_head; +}; + +struct fd_file { + int32_t ptzfd; + struct ListNode head; + pthread_mutex_t session_lock; + struct ListNode session_head; +}; + +struct session { + struct ListNode head; + unsigned int session_id; + pthread_t thread_id; +}; + +typedef struct { + int flag; + int start_time; + pthread_t tid; + int seq_num; + struct serial_port_file *serial_port; +} TimeOut; + +struct fd_file *find_fd_file(int ptzfd, struct vm_file *vm_fp); +int remove_fd(int ptzfd, struct vm_file *vm_fp); +void add_fd_list(int fd, struct vm_file *vm_fp); +void remove_session(int ptzfd, int session_id, struct vm_file *vm_fp); +void add_session_list(int ptzfd, struct vm_file *vm_fp, TC_NS_ClientContext *clicontext); +int destroy_vm_file(struct vm_file *vm_file); +struct vm_file *create_vm_file(uint32_t vmid); +void *Kill_useless_thread(void *args); +int set_start_time(pthread_t tid, int seq_num, struct serial_port_file *serial_port); +void remove_start_time(int i); +void kill_open_session_thd(TimeOut t_out); +#endif \ No newline at end of file diff --git a/trustzone-awared-vm/Host/vtzb_proxy/vtzb_proxy.c b/trustzone-awared-vm/Host/vtzb_proxy/vtzb_proxy.c new file mode 100644 index 
0000000000000000000000000000000000000000..212e22b380a1d7de4d85cd28fafecf283711642c --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/vtzb_proxy.c @@ -0,0 +1,702 @@ +#include "vtzb_proxy.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "securec.h" +#include "tc_ns_client.h" +#include "tee_client_list.h" +#include "comm_structs.h" +#include "virt.h" +#include "thread_pool.h" +#include "debug.h" +#include "agent.h" +#include "serial_port.h" +#include "process_data.h" +#include "tlogcat.h" + +ThreadPool g_pool = {0}; +extern int g_pollfd_len; +extern struct pollfd g_pollfd[SERIAL_PORT_NUM]; +extern struct serial_port_file *g_serial_array[SERIAL_PORT_NUM]; + +static void open_tzdriver(struct_packet_cmd_open_tzd *packet_cmd, + struct serial_port_file *serial_port) +{ + int fd = -1; + int ret; + struct_packet_rsp_open_tzd packet_rsp; + struct vm_file *vm_fp = NULL; + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.packet_size = sizeof(packet_rsp); + packet_rsp.vmid = packet_cmd->vmid; + if (packet_cmd->flag == TLOG_DEV_THD_FLAG) { + if (!serial_port->vm_file || !serial_port->vm_file->log_fd) { + fd = open(TC_LOGGER_DEV_NAME, O_RDONLY); + ret = ioctl(fd, TEELOGGER_SET_VM_FLAG, packet_cmd->vmid); + } else { + fd = serial_port->vm_file->log_fd; + } + } else if(packet_cmd->flag == TLOG_DEV_FLAG) { + fd = open(TC_LOGGER_DEV_NAME, O_RDONLY); + ret = ioctl(fd, TEELOGGER_SET_VM_FLAG, packet_cmd->vmid); + } else{ + switch (packet_cmd->flag) + { + case TC_NS_CLIENT_DEV_FLAG: + fd = open(TC_NS_CLIENT_DEV_NAME, O_RDWR); + break; + case TC_PRIVATE_DEV_FLAG: + fd = open(TC_TEECD_PRIVATE_DEV_NAME, O_RDWR); + break; + case TC_CVM_DEV_FLAG: + fd = open(TC_NS_CVM_DEV_NAME, O_RDWR); + break; + default: + break; + } + if (fd != -1) + ret = ioctl(fd, TC_NS_CLIENT_IOCTL_SET_VM_FLAG, packet_cmd->vmid); + } + + packet_rsp.ptzfd 
= fd; + if (fd < 0) { + tloge("open tee client dev failed, fd is %d\n", fd); + packet_rsp.ret = fd; + goto END; + } + packet_rsp.ret = 0; + +END: + if (fd > 0) { + if (!serial_port->vm_file) { + vm_fp = create_vm_file(packet_cmd->vmid); + serial_port->vm_file = vm_fp; + } else { + vm_fp = serial_port->vm_file; + } + add_fd_list(fd, vm_fp); + if (packet_cmd->flag == TLOG_DEV_THD_FLAG) { + vm_fp->log_fd = fd; + } + } + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp) && fd > 0) { + remove_fd(fd, vm_fp); + (void)close(fd); + } +} + +static void close_tzdriver(struct_packet_cmd_close_tzd *packet_cmd, + struct serial_port_file *serial_port) +{ + int ret = -1; + struct_packet_rsp_close_tzd packet_rsp; + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.packet_size = sizeof(packet_rsp); + packet_rsp.ret = 0; + (void)ret; + if (!serial_port->vm_file) + return; + + if (packet_cmd->ptzfd > 2) { + free_agent_buf(packet_cmd->ptzfd, serial_port->vm_file); + if (remove_fd(packet_cmd->ptzfd, serial_port->vm_file) == 0) + ret = close(packet_cmd->ptzfd); + } + + if (send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)) != sizeof(packet_rsp)) + tloge("close ptzfd send to VM failed \n"); +} + +static void log_in_NonHidl(struct_packet_cmd_login_non *packet_cmd, + struct serial_port_file *serial_port) +{ + int ret; + struct_packet_rsp_login packet_rsp; + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.packet_size = sizeof(packet_rsp); + ret = ioctl(packet_cmd->ptzfd, TC_NS_CLIENT_IOCTL_LOGIN, NULL); + packet_rsp.ret = ret; + + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) + tloge("send to VM failed \n"); +} + +static void log_in(struct_packet_cmd_login *packet_cmd, + struct serial_port_file *serial_port) +{ + int ret; + struct_packet_rsp_login packet_rsp; + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.packet_size = sizeof(packet_rsp); + ret = 
ioctl(packet_cmd->ptzfd, TC_NS_CLIENT_IOCTL_LOGIN, packet_cmd->cert_buffer); + packet_rsp.ret = ret; + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) { + tloge("send to VM failed \n"); + } +} + +static void get_tee_ver(struct_packet_cmd_getteever *packet_cmd, + struct serial_port_file *serial_port) +{ + int ret; + struct_packet_rsp_getteever packet_rsp; + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.packet_size = sizeof(packet_rsp); + ret = ioctl(packet_cmd->ptzfd, TC_NS_CLIENT_IOCTL_GET_TEE_VERSION, &packet_rsp.tee_ver); + packet_rsp.ret = ret; + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) + tloge("send to VM failed \n"); +} + +static void get_tee_info(struct_packet_cmd_getteeinfo *packet_cmd, + struct serial_port_file *serial_port) +{ + int ret; + struct_packet_rsp_getteeinfo packet_rsp; + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.packet_size = sizeof(packet_rsp); + if (packet_cmd->istlog) { + ret = ioctl(packet_cmd->ptzfd, TEELOGGER_GET_TEE_INFO, &packet_rsp.info); + } else{ + ret = ioctl(packet_cmd->ptzfd, TC_NS_CLIENT_IOCTL_GET_TEE_INFO, &packet_rsp.info); + } + packet_rsp.ret = ret; + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) + tloge("send to VM failed \n"); +} + +static void sync_sys_time(struct_packet_cmd_synctime *packet_cmd, + struct serial_port_file *serial_port) +{ + int ret; + struct_packet_rsp_synctime packet_rsp; + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.packet_size = sizeof(packet_rsp); + ret = ioctl(packet_cmd->ptzfd, TC_NS_CLIENT_IOCTL_SYC_SYS_TIME, &packet_cmd->tcNsTime); + packet_rsp.ret = ret; + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) + tloge("send to VM failed \n"); +} + +static void open_session(struct_packet_cmd_session *packet_cmd, + struct serial_port_file *serial_port) +{ + int 
ret; + int index; + struct_packet_rsp_session packet_rsp; + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.packet_size = sizeof(packet_rsp); + index = set_start_time(pthread_self(), packet_cmd->seq_num, serial_port); + ret = ioctl(packet_cmd->ptzfd, TC_NS_CLIENT_IOCTL_SES_OPEN_REQ, &packet_cmd->cliContext); + remove_start_time(index); + packet_rsp.ret = ret; + packet_rsp.cliContext = packet_cmd->cliContext; + if (ret == 0) + add_session_list(packet_cmd->ptzfd, serial_port->vm_file, &packet_rsp.cliContext); + + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) + tloge("send to VM failed \n"); +} + +static void close_session(struct_packet_cmd_session *packet_cmd, + struct serial_port_file *serial_port) +{ + int ret; + struct_packet_rsp_general packet_rsp; + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.packet_size = sizeof(packet_rsp); + if (!serial_port->vm_file) + return; + ret = ioctl(packet_cmd->ptzfd, TC_NS_CLIENT_IOCTL_SES_CLOSE_REQ, &packet_cmd->cliContext); + packet_rsp.ret = ret; + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) + tloge("send to VM failed \n"); + remove_session(packet_cmd->ptzfd, packet_cmd->cliContext.session_id, serial_port->vm_file); +} + +static int process_address(struct_packet_cmd_send_cmd *packet_cmd, + ClientParam params[], struct vm_file *vm_fp) +{ + int index; + int icount = 0; + int ret = 0; + uint32_t paramTypes[TEEC_PARAM_NUM]; + uint64_t *vm_hvas = (uint64_t *)packet_cmd->cliContext.file_buffer; + uint32_t offset = sizeof(struct_packet_cmd_send_cmd); + for (index = 0; index < TEEC_PARAM_NUM; index++) { + paramTypes[index] = + TEEC_PARAM_TYPE_GET(packet_cmd->cliContext.paramTypes, index); + if (IS_PARTIAL_MEM(paramTypes[index])) { + void *vm_buffer = (void *)packet_cmd->addrs[index]; + bool b_found = false; + struct ListNode *ptr = NULL; + + params[index].memref.buf_size = 
packet_cmd->cliContext.params[index].memref.size_addr; + packet_cmd->cliContext.params[index].memref.size_addr = + (unsigned int)((uintptr_t)¶ms[index].memref.buf_size); + packet_cmd->cliContext.params[index].memref.size_h_addr = + (unsigned int)((uint64_t)¶ms[index].memref.buf_size >> H_OFFSET); + + pthread_mutex_lock(&vm_fp->shrd_mem_lock); + if (!LIST_EMPTY(&vm_fp->shrd_mem_head)) { + LIST_FOR_EACH(ptr, &vm_fp->shrd_mem_head) { + struct_shrd_mem *shrd_mem = + CONTAINER_OF(ptr, struct_shrd_mem, node); + if (shrd_mem->vm_buffer == vm_buffer) { + vm_hvas[index] = packet_cmd->cliContext.params[index].memref.buffer + | (uint64_t)packet_cmd->cliContext.params[index].memref.buffer_h_addr << H_OFFSET; + /* Switch to the user address corresponding to the mmap space on the host. */ + packet_cmd->cliContext.params[index].memref.buffer = + (unsigned int)(uintptr_t)shrd_mem->buffer; + packet_cmd->cliContext.params[index].memref.buffer_h_addr = + ((unsigned long long)(uintptr_t)shrd_mem->buffer) >> H_OFFSET; + icount++; + b_found = true; + break; + } + } + } + pthread_mutex_unlock(&vm_fp->shrd_mem_lock); + if (b_found == false) { + tloge("can't find mmap buffer %p \n", vm_buffer); + ret = -1; + return ret; + } + } else if (IS_TEMP_MEM(paramTypes[index])) { + params[index].memref.buf_size = packet_cmd->cliContext.params[index].memref.size_addr; + packet_cmd->cliContext.params[index].memref.size_addr = + (unsigned int)((uintptr_t)¶ms[index].memref.buf_size); + packet_cmd->cliContext.params[index].memref.size_h_addr = + (unsigned int)((uint64_t)¶ms[index].memref.buf_size >> H_OFFSET); + } else if (IS_VALUE_MEM(paramTypes[index])) { + params[index].value.val_a = packet_cmd->cliContext.params[index].value.a_addr; + params[index].value.val_b = packet_cmd->cliContext.params[index].value.b_addr; + + packet_cmd->cliContext.params[index].value.a_addr = + (unsigned int)(uintptr_t)¶ms[index].value.val_a; + packet_cmd->cliContext.params[index].value.a_h_addr = + (unsigned 
int)((uint64_t)¶ms[index].value.val_a >> H_OFFSET); + packet_cmd->cliContext.params[index].value.b_addr = + (unsigned int)(uintptr_t)¶ms[index].value.val_b; + packet_cmd->cliContext.params[index].value.b_h_addr = + (unsigned int)((uint64_t)¶ms[index].value.val_b >> H_OFFSET); + } else if(IS_SHARED_MEM(paramTypes[index])) { + uint32_t share_mem_size = packet_cmd->cliContext.params[index].memref.size_addr; + struct_page_block *page_block = (struct_page_block *)((char *)packet_cmd + offset); + uint32_t block_buf_size = packet_cmd->block_size[index]; + uint32_t tmp_buf_size = sizeof(struct_page_block) + packet_cmd->block_size[index]; + params[index].share.buf_size = tmp_buf_size; + offset += packet_cmd->block_size[index]; + void *tmp_buf = malloc(tmp_buf_size); + if (!tmp_buf) { + tloge("malloc failed \n"); + return -ENOMEM; + } + ((struct_page_block *)tmp_buf)->share.shared_mem_size = share_mem_size; + ((struct_page_block *)tmp_buf)->share.vm_page_size = packet_cmd->vm_page_size; + struct_page_block *block_buf = (struct_page_block *)((char *)tmp_buf + sizeof(struct_page_block)); + if (memcpy_s((void *)block_buf, block_buf_size, (void *)page_block, block_buf_size) != 0) { + tloge("memcpy_s failed \n"); + return -EFAULT; + } + params[index].share.buf = tmp_buf; + packet_cmd->cliContext.params[index].memref.buffer = (unsigned int)(uintptr_t)tmp_buf; + packet_cmd->cliContext.params[index].memref.buffer_h_addr = (unsigned int)((uint64_t)tmp_buf >> H_OFFSET); + packet_cmd->cliContext.params[index].memref.size_addr = (unsigned int)(uintptr_t)&(params[index].share.buf_size); + packet_cmd->cliContext.params[index].memref.size_h_addr = (unsigned int)((uint64_t)&(params[index].share.buf_size) >> H_OFFSET); + } + }// end for + if (icount ==0) { + packet_cmd->cliContext.file_buffer = NULL; + } + return ret; +} + +static void process_address_end(struct_packet_cmd_send_cmd *packet_cmd, ClientParam params[]) +{ + int index; + uint32_t paramTypes[TEEC_PARAM_NUM]; + + for (index = 0; 
index < TEEC_PARAM_NUM; index++) { + paramTypes[index] = + TEEC_PARAM_TYPE_GET(packet_cmd->cliContext.paramTypes, index); + if (IS_PARTIAL_MEM(paramTypes[index])) { + packet_cmd->cliContext.params[index].memref.size_addr = params[index].memref.buf_size; + } else if (IS_TEMP_MEM(paramTypes[index])) { + packet_cmd->cliContext.params[index].memref.size_addr = params[index].memref.buf_size; + } else if (IS_VALUE_MEM(paramTypes[index])) { + packet_cmd->cliContext.params[index].value.a_addr = params[index].value.val_a; + packet_cmd->cliContext.params[index].value.b_addr = params[index].value.val_b; + } else if(IS_SHARED_MEM(paramTypes[index])) { + if (params[index].share.buf) { + free(params[index].share.buf); + } + } + } +} + +static void do_set_thread_id(struct fd_file *fd_p, unsigned int session_id, int flag) +{ + struct ListNode *ptr = NULL; + if (!fd_p) + return ; + pthread_t current_thread; + current_thread = flag > 0 ? pthread_self() : 0; + pthread_mutex_lock(&fd_p->session_lock); + if (!LIST_EMPTY(&fd_p->session_head)) { + LIST_FOR_EACH(ptr, &fd_p->session_head) { + struct session *sp = CONTAINER_OF(ptr, struct session, head); + if (sp->session_id == session_id) { + sp->thread_id = current_thread; + break; + } + } + } + pthread_mutex_unlock(&fd_p->session_lock); + if (flag) + set_thread_session_id(&g_pool, pthread_self(), session_id); + else + set_thread_session_id(&g_pool, pthread_self(), 0); +} + +static void set_thread_id(int ptzfd, unsigned int session_id, int flag, struct vm_file *vm_fp) +{ + struct fd_file *fd_p; + if (!vm_fp) + return; + fd_p = find_fd_file(ptzfd, vm_fp); + if (fd_p) { + do_set_thread_id(fd_p, session_id, flag); + } +} + +static void send_cmd(struct_packet_cmd_send_cmd *packet_cmd, + struct serial_port_file *serial_port) +{ + int ret = -1; + struct_packet_rsp_send_cmd packet_rsp; + ClientParam params[TEEC_PARAM_NUM]; + void *vm_hvas[TEEC_PARAM_NUM] = {0}; + if (!serial_port->vm_file) + return; + packet_cmd->cliContext.file_buffer = (char 
*)vm_hvas; + packet_cmd->cliContext.file_size = sizeof(void *) * TEEC_PARAM_NUM; + + if (!process_address(packet_cmd, params, serial_port->vm_file)) { + set_thread_id(packet_cmd->ptzfd, packet_cmd->cliContext.session_id, 1, serial_port->vm_file); + ret = ioctl(packet_cmd->ptzfd, TC_NS_CLIENT_IOCTL_SEND_CMD_REQ, &packet_cmd->cliContext); + + set_thread_id(packet_cmd->ptzfd, packet_cmd->cliContext.session_id, 0, serial_port->vm_file); + process_address_end(packet_cmd, params); + } + + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.packet_size = sizeof(packet_rsp); + packet_rsp.ret = ret; + packet_rsp.cliContext = packet_cmd->cliContext; + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) { + tloge("send to VM failed \n"); + } +} + +static void load_sec_file(struct_packet_cmd_load_sec *packet_cmd, + struct serial_port_file *serial_port) +{ + int ret; + struct_packet_rsp_load_sec packet_rsp; + packet_rsp.seq_num = packet_cmd->seq_num + 1; + ret = ioctl(packet_cmd->ptzfd, TC_NS_CLIENT_IOCTL_LOAD_APP_REQ, &packet_cmd->ioctlArg); + packet_rsp.packet_size = sizeof(packet_rsp); + packet_rsp.ret = ret; + packet_rsp.ioctlArg = packet_cmd->ioctlArg; + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) + tloge("send to VM failed \n"); +} + +static void vtz_dommap(struct_packet_cmd_mmap *packet_cmd, + struct serial_port_file *serial_port) +{ + int ret = 0; + struct_packet_rsp_mmap packet_rsp; + struct_shrd_mem *tmp = NULL; + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.packet_size = sizeof(packet_rsp); + void *buffer = mmap(0, (unsigned long)packet_cmd->size, (PROT_READ | PROT_WRITE), MAP_SHARED, + packet_cmd->ptzfd, (long)(packet_cmd->offset * (uint32_t)PAGE_SIZE)); + if (buffer == MAP_FAILED) { + tloge("mmap failed\n"); + packet_rsp.ret = -ENOMEM; + goto END; + } + packet_rsp.ret = ret; + + tmp = (struct_shrd_mem *)malloc(sizeof(struct_shrd_mem)); + 
ListInit(&tmp->node); + tmp->buffer = buffer; + tmp->vm_buffer = (void *)packet_cmd->buffer; + tmp->buffer_size = (size_t)packet_cmd->size; + tmp->dev_fd = packet_cmd->ptzfd; + + pthread_mutex_lock(&serial_port->vm_file->shrd_mem_lock); + ListInsertTail(&(serial_port->vm_file->shrd_mem_head), &tmp->node); + pthread_mutex_unlock(&serial_port->vm_file->shrd_mem_lock); +END: + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) { + tloge("send to VM failed \n"); + pthread_mutex_lock(&serial_port->vm_file->shrd_mem_lock); + ListRemoveEntry(&(tmp->node)); + pthread_mutex_unlock(&serial_port->vm_file->shrd_mem_lock); + (void)munmap(tmp->buffer, tmp->buffer_size); + free(tmp); + } +} + +static void vtz_dounmmap(struct_packet_cmd_mmap *packet_cmd, + struct serial_port_file *serial_port) +{ + int ret = 0; + struct_packet_rsp_mmap packet_rsp; + void *buffer = NULL; + uint32_t buffer_size; + struct ListNode *ptr = NULL; + struct ListNode *n = NULL; + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.packet_size = sizeof(packet_rsp); + pthread_mutex_lock(&(serial_port->vm_file->shrd_mem_lock)); + if (!LIST_EMPTY(&(serial_port->vm_file->shrd_mem_head))) { + LIST_FOR_EACH_SAFE(ptr, n, &(serial_port->vm_file->shrd_mem_head)) { + struct_shrd_mem *shrd_mem = + CONTAINER_OF(ptr, struct_shrd_mem, node); + if (shrd_mem->vm_buffer == (void *)packet_cmd->buffer) { + ListRemoveEntry(&(shrd_mem->node)); + buffer = shrd_mem->buffer; + buffer_size = shrd_mem->buffer_size; + free(shrd_mem); + } + } + } + pthread_mutex_unlock(&(serial_port->vm_file->shrd_mem_lock)); + if (buffer != NULL) { + ret = munmap(buffer, (size_t)buffer_size); + if (ret) { + tloge("Release SharedMemory failed, munmap error\n"); + } + } + packet_rsp.ret = ret; + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) { + tloge("send to VM failed \n"); + } +} + +static void vtz_mmap(struct_packet_cmd_mmap *packet_cmd, + struct 
serial_port_file *serial_port) +{ + if (packet_cmd->cmd == VTZ_MMAP) { + vtz_dommap(packet_cmd, serial_port); + } else { + vtz_dounmmap(packet_cmd, serial_port); + } +} + +static void vtz_nothing(struct_packet_cmd_nothing *packet_cmd, + struct serial_port_file *serial_port) +{ + int ret = 0; + struct_packet_rsp_nothing packet_rsp = {0}; + packet_rsp.seq_num = packet_cmd->seq_num + 1; + packet_rsp.packet_size = sizeof(packet_rsp); + packet_rsp.ret = 0; + ret = send_to_vm(serial_port, &packet_rsp, sizeof(packet_rsp)); + if (ret != sizeof(packet_rsp)) { + tloge("send to VM failed \n"); + } +} + +void *thread_entry(void *args) +{ + struct_packet_cmd_general *packet_general = NULL; + uint32_t ui32_cmd = 0; + uint64_t u64 = *(uint64_t *)(args); + struct serial_port_file *serial_port = (struct serial_port_file *)u64; + char *rd_buf = (char *)(args) + sizeof(uint64_t); + ui32_cmd = *(uint32_t *)(rd_buf + sizeof(uint32_t)); + + if (ui32_cmd == VTZ_OPEN_TZD) { + (void)open_tzdriver((struct_packet_cmd_open_tzd *)rd_buf, serial_port); + goto END; + } + + if (ui32_cmd == VTZ_NOTHING) { + (void)vtz_nothing((struct_packet_cmd_nothing *)rd_buf, serial_port); + goto END; + } + + packet_general = (struct_packet_cmd_general *)rd_buf; + if (!serial_port || !packet_general || + !find_fd_file(packet_general->ptzfd, serial_port->vm_file)) { + goto END; + } + + switch (ui32_cmd) + { + case VTZ_CLOSE_TZD: + (void)close_tzdriver((struct_packet_cmd_close_tzd *)rd_buf, serial_port); + break; + case VTZ_LOG_IN_NHIDL: + (void)log_in_NonHidl((struct_packet_cmd_login_non *)rd_buf, serial_port); + break; + case VTZ_GET_TEE_VERSION: + (void)get_tee_ver((struct_packet_cmd_getteever *)rd_buf, serial_port); + break; + case VTZ_GET_TEE_INFO: + (void)get_tee_info((struct_packet_cmd_getteeinfo *)rd_buf, serial_port); + break; + case VTZ_LATE_INIT: + break; + case VTZ_SYNC_TIME: + (void)sync_sys_time((struct_packet_cmd_synctime *)rd_buf, serial_port); + break; + case VTZ_LOG_IN: + 
(void)log_in((struct_packet_cmd_login *)rd_buf, serial_port); + break; + case VTZ_LOAD_SEC: + (void)load_sec_file((struct_packet_cmd_load_sec *)rd_buf, serial_port); + break; + case VTZ_OPEN_SESSION: + (void)open_session((struct_packet_cmd_session *)rd_buf, serial_port); + break; + case VTZ_CLOSE_SESSION: + (void)close_session((struct_packet_cmd_session *)rd_buf, serial_port); + break; + case VTZ_SEND_CMD: + (void)send_cmd((struct_packet_cmd_send_cmd *)rd_buf, serial_port); + break; + case VTZ_FS_REGISTER_AGENT: + (void)register_agent((struct_packet_cmd_regagent *)rd_buf, serial_port); + break; + case VTZ_WAIT_EVENT: + (void)wait_event((struct_packet_cmd_event *)rd_buf, serial_port); + break; + case VTZ_SEND_EVENT_RESPONSE: + (void)sent_event_response((struct_packet_cmd_event *)rd_buf, serial_port); + break; + case VTZ_MMAP: + case VTZ_MUNMAP: + (void)vtz_mmap((struct_packet_cmd_mmap *)rd_buf, serial_port); + break; + case VTZ_GET_TEEOS_VER: + case VTZ_SET_READER_CUR: + case VTZ_SET_TLOGCAT_STAT: + case VTZ_GET_TLOGCAT_STAT: + case VTZ_GET_LOG: + (void)tlog(ui32_cmd, (void *)rd_buf, serial_port); + break; + default: + break; + } + +END: + if (args) + free(args); + return NULL; +} + +void proc_event(struct serial_port_file *serial_port) +{ + int ret; + int offset = 0; + int buf_len; + int fd; + if (!serial_port || !serial_port->rd_buf || serial_port->sock <= 0){ + tloge("serial_port ptr or rd_buf is NULL!\n"); + return; + } + fd = serial_port->sock; + ret = read(fd, serial_port->rd_buf + serial_port->offset, BUF_LEN_MAX_RD - serial_port->offset); + + if (ret < 0) { + tloge("read domain socket failed \n"); + return; + } + if (ret == 0) + return; + buf_len = ret + serial_port->offset; + while(1) { + void *packet = NULL; + packet = get_packet_item(serial_port->rd_buf, buf_len, &offset); + if (packet == NULL) + break; + *(uint64_t *)(packet) = (uint64_t)serial_port; + thread_pool_submit(&g_pool, thread_entry, (void *)((uint64_t)packet)); + } + serial_port->offset = 
offset; +} + +int main() { + int ret = 0; + int i; + serial_port_list_init(); + if (thread_pool_init(&g_pool)) + goto END2; + if (check_stat_serial_port_first()) + goto END1; + + while (1) { + check_stat_serial_port(); + ret = safepoll(g_pollfd, g_pollfd_len, 20*1000); + if (ret == -1) { + tloge("pollfd failed, ret = %d \n", ret); + return -1; + } + if (ret == 0) { + continue; + } + + for (i = 0; i < g_pollfd_len; i++) { + if (g_pollfd[i].revents & POLLHUP || + g_pollfd[i].revents & POLLERR || + g_pollfd[i].revents & POLLNVAL) { + sleep(CHECK_TIME_SEC); + continue; + } + + if (g_pollfd[i].revents & POLLIN) { + proc_event(g_serial_array[i]); + } + } + } + +END1: + thread_pool_destroy(&g_pool); +END2: + serial_port_list_destroy(); + return 0; +} \ No newline at end of file diff --git a/trustzone-awared-vm/Host/vtzb_proxy/vtzb_proxy.h b/trustzone-awared-vm/Host/vtzb_proxy/vtzb_proxy.h new file mode 100644 index 0000000000000000000000000000000000000000..3cb0046ed46d66a2fd4b5db24a5ca43cba79d76d --- /dev/null +++ b/trustzone-awared-vm/Host/vtzb_proxy/vtzb_proxy.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2012-2023. All rights reserved. + * Licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef __VTZB_PROXY_H__ +#define __VTZB_PROXY_H__ + +#include +#include +#include "tc_ns_client.h" +#include "tee_sys_log.h" +#include "tee_client_list.h" + +#define TC_LOGGER_DEV_NAME "/dev/teelog" +#define H_OFFSET 32 + +#define VTZB_RSP_UNKOWN 0xfffffffe + +#define TEEC_PARAM_TYPE_GET(paramTypes, index) \ + (((paramTypes) >> (4 * (index))) & 0x0F) + +#define IS_TEMP_MEM(paramType) \ + (((paramType) == TEEC_MEMREF_TEMP_INPUT) || ((paramType) == TEEC_MEMREF_TEMP_OUTPUT) || \ + ((paramType) == TEEC_MEMREF_TEMP_INOUT)) + +#define IS_PARTIAL_MEM(paramType) \ + (((paramType) == TEEC_MEMREF_WHOLE) || ((paramType) == TEEC_MEMREF_PARTIAL_INPUT) || \ + ((paramType) == TEEC_MEMREF_PARTIAL_OUTPUT) || ((paramType) == TEEC_MEMREF_PARTIAL_INOUT)) + +#define IS_VALUE_MEM(paramType) \ + (((paramType) == TEEC_VALUE_INPUT) || ((paramType) == TEEC_VALUE_OUTPUT) || ((paramType) == TEEC_VALUE_INOUT)) + +#define IS_SHARED_MEM(paramType) \ + ((paramType) == TEEC_MEMREF_SHARED_INOUT) + +#define PAGE_SIZE getpagesize() + +typedef union { + struct { + uint32_t buf_size; + } memref; + struct { + uint32_t val_a; + uint32_t val_b; + } value; + struct { + void *buf; + uint32_t buf_size; + } share; +} ClientParam; + +typedef struct { + void *vm_buffer; + void *buffer; + uint32_t buffer_size; + int32_t dev_fd; + struct ListNode node; +} struct_shrd_mem; + +typedef union { + struct{ + uint64_t user_addr; + uint64_t page_num; + }block; + struct{ + uint64_t vm_page_size; + uint64_t shared_mem_size; + }share; +}struct_page_block; + +#endif /* __VTZB_PROXY_H__ */ \ No newline at end of file diff --git a/trustzone-awared-vm/README.md b/trustzone-awared-vm/README.md new file mode 100644 index 0000000000000000000000000000000000000000..cdf2e2810e185e70b63412b41687af55fff6e898 --- /dev/null +++ b/trustzone-awared-vm/README.md @@ -0,0 +1,26 @@ +# trustzone-awared-vm + +#### 介绍 + +项目(trustzone-awared-vm)旨在通过各种手段,使得REE侧的CA可以在虚拟化场景下使用TrustZone。 + +#### 软件架构 + +trustzone-awared-vm架构 + +本项目借助qemu虚拟串口 
virtserial、充分利用内存拷贝与内存共享,构建TrustZone感知的机密虚拟机,其整体架构如图 所示。构建vtzdriver,提供与tzdriver相同的接口供上层应用和库调用。利用qemu提供的virtserial,在VM侧创建字符设备,在Host侧创建socket,连通VM与Host。vtz_proxy接受识别由vtzdriver转发的tzdriver调用,识别后调用tzdriver对应接口。调用结果由vtz_proxy、qemu、vtzdriver返回给上层应用。从而实现在VM中使用TEE的体验与本地Host上无差异。 + +#### 安装教程 + +参考部署文档。 + +#### 使用说明 + +1. 本项目不包含其它开源项目的代码,涉及的第三方开源组件均需要使用者自行获取。 + +#### 参与贡献 + +#### 特技 + +1. 项目提供接口与原接口完全适配,用户无需修改应用 +2. 支持虚拟机使用switchless特性 \ No newline at end of file diff --git a/trustzone-awared-vm/RemoteAttestation/qca_client_demo/Makefile b/trustzone-awared-vm/RemoteAttestation/qca_client_demo/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..982377ffc8528c48b22350f2b90bb20b51fc79b1 --- /dev/null +++ b/trustzone-awared-vm/RemoteAttestation/qca_client_demo/Makefile @@ -0,0 +1,19 @@ +CUR_DIR=$(shell pwd) + +TARGET_APP := client + +APP_SOURCES := ./client.c \ + ./cJSON/cJSON.c \ + +APP_CFLAGS += -fstack-protector-strong + +APP_CFLAGS += -I$(CUR_DIR)/./cJSON + +APP_OBJECTS := $(APP_SOURCES:.c=.o) +$(TARGET_APP): $(APP_SOURCES) + @$(CC) $(APP_CFLAGS) -o $@ $(APP_SOURCES) $(APP_LDFLAGS) + +clean: + rm -f *.o $(TARGET_APP) + + diff --git a/trustzone-awared-vm/RemoteAttestation/qca_client_demo/client.c b/trustzone-awared-vm/RemoteAttestation/qca_client_demo/client.c new file mode 100644 index 0000000000000000000000000000000000000000..9ecbd3207d8b716cf34e37515745ef116d3e7800 --- /dev/null +++ b/trustzone-awared-vm/RemoteAttestation/qca_client_demo/client.c @@ -0,0 +1,170 @@ +/* File Name: client.c */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cJSON.h" + +#define DEFAULT_PORT 8000 + +char g_uuid[37] = {0}; +int g_nsid = 0; + +char *create_cJson() { + + cJSON *root = cJSON_CreateObject(); + cJSON_AddStringToObject(root, "Handler", "report-input"); + cJSON *payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "Version", "TEE.RA.1.0"); + cJSON_AddStringToObject(payload, 
"Nonce", "Vu7DjhtjRTgUsmWUFu8qKcUkObFkJ8TIXrHDRnqU8tv81zmbKmxNDWPrXhs4xDqDkK48fR5ml7pgjsKxM1Yuew"); + //cJSON_AddStringToObject(payload, "Uuid", "e3d37f4a-f24c-48d0-8884-3bdd6c44e988"); + cJSON_AddStringToObject(payload, "Uuid", g_uuid); + cJSON_AddStringToObject(payload, "Hash_alg", "HS256"); + cJSON_AddBoolToObject(payload, "With_tcb", false); + cJSON_AddNullToObject(payload, "Daa_bsn"); + cJSON_AddItemToObject(root, "Payload", payload); + + char *json_string = cJSON_Print(root); + cJSON_Delete(root); + return json_string; +} + +typedef struct { + int packet_size; + int cmd; + int nsid; + char data[]; +} packet_cmd_client; + +typedef struct { + int packet_size; + int cmd; + int rsp; + int ret; + char data[]; +} packet_rsp_client; + +#define CMD_CAN_NOT_CM_VM 0xff001100 +#define CMD_OK 0xff000000 + +int get_report(int sockfd) +{ + int ret = 0; + int recv_size; + int buf_size = 0x4000 + sizeof(packet_rsp_client); + packet_cmd_client *packet_cmd; + packet_rsp_client *packet_rsp; + char *tmp_str = create_cJson(); + printf("tmp_str = %s\n", tmp_str); + if (!tmp_str) { + printf("create_cJson err"); + return -1; + } + packet_cmd = malloc(sizeof(*packet_cmd) + strlen(tmp_str)); + if (!packet_cmd) { + printf("malloc failed\n"); + free(tmp_str); + return -1; + } + memcpy(packet_cmd->data, tmp_str, strlen(tmp_str)); + packet_cmd->cmd = 1; + packet_cmd->nsid = g_nsid; + packet_cmd->packet_size = sizeof(*packet_cmd) + strlen(tmp_str); + + packet_rsp = malloc(buf_size); + if (!packet_rsp) { + printf("malloc failed\n"); + free(tmp_str); + free(packet_cmd); + return -1; + } + + send(sockfd, packet_cmd, packet_cmd->packet_size, 0); + printf("after send to server\n"); + free(tmp_str); + free(packet_cmd); + + /*接收*/ + recv_size = recv(sockfd, packet_rsp, buf_size, 0); + if (packet_rsp->ret == 0 && packet_rsp->rsp == CMD_OK) { + printf("get report success\n"); + printf("report = %s \n", packet_rsp->data); + } else if(packet_rsp->rsp == CMD_CAN_NOT_CM_VM) { + printf("VM Remote Proof 
Service is not running\n"); + } else { + printf("get report failed\n"); + } +} + +int get_nsid_uuid() +{ + FILE *file = fopen("data.txt", "r"); + + if (file == NULL) { + printf("open file failed\n"); + return -1; + } + + if (fscanf(file, "%36s", g_uuid) != 1) { + printf("cant't read UUID\n"); + fclose(file); + return -1; + } + + if (fscanf(file, "%d", &g_nsid) != 1) { + printf("cant't read nsid\n"); + fclose(file); + return -1; + } + + fclose(file); + + printf("g_UUID: %s\n", g_uuid); + printf("g_nsid: %d\n", g_nsid); + + return 0; +} + +int main(int argc, char **argv) +{ + int sockfd; + struct sockaddr_in servaddr; + + if (get_nsid_uuid()) { + return 0; + } + if (argc != 2) { + printf("usage: ./client \n"); + exit(0); + } + + if ((sockfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) { + printf("create socket error: %s(errno: %d)\n", strerror(errno), errno); + exit(0); + } + + memset(&servaddr, 0, sizeof(servaddr)); + servaddr.sin_family = AF_INET; + servaddr.sin_port = htons(DEFAULT_PORT); + if (inet_pton(AF_INET, argv[1], &servaddr.sin_addr) <= 0) { + printf("inet_pton error for %s\n", argv[1]); + exit(0); + } + + if (connect(sockfd, (struct sockaddr *)&servaddr, sizeof(servaddr)) < 0) { + printf("connect error: %s(errno: %d)\n", strerror(errno), errno); + exit(0); + } + + get_report(sockfd); + + close(sockfd); + exit(0); +} diff --git a/trustzone-awared-vm/RemoteAttestation/qca_client_demo/data.txt b/trustzone-awared-vm/RemoteAttestation/qca_client_demo/data.txt new file mode 100644 index 0000000000000000000000000000000000000000..b9986b93db368d170a586db334b9dc65d9a51d77 --- /dev/null +++ b/trustzone-awared-vm/RemoteAttestation/qca_client_demo/data.txt @@ -0,0 +1,3 @@ +e3d37f4a-f24c-48d0-8884-3bdd6c44e988 +4469 + diff --git a/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/b64.c b/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/b64.c new file mode 100644 index 0000000000000000000000000000000000000000..f3168c0964d3a434f1f6235f8eb1104675ce0054 --- /dev/null +++ 
/*
 * b/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/b64.c
 * @@ -0,0 +1,99 @@
 *
 * Base64 encode/decode helpers (RFC 4648 alphabet, '=' padding).
 *
 * Self-contained: the lookup tables live in this translation unit instead
 * of relying on the `static` tables declared in b64.h (statics in a header
 * give every includer its own private copy, which is fragile), so b64.h is
 * no longer included here.  The four public function signatures are
 * unchanged, so all existing callers keep working.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

/* RFC 4648 base64 alphabet; index == 6-bit value. */
static const char g_b64_alphabet[] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

/* Reverse lookup: byte -> 6-bit value; 0 for bytes not in the alphabet. */
static unsigned char g_b64_reverse[256];
static int g_b64_reverse_ready = 0;

/* Number of '=' padding chars for each (input_length % 3). */
static const int g_b64_pad[3] = { 0, 2, 1 };

/*
 * Build the reverse-lookup table used by base64_decode().
 * Fix vs. original: the table is zero-filled before the 64 alphabet
 * entries are set, so bytes outside the alphabet decode as 0 instead of
 * reading uninitialized memory (the original malloc'd 256 bytes and
 * initialized only 64 of them).
 */
void build_decoding_table(void)
{
    memset(g_b64_reverse, 0, sizeof(g_b64_reverse));
    for (int i = 0; i < 64; i++)
        g_b64_reverse[(unsigned char)g_b64_alphabet[i]] = (unsigned char)i;
    g_b64_reverse_ready = 1;
}

/*
 * Release decoder state.  The table is static storage now, so there is
 * nothing to free; we only mark it stale so it is rebuilt on next use.
 * Kept for API compatibility with existing callers.
 */
void base64_cleanup(void)
{
    g_b64_reverse_ready = 0;
}

/*
 * Base64-encode input_length bytes of data.
 *
 * Returns a heap buffer (caller frees) holding the encoded text and
 * stores the encoded length (excluding the terminator) in *output_length.
 * Returns NULL on allocation failure.
 *
 * Fix vs. original: the buffer is one byte larger and NUL-terminated,
 * because callers in this patch (qca_guest.c, the demo below) pass the
 * result to strlen()/printf("%s") — the original buffer had no terminator,
 * causing a heap overread.
 */
char *base64_encode(const unsigned char *data,
                    size_t input_length,
                    size_t *output_length)
{
    *output_length = 4 * ((input_length + 2) / 3);

    char *encoded_data = malloc(*output_length + 1); /* +1 for the NUL */
    if (encoded_data == NULL)
        return NULL;

    size_t j = 0;
    for (size_t i = 0; i < input_length; ) {
        uint32_t octet_a = data[i++];
        uint32_t octet_b = i < input_length ? data[i++] : 0;
        uint32_t octet_c = i < input_length ? data[i++] : 0;

        uint32_t triple = (octet_a << 16) | (octet_b << 8) | octet_c;

        encoded_data[j++] = g_b64_alphabet[(triple >> 18) & 0x3F];
        encoded_data[j++] = g_b64_alphabet[(triple >> 12) & 0x3F];
        encoded_data[j++] = g_b64_alphabet[(triple >> 6) & 0x3F];
        encoded_data[j++] = g_b64_alphabet[triple & 0x3F];
    }

    /* Overwrite the tail with '=' padding as required by RFC 4648. */
    for (int k = 0; k < g_b64_pad[input_length % 3]; k++)
        encoded_data[*output_length - 1 - k] = '=';
    encoded_data[*output_length] = '\0';

    return encoded_data;
}

/*
 * Decode a base64 string of input_length characters ('=' padding allowed).
 *
 * Returns a heap buffer (caller frees) and sets *output_length, or NULL
 * when input_length is 0 or not a multiple of 4, or on allocation failure.
 *
 * Fix vs. original: input_length == 0 is rejected explicitly — it passed
 * the original `% 4` check and then read data[-1] and data[-2] (UB).
 */
unsigned char *base64_decode(const char *data,
                             size_t input_length,
                             size_t *output_length)
{
    if (!g_b64_reverse_ready)
        build_decoding_table();

    if (input_length == 0 || input_length % 4 != 0)
        return NULL;

    *output_length = input_length / 4 * 3;
    if (data[input_length - 1] == '=') (*output_length)--;
    if (data[input_length - 2] == '=') (*output_length)--;

    unsigned char *decoded_data = malloc(*output_length);
    if (decoded_data == NULL)
        return NULL;

    size_t j = 0;
    for (size_t i = 0; i < input_length; ) {
        /* '=' contributes a zero sextet; otherwise look the byte up.
         * (Replaces the original's obscure `0 & i++` trick.) */
        uint32_t sextet_a = data[i] == '=' ? (i++, 0u) : g_b64_reverse[(unsigned char)data[i++]];
        uint32_t sextet_b = data[i] == '=' ? (i++, 0u) : g_b64_reverse[(unsigned char)data[i++]];
        uint32_t sextet_c = data[i] == '=' ? (i++, 0u) : g_b64_reverse[(unsigned char)data[i++]];
        uint32_t sextet_d = data[i] == '=' ? (i++, 0u) : g_b64_reverse[(unsigned char)data[i++]];

        uint32_t triple = (sextet_a << 18) | (sextet_b << 12) |
                          (sextet_c << 6) | sextet_d;

        if (j < *output_length) decoded_data[j++] = (triple >> 16) & 0xFF;
        if (j < *output_length) decoded_data[j++] = (triple >> 8) & 0xFF;
        if (j < *output_length) decoded_data[j++] = triple & 0xFF;
    }

    return decoded_data;
}

/*
int main(){

    char * data = "Hello World!";
    long input_size = strlen(data);
    char * encoded_data = base64_encode(data, input_size, &input_size);
    printf("Encoded Data is: %s \n",encoded_data);

    long decode_size = strlen(encoded_data);
    char * decoded_data = base64_decode(encoded_data, decode_size, &decode_size);
    printf("Decoded Data is: %s \n",decoded_data);
    exit(0);
}
*/
/* ---- patch continues below: b64.h (preserved verbatim from the diff) ----
diff --git a/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/b64.h b/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/b64.h
new file mode 100644
index 0000000000000000000000000000000000000000..4beb085e52358fe9ad3b9d02b0d5a310e1f97188
--- /dev/null
+++ b/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/b64.h
@@ -0,0 +1,27 @@
+#ifndef B64_H
+#define B64_H
+
+static char encoding_table[] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
+                                'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
+                                'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
+                                'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
+                                'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
+                                'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
+                                'w', 'x', 'y', 'z', '0', '1', '2', '3',
+                                '4', '5', '6', '7', '8', '9', '+', '/'};
+static char *decoding_table = NULL;
+static int mod_table[] = {0, 2, 1};
+
+void build_decoding_table();
+void base64_cleanup();
+
+char *base64_encode(const unsigned char *data,
+                    size_t input_length,
+                    size_t *output_length);
+unsigned char *base64_decode(const char *data,
+                             size_t input_length,
+                             size_t
*/
*output_length); + + + +#endif // B64_H diff --git a/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/cloud/Makefile b/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/cloud/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..9cf7df6e58514ade3928f5da1611433dd923b675 --- /dev/null +++ b/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/cloud/Makefile @@ -0,0 +1,34 @@ +CUR_DIR=$(shell pwd) +iTrustee_SDK_PATH=${CUR_DIR}/../../../../ + +TARGET_APP := qca_guest + +APP_SOURCES := ../qca_guest.c \ + ../cJSON/cJSON.c \ + ../sha256.c \ + ../b64.c \ + +APP_SOURCES += $(iTrustee_SDK_PATH)/src/CA/cloud/libteec_adaptor.c + +APP_CFLAGS += -fstack-protector-strong -fPIC + +APP_CFLAGS += -I$(CUR_DIR)/../cJSON + +APP_CFLAGS += -I$(iTrustee_SDK_PATH)/include/CA -I$(iTrustee_SDK_PATH)/thirdparty/open_source/libboundscheck/include + +APP_CFLAGS += -DHOST_QCA + +APP_LDFLAGS += -ldl -lpthread + +APP_LDFLAGS += -L/usr/lib64 -lqca + +APP_LDFLAGS += -z text -z now -z relro -z noexecstack -pie + +APP_OBJECTS := $(APP_SOURCES:.c=.o) +$(TARGET_APP): $(APP_SOURCES) + @$(CC) $(APP_CFLAGS) -o $@ $(APP_SOURCES) $(APP_LDFLAGS) + +clean: + rm -f *.o $(TARGET_APP) + + diff --git a/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/qca_guest.c b/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/qca_guest.c new file mode 100644 index 0000000000000000000000000000000000000000..536e54e92d8a980742df3f9a927d28a8c8f9c0e7 --- /dev/null +++ b/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/qca_guest.c @@ -0,0 +1,450 @@ +/* + * File Name: qca_guest_demo.c + * */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "ra_client_api.h" + +#include "tee_client_api.h" +#include "securec.h" + +#include "cJSON.h" +#include "sha256.h" +#include "b64.h" + +#define DEFAULT_PORT 8000 +#define MAXLINE 4096 + +#define SHAREMEM_LIMIT (0x100000) /* 1 MB */ +#define PARAMS_RESERVED_SIZE (0x2000) 
+#define OUT_DATA_RESERVED_SIZE (0x3000) +#define REMOTE_ATTEST_CMD (0x1001) + +#define USER_DATA_SIZE 64 +#define NODE_LEN 8 +#define VERSION_SIZE 11 +#define TS_SIZE 22 +#define UUID_SIZE 16 +#define HASH_SIZE 32 + +unsigned char g_hexstr_hash_nsid[SHA256_BLOCK_SIZE * 2 + 1]; +int g_nsid; + +typedef struct +{ + uint32_t size; + uint8_t *buf; +} buffer_data; + +typedef struct +{ + uint8_t version[VERSION_SIZE]; + uint8_t timestamp[TS_SIZE]; + uint8_t nonce[USER_DATA_SIZE]; + uint8_t uuid[UUID_SIZE]; + uint32_t scenario; + uint32_t sig_alg; // Signature algorithm type + uint32_t hash_alg; // Hash algorithm type + uint8_t image_hash[HASH_SIZE]; + uint8_t hash[HASH_SIZE]; + uint8_t reserve[HASH_SIZE]; + // uint8_t signature[SIG_SIZE]; + // uint8_t cert[CERT_SIZE]; //AK cert + buffer_data *signature; + buffer_data *cert; +} TA_report; + +#define MAXSIZE 1000 +#define DATABUFMIN 100 +#define DATABUFMAX 20000 + +#define DEBUG 1 + +#ifdef DEBUG +static void debug(const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + vfprintf(stderr, fmt, args); + va_end(args); +} + +#define PRINTF_SIZE 16 +static void dump_buff(const char *buffer, size_t bufLen) +{ + size_t i; + if (buffer == NULL || bufLen == 0) + { + return; + } + + // printf("\n--------------------------------------------------\n"); + printf("--------------------------------------------------\n"); + printf("buf_len = %d\n", (int)bufLen); + for (i = 0; i < bufLen; i++) + { + if (i % PRINTF_SIZE == 0 && i != 0) + { + printf("\n"); + } + printf("%02x ", *(buffer + i)); + } + printf("\n--------------------------------------------------\n"); + return; +} +#else +#define debug(fmt, ...) 
\ + do \ + { \ + } while (0) +#define dump_buff(buffer, bufLen) \ + do \ + { \ + } while (0) +#endif + +int get_nsid_and_hash() +{ + FILE *fp; + char str_nsid[100] = {0}; + char container_id[256] = {0}; + SHA256_CTX ctx; + BYTE hash_nsid[SHA256_BLOCK_SIZE]; + int i; + + fp = fopen("/tmp/qca_nsid", "r"); + memset(str_nsid, 0, sizeof(str_nsid)); + fgets(str_nsid, sizeof(str_nsid), fp); + fclose(fp); + + g_nsid = atoi(str_nsid); + + sha256_init(&ctx); + sha256_update(&ctx, str_nsid, strlen(str_nsid) - 1); + sha256_final(&ctx, hash_nsid); + + memset(g_hexstr_hash_nsid, 0, SHA256_BLOCK_SIZE * 2 + 1); + for (i = 0; i < SHA256_BLOCK_SIZE; i++) + { + sprintf(g_hexstr_hash_nsid + i * 2, "%02x", hash_nsid[i]); + } + printf("guest vm nsid str (len = %d): %s", strlen(str_nsid), str_nsid); + printf("guest vm nsid int: %d \n", g_nsid); + printf("guest vm nsid str sha256 hex str: %s \n", g_hexstr_hash_nsid); + if (!g_nsid) + return -1; + return 0; +} + +char *Convert(buffer_data *data) +{ + // determine whether the buffer is legal + if (data == NULL) { + printf("illegal buffer data pointer."); + return NULL; + } + + if (data->size > DATABUFMAX || data->size < DATABUFMIN) { + printf("size of buffer is illegal."); + return NULL; + } + + cJSON *cj = cJSON_ParseWithLength(data->buf, data->size); + if (cj == NULL) { + printf("cjson parse report error."); + return NULL; + } + + char *json_string = cJSON_Print(cj); + //printf("out_data = %s\n", json_string); + cJSON_Delete(cj); + + return json_string; +} + +char *create_VM_cJson() +{ + cJSON *root = cJSON_CreateObject(); + + cJSON_AddStringToObject(root, "Handler", "report-input"); + + cJSON *payload = cJSON_CreateObject(); + + cJSON_AddStringToObject(payload, "Version", "TEE.RA.1.0"); + + cJSON_AddStringToObject(payload, "Nonce", "4JeF994WNGepoFyvu-6hYqj0VT6kixdh82huGh2D19wm_Mjj1jZdzHxUYJHw_j0ZlQjBpRqxpVJJxGMFaO2aIQ"); + cJSON_AddStringToObject(payload, "Uuid", "e3d37f4a-f24c-48d0-8884-3bdd6c44e988"); + + 
cJSON_AddStringToObject(payload, "Hash_alg", "HS256"); + cJSON_AddBoolToObject(payload, "With_tcb", false); + cJSON_AddNullToObject(payload, "Daa_bsn"); + + cJSON *container_info = cJSON_CreateObject(); + //cJSON_AddStringToObject(container_info, "id", "aBcDeFgHiJkLmNoPqRsTuVwXyZ0123456789abcdefghijklmnopqrstuvwxyz01"); + cJSON_AddStringToObject(container_info, "id", g_hexstr_hash_nsid); + cJSON_AddStringToObject(container_info, "type", "docker"); + + cJSON_AddItemToObject(payload, "container_info", container_info); + + cJSON_AddItemToObject(root, "Payload", payload); + + char *json_string = cJSON_Print(root); + + cJSON_Delete(root); + + return json_string; +} + +char *addFieldToPayload(char *buf) +{ + //printf("buf = %s\n", buf); + cJSON *root = cJSON_Parse(buf); + if (root == NULL) { + printf("Error parsing JSON\n"); + return NULL; + } + cJSON *payload = cJSON_GetObjectItem(root, "payload"); + if (payload != NULL && cJSON_IsObject(payload)) { + cJSON *container_info = cJSON_CreateObject(); + cJSON_AddStringToObject(container_info, "id", g_hexstr_hash_nsid); + cJSON_AddStringToObject(container_info, "type", "docker"); + cJSON_AddItemToObject(payload, "container_info", container_info); + //cJSON_AddItemToObject(root, "Payload", payload); + char *newJsonStr = cJSON_Print(root); + printf("Modified JSON: %s\n", newJsonStr); + + cJSON_Delete(root); + + return newJsonStr; + } else { + printf("Error: 'payload' node not found or not an object\n"); + cJSON_Delete(root); + return NULL; + } + +} + +char *get_report(char *buf) +{ + char *tmp_str = NULL; + struct ra_buffer_data in; + struct ra_buffer_data out; + //char *in_buf = create_VM_cJson(); + if (!buf) { + printf("in_buf is NULL\n"); + return NULL; + } + char *in_buf = addFieldToPayload(buf); + if (!in_buf) + return NULL; + in.buf = in_buf; + in.size = strlen(in_buf); + //printf("in_buf = %s \n", in.buf); + out.buf = malloc(0x4000); + if (out.buf == NULL) { + printf("malloc err\n"); + free(in_buf); + return NULL; + } + 
out.size = 0x4000; + if (out.size > SHAREMEM_LIMIT || (out.buf == NULL && out.size > 0) || + (out.buf != NULL && out.size < 0x3000)) { + printf("check output failed\n"); + goto END; + } + + TEEC_Result result = RemoteAttest(&in, &out); + + if (result != TEEC_SUCCESS) { + printf("RemoteAttest error\n"); + goto END; + } + printf("ger report result = %d\n", result); + tmp_str = Convert((buffer_data *)&out); +END: + free(in_buf); + free(out.buf); + return tmp_str; +} + +char *create_reg_cJson(char *str_hash_nsid, int insid) +{ + cJSON *root = cJSON_CreateObject(); + + cJSON_AddStringToObject(root, "container_id", str_hash_nsid); + cJSON_AddNumberToObject(root, "nsid", insid); + char *json_string = cJSON_Print(root); + cJSON *json_nsid = cJSON_GetObjectItem(root, "nsid"); + if (json_nsid == NULL){ + printf("err\n"); + } + uint32_t nsid = cJSON_GetNumberValue(json_nsid); + printf("nsid =%d\n", nsid); + cJSON_Delete(root); + return json_string; +} + +#define CMD_OK 0xff000000 +#define CMD_REGISTER_VM 0xff110011 +#define CMD_SEND_REPORT 0xff110012 + +typedef struct { + int packet_size; + int cmd; + int nsid; + char data[]; +} struct_packet_cmd_register; + +typedef struct { + int packet_size; + int rsp; + int ret; +} struct_packet_rsp_register; + +int register_vm(int s) +{ + int ret = 0; + struct_packet_cmd_register *packet_cmd; + struct_packet_rsp_register packet_rsp = {0}; + char *tmp_str = NULL; + tmp_str = create_reg_cJson(g_hexstr_hash_nsid, g_nsid); + if (!tmp_str) { + printf("err\n"); + return -1; + } + + packet_cmd = malloc(sizeof(*packet_cmd) + strlen(tmp_str)); + if (!packet_cmd) { + printf("ENOMEM\n"); + free(tmp_str); + return -1; + } + packet_cmd->cmd = CMD_REGISTER_VM; + packet_cmd->nsid = g_nsid; + packet_cmd->packet_size = sizeof(*packet_cmd) + strlen(tmp_str); + memcpy(packet_cmd->data, tmp_str, strlen(tmp_str)); + free(tmp_str); + + send(s, packet_cmd, packet_cmd->packet_size, 0); + + free(packet_cmd); + + (void)recv(s, &packet_rsp, sizeof(packet_rsp), 0); 
+ if (packet_rsp.rsp != CMD_OK){ + printf("send cmd to host failed\n"); + return -1; + } else { + printf("send cmd to host success, ret = %d\n", packet_rsp.ret); + } + + return 0; +} + +typedef struct { + int packet_size; + int cmd; + int nsid; + char data[]; +} packet_cmd_client; + +typedef struct { + int packet_size; + int cmd; + int rsp; + int ret; + char data[]; +} packet_rsp_client; + +void proc_get_report(int s) +{ + packet_cmd_client *packet_cmd; + packet_rsp_client *packet_rsp; + char *data; + packet_cmd = malloc(sizeof(*packet_cmd) + 4096); + if (!packet_cmd) { + return; + } + while (1){ + int ret = recv(s, packet_cmd, sizeof(*packet_cmd) + 4096, 0); + if (!ret) + break; + printf("recv :%s\n", packet_cmd->data); + data = packet_cmd->data; + char *tmp_str = get_report(data); + + if (tmp_str) { + //printf("tmp_str = %s\n", tmp_str); + packet_rsp = malloc(sizeof(*packet_rsp) + strlen(tmp_str)); + if (!packet_rsp){ + free(tmp_str); + goto END; + } + memcpy(packet_rsp->data, tmp_str, strlen(tmp_str)); + packet_rsp->packet_size = sizeof(*packet_rsp) + strlen(tmp_str); + packet_rsp->rsp = CMD_OK; + packet_rsp->ret = 0; + packet_rsp->cmd = CMD_SEND_REPORT; + send(s, packet_rsp, packet_rsp->packet_size, 0); + free(tmp_str); + free(packet_rsp); + } else { + packet_rsp = malloc(sizeof(*packet_rsp)); + if (!packet_rsp){ + goto END; + } + packet_rsp->packet_size = sizeof(*packet_rsp); + packet_rsp->rsp = CMD_OK; + packet_rsp->ret = -1; + packet_rsp->cmd = CMD_SEND_REPORT; + send(s, packet_rsp, packet_rsp->packet_size, 0); + free(packet_rsp); + } + } +END: + free(packet_cmd); +} + +int main(int argc, char **argv) +{ + char *tmp_str = NULL; + int s; + struct sockaddr_vm addr; + unsigned char uc_sockbuf[4096]; + size_t sizet_buf_recv; + + if(get_nsid_and_hash()) + return 0; + + s = socket(AF_VSOCK, SOCK_STREAM, 0); + memset(&addr, 0, sizeof(struct sockaddr_vm)); + addr.svm_family = AF_VSOCK; + addr.svm_port = 9999; + addr.svm_cid = VMADDR_CID_HOST; + + connect(s, (struct 
sockaddr *)&addr, sizeof(struct sockaddr_vm)); + + if(register_vm(s)) { + printf("register VM failed\n"); + close(s); + return 0; + } + + proc_get_report(s); + + close(s); +} + diff --git a/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/ra_client_api.h b/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/ra_client_api.h new file mode 100644 index 0000000000000000000000000000000000000000..604254e5b82544cb6e44e9b833d6f88643c227dc --- /dev/null +++ b/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/ra_client_api.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. + * Licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef LIBQCA_H +#define LIBQCA_H +#include + +struct ra_buffer_data { + uint32_t size; + uint8_t *buf; +}; + +TEEC_Result RemoteAttest(struct ra_buffer_data *in, struct ra_buffer_data *out); +#ifdef HOST_QCA +TEEC_Result RegisterContainer(struct ra_buffer_data *container_info, TEEC_Context *context, + TEEC_Session *session, uint32_t *origin); +#endif + +#endif + diff --git a/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/sha256.c b/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/sha256.c new file mode 100644 index 0000000000000000000000000000000000000000..704e31ee698b180f5f97cef84bb5bffba272b864 --- /dev/null +++ b/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/sha256.c @@ -0,0 +1,144 @@ +/*************************** HEADER FILES ***************************/ +#include +#include +#include "sha256.h" + +/****************************** MACROS ******************************/ +#define ROTLEFT(a,b) (((a) << (b)) | ((a) >> (32-(b)))) +#define ROTRIGHT(a,b) (((a) >> (b)) | ((a) << (32-(b)))) + +#define CH(x,y,z) (((x) & (y)) ^ (~(x) & (z))) +#define MAJ(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z))) +#define EP0(x) (ROTRIGHT(x,2) ^ ROTRIGHT(x,13) ^ ROTRIGHT(x,22)) +#define EP1(x) (ROTRIGHT(x,6) ^ ROTRIGHT(x,11) ^ ROTRIGHT(x,25)) +#define SIG0(x) (ROTRIGHT(x,7) ^ ROTRIGHT(x,18) ^ ((x) >> 3)) +#define SIG1(x) (ROTRIGHT(x,17) ^ ROTRIGHT(x,19) ^ ((x) >> 10)) + +/**************************** VARIABLES *****************************/ +static const WORD k[64] = { + 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5, + 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174, + 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da, + 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967, + 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85, + 
0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070, + 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3, + 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 +}; + +/*********************** FUNCTION DEFINITIONS ***********************/ +void sha256_transform(SHA256_CTX *ctx, const BYTE data[]) +{ + WORD a, b, c, d, e, f, g, h, i, j, t1, t2, m[64]; + + for (i = 0, j = 0; i < 16; ++i, j += 4) + m[i] = (data[j] << 24) | (data[j + 1] << 16) | (data[j + 2] << 8) | (data[j + 3]); + for ( ; i < 64; ++i) + m[i] = SIG1(m[i - 2]) + m[i - 7] + SIG0(m[i - 15]) + m[i - 16]; + + a = ctx->state[0]; + b = ctx->state[1]; + c = ctx->state[2]; + d = ctx->state[3]; + e = ctx->state[4]; + f = ctx->state[5]; + g = ctx->state[6]; + h = ctx->state[7]; + + for (i = 0; i < 64; ++i) { + t1 = h + EP1(e) + CH(e,f,g) + k[i] + m[i]; + t2 = EP0(a) + MAJ(a,b,c); + h = g; + g = f; + f = e; + e = d + t1; + d = c; + c = b; + b = a; + a = t1 + t2; + } + + ctx->state[0] += a; + ctx->state[1] += b; + ctx->state[2] += c; + ctx->state[3] += d; + ctx->state[4] += e; + ctx->state[5] += f; + ctx->state[6] += g; + ctx->state[7] += h; +} + +void sha256_init(SHA256_CTX *ctx) +{ + ctx->datalen = 0; + ctx->bitlen = 0; + ctx->state[0] = 0x6a09e667; + ctx->state[1] = 0xbb67ae85; + ctx->state[2] = 0x3c6ef372; + ctx->state[3] = 0xa54ff53a; + ctx->state[4] = 0x510e527f; + ctx->state[5] = 0x9b05688c; + ctx->state[6] = 0x1f83d9ab; + ctx->state[7] = 0x5be0cd19; +} + +void sha256_update(SHA256_CTX *ctx, const BYTE data[], size_t len) +{ + WORD i; + + for (i = 0; i < len; ++i) { + ctx->data[ctx->datalen] = data[i]; + ctx->datalen++; + if (ctx->datalen == 64) { + sha256_transform(ctx, ctx->data); + ctx->bitlen += 512; + ctx->datalen = 0; + } + } +} + +void sha256_final(SHA256_CTX *ctx, BYTE hash[]) +{ + WORD i; + + i = ctx->datalen; + + // Pad whatever data is left in the buffer. 
+ if (ctx->datalen < 56) { + ctx->data[i++] = 0x80; + while (i < 56) + ctx->data[i++] = 0x00; + } + else { + ctx->data[i++] = 0x80; + while (i < 64) + ctx->data[i++] = 0x00; + sha256_transform(ctx, ctx->data); + memset(ctx->data, 0, 56); + } + + // Append to the padding the total message's length in bits and transform. + ctx->bitlen += ctx->datalen * 8; + ctx->data[63] = ctx->bitlen; + ctx->data[62] = ctx->bitlen >> 8; + ctx->data[61] = ctx->bitlen >> 16; + ctx->data[60] = ctx->bitlen >> 24; + ctx->data[59] = ctx->bitlen >> 32; + ctx->data[58] = ctx->bitlen >> 40; + ctx->data[57] = ctx->bitlen >> 48; + ctx->data[56] = ctx->bitlen >> 56; + sha256_transform(ctx, ctx->data); + + // Since this implementation uses little endian byte ordering and SHA uses big endian, + // reverse all the bytes when copying the final state to the output hash. + for (i = 0; i < 4; ++i) { + hash[i] = (ctx->state[0] >> (24 - i * 8)) & 0x000000ff; + hash[i + 4] = (ctx->state[1] >> (24 - i * 8)) & 0x000000ff; + hash[i + 8] = (ctx->state[2] >> (24 - i * 8)) & 0x000000ff; + hash[i + 12] = (ctx->state[3] >> (24 - i * 8)) & 0x000000ff; + hash[i + 16] = (ctx->state[4] >> (24 - i * 8)) & 0x000000ff; + hash[i + 20] = (ctx->state[5] >> (24 - i * 8)) & 0x000000ff; + hash[i + 24] = (ctx->state[6] >> (24 - i * 8)) & 0x000000ff; + hash[i + 28] = (ctx->state[7] >> (24 - i * 8)) & 0x000000ff; + } +} diff --git a/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/sha256.h b/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/sha256.h new file mode 100644 index 0000000000000000000000000000000000000000..e9fe0ddc21d1114c72cc18808849972fdc5f00b9 --- /dev/null +++ b/trustzone-awared-vm/RemoteAttestation/qca_guest_demo/sha256.h @@ -0,0 +1,26 @@ +#ifndef SHA256_H +#define SHA256_H + +/*************************** HEADER FILES ***************************/ +#include + +/****************************** MACROS ******************************/ +#define SHA256_BLOCK_SIZE 32 // SHA256 outputs a 32 byte digest + 
+/**************************** DATA TYPES ****************************/ +typedef unsigned char BYTE; // 8-bit byte +typedef unsigned int WORD; // 32-bit word, change to "long" for 16-bit machines + +typedef struct { + BYTE data[64]; + WORD datalen; + unsigned long long bitlen; + WORD state[8]; +} SHA256_CTX; + +/*********************** FUNCTION DECLARATIONS **********************/ +void sha256_init(SHA256_CTX *ctx); +void sha256_update(SHA256_CTX *ctx, const BYTE data[], size_t len); +void sha256_final(SHA256_CTX *ctx, BYTE hash[]); + +#endif // SHA256_H diff --git a/trustzone-awared-vm/RemoteAttestation/qca_host_server_demo/cloud/Makefile b/trustzone-awared-vm/RemoteAttestation/qca_host_server_demo/cloud/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..035e1397b7abbaf69fa1d1699c1c9bca9ba3b9da --- /dev/null +++ b/trustzone-awared-vm/RemoteAttestation/qca_host_server_demo/cloud/Makefile @@ -0,0 +1,33 @@ +CUR_DIR=$(shell pwd) +iTrustee_SDK_PATH=${CUR_DIR}/../../../../ + +TARGET_APP := qca_host_server + +APP_SOURCES := ../qca_host_server_demo.c \ + ../cJSON/cJSON.c \ + +APP_SOURCES += $(iTrustee_SDK_PATH)/src/CA/cloud/libteec_adaptor.c + +APP_CFLAGS += -fstack-protector-strong -fPIC + +APP_CFLAGS += -I$(CUR_DIR)/../cJSON + +APP_CFLAGS += -I$(iTrustee_SDK_PATH)/include/CA -I$(iTrustee_SDK_PATH)/thirdparty/open_source/libboundscheck/include + +APP_CFLAGS += -DHOST_QCA + +APP_LDFLAGS += -ldl -lpthread + +APP_LDFLAGS += -L/usr/lib64 -lqca + +APP_LDFLAGS += -z text -z now -z relro -z noexecstack -pie + +APP_OBJECTS := $(APP_SOURCES:.c=.o) +$(TARGET_APP): $(APP_SOURCES) + @$(CC) $(APP_CFLAGS) -o $@ $(APP_SOURCES) $(APP_LDFLAGS) + +clean: + rm -f *.o $(TARGET_APP) + + + diff --git a/trustzone-awared-vm/RemoteAttestation/qca_host_server_demo/qca_host_server_demo.c b/trustzone-awared-vm/RemoteAttestation/qca_host_server_demo/qca_host_server_demo.c new file mode 100644 index 
0000000000000000000000000000000000000000..ebb60376c388b15dc4eb6342c0eda126710bcfc9 --- /dev/null +++ b/trustzone-awared-vm/RemoteAttestation/qca_host_server_demo/qca_host_server_demo.c @@ -0,0 +1,420 @@ +/* + * File Name: qca_host_server_demo.c + * */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ra_client_api.h" +#include "tee_client_api.h" +#include "securec.h" +#include "cJSON.h" + +#define DEFAULT_PORT 8000 +#define MAXLINE 4096 + +#define SHAREMEM_LIMIT (0x100000) /* 1 MB */ +#define PARAMS_RESERVED_SIZE (0x2000) +#define OUT_DATA_RESERVED_SIZE (0x3000) +#define REMOTE_ATTEST_CMD (0x1001) + +#define MAX_VM_NUM 32 +struct fd_map +{ + int vm_fd; + int client_fd; + int nsid; + int valid; +}; + +struct fd_map g_fd_map[MAX_VM_NUM] = {0}; + +char g_JSON[4096]; + +static const TEEC_UUID g_tee_qta_uuid = { + 0xe08f7eca, 0xe875, 0x440e, { + 0x9a, 0xb0, 0x5f, 0x38, 0x11, 0x36, 0xc6, 0x00 + } +}; + +#define CMD_CAN_NOT_CM_VM 0xff001100 +#define CMD_OK 0xff000000 +#define CMD_REGISTER_VM 0xff110011 + +typedef struct { + int packet_size; + int cmd; + int nsid; + char data[]; +} struct_packet_cmd; + +typedef struct { + int packet_size; + int rsp; + int ret; +} struct_packet_rsp; + +TEEC_Context g_context = {0}; +TEEC_Session g_session = {0}; +TEEC_Operation g_operation = {0}; + +int init_context() +{ + TEEC_UUID uuid = g_tee_qta_uuid; + TEEC_Result result = TEEC_InitializeContext(NULL, &g_context); + if (result != TEEC_SUCCESS) + { + printf("init g_context is failed, result is 0x%x\n", result); + return result; + } + + g_operation.started = 1; + g_operation.paramTypes = TEEC_PARAM_TYPES(TEEC_NONE, TEEC_NONE, TEEC_NONE, TEEC_NONE); + result = TEEC_OpenSession(&g_context, &g_session, &uuid, TEEC_LOGIN_IDENTIFY, NULL, &g_operation, NULL); + if (result != TEEC_SUCCESS) + { + printf("open g_session is failed, result is 0x%x\n", result); + goto cleanup_1; + } + printf("InitializeContext 
success\n"); + return 0; +cleanup_1: + TEEC_FinalizeContext(&g_context); + return result; +} + +void destroy_contex() +{ + TEEC_CloseSession(&g_session); + TEEC_FinalizeContext(&g_context); +} + +int reg_vm(char *buf) +{ + uint32_t origin; + struct ra_buffer_data data; + data.buf = buf; + data.size = strlen(buf); + + TEEC_Result result = RegisterContainer(&data, &g_context, &g_session, &origin); + if (result != TEEC_SUCCESS) { + printf("open g_session is failed, result is 0x%x\n", result); + } + + printf("reg VM result = %d\n", result); + return result; +} + +void add_vm_fd_map(int vm_fd, int nsid) +{ + int find = 0; + for (int i = 0; i < MAX_VM_NUM; i++) { + if (g_fd_map[i].nsid == nsid) { + g_fd_map[i].vm_fd = vm_fd; + g_fd_map[i].nsid = nsid; + g_fd_map[i].valid = 1; + printf("add vm_fd = %d\n", vm_fd); + find = 1; + break; + } + } + if (find) + return; + for (int i = 0; i < MAX_VM_NUM; i++) { + if (!g_fd_map[i].valid) { + g_fd_map[i].vm_fd = vm_fd; + g_fd_map[i].nsid = nsid; + g_fd_map[i].valid = 1; + printf("add vm_fd = %d\n", vm_fd); + break; + } + } +} + +void add_client_fd_map(int client_fd, int nsid) +{ + for (int i = 0; i < MAX_VM_NUM; i++) { + if (g_fd_map[i].valid == nsid) { + g_fd_map[i].client_fd = client_fd; + break; + } + } +} + +void remove_fd_map(int nsid) +{ + for (int i = 0; i < MAX_VM_NUM; i++) { + if (g_fd_map[i].nsid == nsid) { + g_fd_map[i].vm_fd = 0; + g_fd_map[i].nsid = 0; + g_fd_map[i].valid = 0; + + break; + } + } +} + +void proc_mesg(char *recv_buf, int peer_fd) +{ + int ret = 0; + struct_packet_cmd *packet_cmd = (struct_packet_cmd *)recv_buf; + struct_packet_rsp packet_rsp = {0}; + if (!packet_cmd) + return; + switch (packet_cmd->cmd) + { + case CMD_REGISTER_VM: + if (packet_cmd->cmd == CMD_REGISTER_VM) { + printf("data = %s \n", ((struct_packet_cmd*)recv_buf)->data); + } + ret = reg_vm(packet_cmd->data); + packet_rsp.packet_size = sizeof(packet_rsp); + packet_rsp.ret = ret; + packet_rsp.rsp = CMD_OK; + send(peer_fd, &packet_rsp, 
sizeof(packet_rsp), 0); + if (ret == 0) + add_vm_fd_map(peer_fd, packet_cmd->nsid); + break; + + default: + break; + } +} + +void *th_vsock_fun(void *arg) +{ + int s; + int peer_fd; + struct sockaddr_vm peer_addr; + socklen_t peer_addr_size; + struct sockaddr_vm addr; + char recv_buf[4096]; + size_t sizet_buf_recv; + char cbuf_vmreg[6]; + char cbuf_strhashnsid[65]; + char cbuf_strnsid[10]; + int insid; + int iresult; + unsigned char uc_sockbuf[4096]; + + s = socket(AF_VSOCK, SOCK_STREAM, 0); + + if (s < 0) { + perror("socket"); + return NULL; + } + + memset(&addr, 0, sizeof(struct sockaddr_vm)); + addr.svm_family = AF_VSOCK; + addr.svm_port = 9999; + addr.svm_cid = VMADDR_CID_HOST; + bind(s, (struct sockaddr *)&addr, sizeof(struct sockaddr_vm)); + listen(s, 0); + + peer_addr_size = sizeof(struct sockaddr_vm); + + while (1) { + peer_fd = accept(s, (struct sockaddr *)&peer_addr, &peer_addr_size); + if (peer_fd < 0) { + perror("accept"); + printf("accept vsocket error: %s(errno: %d)", strerror(errno), errno); + continue; + } + fprintf(stderr, "connection from cid %u port %u \n", peer_addr.svm_cid, peer_addr.svm_port); + + sizet_buf_recv = recv(peer_fd, &recv_buf, 4096, 0); + if (sizet_buf_recv > 0) { + printf(" received %lu bytes \n", sizet_buf_recv); + } + + proc_mesg(recv_buf, peer_fd); + + /* + if (!fork()) + { + if (send(connect_fd, "Hello,you are connected!\n", 26, 0) == -1) + perror("send error"); + close(connect_fd); + exit(0); + } + buff[n] = '\0'; + printf("recv msg from client: %s\n", buff); + */ + + //close(peer_fd); + } + close(s); +} + + +typedef struct { + int packet_size; + int cmd; + int nsid; + char data[]; +} packet_cmd_client; + +typedef struct { + int packet_size; + int cmd; + int rsp; + int ret; + char data[]; +} packet_rsp_client; + +void proc_client_cmd(char *buff, int sock_fd) +{ + int bfind = 0; + int ret = 0; + int recv_size; + int buf_size = 0x4000 + sizeof(packet_rsp_client); + packet_cmd_client *packet_cmd = (packet_cmd_client *)buff; + 
packet_rsp_client *packet_rsp; + packet_rsp = (packet_rsp_client *)malloc(buf_size); + switch (packet_cmd->cmd) + { + case 1:/*验证TA*/ + for (int i = 0; i < MAX_VM_NUM; i++) { + if (g_fd_map[i].nsid == packet_cmd->nsid) { + send(g_fd_map[i].vm_fd, packet_cmd, packet_cmd->packet_size, 0); + printf("recv msg from client: %s\n", packet_cmd->data); + recv_size = recv(g_fd_map[i].vm_fd, packet_rsp, buf_size, 0); + printf("ret = %d, cmd = %lx\n", packet_rsp->ret, packet_rsp->cmd); + //printf("data = %s\n", packet_rsp->data); + send(sock_fd, packet_rsp, packet_rsp->packet_size, 0); + close(sock_fd); + bfind = 1; + break; + } + } + if (!bfind) { + packet_rsp->rsp = CMD_CAN_NOT_CM_VM; + packet_rsp->ret = -1; + send(sock_fd, packet_rsp, packet_rsp->packet_size, 0); + } + break; + default: + break; + } +} + +void *th_pro_client(void *args) +{ + int socket_fd; + struct sockaddr_in servaddr; + char buff[4096]; + int n; + + if ((socket_fd = socket(AF_INET, SOCK_STREAM, 0)) == -1) + { + printf("create socket error: %s(errno: %d)\n", strerror(errno), errno); + exit(0); + } + memset(&servaddr, 0, sizeof(servaddr)); + servaddr.sin_family = AF_INET; + servaddr.sin_addr.s_addr = htonl(INADDR_ANY); + servaddr.sin_port = htons(DEFAULT_PORT); + + if (bind(socket_fd, (struct sockaddr *)&servaddr, sizeof(servaddr)) == -1) { + printf("bind socket error: %s(errno: %d)\n", strerror(errno), errno); + exit(0); + } + if (listen(socket_fd, 10) == -1) { + printf("listen socket error: %s(errno: %d)\n", strerror(errno), errno); + exit(0); + } + printf("======waiting for client's request======\n"); + while (1) { + int connect_fd; + if ((connect_fd = accept(socket_fd, (struct sockaddr *)NULL, NULL)) == -1) + { + printf("accept socket error: %s(errno: %d)", strerror(errno), errno); + continue; + } + n = recv(connect_fd, buff, MAXLINE, 0); + proc_client_cmd(buff, connect_fd); + } + close(socket_fd); +} + +char *create_no_as_cJson() +{ + cJSON *root = cJSON_CreateObject(); + cJSON_AddStringToObject(root, 
"Handler", "provisioning-input"); + cJSON *payload = cJSON_CreateObject(); + cJSON_AddStringToObject(payload, "Version", "TEE.RA.1.0"); + cJSON_AddStringToObject(payload, "scenario", "sce_no_as"); + cJSON_AddStringToObject(payload, "Hash_alg", "HS256"); + cJSON_AddItemToObject(root, "Payload", payload); + + char *json_string = cJSON_Print(root); + + cJSON_Delete(root); + + return json_string; +} + +int provisionNoAS() +{ + char *tmp_buf = create_no_as_cJson(); + if (!tmp_buf) { + printf("provisionNoAS failed \n"); + return -1; + } + + struct ra_buffer_data in; + struct ra_buffer_data out; + in.buf = tmp_buf; + in.size = strlen(tmp_buf); + printf("tmp_buf = %s \n", in.buf); + + out.size = 0x3000; + out.buf = malloc(0x3000); + if (!out.buf) { + free(tmp_buf); + return -1; + } + TEEC_Result result = RemoteAttest(&in, &out); + if (result != TEEC_SUCCESS) { + printf("open g_session is failed, result is 0x%x\n", result); + } + free(tmp_buf); + printf("provisionNoAS result = %d\n", result); + return result; +} + +int main(int argc, char **argv) +{ + pthread_t th_vsock; + pthread_t th_client; + + if (provisionNoAS()){ + return 0; + } + + if (init_context()) { + perror("init_context failed\n"); + return 0; + } + + (void)pthread_create(&th_vsock, NULL, th_vsock_fun, NULL); + (void)pthread_create(&th_client, NULL, th_pro_client, NULL); + + (void)pthread_join(th_vsock, NULL); + (void)pthread_join(th_client, NULL); + + destroy_contex(); + + return 0; +} + diff --git a/trustzone-awared-vm/RemoteAttestation/qca_host_server_demo/ra_client_api.h b/trustzone-awared-vm/RemoteAttestation/qca_host_server_demo/ra_client_api.h new file mode 100644 index 0000000000000000000000000000000000000000..604254e5b82544cb6e44e9b833d6f88643c227dc --- /dev/null +++ b/trustzone-awared-vm/RemoteAttestation/qca_host_server_demo/ra_client_api.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. + * Licensed under the Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef LIBQCA_H +#define LIBQCA_H +#include + +struct ra_buffer_data { + uint32_t size; + uint8_t *buf; +}; + +TEEC_Result RemoteAttest(struct ra_buffer_data *in, struct ra_buffer_data *out); +#ifdef HOST_QCA +TEEC_Result RegisterContainer(struct ra_buffer_data *container_info, TEEC_Context *context, + TEEC_Session *session, uint32_t *origin); +#endif + +#endif + diff --git a/trustzone-awared-vm/VM/virtio/char/Makefile b/trustzone-awared-vm/VM/virtio/char/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..043477007925201f532bda4d609b0e5a10dfaf21 --- /dev/null +++ b/trustzone-awared-vm/VM/virtio/char/Makefile @@ -0,0 +1,32 @@ +#Makefile +obj-m := virtio_console.o + +vtzfdriver-objs := virtio_console.o + +RESULT := $(shell cat /proc/kallsyms | grep vsnprintf_s) + +STATUS := $(findstring vsnprintf_s, $(RESULT)) + +# ifneq ($(STATUS), vsnprintf_s) + +# endif + +KERN_VER = $(shell uname -r) +KERN_DIR = /lib/modules/$(KERN_VER)/build + +EXTRA_CFLAGS += -fstack-protector-strong -DCONFIG_AUTH_ENHANCE +EXTRA_CFLAGS += -I$(PWD)/../tty/hvc + +all: + make -C $(KERN_DIR) M=`pwd` modules + + +.PHONY: clean +clean: + # make -C $(KERN_DIR) M=`pwd` modules clean + -rm -vrf *.o *.ko + -rm -vrf *.order *.symvers *.mod.c *.mod.o .tmp_versions .*o.cmd .*.o.d + -rm -vrf *.mod + + + diff --git a/trustzone-awared-vm/VM/virtio/char/virtio_console.c b/trustzone-awared-vm/VM/virtio/char/virtio_console.c new file mode 100644 index 0000000000000000000000000000000000000000..ee9e9d3a87c4ad129d456b1a79ecd00cd9cf127e --- /dev/null +++ 
b/trustzone-awared-vm/VM/virtio/char/virtio_console.c @@ -0,0 +1,2447 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation + * Copyright (C) 2009, 2010, 2011 Red Hat, Inc. + * Copyright (C) 2009, 2010, 2011 Amit Shah + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../tty/hvc/hvc_console.h" + +#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC) + +/* + * This is a global struct for storing common data for all the devices + * this driver handles. + * + * Mainly, it has a linked list for all the consoles in one place so + * that callbacks from hvc for get_chars(), put_chars() work properly + * across multiple devices and multiple ports per device. + */ +struct ports_driver_data { + /* Used for registering chardevs */ + struct class *class; + + /* Used for exporting per-port information to debugfs */ + struct dentry *debugfs_dir; + + /* List of all the devices we're handling */ + struct list_head portdevs; + + /* + * This is used to keep track of the number of hvc consoles + * spawned by this driver. This number is given as the first + * argument to hvc_alloc(). To correctly map an initial + * console spawned via hvc_instantiate to the console being + * hooked up via hvc_alloc, we need to pass the same vtermno. + * + * We also just assume the first console being initialised was + * the first one that got used as the initial console. 
+ */ + unsigned int next_vtermno; + + /* All the console devices handled by this driver */ + struct list_head consoles; +}; +static struct ports_driver_data pdrvdata = { .next_vtermno = 1}; + +static DEFINE_SPINLOCK(pdrvdata_lock); +static DECLARE_COMPLETION(early_console_added); + +/* This struct holds information that's relevant only for console ports */ +struct console { + /* We'll place all consoles in a list in the pdrvdata struct */ + struct list_head list; + + /* The hvc device associated with this console port */ + struct hvc_struct *hvc; + + /* The size of the console */ + struct winsize ws; + + /* + * This number identifies the number that we used to register + * with hvc in hvc_instantiate() and hvc_alloc(); this is the + * number passed on by the hvc callbacks to us to + * differentiate between the other console ports handled by + * this driver + */ + u32 vtermno; +}; + +struct port_buffer { + char *buf; + + /* size of the buffer in *buf above */ + size_t size; + + /* used length of the buffer */ + size_t len; + /* offset in the buf from which to consume data */ + size_t offset; + + /* DMA address of buffer */ + dma_addr_t dma; + + /* Device we got DMA memory from */ + struct device *dev; + + /* List of pending dma buffers to free */ + struct list_head list; + + /* If sgpages == 0 then buf is used */ + unsigned int sgpages; + + /* sg is used if spages > 0. sg must be the last in is struct */ + struct scatterlist sg[]; +}; + +/* + * This is a per-device struct that stores data common to all the + * ports for that device (vdev->priv). 
+ */ +struct ports_device { + /* Next portdev in the list, head is in the pdrvdata struct */ + struct list_head list; + + /* + * Workqueue handlers where we process deferred work after + * notification + */ + struct work_struct control_work; + struct work_struct config_work; + + struct list_head ports; + + /* To protect the list of ports */ + spinlock_t ports_lock; + + /* To protect the vq operations for the control channel */ + spinlock_t c_ivq_lock; + spinlock_t c_ovq_lock; + + /* max. number of ports this device can hold */ + u32 max_nr_ports; + + /* The virtio device we're associated with */ + struct virtio_device *vdev; + + /* + * A couple of virtqueues for the control channel: one for + * guest->host transfers, one for host->guest transfers + */ + struct virtqueue *c_ivq, *c_ovq; + + /* + * A control packet buffer for guest->host requests, protected + * by c_ovq_lock. + */ + struct virtio_console_control cpkt; + + /* Array of per-port IO virtqueues */ + struct virtqueue **in_vqs, **out_vqs; + + /* Major number for this device. Ports will be created as minors. */ + int chr_major; +}; + +struct port_stats { + unsigned long bytes_sent, bytes_received, bytes_discarded; +}; + +/* This struct holds the per-port data */ +struct port { + /* Next port in the list, head is in the ports_device */ + struct list_head list; + + /* Pointer to the parent virtio_console device */ + struct ports_device *portdev; + + /* The current buffer from which data has to be fed to readers */ + struct port_buffer *inbuf; + + /* + * To protect the operations on the in_vq associated with this + * port. Has to be a spinlock because it can be called from + * interrupt context (get_char()). + */ + spinlock_t inbuf_lock; + + /* Protect the operations on the out_vq. 
*/ + spinlock_t outvq_lock; + + /* The IO vqs for this port */ + struct virtqueue *in_vq, *out_vq; + + /* File in the debugfs directory that exposes this port's information */ + struct dentry *debugfs_file; + + /* + * Keep count of the bytes sent, received and discarded for + * this port for accounting and debugging purposes. These + * counts are not reset across port open / close events. + */ + struct port_stats stats; + + /* + * The entries in this struct will be valid if this port is + * hooked up to an hvc console + */ + struct console cons; + + /* Each port associates with a separate char device */ + struct cdev *cdev; + struct device *dev; + + /* Reference-counting to handle port hot-unplugs and file operations */ + struct kref kref; + + /* A waitqueue for poll() or blocking read operations */ + wait_queue_head_t waitqueue; + + /* The 'name' of the port that we expose via sysfs properties */ + char *name; + + /* We can notify apps of host connect / disconnect events via SIGIO */ + struct fasync_struct *async_queue; + + /* The 'id' to identify the port with the Host */ + u32 id; + + bool outvq_full; + + /* Is the host device open */ + bool host_connected; + + /* We should allow only one process to open a port */ + bool guest_connected; +}; + +/* This is the very early arch-specified put chars function. 
*/ +static int (*early_put_chars)(u32, const char *, int); + +static struct port *find_port_by_vtermno(u32 vtermno) +{ + struct port *port; + struct console *cons; + unsigned long flags; + + spin_lock_irqsave(&pdrvdata_lock, flags); + list_for_each_entry(cons, &pdrvdata.consoles, list) { + if (cons->vtermno == vtermno) { + port = container_of(cons, struct port, cons); + goto out; + } + } + port = NULL; +out: + spin_unlock_irqrestore(&pdrvdata_lock, flags); + return port; +} + +static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev, + dev_t dev) +{ + struct port *port; + unsigned long flags; + + spin_lock_irqsave(&portdev->ports_lock, flags); + list_for_each_entry(port, &portdev->ports, list) { + if (port->cdev->dev == dev) { + kref_get(&port->kref); + goto out; + } + } + port = NULL; +out: + spin_unlock_irqrestore(&portdev->ports_lock, flags); + + return port; +} + +static struct port *find_port_by_devt(dev_t dev) +{ + struct ports_device *portdev; + struct port *port; + unsigned long flags; + + spin_lock_irqsave(&pdrvdata_lock, flags); + list_for_each_entry(portdev, &pdrvdata.portdevs, list) { + port = find_port_by_devt_in_portdev(portdev, dev); + if (port) + goto out; + } + port = NULL; +out: + spin_unlock_irqrestore(&pdrvdata_lock, flags); + return port; +} + +static struct port *find_port_by_id(struct ports_device *portdev, u32 id) +{ + struct port *port; + unsigned long flags; + + spin_lock_irqsave(&portdev->ports_lock, flags); + list_for_each_entry(port, &portdev->ports, list) + if (port->id == id) + goto out; + port = NULL; +out: + spin_unlock_irqrestore(&portdev->ports_lock, flags); + + return port; +} + +static struct port *find_port_by_vq(struct ports_device *portdev, + struct virtqueue *vq) +{ + struct port *port; + unsigned long flags; + + spin_lock_irqsave(&portdev->ports_lock, flags); + list_for_each_entry(port, &portdev->ports, list) + if (port->in_vq == vq || port->out_vq == vq) + goto out; + port = NULL; +out: + 
spin_unlock_irqrestore(&portdev->ports_lock, flags); + return port; +} + +static bool is_console_port(struct port *port) +{ + if (port->cons.hvc) + return true; + return false; +} + +static bool is_rproc_serial(const struct virtio_device *vdev) +{ + return is_rproc_enabled && vdev->id.device == VIRTIO_ID_RPROC_SERIAL; +} + +static inline bool use_multiport(struct ports_device *portdev) +{ + /* + * This condition can be true when put_chars is called from + * early_init + */ + if (!portdev->vdev) + return false; + return __virtio_test_bit(portdev->vdev, VIRTIO_CONSOLE_F_MULTIPORT); +} + +static DEFINE_SPINLOCK(dma_bufs_lock); +static LIST_HEAD(pending_free_dma_bufs); + +static void free_buf(struct port_buffer *buf, bool can_sleep) +{ + unsigned int i; + + for (i = 0; i < buf->sgpages; i++) { + struct page *page = sg_page(&buf->sg[i]); + if (!page) + break; + put_page(page); + } + + if (!buf->dev) { + kfree(buf->buf); + } else if (is_rproc_enabled) { + unsigned long flags; + + /* dma_free_coherent requires interrupts to be enabled. 
*/ + if (!can_sleep) { + /* queue up dma-buffers to be freed later */ + spin_lock_irqsave(&dma_bufs_lock, flags); + list_add_tail(&buf->list, &pending_free_dma_bufs); + spin_unlock_irqrestore(&dma_bufs_lock, flags); + return; + } + dma_free_coherent(buf->dev, buf->size, buf->buf, buf->dma); + + /* Release device refcnt and allow it to be freed */ + put_device(buf->dev); + } + + kfree(buf); +} + +static void reclaim_dma_bufs(void) +{ + unsigned long flags; + struct port_buffer *buf, *tmp; + LIST_HEAD(tmp_list); + + if (list_empty(&pending_free_dma_bufs)) + return; + + /* Create a copy of the pending_free_dma_bufs while holding the lock */ + spin_lock_irqsave(&dma_bufs_lock, flags); + list_cut_position(&tmp_list, &pending_free_dma_bufs, + pending_free_dma_bufs.prev); + spin_unlock_irqrestore(&dma_bufs_lock, flags); + + /* Release the dma buffers, without irqs enabled */ + list_for_each_entry_safe(buf, tmp, &tmp_list, list) { + list_del(&buf->list); + free_buf(buf, true); + } +} + +static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size, + int pages) +{ + struct port_buffer *buf; + + reclaim_dma_bufs(); + + /* + * Allocate buffer and the sg list. The sg list array is allocated + * directly after the port_buffer struct. + */ + buf = kmalloc(struct_size(buf, sg, pages), GFP_KERNEL); + if (!buf) + goto fail; + + buf->sgpages = pages; + if (pages > 0) { + buf->dev = NULL; + buf->buf = NULL; + return buf; + } + + if (is_rproc_serial(vdev)) { + /* + * Allocate DMA memory from ancestor. When a virtio + * device is created by remoteproc, the DMA memory is + * associated with the parent device: + * virtioY => remoteprocX#vdevYbuffer. 
+ */ + buf->dev = vdev->dev.parent; + if (!buf->dev) + goto free_buf; + + /* Increase device refcnt to avoid freeing it */ + get_device(buf->dev); + buf->buf = dma_alloc_coherent(buf->dev, buf_size, &buf->dma, + GFP_KERNEL); + } else { + buf->dev = NULL; + buf->buf = kmalloc(buf_size, GFP_KERNEL); + } + + if (!buf->buf) + goto free_buf; + buf->len = 0; + buf->offset = 0; + buf->size = buf_size; + return buf; + +free_buf: + kfree(buf); +fail: + return NULL; +} + +/* Callers should take appropriate locks */ +static struct port_buffer *get_inbuf(struct port *port) +{ + struct port_buffer *buf; + unsigned int len; + + if (port->inbuf) + return port->inbuf; + + buf = virtqueue_get_buf(port->in_vq, &len); + if (buf) { + buf->len = min_t(size_t, len, buf->size); + buf->offset = 0; + port->stats.bytes_received += len; + } + return buf; +} + +/* + * Create a scatter-gather list representing our input buffer and put + * it in the queue. + * + * Callers should take appropriate locks. + */ +static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf) +{ + struct scatterlist sg[1]; + int ret; + + sg_init_one(sg, buf->buf, buf->size); + + ret = virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC); + virtqueue_kick(vq); + if (!ret) + ret = vq->num_free; + return ret; +} + +/* Discard any unread data this port has. Callers lockers. */ +static void discard_port_data(struct port *port) +{ + struct port_buffer *buf; + unsigned int err; + + if (!port->portdev) { + /* Device has been unplugged. vqs are already gone. 
*/ + return; + } + buf = get_inbuf(port); + + err = 0; + while (buf) { + port->stats.bytes_discarded += buf->len - buf->offset; + if (add_inbuf(port->in_vq, buf) < 0) { + err++; + free_buf(buf, false); + } + port->inbuf = NULL; + buf = get_inbuf(port); + } + if (err) + dev_warn(port->dev, "Errors adding %d buffers back to vq\n", + err); +} + +static bool port_has_data(struct port *port) +{ + unsigned long flags; + bool ret; + + ret = false; + spin_lock_irqsave(&port->inbuf_lock, flags); + port->inbuf = get_inbuf(port); + if (port->inbuf) + ret = true; + + spin_unlock_irqrestore(&port->inbuf_lock, flags); + return ret; +} + +static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, + unsigned int event, unsigned int value) +{ + struct scatterlist sg[1]; + struct virtqueue *vq; + unsigned int len; + + if (!use_multiport(portdev)) + return 0; + + vq = portdev->c_ovq; + + spin_lock(&portdev->c_ovq_lock); + + portdev->cpkt.id = cpu_to_virtio32(portdev->vdev, port_id); + portdev->cpkt.event = cpu_to_virtio16(portdev->vdev, event); + portdev->cpkt.value = cpu_to_virtio16(portdev->vdev, value); + + sg_init_one(sg, &portdev->cpkt, sizeof(struct virtio_console_control)); + + if (virtqueue_add_outbuf(vq, sg, 1, &portdev->cpkt, GFP_ATOMIC) == 0) { + virtqueue_kick(vq); + while (!virtqueue_get_buf(vq, &len) + && !virtqueue_is_broken(vq)) + cpu_relax(); + } + + spin_unlock(&portdev->c_ovq_lock); + return 0; +} + +static ssize_t send_control_msg(struct port *port, unsigned int event, + unsigned int value) +{ + /* Did the port get unplugged before userspace closed it? */ + if (port->portdev) + return __send_control_msg(port->portdev, port->id, event, value); + return 0; +} + + +/* Callers must take the port->outvq_lock */ +static void reclaim_consumed_buffers(struct port *port) +{ + struct port_buffer *buf; + unsigned int len; + + if (!port->portdev) { + /* Device has been unplugged. vqs are already gone. 
*/ + return; + } + while ((buf = virtqueue_get_buf(port->out_vq, &len))) { + free_buf(buf, false); + port->outvq_full = false; + } +} + +static ssize_t __send_to_port(struct port *port, struct scatterlist *sg, + int nents, size_t in_count, + void *data, bool nonblock) +{ + struct virtqueue *out_vq; + int err; + unsigned long flags; + unsigned int len; + + out_vq = port->out_vq; + + spin_lock_irqsave(&port->outvq_lock, flags); + + reclaim_consumed_buffers(port); + + err = virtqueue_add_outbuf(out_vq, sg, nents, data, GFP_ATOMIC); + + /* Tell Host to go! */ + virtqueue_kick(out_vq); + + if (err) { + in_count = 0; + goto done; + } + + if (out_vq->num_free == 0) + port->outvq_full = true; + + if (nonblock) + goto done; + + /* + * Wait till the host acknowledges it pushed out the data we + * sent. This is done for data from the hvc_console; the tty + * operations are performed with spinlocks held so we can't + * sleep here. An alternative would be to copy the data to a + * buffer and relax the spinning requirement. The downside is + * we need to kmalloc a GFP_ATOMIC buffer each time the + * console driver writes something out. + */ + while (!virtqueue_get_buf(out_vq, &len) + && !virtqueue_is_broken(out_vq)) + cpu_relax(); +done: + spin_unlock_irqrestore(&port->outvq_lock, flags); + + port->stats.bytes_sent += in_count; + /* + * We're expected to return the amount of data we wrote -- all + * of it + */ + return in_count; +} + +/* + * Give out the data that's requested from the buffer that we have + * queued up. 
+ */ +static ssize_t fill_readbuf(struct port *port, char __user *out_buf, + size_t out_count, bool to_user) +{ + struct port_buffer *buf; + unsigned long flags; + if (!out_count || !port_has_data(port)) + return 0; + + buf = port->inbuf; + out_count = min(out_count, buf->len - buf->offset); + + if (to_user) { + ssize_t ret; + + ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count); + if (ret) + return -EFAULT; + } else { + memcpy((__force char *)out_buf, buf->buf + buf->offset, + out_count); + } + + buf->offset += out_count; + + if (buf->offset == buf->len) { + /* + * We're done using all the data in this buffer. + * Re-queue so that the Host can send us more data. + */ + spin_lock_irqsave(&port->inbuf_lock, flags); + port->inbuf = NULL; + + if (add_inbuf(port->in_vq, buf) < 0) + dev_warn(port->dev, "failed add_buf\n"); + + spin_unlock_irqrestore(&port->inbuf_lock, flags); + } + /* Return the number of bytes actually copied */ + return out_count; +} + +/* The condition that must be true for polling to end */ +static bool will_read_block(struct port *port) +{ + if (!port->guest_connected) { + /* Port got hot-unplugged. Let's exit. */ + return false; + } + return !port_has_data(port) && port->host_connected; +} + +static bool will_write_block(struct port *port) +{ + bool ret; + + if (!port->guest_connected) { + /* Port got hot-unplugged. Let's exit. */ + return false; + } + if (!port->host_connected) + return true; + + spin_lock_irq(&port->outvq_lock); + /* + * Check if the Host has consumed any buffers since we last + * sent data (this is only applicable for nonblocking ports). + */ + reclaim_consumed_buffers(port); + ret = port->outvq_full; + spin_unlock_irq(&port->outvq_lock); + + return ret; +} + +static ssize_t port_fops_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *offp) +{ + struct port *port; + ssize_t ret; + + port = filp->private_data; + + /* Port is hot-unplugged. 
*/ + if (!port->guest_connected) + return -ENODEV; + + if (!port_has_data(port)) { + /* + * If nothing's connected on the host just return 0 in + * case of list_empty; this tells the userspace app + * that there's no connection + */ + if (!port->host_connected) + return 0; + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + + ret = wait_event_freezable(port->waitqueue, + !will_read_block(port)); + if (ret < 0) + return ret; + } + /* Port got hot-unplugged while we were waiting above. */ + if (!port->guest_connected) + return -ENODEV; + /* + * We could've received a disconnection message while we were + * waiting for more data. + * + * This check is not clubbed in the if() statement above as we + * might receive some data as well as the host could get + * disconnected after we got woken up from our wait. So we + * really want to give off whatever data we have and only then + * check for host_connected. + */ + if (!port_has_data(port) && !port->host_connected) + return 0; + + return fill_readbuf(port, ubuf, count, true); +} + +static int wait_port_writable(struct port *port, bool nonblock) +{ + int ret; + + if (will_write_block(port)) { + if (nonblock) + return -EAGAIN; + + ret = wait_event_freezable(port->waitqueue, + !will_write_block(port)); + if (ret < 0) + return ret; + } + /* Port got hot-unplugged. 
*/ + if (!port->guest_connected) + return -ENODEV; + + return 0; +} + +static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, + size_t count, loff_t *offp) +{ + struct port *port; + struct port_buffer *buf; + ssize_t ret; + bool nonblock; + struct scatterlist sg[1]; + + /* Userspace could be out to fool us */ + if (!count) + return 0; + + port = filp->private_data; + + nonblock = filp->f_flags & O_NONBLOCK; + + ret = wait_port_writable(port, nonblock); + if (ret < 0) + return ret; + + count = min((size_t)(32 * 1024), count); + + buf = alloc_buf(port->portdev->vdev, count, 0); + if (!buf) + return -ENOMEM; + + ret = copy_from_user(buf->buf, ubuf, count); + if (ret) { + ret = -EFAULT; + goto free_buf; + } + + /* + * We now ask send_buf() to not spin for generic ports -- we + * can re-use the same code path that non-blocking file + * descriptors take for blocking file descriptors since the + * wait is already done and we're certain the write will go + * through to the host. + */ + nonblock = true; + sg_init_one(sg, buf->buf, count); + ret = __send_to_port(port, sg, 1, count, buf, nonblock); + + if (nonblock && ret > 0) + goto out; + +free_buf: + free_buf(buf, true); +out: + return ret; +} + + +struct vtz_buf_struct{ + size_t buf_size; + void * buf; +}; + + +#define VTZ_IOC_MAGIC 'v' +#define TC_NS_CLIENT_IOCTL_READ_REQ \ + _IOWR(VTZ_IOC_MAGIC, 1, struct vtz_buf_struct) +#define TC_NS_CLIENT_IOCTL_WRITE_REQ \ + _IOWR(VTZ_IOC_MAGIC, 2, struct vtz_buf_struct) + +static int vtz_read_ioctl(struct file *filp, unsigned int cmd, struct vtz_buf_struct *vtz_buf) +{ + int ret = -EINVAL; + char *ubuf = vtz_buf->buf; + size_t count = vtz_buf->buf_size; + struct port *port; + + port = filp->private_data; + + /* Port is hot-unplugged. 
*/ + if (!port->guest_connected) + return -ENODEV; + + if (!port_has_data(port)) { + /* + * If nothing's connected on the host just return 0 in + * case of list_empty; this tells the userspace app + * that there's no connection + */ + if (!port->host_connected) + return 0; + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + ret = wait_event_freezable(port->waitqueue, + !will_read_block(port)); + if (ret < 0) + return ret; + } + /* Port got hot-unplugged while we were waiting above. */ + if (!port->guest_connected) + return -ENODEV; + /* + * We could've received a disconnection message while we were + * waiting for more data. + * + * This check is not clubbed in the if() statement above as we + * might receive some data as well as the host could get + * disconnected after we got woken up from our wait. So we + * really want to give off whatever data we have and only then + * check for host_connected. + */ + if (!port_has_data(port) && !port->host_connected) { + return 0; + } + + ret = fill_readbuf(port, ubuf, count, false); + return ret; +} + +static int vtz_write_ioctl(struct file *filp, unsigned int cmd, struct vtz_buf_struct *vtz_buf) +{ + int ret = -EINVAL; + char *ubuf = vtz_buf->buf; + size_t count = vtz_buf->buf_size; + + struct port *port; + struct port_buffer *buf; + bool nonblock; + struct scatterlist sg[1]; + /* Userspace could be out to fool us */ + if (!count) + return 0; + + port = filp->private_data; + + nonblock = filp->f_flags & O_NONBLOCK; + + ret = wait_port_writable(port, nonblock); + if (ret < 0) + return ret; + + count = min((size_t)(32 * 1024), count); + + buf = alloc_buf(port->portdev->vdev, count, 0); + if (!buf) + return -ENOMEM; + + memcpy(buf->buf, ubuf, count); + //ret = copy_from_user(buf->buf, ubuf, count); + //if (ret) { + // ret = -EFAULT; + // goto free_buf; + //} + + /* + * We now ask send_buf() to not spin for generic ports -- we + * can re-use the same code path that non-blocking file + * descriptors take for blocking file 
descriptors since the + * wait is already done and we're certain the write will go + * through to the host. + */ + nonblock = true; + sg_init_one(sg, buf->buf, count); + ret = __send_to_port(port, sg, 1, count, buf, nonblock); + + if (nonblock && ret > 0) + goto out; + +free_buf: + free_buf(buf, true); +out: + return ret; +} + +static long vtz_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret = -EFAULT; + void *argp = (void *)(uintptr_t)arg; + struct vtz_buf_struct vtz_buf; + memcpy(&vtz_buf, argp, sizeof(vtz_buf)); + switch (cmd) { + case TC_NS_CLIENT_IOCTL_READ_REQ: + ret = vtz_read_ioctl(file, cmd, &vtz_buf); + break; + case TC_NS_CLIENT_IOCTL_WRITE_REQ: + ret = vtz_write_ioctl(file, cmd, &vtz_buf); + break; + default: + break; + } + return ret; +} + +#ifdef CONFIG_COMPAT +long vtz_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + long ret; + + if (!file) + return -EINVAL; + + ret = vtz_ioctl(file, cmd, (unsigned long)(uintptr_t)compat_ptr(arg)); + return ret; +} +#endif + +struct sg_list { + unsigned int n; + unsigned int size; + size_t len; + struct scatterlist *sg; +}; + +static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf, + struct splice_desc *sd) +{ + struct sg_list *sgl = sd->u.data; + unsigned int offset, len; + + if (sgl->n == sgl->size) + return 0; + + /* Try lock this page */ + if (pipe_buf_try_steal(pipe, buf)) { + /* Get reference and unlock page for moving */ + get_page(buf->page); + unlock_page(buf->page); + + len = min(buf->len, sd->len); + sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset); + } else { + /* Failback to copying a page */ + struct page *page = alloc_page(GFP_KERNEL); + char *src; + + if (!page) + return -ENOMEM; + + offset = sd->pos & ~PAGE_MASK; + + len = sd->len; + if (len + offset > PAGE_SIZE) + len = PAGE_SIZE - offset; + + src = kmap_atomic(buf->page); + memcpy(page_address(page) + offset, src + buf->offset, len); + kunmap_atomic(src); + + 
sg_set_page(&(sgl->sg[sgl->n]), page, len, offset); + } + sgl->n++; + sgl->len += len; + + return len; +} + +/* Faster zero-copy write by splicing */ +static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, + struct file *filp, loff_t *ppos, + size_t len, unsigned int flags) +{ + struct port *port = filp->private_data; + struct sg_list sgl; + ssize_t ret; + struct port_buffer *buf; + struct splice_desc sd = { + .total_len = len, + .flags = flags, + .pos = *ppos, + .u.data = &sgl, + }; + unsigned int occupancy; + + /* + * Rproc_serial does not yet support splice. To support splice + * pipe_to_sg() must allocate dma-buffers and copy content from + * regular pages to dma pages. And alloc_buf and free_buf must + * support allocating and freeing such a list of dma-buffers. + */ + if (is_rproc_serial(port->out_vq->vdev)) + return -EINVAL; + + pipe_lock(pipe); + ret = 0; + if (pipe_empty(pipe->head, pipe->tail)) + goto error_out; + + ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK); + if (ret < 0) + goto error_out; + + occupancy = pipe_occupancy(pipe->head, pipe->tail); + buf = alloc_buf(port->portdev->vdev, 0, occupancy); + + if (!buf) { + ret = -ENOMEM; + goto error_out; + } + + sgl.n = 0; + sgl.len = 0; + sgl.size = occupancy; + sgl.sg = buf->sg; + sg_init_table(sgl.sg, sgl.size); + ret = __splice_from_pipe(pipe, &sd, pipe_to_sg); + pipe_unlock(pipe); + if (likely(ret > 0)) + ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true); + + if (unlikely(ret <= 0)) + free_buf(buf, true); + return ret; + +error_out: + pipe_unlock(pipe); + return ret; +} + +static __poll_t port_fops_poll(struct file *filp, poll_table *wait) +{ + struct port *port; + __poll_t ret; + + port = filp->private_data; + poll_wait(filp, &port->waitqueue, wait); + + if (!port->guest_connected) { + /* Port got unplugged */ + return EPOLLHUP; + } + ret = 0; + if (!will_read_block(port)) + ret |= EPOLLIN | EPOLLRDNORM; + if (!will_write_block(port)) + ret |= EPOLLOUT; + if 
(!port->host_connected) + ret |= EPOLLHUP; + + return ret; +} + +static void remove_port(struct kref *kref); + +static int port_fops_release(struct inode *inode, struct file *filp) +{ + struct port *port; + + port = filp->private_data; + + /* Notify host of port being closed */ + send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); + + spin_lock_irq(&port->inbuf_lock); + port->guest_connected = false; + + discard_port_data(port); + + spin_unlock_irq(&port->inbuf_lock); + + spin_lock_irq(&port->outvq_lock); + reclaim_consumed_buffers(port); + spin_unlock_irq(&port->outvq_lock); + + reclaim_dma_bufs(); + /* + * Locks aren't necessary here as a port can't be opened after + * unplug, and if a port isn't unplugged, a kref would already + * exist for the port. Plus, taking ports_lock here would + * create a dependency on other locks taken by functions + * inside remove_port if we're the last holder of the port, + * creating many problems. + */ + kref_put(&port->kref, remove_port); + + return 0; +} + +static int port_fops_open(struct inode *inode, struct file *filp) +{ + struct cdev *cdev = inode->i_cdev; + struct port *port; + int ret; + + /* We get the port with a kref here */ + port = find_port_by_devt(cdev->dev); + if (!port) { + /* Port was unplugged before we could proceed */ + return -ENXIO; + } + filp->private_data = port; + + /* + * Don't allow opening of console port devices -- that's done + * via /dev/hvc + */ + if (is_console_port(port)) { + ret = -ENXIO; + goto out; + } + + /* Allow only one process to open a particular port at a time */ + spin_lock_irq(&port->inbuf_lock); + if (port->guest_connected) { + spin_unlock_irq(&port->inbuf_lock); + ret = -EBUSY; + goto out; + } + + port->guest_connected = true; + spin_unlock_irq(&port->inbuf_lock); + + spin_lock_irq(&port->outvq_lock); + /* + * There might be a chance that we missed reclaiming a few + * buffers in the window of the port getting previously closed + * and opening now. 
+ */ + reclaim_consumed_buffers(port); + spin_unlock_irq(&port->outvq_lock); + + nonseekable_open(inode, filp); + + /* Notify host of port being opened */ + send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); + + return 0; +out: + kref_put(&port->kref, remove_port); + return ret; +} + +static int port_fops_fasync(int fd, struct file *filp, int mode) +{ + struct port *port; + + port = filp->private_data; + return fasync_helper(fd, filp, mode, &port->async_queue); +} + +/* + * The file operations that we support: programs in the guest can open + * a console device, read from it, write to it, poll for data and + * close it. The devices are at + * /dev/vportp + */ +static const struct file_operations port_fops = { + .owner = THIS_MODULE, + .open = port_fops_open, + .read = port_fops_read, + .write = port_fops_write, + .splice_write = port_fops_splice_write, + .poll = port_fops_poll, + .release = port_fops_release, + .fasync = port_fops_fasync, + .llseek = no_llseek, + .unlocked_ioctl = vtz_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = vtz_compat_ioctl, +#endif +}; + +/* + * The put_chars() callback is pretty straightforward. + * + * We turn the characters into a scatter-gather list, add it to the + * output queue and then kick the Host. Then we sit here waiting for + * it to finish: inefficient in theory, but in practice + * implementations will do it immediately. 
+ */ +static int put_chars(u32 vtermno, const char *buf, int count) +{ + struct port *port; + struct scatterlist sg[1]; + void *data; + int ret; + + if (unlikely(early_put_chars)) + return early_put_chars(vtermno, buf, count); + + port = find_port_by_vtermno(vtermno); + if (!port) + return -EPIPE; + + data = kmemdup(buf, count, GFP_ATOMIC); + if (!data) + return -ENOMEM; + + sg_init_one(sg, data, count); + ret = __send_to_port(port, sg, 1, count, data, false); + kfree(data); + return ret; +} + +/* + * get_chars() is the callback from the hvc_console infrastructure + * when an interrupt is received. + * + * We call out to fill_readbuf that gets us the required data from the + * buffers that are queued up. + */ +static int get_chars(u32 vtermno, char *buf, int count) +{ + struct port *port; + + /* If we've not set up the port yet, we have no input to give. */ + if (unlikely(early_put_chars)) + return 0; + + port = find_port_by_vtermno(vtermno); + if (!port) + return -EPIPE; + + /* If we don't have an input queue yet, we can't get input. */ + BUG_ON(!port->in_vq); + + return fill_readbuf(port, (__force char __user *)buf, count, false); +} + +static void resize_console(struct port *port) +{ + struct virtio_device *vdev; + + /* The port could have been hot-unplugged */ + if (!port || !is_console_port(port)) + return; + + vdev = port->portdev->vdev; + + /* Don't test F_SIZE at all if we're rproc: not a valid feature! 
*/ + if (!is_rproc_serial(vdev) && + virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) + hvc_resize(port->cons.hvc, port->cons.ws); +} + +/* We set the configuration at this point, since we now have a tty */ +static int notifier_add_vio(struct hvc_struct *hp, int data) +{ + struct port *port; + + port = find_port_by_vtermno(hp->vtermno); + if (!port) + return -EINVAL; + + hp->irq_requested = 1; + resize_console(port); + + return 0; +} + +static void notifier_del_vio(struct hvc_struct *hp, int data) +{ + hp->irq_requested = 0; +} + +/* The operations for console ports. */ +static const struct hv_ops hv_ops = { + .get_chars = get_chars, + .put_chars = put_chars, + .notifier_add = notifier_add_vio, + .notifier_del = notifier_del_vio, + .notifier_hangup = notifier_del_vio, +}; + +/* + * Console drivers are initialized very early so boot messages can go + * out, so we do things slightly differently from the generic virtio + * initialization of the net and block drivers. + * + * At this stage, the console is output-only. It's too early to set + * up a virtqueue, so we let the drivers do some boutique early-output + * thing. + */ +int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)) +{ + early_put_chars = put_chars; + return hvc_instantiate(0, 0, &hv_ops); +} + +static int init_port_console(struct port *port) +{ + int ret; + + /* + * The Host's telling us this port is a console port. Hook it + * up with an hvc console. + * + * To set up and manage our virtual console, we call + * hvc_alloc(). + * + * The first argument of hvc_alloc() is the virtual console + * number. The second argument is the parameter for the + * notification mechanism (like irq number). We currently + * leave this as zero, virtqueues have implicit notifications. + * + * The third argument is a "struct hv_ops" containing the + * put_chars() get_chars(), notifier_add() and notifier_del() + * pointers. 
The final argument is the output buffer size: we + * can do any size, so we put PAGE_SIZE here. + */ + port->cons.vtermno = pdrvdata.next_vtermno; + + port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE); + if (IS_ERR(port->cons.hvc)) { + ret = PTR_ERR(port->cons.hvc); + dev_err(port->dev, + "error %d allocating hvc for port\n", ret); + port->cons.hvc = NULL; + return ret; + } + spin_lock_irq(&pdrvdata_lock); + pdrvdata.next_vtermno++; + list_add_tail(&port->cons.list, &pdrvdata.consoles); + spin_unlock_irq(&pdrvdata_lock); + port->guest_connected = true; + + /* + * Start using the new console output if this is the first + * console to come up. + */ + if (early_put_chars) + early_put_chars = NULL; + + /* Notify host of port being opened */ + send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); + + return 0; +} + +static ssize_t show_port_name(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct port *port; + + port = dev_get_drvdata(dev); + + return sprintf(buffer, "%s\n", port->name); +} + +static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL); + +static struct attribute *port_sysfs_entries[] = { + &dev_attr_name.attr, + NULL +}; + +static const struct attribute_group port_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = port_sysfs_entries, +}; + +static int port_debugfs_show(struct seq_file *s, void *data) +{ + struct port *port = s->private; + + seq_printf(s, "name: %s\n", port->name ? port->name : ""); + seq_printf(s, "guest_connected: %d\n", port->guest_connected); + seq_printf(s, "host_connected: %d\n", port->host_connected); + seq_printf(s, "outvq_full: %d\n", port->outvq_full); + seq_printf(s, "bytes_sent: %lu\n", port->stats.bytes_sent); + seq_printf(s, "bytes_received: %lu\n", port->stats.bytes_received); + seq_printf(s, "bytes_discarded: %lu\n", port->stats.bytes_discarded); + seq_printf(s, "is_console: %s\n", + is_console_port(port) ? 
"yes" : "no"); + seq_printf(s, "console_vtermno: %u\n", port->cons.vtermno); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(port_debugfs); + +static void set_console_size(struct port *port, u16 rows, u16 cols) +{ + if (!port || !is_console_port(port)) + return; + + port->cons.ws.ws_row = rows; + port->cons.ws.ws_col = cols; +} + +static int fill_queue(struct virtqueue *vq, spinlock_t *lock) +{ + struct port_buffer *buf; + int nr_added_bufs; + int ret; + + nr_added_bufs = 0; + do { + buf = alloc_buf(vq->vdev, PAGE_SIZE, 0); + if (!buf) + return -ENOMEM; + + spin_lock_irq(lock); + ret = add_inbuf(vq, buf); + if (ret < 0) { + spin_unlock_irq(lock); + free_buf(buf, true); + return ret; + } + nr_added_bufs++; + spin_unlock_irq(lock); + } while (ret > 0); + + return nr_added_bufs; +} + +static void send_sigio_to_port(struct port *port) +{ + if (port->async_queue && port->guest_connected) + kill_fasync(&port->async_queue, SIGIO, POLL_OUT); +} + +static int add_port(struct ports_device *portdev, u32 id) +{ + char debugfs_name[16]; + struct port *port; + dev_t devt; + int err; + + port = kmalloc(sizeof(*port), GFP_KERNEL); + if (!port) { + err = -ENOMEM; + goto fail; + } + kref_init(&port->kref); + + port->portdev = portdev; + port->id = id; + + port->name = NULL; + port->inbuf = NULL; + port->cons.hvc = NULL; + port->async_queue = NULL; + + port->cons.ws.ws_row = port->cons.ws.ws_col = 0; + port->cons.vtermno = 0; + + port->host_connected = port->guest_connected = false; + port->stats = (struct port_stats) { 0 }; + + port->outvq_full = false; + + port->in_vq = portdev->in_vqs[port->id]; + port->out_vq = portdev->out_vqs[port->id]; + + port->cdev = cdev_alloc(); + if (!port->cdev) { + dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n"); + err = -ENOMEM; + goto free_port; + } + port->cdev->ops = &port_fops; + + devt = MKDEV(portdev->chr_major, id); + err = cdev_add(port->cdev, devt, 1); + if (err < 0) { + dev_err(&port->portdev->vdev->dev, + "Error %d adding cdev for 
port %u\n", err, id); + goto free_cdev; + } + port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, + devt, port, "vport%up%u", + port->portdev->vdev->index, id); + if (IS_ERR(port->dev)) { + err = PTR_ERR(port->dev); + dev_err(&port->portdev->vdev->dev, + "Error %d creating device for port %u\n", + err, id); + goto free_cdev; + } + + spin_lock_init(&port->inbuf_lock); + spin_lock_init(&port->outvq_lock); + init_waitqueue_head(&port->waitqueue); + + /* We can safely ignore ENOSPC because it means + * the queue already has buffers. Buffers are removed + * only by virtcons_remove(), not by unplug_port() + */ + err = fill_queue(port->in_vq, &port->inbuf_lock); + if (err < 0 && err != -ENOSPC) { + dev_err(port->dev, "Error allocating inbufs\n"); + goto free_device; + } + + if (is_rproc_serial(port->portdev->vdev)) + /* + * For rproc_serial assume remote processor is connected. + * rproc_serial does not want the console port, only + * the generic port implementation. + */ + port->host_connected = true; + else if (!use_multiport(port->portdev)) { + /* + * If we're not using multiport support, + * this has to be a console port. + */ + err = init_port_console(port); + if (err) + goto free_inbufs; + } + + spin_lock_irq(&portdev->ports_lock); + list_add_tail(&port->list, &port->portdev->ports); + spin_unlock_irq(&portdev->ports_lock); + + /* + * Tell the Host we're set so that it can send us various + * configuration parameters for this port (eg, port name, + * caching, whether this is a console port, etc.) 
+ */ + send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); + + if (pdrvdata.debugfs_dir) { + /* + * Finally, create the debugfs file that we can use to + * inspect a port's state at any time + */ + snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u", + port->portdev->vdev->index, id); + port->debugfs_file = debugfs_create_file(debugfs_name, 0444, + pdrvdata.debugfs_dir, + port, + &port_debugfs_fops); + } + return 0; + +free_inbufs: +free_device: + device_destroy(pdrvdata.class, port->dev->devt); +free_cdev: + cdev_del(port->cdev); +free_port: + kfree(port); +fail: + /* The host might want to notify management sw about port add failure */ + __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0); + return err; +} + +/* No users remain, remove all port-specific data. */ +static void remove_port(struct kref *kref) +{ + struct port *port; + + port = container_of(kref, struct port, kref); + + kfree(port); +} + +static void remove_port_data(struct port *port) +{ + spin_lock_irq(&port->inbuf_lock); + /* Remove unused data this port might have received. */ + discard_port_data(port); + spin_unlock_irq(&port->inbuf_lock); + + spin_lock_irq(&port->outvq_lock); + reclaim_consumed_buffers(port); + spin_unlock_irq(&port->outvq_lock); +} + +/* + * Port got unplugged. Remove port from portdev's list and drop the + * kref reference. If no userspace has this port opened, it will + * result in immediate removal the port. + */ +static void unplug_port(struct port *port) +{ + spin_lock_irq(&port->portdev->ports_lock); + list_del(&port->list); + spin_unlock_irq(&port->portdev->ports_lock); + + spin_lock_irq(&port->inbuf_lock); + if (port->guest_connected) { + /* Let the app know the port is going down. 
*/ + send_sigio_to_port(port); + + /* Do this after sigio is actually sent */ + port->guest_connected = false; + port->host_connected = false; + + wake_up_interruptible(&port->waitqueue); + } + spin_unlock_irq(&port->inbuf_lock); + + if (is_console_port(port)) { + spin_lock_irq(&pdrvdata_lock); + list_del(&port->cons.list); + spin_unlock_irq(&pdrvdata_lock); + hvc_remove(port->cons.hvc); + } + + remove_port_data(port); + + /* + * We should just assume the device itself has gone off -- + * else a close on an open port later will try to send out a + * control message. + */ + port->portdev = NULL; + + sysfs_remove_group(&port->dev->kobj, &port_attribute_group); + device_destroy(pdrvdata.class, port->dev->devt); + cdev_del(port->cdev); + + debugfs_remove(port->debugfs_file); + kfree(port->name); + + /* + * Locks around here are not necessary - a port can't be + * opened after we removed the port struct from ports_list + * above. + */ + kref_put(&port->kref, remove_port); +} + +/* Any private messages that the Host and Guest want to share */ +static void handle_control_message(struct virtio_device *vdev, + struct ports_device *portdev, + struct port_buffer *buf) +{ + struct virtio_console_control *cpkt; + struct port *port; + size_t name_size; + int err; + + cpkt = (struct virtio_console_control *)(buf->buf + buf->offset); + + port = find_port_by_id(portdev, virtio32_to_cpu(vdev, cpkt->id)); + if (!port && + cpkt->event != cpu_to_virtio16(vdev, VIRTIO_CONSOLE_PORT_ADD)) { + /* No valid header at start of buffer. Drop it. 
*/ + dev_dbg(&portdev->vdev->dev, + "Invalid index %u in control packet\n", cpkt->id); + return; + } + + switch (virtio16_to_cpu(vdev, cpkt->event)) { + case VIRTIO_CONSOLE_PORT_ADD: + if (port) { + dev_dbg(&portdev->vdev->dev, + "Port %u already added\n", port->id); + send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); + break; + } + if (virtio32_to_cpu(vdev, cpkt->id) >= + portdev->max_nr_ports) { + dev_warn(&portdev->vdev->dev, + "Request for adding port with " + "out-of-bound id %u, max. supported id: %u\n", + cpkt->id, portdev->max_nr_ports - 1); + break; + } + add_port(portdev, virtio32_to_cpu(vdev, cpkt->id)); + break; + case VIRTIO_CONSOLE_PORT_REMOVE: + unplug_port(port); + break; + case VIRTIO_CONSOLE_CONSOLE_PORT: + if (!cpkt->value) + break; + if (is_console_port(port)) + break; + + init_port_console(port); + complete(&early_console_added); + /* + * Could remove the port here in case init fails - but + * have to notify the host first. + */ + break; + case VIRTIO_CONSOLE_RESIZE: { + struct { + __u16 rows; + __u16 cols; + } size; + + if (!is_console_port(port)) + break; + + memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt), + sizeof(size)); + set_console_size(port, size.rows, size.cols); + + port->cons.hvc->irq_requested = 1; + resize_console(port); + break; + } + case VIRTIO_CONSOLE_PORT_OPEN: + port->host_connected = virtio16_to_cpu(vdev, cpkt->value); + wake_up_interruptible(&port->waitqueue); + /* + * If the host port got closed and the host had any + * unconsumed buffers, we'll be able to reclaim them + * now. + */ + spin_lock_irq(&port->outvq_lock); + reclaim_consumed_buffers(port); + spin_unlock_irq(&port->outvq_lock); + + /* + * If the guest is connected, it'll be interested in + * knowing the host connection state changed. + */ + spin_lock_irq(&port->inbuf_lock); + send_sigio_to_port(port); + spin_unlock_irq(&port->inbuf_lock); + break; + case VIRTIO_CONSOLE_PORT_NAME: + /* + * If we woke up after hibernation, we can get this + * again. 
Skip it in that case. + */ + if (port->name) + break; + + /* + * Skip the size of the header and the cpkt to get the size + * of the name that was sent + */ + name_size = buf->len - buf->offset - sizeof(*cpkt) + 1; + + port->name = kmalloc(name_size, GFP_KERNEL); + if (!port->name) { + dev_err(port->dev, + "Not enough space to store port name\n"); + break; + } + strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt), + name_size - 1); + port->name[name_size - 1] = 0; + + /* + * Since we only have one sysfs attribute, 'name', + * create it only if we have a name for the port. + */ + err = sysfs_create_group(&port->dev->kobj, + &port_attribute_group); + if (err) { + dev_err(port->dev, + "Error %d creating sysfs device attributes\n", + err); + } else { + /* + * Generate a udev event so that appropriate + * symlinks can be created based on udev + * rules. + */ + kobject_uevent(&port->dev->kobj, KOBJ_CHANGE); + } + break; + } +} + +static void control_work_handler(struct work_struct *work) +{ + struct ports_device *portdev; + struct virtqueue *vq; + struct port_buffer *buf; + unsigned int len; + + portdev = container_of(work, struct ports_device, control_work); + vq = portdev->c_ivq; + + spin_lock(&portdev->c_ivq_lock); + while ((buf = virtqueue_get_buf(vq, &len))) { + spin_unlock(&portdev->c_ivq_lock); + + buf->len = min_t(size_t, len, buf->size); + buf->offset = 0; + + handle_control_message(vq->vdev, portdev, buf); + + spin_lock(&portdev->c_ivq_lock); + if (add_inbuf(portdev->c_ivq, buf) < 0) { + dev_warn(&portdev->vdev->dev, + "Error adding buffer to queue\n"); + free_buf(buf, false); + } + } + spin_unlock(&portdev->c_ivq_lock); +} + +static void flush_bufs(struct virtqueue *vq, bool can_sleep) +{ + struct port_buffer *buf; + unsigned int len; + + while ((buf = virtqueue_get_buf(vq, &len))) + free_buf(buf, can_sleep); +} + +static void out_intr(struct virtqueue *vq) +{ + struct port *port; + + port = find_port_by_vq(vq->vdev->priv, vq); + if (!port) { + 
flush_bufs(vq, false); + return; + } + + wake_up_interruptible(&port->waitqueue); +} + +static void in_intr(struct virtqueue *vq) +{ + struct port *port; + unsigned long flags; + + port = find_port_by_vq(vq->vdev->priv, vq); + if (!port) { + flush_bufs(vq, false); + return; + } + + spin_lock_irqsave(&port->inbuf_lock, flags); + port->inbuf = get_inbuf(port); + + /* + * Normally the port should not accept data when the port is + * closed. For generic serial ports, the host won't (shouldn't) + * send data till the guest is connected. But this condition + * can be reached when a console port is not yet connected (no + * tty is spawned) and the other side sends out data over the + * vring, or when a remote devices start sending data before + * the ports are opened. + * + * A generic serial port will discard data if not connected, + * while console ports and rproc-serial ports accepts data at + * any time. rproc-serial is initiated with guest_connected to + * false because port_fops_open expects this. Console ports are + * hooked up with an HVC console and is initialized with + * guest_connected to true. 
+ */ + + if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev)) + discard_port_data(port); + + /* Send a SIGIO indicating new data in case the process asked for it */ + send_sigio_to_port(port); + + spin_unlock_irqrestore(&port->inbuf_lock, flags); + + wake_up_interruptible(&port->waitqueue); + + if (is_console_port(port) && hvc_poll(port->cons.hvc)) + hvc_kick(); +} + +static void control_intr(struct virtqueue *vq) +{ + struct ports_device *portdev; + + portdev = vq->vdev->priv; + schedule_work(&portdev->control_work); +} + +static void config_intr(struct virtio_device *vdev) +{ + struct ports_device *portdev; + + portdev = vdev->priv; + + if (!use_multiport(portdev)) + schedule_work(&portdev->config_work); +} + +static void config_work_handler(struct work_struct *work) +{ + struct ports_device *portdev; + + portdev = container_of(work, struct ports_device, config_work); + if (!use_multiport(portdev)) { + struct virtio_device *vdev; + struct port *port; + u16 rows, cols; + + vdev = portdev->vdev; + virtio_cread(vdev, struct virtio_console_config, cols, &cols); + virtio_cread(vdev, struct virtio_console_config, rows, &rows); + + port = find_port_by_id(portdev, 0); + set_console_size(port, rows, cols); + + /* + * We'll use this way of resizing only for legacy + * support. For newer userspace + * (VIRTIO_CONSOLE_F_MULTPORT+), use control messages + * to indicate console size changes so that it can be + * done per-port. + */ + resize_console(port); + } +} + +static int init_vqs(struct ports_device *portdev) +{ + vq_callback_t **io_callbacks; + char **io_names; + struct virtqueue **vqs; + u32 i, j, nr_ports, nr_queues; + int err; + + nr_ports = portdev->max_nr_ports; + nr_queues = use_multiport(portdev) ? 
(nr_ports + 1) * 2 : 2; + + vqs = kmalloc_array(nr_queues, sizeof(struct virtqueue *), GFP_KERNEL); + io_callbacks = kmalloc_array(nr_queues, sizeof(vq_callback_t *), + GFP_KERNEL); + io_names = kmalloc_array(nr_queues, sizeof(char *), GFP_KERNEL); + portdev->in_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *), + GFP_KERNEL); + portdev->out_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *), + GFP_KERNEL); + if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs || + !portdev->out_vqs) { + err = -ENOMEM; + goto free; + } + + /* + * For backward compat (newer host but older guest), the host + * spawns a console port first and also inits the vqs for port + * 0 before others. + */ + j = 0; + io_callbacks[j] = in_intr; + io_callbacks[j + 1] = out_intr; + io_names[j] = "input"; + io_names[j + 1] = "output"; + j += 2; + + if (use_multiport(portdev)) { + io_callbacks[j] = control_intr; + io_callbacks[j + 1] = NULL; + io_names[j] = "control-i"; + io_names[j + 1] = "control-o"; + + for (i = 1; i < nr_ports; i++) { + j += 2; + io_callbacks[j] = in_intr; + io_callbacks[j + 1] = out_intr; + io_names[j] = "input"; + io_names[j + 1] = "output"; + } + } + /* Find the queues. 
*/ + err = virtio_find_vqs(portdev->vdev, nr_queues, vqs, + io_callbacks, + (const char **)io_names, NULL); + if (err) + goto free; + + j = 0; + portdev->in_vqs[0] = vqs[0]; + portdev->out_vqs[0] = vqs[1]; + j += 2; + if (use_multiport(portdev)) { + portdev->c_ivq = vqs[j]; + portdev->c_ovq = vqs[j + 1]; + + for (i = 1; i < nr_ports; i++) { + j += 2; + portdev->in_vqs[i] = vqs[j]; + portdev->out_vqs[i] = vqs[j + 1]; + } + } + kfree(io_names); + kfree(io_callbacks); + kfree(vqs); + + return 0; + +free: + kfree(portdev->out_vqs); + kfree(portdev->in_vqs); + kfree(io_names); + kfree(io_callbacks); + kfree(vqs); + + return err; +} + +static const struct file_operations portdev_fops = { + .owner = THIS_MODULE, +}; + +static void remove_vqs(struct ports_device *portdev) +{ + struct virtqueue *vq; + + virtio_device_for_each_vq(portdev->vdev, vq) { + struct port_buffer *buf; + + flush_bufs(vq, true); + while ((buf = virtqueue_detach_unused_buf(vq))) + free_buf(buf, true); + } + portdev->vdev->config->del_vqs(portdev->vdev); + kfree(portdev->in_vqs); + kfree(portdev->out_vqs); +} + +static void virtcons_remove(struct virtio_device *vdev) +{ + struct ports_device *portdev; + struct port *port, *port2; + + portdev = vdev->priv; + + spin_lock_irq(&pdrvdata_lock); + list_del(&portdev->list); + spin_unlock_irq(&pdrvdata_lock); + + /* Device is going away, exit any polling for buffers */ + virtio_break_device(vdev); + if (use_multiport(portdev)) + flush_work(&portdev->control_work); + else + flush_work(&portdev->config_work); + + /* Disable interrupts for vqs */ + vdev->config->reset(vdev); + /* Finish up work that's lined up */ + if (use_multiport(portdev)) + cancel_work_sync(&portdev->control_work); + else + cancel_work_sync(&portdev->config_work); + + list_for_each_entry_safe(port, port2, &portdev->ports, list) + unplug_port(port); + + unregister_chrdev(portdev->chr_major, "virtio-portsdev"); + + /* + * When yanking out a device, we immediately lose the + * (device-side) 
queues. So there's no point in keeping the + * guest side around till we drop our final reference. This + * also means that any ports which are in an open state will + * have to just stop using the port, as the vqs are going + * away. + */ + remove_vqs(portdev); + kfree(portdev); +} + +/* + * Once we're further in boot, we get probed like any other virtio + * device. + * + * If the host also supports multiple console ports, we check the + * config space to see how many ports the host has spawned. We + * initialize each port found. + */ +static int virtcons_probe(struct virtio_device *vdev) +{ + struct ports_device *portdev; + int err; + bool multiport; + bool early = early_put_chars != NULL; + + /* We only need a config space if features are offered */ + if (!vdev->config->get && + (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE) + || virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT))) { + dev_err(&vdev->dev, "%s failure: config access disabled\n", + __func__); + return -EINVAL; + } + + /* Ensure to read early_put_chars now */ + barrier(); + + portdev = kmalloc(sizeof(*portdev), GFP_KERNEL); + if (!portdev) { + err = -ENOMEM; + goto fail; + } + + /* Attach this portdev to this virtio_device, and vice-versa. */ + portdev->vdev = vdev; + vdev->priv = portdev; + + portdev->chr_major = register_chrdev(0, "virtio-portsdev", + &portdev_fops); + if (portdev->chr_major < 0) { + dev_err(&vdev->dev, + "Error %d registering chrdev for device %u\n", + portdev->chr_major, vdev->index); + err = portdev->chr_major; + goto free; + } + + multiport = false; + portdev->max_nr_ports = 1; + + /* Don't test MULTIPORT at all if we're rproc: not a valid feature! 
*/ + if (!is_rproc_serial(vdev) && + virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT, + struct virtio_console_config, max_nr_ports, + &portdev->max_nr_ports) == 0) { + multiport = true; + } + + err = init_vqs(portdev); + if (err < 0) { + dev_err(&vdev->dev, "Error %d initializing vqs\n", err); + goto free_chrdev; + } + + spin_lock_init(&portdev->ports_lock); + INIT_LIST_HEAD(&portdev->ports); + INIT_LIST_HEAD(&portdev->list); + + virtio_device_ready(portdev->vdev); + + INIT_WORK(&portdev->config_work, &config_work_handler); + INIT_WORK(&portdev->control_work, &control_work_handler); + + if (multiport) { + spin_lock_init(&portdev->c_ivq_lock); + spin_lock_init(&portdev->c_ovq_lock); + + err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); + if (err < 0) { + dev_err(&vdev->dev, + "Error allocating buffers for control queue\n"); + /* + * The host might want to notify mgmt sw about device + * add failure. + */ + __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, + VIRTIO_CONSOLE_DEVICE_READY, 0); + /* Device was functional: we need full cleanup. */ + virtcons_remove(vdev); + return err; + } + } else { + /* + * For backward compatibility: Create a console port + * if we're running on older host. + */ + add_port(portdev, 0); + } + + spin_lock_irq(&pdrvdata_lock); + list_add_tail(&portdev->list, &pdrvdata.portdevs); + spin_unlock_irq(&pdrvdata_lock); + + __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, + VIRTIO_CONSOLE_DEVICE_READY, 1); + + /* + * If there was an early virtio console, assume that there are no + * other consoles. We need to wait until the hvc_alloc matches the + * hvc_instantiate, otherwise tty_open will complain, resulting in + * a "Warning: unable to open an initial console" boot failure. + * Without multiport this is done in add_port above. With multiport + * this might take some host<->guest communication - thus we have to + * wait. 
+ */ + if (multiport && early) + wait_for_completion(&early_console_added); + + return 0; + +free_chrdev: + unregister_chrdev(portdev->chr_major, "virtio-portsdev"); +free: + kfree(portdev); +fail: + return err; +} + +static const struct virtio_device_id id_table[] = { + { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; +MODULE_DEVICE_TABLE(virtio, id_table); + +static const unsigned int features[] = { + VIRTIO_CONSOLE_F_SIZE, + VIRTIO_CONSOLE_F_MULTIPORT, +}; + +static const struct virtio_device_id rproc_serial_id_table[] = { +#if IS_ENABLED(CONFIG_REMOTEPROC) + { VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID }, +#endif + { 0 }, +}; +MODULE_DEVICE_TABLE(virtio, rproc_serial_id_table); + +static const unsigned int rproc_serial_features[] = { +}; + +#ifdef CONFIG_PM_SLEEP +static int virtcons_freeze(struct virtio_device *vdev) +{ + struct ports_device *portdev; + struct port *port; + + portdev = vdev->priv; + + vdev->config->reset(vdev); + + if (use_multiport(portdev)) + virtqueue_disable_cb(portdev->c_ivq); + cancel_work_sync(&portdev->control_work); + cancel_work_sync(&portdev->config_work); + /* + * Once more: if control_work_handler() was running, it would + * enable the cb as the last step. + */ + if (use_multiport(portdev)) + virtqueue_disable_cb(portdev->c_ivq); + + list_for_each_entry(port, &portdev->ports, list) { + virtqueue_disable_cb(port->in_vq); + virtqueue_disable_cb(port->out_vq); + /* + * We'll ask the host later if the new invocation has + * the port opened or closed. 
+ */ + port->host_connected = false; + remove_port_data(port); + } + remove_vqs(portdev); + + return 0; +} + +static int virtcons_restore(struct virtio_device *vdev) +{ + struct ports_device *portdev; + struct port *port; + int ret; + + portdev = vdev->priv; + + ret = init_vqs(portdev); + if (ret) + return ret; + + virtio_device_ready(portdev->vdev); + + if (use_multiport(portdev)) + fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); + + list_for_each_entry(port, &portdev->ports, list) { + port->in_vq = portdev->in_vqs[port->id]; + port->out_vq = portdev->out_vqs[port->id]; + + fill_queue(port->in_vq, &port->inbuf_lock); + + /* Get port open/close status on the host */ + send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); + + /* + * If a port was open at the time of suspending, we + * have to let the host know that it's still open. + */ + if (port->guest_connected) + send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); + } + return 0; +} +#endif + +static struct virtio_driver virtio_console = { + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virtcons_probe, + .remove = virtcons_remove, + .config_changed = config_intr, +#ifdef CONFIG_PM_SLEEP + .freeze = virtcons_freeze, + .restore = virtcons_restore, +#endif +}; + +static struct virtio_driver virtio_rproc_serial = { + .feature_table = rproc_serial_features, + .feature_table_size = ARRAY_SIZE(rproc_serial_features), + .driver.name = "virtio_rproc_serial", + .driver.owner = THIS_MODULE, + .id_table = rproc_serial_id_table, + .probe = virtcons_probe, + .remove = virtcons_remove, +}; + +static int __init virtio_console_init(void) +{ + int err; + + pdrvdata.class = class_create(THIS_MODULE, "virtio-ports"); + if (IS_ERR(pdrvdata.class)) { + err = PTR_ERR(pdrvdata.class); + pr_err("Error %d creating virtio-ports class\n", err); + return err; + } + + pdrvdata.debugfs_dir = 
debugfs_create_dir("virtio-ports", NULL); + if (!pdrvdata.debugfs_dir) + pr_warn("Error creating debugfs dir for virtio-ports\n"); + INIT_LIST_HEAD(&pdrvdata.consoles); + INIT_LIST_HEAD(&pdrvdata.portdevs); + + err = register_virtio_driver(&virtio_console); + if (err < 0) { + pr_err("Error %d registering virtio driver\n", err); + goto free; + } + err = register_virtio_driver(&virtio_rproc_serial); + if (err < 0) { + pr_err("Error %d registering virtio rproc serial driver\n", + err); + goto unregister; + } + return 0; +unregister: + unregister_virtio_driver(&virtio_console); +free: + debugfs_remove_recursive(pdrvdata.debugfs_dir); + class_destroy(pdrvdata.class); + return err; +} + +static void __exit virtio_console_fini(void) +{ + reclaim_dma_bufs(); + + unregister_virtio_driver(&virtio_console); + unregister_virtio_driver(&virtio_rproc_serial); + + class_destroy(pdrvdata.class); + debugfs_remove_recursive(pdrvdata.debugfs_dir); +} +module_init(virtio_console_init); +module_exit(virtio_console_fini); + +MODULE_DESCRIPTION("Virtio console driver"); +MODULE_LICENSE("GPL"); + diff --git a/trustzone-awared-vm/VM/virtio/tty/hvc/hvc_console.c b/trustzone-awared-vm/VM/virtio/tty/hvc/hvc_console.c new file mode 100644 index 0000000000000000000000000000000000000000..5613204f2c1658754d63287b7fe34479840e711f --- /dev/null +++ b/trustzone-awared-vm/VM/virtio/tty/hvc/hvc_console.c @@ -0,0 +1,1070 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2001 Anton Blanchard , IBM + * Copyright (C) 2001 Paul Mackerras , IBM + * Copyright (C) 2004 Benjamin Herrenschmidt , IBM Corp. + * Copyright (C) 2004 IBM Corporation + * + * Additional Author(s): + * Ryan S. 
Arnold + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "hvc_console.h" + +#define HVC_MAJOR 229 +#define HVC_MINOR 0 + +/* + * Wait this long per iteration while trying to push buffered data to the + * hypervisor before allowing the tty to complete a close operation. + */ +#define HVC_CLOSE_WAIT (HZ/100) /* 1/10 of a second */ + +/* + * These sizes are most efficient for vio, because they are the + * native transfer size. We could make them selectable in the + * future to better deal with backends that want other buffer sizes. + */ +#define N_OUTBUF 16 +#define N_INBUF 16 + +#define __ALIGNED__ __attribute__((__aligned__(sizeof(long)))) + +static struct tty_driver *hvc_driver; +static struct task_struct *hvc_task; + +/* Picks up late kicks after list walk but before schedule() */ +static int hvc_kicked; + +/* hvc_init is triggered from hvc_alloc, i.e. only when actually used */ +static atomic_t hvc_needs_init __read_mostly = ATOMIC_INIT(-1); + +static int hvc_init(void); + +#ifdef CONFIG_MAGIC_SYSRQ +static int sysrq_pressed; +#endif + +/* dynamic list of hvc_struct instances */ +static LIST_HEAD(hvc_structs); + +/* + * Protect the list of hvc_struct instances from inserts and removals during + * list traversal. + */ +static DEFINE_MUTEX(hvc_structs_mutex); + +/* + * This value is used to assign a tty->index value to a hvc_struct based + * upon order of exposure via hvc_probe(), when we can not match it to + * a console candidate registered with hvc_instantiate(). + */ +static int last_hvc = -1; + +/* + * Do not call this function with either the hvc_structs_mutex or the hvc_struct + * lock held. If successful, this function increments the kref reference + * count against the target hvc_struct so it should be released when finished. 
+ */ +static struct hvc_struct *hvc_get_by_index(int index) +{ + struct hvc_struct *hp; + unsigned long flags; + + mutex_lock(&hvc_structs_mutex); + + list_for_each_entry(hp, &hvc_structs, next) { + spin_lock_irqsave(&hp->lock, flags); + if (hp->index == index) { + tty_port_get(&hp->port); + spin_unlock_irqrestore(&hp->lock, flags); + mutex_unlock(&hvc_structs_mutex); + return hp; + } + spin_unlock_irqrestore(&hp->lock, flags); + } + hp = NULL; + mutex_unlock(&hvc_structs_mutex); + + return hp; +} + +static int __hvc_flush(const struct hv_ops *ops, uint32_t vtermno, bool wait) +{ + if (wait) + might_sleep(); + + if (ops->flush) + return ops->flush(vtermno, wait); + return 0; +} + +static int hvc_console_flush(const struct hv_ops *ops, uint32_t vtermno) +{ + return __hvc_flush(ops, vtermno, false); +} + +/* + * Wait for the console to flush before writing more to it. This sleeps. + */ +static int hvc_flush(struct hvc_struct *hp) +{ + return __hvc_flush(hp->ops, hp->vtermno, true); +} + +/* + * Initial console vtermnos for console API usage prior to full console + * initialization. Any vty adapter outside this range will not have usable + * console interfaces but can still be used as a tty device. This has to be + * static because kmalloc will not work during early console init. + */ +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES]; +static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] = + {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1}; + +/* + * Console APIs, NOT TTY. These APIs are available immediately when + * hvc_console_setup() finds adapters. + */ + +static void hvc_console_print(struct console *co, const char *b, + unsigned count) +{ + char c[N_OUTBUF] __ALIGNED__; + unsigned i = 0, n = 0; + int r, donecr = 0, index = co->index; + + /* Console access attempt outside of acceptable console range. */ + if (index >= MAX_NR_HVC_CONSOLES) + return; + + /* This console adapter was removed so it is not usable. 
*/ + if (vtermnos[index] == -1) + return; + + while (count > 0 || i > 0) { + if (count > 0 && i < sizeof(c)) { + if (b[n] == '\n' && !donecr) { + c[i++] = '\r'; + donecr = 1; + } else { + c[i++] = b[n++]; + donecr = 0; + --count; + } + } else { + r = cons_ops[index]->put_chars(vtermnos[index], c, i); + if (r <= 0) { + /* throw away characters on error + * but spin in case of -EAGAIN */ + if (r != -EAGAIN) { + i = 0; + } else { + hvc_console_flush(cons_ops[index], + vtermnos[index]); + } + } else if (r > 0) { + i -= r; + if (i > 0) + memmove(c, c+r, i); + } + } + } + hvc_console_flush(cons_ops[index], vtermnos[index]); +} + +static struct tty_driver *hvc_console_device(struct console *c, int *index) +{ + if (vtermnos[c->index] == -1) + return NULL; + + *index = c->index; + return hvc_driver; +} + +static int hvc_console_setup(struct console *co, char *options) +{ + if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES) + return -ENODEV; + + if (vtermnos[co->index] == -1) + return -ENODEV; + + return 0; +} + +static struct console hvc_console = { + .name = "hvc", + .write = hvc_console_print, + .device = hvc_console_device, + .setup = hvc_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, +}; + +/* + * Early console initialization. Precedes driver initialization. + * + * (1) we are first, and the user specified another driver + * -- index will remain -1 + * (2) we are first and the user specified no driver + * -- index will be set to 0, then we will fail setup. + * (3) we are first and the user specified our driver + * -- index will be set to user specified driver, and we will fail + * (4) we are after driver, and this initcall will register us + * -- if the user didn't specify a driver then the console will match + * + * Note that for cases 2 and 3, we will match later when the io driver + * calls hvc_instantiate() and call register again. 
+ */ +static int __init hvc_console_init(void) +{ + register_console(&hvc_console); + return 0; +} +console_initcall(hvc_console_init); + +/* callback when the kboject ref count reaches zero. */ +static void hvc_port_destruct(struct tty_port *port) +{ + struct hvc_struct *hp = container_of(port, struct hvc_struct, port); + unsigned long flags; + + mutex_lock(&hvc_structs_mutex); + + spin_lock_irqsave(&hp->lock, flags); + list_del(&(hp->next)); + spin_unlock_irqrestore(&hp->lock, flags); + + mutex_unlock(&hvc_structs_mutex); + + kfree(hp); +} + +static void hvc_check_console(int index) +{ + /* Already enabled, bail out */ + if (hvc_console.flags & CON_ENABLED) + return; + + /* If this index is what the user requested, then register + * now (setup won't fail at this point). It's ok to just + * call register again if previously .setup failed. + */ + if (index == hvc_console.index) + register_console(&hvc_console); +} + +/* + * hvc_instantiate() is an early console discovery method which locates + * consoles * prior to the vio subsystem discovering them. Hotplugged + * vty adapters do NOT get an hvc_instantiate() callback since they + * appear after early console init. 
+ */ +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops) +{ + struct hvc_struct *hp; + + if (index < 0 || index >= MAX_NR_HVC_CONSOLES) + return -1; + + if (vtermnos[index] != -1) + return -1; + + /* make sure no no tty has been registered in this index */ + hp = hvc_get_by_index(index); + if (hp) { + tty_port_put(&hp->port); + return -1; + } + + vtermnos[index] = vtermno; + cons_ops[index] = ops; + + /* check if we need to re-register the kernel console */ + hvc_check_console(index); + + return 0; +} +EXPORT_SYMBOL_GPL(hvc_instantiate); + +/* Wake the sleeping khvcd */ +void hvc_kick(void) +{ + hvc_kicked = 1; + wake_up_process(hvc_task); +} +EXPORT_SYMBOL_GPL(hvc_kick); + +static void hvc_unthrottle(struct tty_struct *tty) +{ + hvc_kick(); +} + +static int hvc_install(struct tty_driver *driver, struct tty_struct *tty) +{ + struct hvc_struct *hp; + int rc; + + /* Auto increments kref reference if found. */ + hp = hvc_get_by_index(tty->index); + if (!hp) + return -ENODEV; + + tty->driver_data = hp; + + rc = tty_port_install(&hp->port, driver, tty); + if (rc) + tty_port_put(&hp->port); + return rc; +} + +/* + * The TTY interface won't be used until after the vio layer has exposed the vty + * adapter to the kernel. + */ +static int hvc_open(struct tty_struct *tty, struct file * filp) +{ + struct hvc_struct *hp = tty->driver_data; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&hp->port.lock, flags); + /* Check and then increment for fast path open. */ + if (hp->port.count++ > 0) { + spin_unlock_irqrestore(&hp->port.lock, flags); + hvc_kick(); + return 0; + } /* else count == 0 */ + spin_unlock_irqrestore(&hp->port.lock, flags); + + tty_port_tty_set(&hp->port, tty); + + if (hp->ops->notifier_add) + rc = hp->ops->notifier_add(hp, hp->data); + + /* + * If the notifier fails we return an error. 
The tty layer + * will call hvc_close() after a failed open but we don't want to clean + * up there so we'll clean up here and clear out the previously set + * tty fields and return the kref reference. + */ + if (rc) { + printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc); + } else { + /* We are ready... raise DTR/RTS */ + if (C_BAUD(tty)) + if (hp->ops->dtr_rts) + hp->ops->dtr_rts(hp, 1); + tty_port_set_initialized(&hp->port, true); + } + + /* Force wakeup of the polling thread */ + hvc_kick(); + + return rc; +} + +static void hvc_close(struct tty_struct *tty, struct file * filp) +{ + struct hvc_struct *hp = tty->driver_data; + unsigned long flags; + + if (tty_hung_up_p(filp)) + return; + + spin_lock_irqsave(&hp->port.lock, flags); + + if (--hp->port.count == 0) { + spin_unlock_irqrestore(&hp->port.lock, flags); + /* We are done with the tty pointer now. */ + tty_port_tty_set(&hp->port, NULL); + + if (!tty_port_initialized(&hp->port)) + return; + + if (C_HUPCL(tty)) + if (hp->ops->dtr_rts) + hp->ops->dtr_rts(hp, 0); + + if (hp->ops->notifier_del) + hp->ops->notifier_del(hp, hp->data); + + /* cancel pending tty resize work */ + cancel_work_sync(&hp->tty_resize); + + /* + * Chain calls chars_in_buffer() and returns immediately if + * there is no buffered data otherwise sleeps on a wait queue + * waking periodically to check chars_in_buffer(). 
+ */ + tty_wait_until_sent(tty, HVC_CLOSE_WAIT); + tty_port_set_initialized(&hp->port, false); + } else { + if (hp->port.count < 0) + printk(KERN_ERR "hvc_close %X: oops, count is %d\n", + hp->vtermno, hp->port.count); + spin_unlock_irqrestore(&hp->port.lock, flags); + } +} + +static void hvc_cleanup(struct tty_struct *tty) +{ + struct hvc_struct *hp = tty->driver_data; + + tty_port_put(&hp->port); +} + +static void hvc_hangup(struct tty_struct *tty) +{ + struct hvc_struct *hp = tty->driver_data; + unsigned long flags; + + if (!hp) + return; + + /* cancel pending tty resize work */ + cancel_work_sync(&hp->tty_resize); + + spin_lock_irqsave(&hp->port.lock, flags); + + /* + * The N_TTY line discipline has problems such that in a close vs + * open->hangup case this can be called after the final close so prevent + * that from happening for now. + */ + if (hp->port.count <= 0) { + spin_unlock_irqrestore(&hp->port.lock, flags); + return; + } + + hp->port.count = 0; + spin_unlock_irqrestore(&hp->port.lock, flags); + tty_port_tty_set(&hp->port, NULL); + + hp->n_outbuf = 0; + + if (hp->ops->notifier_hangup) + hp->ops->notifier_hangup(hp, hp->data); +} + +/* + * Push buffered characters whether they were just recently buffered or waiting + * on a blocked hypervisor. Call this function with hp->lock held. + */ +static int hvc_push(struct hvc_struct *hp) +{ + int n; + + n = hp->ops->put_chars(hp->vtermno, hp->outbuf, hp->n_outbuf); + if (n <= 0) { + if (n == 0 || n == -EAGAIN) { + hp->do_wakeup = 1; + return 0; + } + /* throw away output on error; this happens when + there is no session connected to the vterm. 
*/ + hp->n_outbuf = 0; + } else + hp->n_outbuf -= n; + if (hp->n_outbuf > 0) + memmove(hp->outbuf, hp->outbuf + n, hp->n_outbuf); + else + hp->do_wakeup = 1; + + return n; +} + +static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count) +{ + struct hvc_struct *hp = tty->driver_data; + unsigned long flags; + int rsize, written = 0; + + /* This write was probably executed during a tty close. */ + if (!hp) + return -EPIPE; + + /* FIXME what's this (unprotected) check for? */ + if (hp->port.count <= 0) + return -EIO; + + while (count > 0) { + int ret = 0; + + spin_lock_irqsave(&hp->lock, flags); + + rsize = hp->outbuf_size - hp->n_outbuf; + + if (rsize) { + if (rsize > count) + rsize = count; + memcpy(hp->outbuf + hp->n_outbuf, buf, rsize); + count -= rsize; + buf += rsize; + hp->n_outbuf += rsize; + written += rsize; + } + + if (hp->n_outbuf > 0) + ret = hvc_push(hp); + + spin_unlock_irqrestore(&hp->lock, flags); + + if (!ret) + break; + + if (count) { + if (hp->n_outbuf > 0) + hvc_flush(hp); + cond_resched(); + } + } + + /* + * Racy, but harmless, kick thread if there is still pending data. + */ + if (hp->n_outbuf) + hvc_kick(); + + return written; +} + +/** + * hvc_set_winsz() - Resize the hvc tty terminal window. + * @work: work structure. + * + * The routine shall not be called within an atomic context because it + * might sleep. 
+ * + * Locking: hp->lock + */ +static void hvc_set_winsz(struct work_struct *work) +{ + struct hvc_struct *hp; + unsigned long hvc_flags; + struct tty_struct *tty; + struct winsize ws; + + hp = container_of(work, struct hvc_struct, tty_resize); + + tty = tty_port_tty_get(&hp->port); + if (!tty) + return; + + spin_lock_irqsave(&hp->lock, hvc_flags); + ws = hp->ws; + spin_unlock_irqrestore(&hp->lock, hvc_flags); + + tty_do_resize(tty, &ws); + tty_kref_put(tty); +} + +/* + * This is actually a contract between the driver and the tty layer outlining + * how much write room the driver can guarantee will be sent OR BUFFERED. This + * driver MUST honor the return value. + */ +static int hvc_write_room(struct tty_struct *tty) +{ + struct hvc_struct *hp = tty->driver_data; + + if (!hp) + return 0; + + return hp->outbuf_size - hp->n_outbuf; +} + +static int hvc_chars_in_buffer(struct tty_struct *tty) +{ + struct hvc_struct *hp = tty->driver_data; + + if (!hp) + return 0; + return hp->n_outbuf; +} + +/* + * timeout will vary between the MIN and MAX values defined here. By default + * and during console activity we will use a default MIN_TIMEOUT of 10. When + * the console is idle, we increase the timeout value on each pass through + * msleep until we reach the max. This may be noticeable as a brief (average + * one second) delay on the console before the console responds to input when + * there has been no input for some time. + */ +#define MIN_TIMEOUT (10) +#define MAX_TIMEOUT (2000) +static u32 timeout = MIN_TIMEOUT; + +/* + * Maximum number of bytes to get from the console driver if hvc_poll is + * called from driver (and can't sleep). Any more than this and we break + * and start polling with khvcd. This value was derived from from an OpenBMC + * console with the OPAL driver that results in about 0.25ms interrupts off + * latency. 
+ */ +#define HVC_ATOMIC_READ_MAX 128 + +#define HVC_POLL_READ 0x00000001 +#define HVC_POLL_WRITE 0x00000002 + +static int __hvc_poll(struct hvc_struct *hp, bool may_sleep) +{ + struct tty_struct *tty; + int i, n, count, poll_mask = 0; + char buf[N_INBUF] __ALIGNED__; + unsigned long flags; + int read_total = 0; + int written_total = 0; + + spin_lock_irqsave(&hp->lock, flags); + + /* Push pending writes */ + if (hp->n_outbuf > 0) + written_total = hvc_push(hp); + + /* Reschedule us if still some write pending */ + if (hp->n_outbuf > 0) { + poll_mask |= HVC_POLL_WRITE; + /* If hvc_push() was not able to write, sleep a few msecs */ + timeout = (written_total) ? 0 : MIN_TIMEOUT; + } + + if (may_sleep) { + spin_unlock_irqrestore(&hp->lock, flags); + cond_resched(); + spin_lock_irqsave(&hp->lock, flags); + } + + /* No tty attached, just skip */ + tty = tty_port_tty_get(&hp->port); + if (tty == NULL) + goto bail; + + /* Now check if we can get data (are we throttled ?) */ + if (tty_throttled(tty)) + goto out; + + /* If we aren't notifier driven and aren't throttled, we always + * request a reschedule + */ + if (!hp->irq_requested) + poll_mask |= HVC_POLL_READ; + + read_again: + /* Read data if any */ + count = tty_buffer_request_room(&hp->port, N_INBUF); + + /* If flip is full, just reschedule a later read */ + if (count == 0) { + poll_mask |= HVC_POLL_READ; + goto out; + } + + n = hp->ops->get_chars(hp->vtermno, buf, count); + if (n <= 0) { + /* Hangup the tty when disconnected from host */ + if (n == -EPIPE) { + spin_unlock_irqrestore(&hp->lock, flags); + tty_hangup(tty); + spin_lock_irqsave(&hp->lock, flags); + } else if ( n == -EAGAIN ) { + /* + * Some back-ends can only ensure a certain min + * num of bytes read, which may be > 'count'. + * Let the tty clear the flip buff to make room. 
+ */ + poll_mask |= HVC_POLL_READ; + } + goto out; + } + + for (i = 0; i < n; ++i) { +#ifdef CONFIG_MAGIC_SYSRQ + if (hp->index == hvc_console.index) { + /* Handle the SysRq Hack */ + /* XXX should support a sequence */ + if (buf[i] == '\x0f') { /* ^O */ + /* if ^O is pressed again, reset + * sysrq_pressed and flip ^O char */ + sysrq_pressed = !sysrq_pressed; + if (sysrq_pressed) + continue; + } else if (sysrq_pressed) { + handle_sysrq(buf[i]); + sysrq_pressed = 0; + continue; + } + } +#endif /* CONFIG_MAGIC_SYSRQ */ + tty_insert_flip_char(&hp->port, buf[i], 0); + } + read_total += n; + + if (may_sleep) { + /* Keep going until the flip is full */ + spin_unlock_irqrestore(&hp->lock, flags); + cond_resched(); + spin_lock_irqsave(&hp->lock, flags); + goto read_again; + } else if (read_total < HVC_ATOMIC_READ_MAX) { + /* Break and defer if it's a large read in atomic */ + goto read_again; + } + + /* + * Latency break, schedule another poll immediately. + */ + poll_mask |= HVC_POLL_READ; + + out: + /* Wakeup write queue if necessary */ + if (hp->do_wakeup) { + hp->do_wakeup = 0; + tty_wakeup(tty); + } + bail: + spin_unlock_irqrestore(&hp->lock, flags); + + if (read_total) { + /* Activity is occurring, so reset the polling backoff value to + a minimum for performance. */ + timeout = MIN_TIMEOUT; + + tty_flip_buffer_push(&hp->port); + } + tty_kref_put(tty); + + return poll_mask; +} + +int hvc_poll(struct hvc_struct *hp) +{ + return __hvc_poll(hp, false); +} +EXPORT_SYMBOL_GPL(hvc_poll); + +/** + * __hvc_resize() - Update terminal window size information. + * @hp: HVC console pointer + * @ws: Terminal window size structure + * + * Stores the specified window size information in the hvc structure of @hp. + * The function schedule the tty resize update. 
+ * + * Locking: Locking free; the function MUST be called holding hp->lock + */ +void __hvc_resize(struct hvc_struct *hp, struct winsize ws) +{ + hp->ws = ws; + schedule_work(&hp->tty_resize); +} +EXPORT_SYMBOL_GPL(__hvc_resize); + +/* + * This kthread is either polling or interrupt driven. This is determined by + * calling hvc_poll() who determines whether a console adapter support + * interrupts. + */ +static int khvcd(void *unused) +{ + int poll_mask; + struct hvc_struct *hp; + + set_freezable(); + do { + poll_mask = 0; + hvc_kicked = 0; + try_to_freeze(); + wmb(); + if (!cpus_are_in_xmon()) { + mutex_lock(&hvc_structs_mutex); + list_for_each_entry(hp, &hvc_structs, next) { + poll_mask |= __hvc_poll(hp, true); + cond_resched(); + } + mutex_unlock(&hvc_structs_mutex); + } else + poll_mask |= HVC_POLL_READ; + if (hvc_kicked) + continue; + set_current_state(TASK_INTERRUPTIBLE); + if (!hvc_kicked) { + if (poll_mask == 0) + schedule(); + else { + unsigned long j_timeout; + + if (timeout < MAX_TIMEOUT) + timeout += (timeout >> 6) + 1; + + /* + * We don't use msleep_interruptible otherwise + * "kick" will fail to wake us up + */ + j_timeout = msecs_to_jiffies(timeout) + 1; + schedule_timeout_interruptible(j_timeout); + } + } + __set_current_state(TASK_RUNNING); + } while (!kthread_should_stop()); + + return 0; +} + +static int hvc_tiocmget(struct tty_struct *tty) +{ + struct hvc_struct *hp = tty->driver_data; + + if (!hp || !hp->ops->tiocmget) + return -EINVAL; + return hp->ops->tiocmget(hp); +} + +static int hvc_tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear) +{ + struct hvc_struct *hp = tty->driver_data; + + if (!hp || !hp->ops->tiocmset) + return -EINVAL; + return hp->ops->tiocmset(hp, set, clear); +} + +#ifdef CONFIG_CONSOLE_POLL +static int hvc_poll_init(struct tty_driver *driver, int line, char *options) +{ + return 0; +} + +static int hvc_poll_get_char(struct tty_driver *driver, int line) +{ + struct tty_struct *tty = driver->ttys[0]; + 
struct hvc_struct *hp = tty->driver_data; + int n; + char ch; + + n = hp->ops->get_chars(hp->vtermno, &ch, 1); + + if (n <= 0) + return NO_POLL_CHAR; + + return ch; +} + +static void hvc_poll_put_char(struct tty_driver *driver, int line, char ch) +{ + struct tty_struct *tty = driver->ttys[0]; + struct hvc_struct *hp = tty->driver_data; + int n; + + do { + n = hp->ops->put_chars(hp->vtermno, &ch, 1); + } while (n <= 0); +} +#endif + +static const struct tty_operations hvc_ops = { + .install = hvc_install, + .open = hvc_open, + .close = hvc_close, + .cleanup = hvc_cleanup, + .write = hvc_write, + .hangup = hvc_hangup, + .unthrottle = hvc_unthrottle, + .write_room = hvc_write_room, + .chars_in_buffer = hvc_chars_in_buffer, + .tiocmget = hvc_tiocmget, + .tiocmset = hvc_tiocmset, +#ifdef CONFIG_CONSOLE_POLL + .poll_init = hvc_poll_init, + .poll_get_char = hvc_poll_get_char, + .poll_put_char = hvc_poll_put_char, +#endif +}; + +static const struct tty_port_operations hvc_port_ops = { + .destruct = hvc_port_destruct, +}; + +struct hvc_struct *hvc_alloc(uint32_t vtermno, int data, + const struct hv_ops *ops, + int outbuf_size) +{ + struct hvc_struct *hp; + int i; + + /* We wait until a driver actually comes along */ + if (atomic_inc_not_zero(&hvc_needs_init)) { + int err = hvc_init(); + if (err) + return ERR_PTR(err); + } + + hp = kzalloc(ALIGN(sizeof(*hp), sizeof(long)) + outbuf_size, + GFP_KERNEL); + if (!hp) + return ERR_PTR(-ENOMEM); + + hp->vtermno = vtermno; + hp->data = data; + hp->ops = ops; + hp->outbuf_size = outbuf_size; + hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))]; + + tty_port_init(&hp->port); + hp->port.ops = &hvc_port_ops; + + INIT_WORK(&hp->tty_resize, hvc_set_winsz); + spin_lock_init(&hp->lock); + mutex_lock(&hvc_structs_mutex); + + /* + * find index to use: + * see if this vterm id matches one registered for console. 
+ */ + for (i=0; i < MAX_NR_HVC_CONSOLES; i++) + if (vtermnos[i] == hp->vtermno && + cons_ops[i] == hp->ops) + break; + + if (i >= MAX_NR_HVC_CONSOLES) { + + /* find 'empty' slot for console */ + for (i = 0; i < MAX_NR_HVC_CONSOLES && vtermnos[i] != -1; i++) { + } + + /* no matching slot, just use a counter */ + if (i == MAX_NR_HVC_CONSOLES) + i = ++last_hvc + MAX_NR_HVC_CONSOLES; + } + + hp->index = i; + if (i < MAX_NR_HVC_CONSOLES) { + cons_ops[i] = ops; + vtermnos[i] = vtermno; + } + + list_add_tail(&(hp->next), &hvc_structs); + mutex_unlock(&hvc_structs_mutex); + + /* check if we need to re-register the kernel console */ + hvc_check_console(i); + + return hp; +} +EXPORT_SYMBOL_GPL(hvc_alloc); + +int hvc_remove(struct hvc_struct *hp) +{ + unsigned long flags; + struct tty_struct *tty; + + tty = tty_port_tty_get(&hp->port); + + console_lock(); + spin_lock_irqsave(&hp->lock, flags); + if (hp->index < MAX_NR_HVC_CONSOLES) { + vtermnos[hp->index] = -1; + cons_ops[hp->index] = NULL; + } + + /* Don't whack hp->irq because tty_hangup() will need to free the irq. */ + + spin_unlock_irqrestore(&hp->lock, flags); + console_unlock(); + + /* + * We 'put' the instance that was grabbed when the kref instance + * was initialized using kref_init(). Let the last holder of this + * kref cause it to be removed, which will probably be the tty_vhangup + * below. + */ + tty_port_put(&hp->port); + + /* + * This function call will auto chain call hvc_hangup. + */ + if (tty) { + tty_vhangup(tty); + tty_kref_put(tty); + } + return 0; +} +EXPORT_SYMBOL_GPL(hvc_remove); + +/* Driver initialization: called as soon as someone uses hvc_alloc(). */ +static int hvc_init(void) +{ + struct tty_driver *drv; + int err; + + /* We need more than hvc_count adapters due to hotplug additions. 
*/ + drv = alloc_tty_driver(HVC_ALLOC_TTY_ADAPTERS); + if (!drv) { + err = -ENOMEM; + goto out; + } + + drv->driver_name = "hvc"; + drv->name = "hvc"; + drv->major = HVC_MAJOR; + drv->minor_start = HVC_MINOR; + drv->type = TTY_DRIVER_TYPE_SYSTEM; + drv->init_termios = tty_std_termios; + drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS; + tty_set_operations(drv, &hvc_ops); + + /* Always start the kthread because there can be hotplug vty adapters + * added later. */ + hvc_task = kthread_run(khvcd, NULL, "khvcd"); + if (IS_ERR(hvc_task)) { + printk(KERN_ERR "Couldn't create kthread for console.\n"); + err = PTR_ERR(hvc_task); + goto put_tty; + } + + err = tty_register_driver(drv); + if (err) { + printk(KERN_ERR "Couldn't register hvc console driver\n"); + goto stop_thread; + } + + /* + * Make sure tty is fully registered before allowing it to be + * found by hvc_console_device. + */ + smp_mb(); + hvc_driver = drv; + return 0; + +stop_thread: + kthread_stop(hvc_task); + hvc_task = NULL; +put_tty: + put_tty_driver(drv); +out: + return err; +} + diff --git a/trustzone-awared-vm/VM/virtio/tty/hvc/hvc_console.h b/trustzone-awared-vm/VM/virtio/tty/hvc/hvc_console.h new file mode 100644 index 0000000000000000000000000000000000000000..c8fbbb60a371a84be102e86e2e11d136810d4e5a --- /dev/null +++ b/trustzone-awared-vm/VM/virtio/tty/hvc/hvc_console.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * hvc_console.h + * Copyright (C) 2005 IBM Corporation + * + * Author(s): + * Ryan S. Arnold + * + * hvc_console header information: + * moved here from arch/powerpc/include/asm/hvconsole.h + * and drivers/char/hvc_console.c + */ + +#ifndef HVC_CONSOLE_H +#define HVC_CONSOLE_H +#include +#include +#include + +/* + * This is the max number of console adapters that can/will be found as + * console devices on first stage console init. Any number beyond this range + * can't be used as a console device but is still a valid tty device. 
+ */ +#define MAX_NR_HVC_CONSOLES 16 + +/* + * The Linux TTY code does not support dynamic addition of tty derived devices + * so we need to know how many tty devices we might need when space is allocated + * for the tty device. Since this driver supports hotplug of vty adapters we + * need to make sure we have enough allocated. + */ +#define HVC_ALLOC_TTY_ADAPTERS 8 + +struct hvc_struct { + struct tty_port port; + spinlock_t lock; + int index; + int do_wakeup; + char *outbuf; + int outbuf_size; + int n_outbuf; + uint32_t vtermno; + const struct hv_ops *ops; + int irq_requested; + int data; + struct winsize ws; + struct work_struct tty_resize; + struct list_head next; + unsigned long flags; +}; + +/* implemented by a low level driver */ +struct hv_ops { + int (*get_chars)(uint32_t vtermno, char *buf, int count); + int (*put_chars)(uint32_t vtermno, const char *buf, int count); + int (*flush)(uint32_t vtermno, bool wait); + + /* Callbacks for notification. Called in open, close and hangup */ + int (*notifier_add)(struct hvc_struct *hp, int irq); + void (*notifier_del)(struct hvc_struct *hp, int irq); + void (*notifier_hangup)(struct hvc_struct *hp, int irq); + + /* tiocmget/set implementation */ + int (*tiocmget)(struct hvc_struct *hp); + int (*tiocmset)(struct hvc_struct *hp, unsigned int set, unsigned int clear); + + /* Callbacks to handle tty ports */ + void (*dtr_rts)(struct hvc_struct *hp, int raise); +}; + +/* Register a vterm and a slot index for use as a console (console_init) */ +extern int hvc_instantiate(uint32_t vtermno, int index, + const struct hv_ops *ops); + +/* register a vterm for hvc tty operation (module_init or hotplug add) */ +extern struct hvc_struct * hvc_alloc(uint32_t vtermno, int data, + const struct hv_ops *ops, int outbuf_size); +/* remove a vterm from hvc tty operation (module_exit or hotplug remove) */ +extern int hvc_remove(struct hvc_struct *hp); + +/* data available */ +int hvc_poll(struct hvc_struct *hp); +void hvc_kick(void); + 
+/* Resize hvc tty terminal window */ +extern void __hvc_resize(struct hvc_struct *hp, struct winsize ws); + +static inline void hvc_resize(struct hvc_struct *hp, struct winsize ws) +{ + unsigned long flags; + + spin_lock_irqsave(&hp->lock, flags); + __hvc_resize(hp, ws); + spin_unlock_irqrestore(&hp->lock, flags); +} + +/* default notifier for irq based notification */ +extern int notifier_add_irq(struct hvc_struct *hp, int data); +extern void notifier_del_irq(struct hvc_struct *hp, int data); +extern void notifier_hangup_irq(struct hvc_struct *hp, int data); + + +#if defined(CONFIG_XMON) && defined(CONFIG_SMP) +#include +#else +static inline int cpus_are_in_xmon(void) +{ + return 0; +} +#endif + +#endif // HVC_CONSOLE_H + diff --git a/trustzone-awared-vm/VM/vtzdriver/Makefile b/trustzone-awared-vm/VM/vtzdriver/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..13ec601098ece0532f1087a232b6da89a9e6ac36 --- /dev/null +++ b/trustzone-awared-vm/VM/vtzdriver/Makefile @@ -0,0 +1,33 @@ +#Makefile +obj-m := vtzfdriver.o + +vtzfdriver-objs := vtzf.o + +RESULT := $(shell cat /proc/kallsyms | grep vsnprintf_s) + +STATUS := $(findstring vsnprintf_s, $(RESULT)) + +# ifneq ($(STATUS), vsnprintf_s) +vtzfdriver-objs += libboundscheck/src/memcpy_s.o libboundscheck/src/memset_s.o libboundscheck/src/strcpy_s.o libboundscheck/src/strncpy_s.o \ +libboundscheck/src/memmove_s.o libboundscheck/src/strcat_s.o libboundscheck/src/strncat_s.o libboundscheck/src/strtok_s.o \ +libboundscheck/src/securecutil.o libboundscheck/src/secureprintoutput_a.o libboundscheck/src/snprintf_s.o libboundscheck/src/vsnprintf_s.o +vtzfdriver-objs += tlogger.o serialport.o tee_info.o reserved_shm.o process_data.o block_pages.o + +# endif + +KERN_VER = $(shell uname -r) +KERN_DIR = /lib/modules/$(KERN_VER)/build + +EXTRA_CFLAGS += -fstack-protector-strong -DCONFIG_AUTH_ENHANCE +EXTRA_CFLAGS += -I$(PWD)/libboundscheck/include/ +EXTRA_CFLAGS += -I$(PWD)/inc/ + +all: + make -C $(KERN_DIR) 
M=`pwd` modules + + +.PHONY: clean +clean: + # make -C $(KERN_DIR) M=`pwd` modules clean + -rm -vrf *.o *.ko + -rm -vrf *.order *.symvers *.mod.c *.mod.o .tmp_versions .*o.cmd .*.o.d *.mod \ No newline at end of file diff --git a/trustzone-awared-vm/VM/vtzdriver/block_pages.c b/trustzone-awared-vm/VM/vtzdriver/block_pages.c new file mode 100644 index 0000000000000000000000000000000000000000..8aa62f57920033a14f4c9c8bf46c6a668e7687b0 --- /dev/null +++ b/trustzone-awared-vm/VM/vtzdriver/block_pages.c @@ -0,0 +1,304 @@ +#include "block_pages.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tc_ns_client.h" +#include "tc_ns_log.h" +#include "teek_client_constants.h" + +static int get_block_count(uint64_t *phys_addr, uint32_t pages_no) +{ + uint32_t i=0; + int block_count = 1; + uint64_t addr1; + uint64_t addr2; + if (pages_no == 0) + return 0; + addr1 = phys_addr[0]; + for (i = 1; i < pages_no; i++) { + addr2 = phys_addr[i]; + if (addr2 != addr1 + PAGE_SIZE) { + block_count++; + } + addr1 = phys_addr[i]; + } + return block_count; +} + +static int do_convert_page_blocks(uint64_t *phys_addr, + uint32_t pages_no, uint64_t block_addr) +{ + struct page_block *block_buf = NULL; + uint32_t i=0; + uint32_t last_pos=0; + int block_count = 1; + uint64_t addr1; + uint64_t addr2; + if (pages_no == 0) + return 0; + block_buf = (struct page_block *)(uintptr_t)block_addr; + if (pages_no == 1) { + block_buf[0].page_num = 1; + block_buf[0].phy_addr = phys_addr[0]; + return 1; + } + + addr1 = phys_addr[0]; + for (i = 1; i < pages_no; i++) { + addr2 = phys_addr[i]; + if (addr2 != addr1 + PAGE_SIZE) { + block_buf[block_count-1].page_num = i - last_pos; + block_buf[block_count-1].phy_addr = phys_addr[last_pos]; + block_count++; + last_pos = i; + } + addr1 = addr2; + } + + block_buf[block_count-1].page_num = i - last_pos; + 
block_buf[block_count-1].phy_addr = phys_addr[last_pos]; + + return block_count; +} + +void dump_page_blocks(int block_num, uint64_t block_addr) +{ + struct page_block *block_buf = (struct page_block *)(uintptr_t)block_addr; + int i = 0; + for (i=0;ipage_num) { + tloge("bad size, cannot release page\n"); + return; + } + + for (i = 0; i < page_info->page_num; i++) { + page = (struct page *)(uintptr_t)phys_to_page(phys_addr[i]); + if (page == NULL) + continue; + set_bit(PG_dirty, &page->flags); + put_page(page); + } + kfree((void *)buf); +} + +int fill_shared_mem_info(uint64_t start_vaddr, uint32_t pages_no, + uint32_t offset, uint32_t buffer_size, uint64_t info_addr, + void **block_bufp, uint32_t *block_buf_sizep, int *block_countp) +{ + struct pagelist_info *page_info = NULL; + struct page **pages = NULL; + uint64_t *phys_addr = NULL; + uint32_t page_num; + uint32_t i; + int block_count = 0; + void *block_buf = NULL; + uint32_t block_buf_size = 0; + if (pages_no == 0) + return -EFAULT; + + pages = (struct page **)vmalloc(pages_no * sizeof(uint64_t)); + if (pages == NULL) + return -EFAULT; + + down_read(&mm_sem_lock(current->mm)); + page_num = get_user_pages((uintptr_t)start_vaddr, pages_no, FOLL_WRITE, pages, NULL); + up_read(&mm_sem_lock(current->mm)); + if (page_num != pages_no) { + tloge("get page phy addr failed\n"); + if (page_num > 0) + release_pages(pages, page_num); + vfree(pages); + return -EFAULT; + } + + page_info = (struct pagelist_info *)(uintptr_t)info_addr; + page_info->page_num = pages_no; + page_info->page_size = PAGE_SIZE; + page_info->sharedmem_offset = offset; + page_info->sharedmem_size = buffer_size; + + phys_addr = (uint64_t *)(uintptr_t)info_addr + (sizeof(*page_info) / sizeof(uint64_t)); + for (i = 0; i < pages_no; i++) { + struct page *page = pages[i]; + if (page == NULL) { + release_pages(pages, page_num); + vfree(pages); + return -EFAULT; + } + phys_addr[i] = (uintptr_t)page_to_phys(page); + } + block_count = get_block_count(phys_addr, 
pages_no); + tlogd("page_block count =%d\n",block_count); + + if (convert_page_blocks(phys_addr, pages_no, &block_buf, &block_buf_size) != 0) { + tloge("convert_page_blocks failed\n"); + release_pages(pages, page_num); + vfree(pages); + return -EFAULT; + } + *block_bufp = block_buf; + *block_buf_sizep = block_buf_size; + *block_countp = block_count; + + vfree(pages); + return 0; +} + +int get_page_block(void *user_buffer, uint32_t buf_size, + void **block_bufp, uint32_t *block_buf_sizep, int *block_countp, + void **pages_bufp, uint32_t *pages_buf_sizep) +{ + void *buff = NULL; + void *start_vaddr = NULL; + uint32_t buffer_size; + uint32_t pages_no; + uint32_t offset; + uint32_t buff_len; + uint64_t buffer_addr; + void *block_buf = NULL; + uint32_t block_buf_size = 0; + int block_count = 0; + + buffer_addr = (uint64_t)user_buffer; + buff = (void *)(uint64_t)(buffer_addr); + buffer_size = buf_size; + start_vaddr = (void *)(((uint64_t)buff) & PAGE_MASK); + offset = ((uint32_t)(uintptr_t)buff) & (~PAGE_MASK); + pages_no = PAGE_ALIGN(offset + buffer_size) / PAGE_SIZE; + buff_len = sizeof(struct pagelist_info) + (sizeof(uint64_t) * pages_no); + + tlogd("buffer_addr = %llx\n", buffer_addr); + tlogd("start_vaddr = %llx\n", (uint64_t)start_vaddr); + tlogd("offset = %x\n", offset); + tlogd("pages_no = %u\n", pages_no); + + buff = kzalloc(buff_len, GFP_KERNEL); + if (buff == NULL) { + tloge("kzalloc failed \n"); + return -EFAULT; + } + + if (fill_shared_mem_info((uint64_t)start_vaddr, pages_no, offset, buffer_size, + (uint64_t)buff, &block_buf, &block_buf_size, &block_count)) { + kfree(buff); + return -EFAULT; + } + *block_bufp = block_buf; + *block_buf_sizep = block_buf_size; + *block_countp = block_count; + *pages_bufp = buff; + *pages_buf_sizep = buff_len; + return 0; +} + +int test_fuc(const struct file *file, unsigned int cmd, + unsigned long arg) +{ + void *buff = NULL; + void *start_vaddr = NULL; + struct test test; + void *argp = (void __user *)(uintptr_t)arg; + 
uint32_t buffer_size = 0; + uint32_t pages_no = 0; + uint32_t offset = 0; + uint32_t buff_len = 0; + uint64_t buffer_addr = 0; + void *block_buf = NULL; + uint32_t block_buf_size = 0; + int block_count = 0; + tlogi("enter test func\n"); + if (!argp) { + tloge("invalid params\n"); + return -EINVAL; + } + if (copy_from_user(&test, argp, sizeof(test)) != 0) { + tloge("copy from user failed\n"); + return -EFAULT; + } + + buffer_addr = (uint64_t)test.user_buf; + buff = (void *)(uint64_t)(buffer_addr + test.offset); + buffer_size = test.buf_size; + start_vaddr = (void *)(((uint64_t)buff) & PAGE_MASK); + offset = ((uint32_t)(uintptr_t)buff) & (~PAGE_MASK); + pages_no = PAGE_ALIGN(offset + buffer_size) / PAGE_SIZE; + buff_len = sizeof(struct pagelist_info) + (sizeof(uint64_t) * pages_no); + + tlogi("buffer_addr = %llx\n", buffer_addr); + tlogi("start_vaddr = %p\n", start_vaddr); + tlogi("offset = %x\n", offset); + tlogi("pages_no = %u\n", pages_no); + + buff = kzalloc(buff_len, GFP_KERNEL); + if (buff == NULL) { + tloge("kzalloc failed \n"); + return -EFAULT; + } + + if (fill_shared_mem_info((uint64_t)start_vaddr, pages_no, offset, buffer_size, + (uint64_t)buff, &block_buf, &block_buf_size, &block_count)) { + kfree(buff); + return -EFAULT; + } + dump_page_blocks(block_count, (uint64_t)block_buf); + release_shared_mem_page((uint64_t)buff, buff_len); + return 0; +} \ No newline at end of file diff --git a/trustzone-awared-vm/VM/vtzdriver/block_pages.h b/trustzone-awared-vm/VM/vtzdriver/block_pages.h new file mode 100644 index 0000000000000000000000000000000000000000..7fd6239c33cb7d552d5a594db43d839aed3c04a5 --- /dev/null +++ b/trustzone-awared-vm/VM/vtzdriver/block_pages.h @@ -0,0 +1,54 @@ +#ifndef BLOCK_PAGES__H +#define BLOCK_PAGES__H +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tc_ns_client.h" +#include "tc_ns_log.h" 
+#include "teek_client_constants.h" + +struct test { + void * user_buf; + int buf_size; + uint32_t offset; +}; + +struct pagelist_info { + uint64_t page_num; + uint64_t page_size; + uint64_t sharedmem_offset; + uint64_t sharedmem_size; +}; + +struct page_block +{ + uint64_t phy_addr; + uint32_t page_num; + uint32_t frag_flag; +}; + +void dump_page_blocks(int block_num, uint64_t block_addr); +void release_shared_mem_page(uint64_t buf, uint32_t buf_size); +int get_page_block(void *user_buffer, uint32_t buf_size, + void **block_bufp, uint32_t *block_buf_sizep, int *block_countp, + void **pages_bufp, uint32_t *pages_buf_sizep); +int test_fuc(const struct file *file, unsigned int cmd, + unsigned long arg); + +#endif \ No newline at end of file diff --git a/trustzone-awared-vm/VM/vtzdriver/comm_structs.h b/trustzone-awared-vm/VM/vtzdriver/comm_structs.h new file mode 100644 index 0000000000000000000000000000000000000000..7622b9486b1cec5214f5c38a94ca3d8ffa094f41 --- /dev/null +++ b/trustzone-awared-vm/VM/vtzdriver/comm_structs.h @@ -0,0 +1,312 @@ +#ifndef COMM_STRUCTS_H +#define COMM_STRUCTS_H + +#include +#include "tc_ns_client.h" +#include "teek_ns_client.h" + +#define CERT_BUF_MAX_SIZE 2048 + +#define TC_NS_CLIENT_DEV_FLAG 3 +#define TC_PRIVATE_DEV_FLAG 4 +#define TC_CVM_DEV_FLAG 5 +#define TLOG_DEV_FLAG 6 +#define TLOG_DEV_THD_FLAG 7 + +#define VTZF_OPEN_TZD 15 +#define VTZF_CLOSE_TZD 17 +#define VTZF_LOG_IN_NHIDL 19 +#define VTZF_GET_TEE_VERSION 21 +#define VTZF_GET_TEE_INFO 23 +#define VTZF_LATE_INIT 25 +#define VTZF_SYNC_TIME 27 +#define VTZF_LOG_IN 29 +#define VTZF_OPEN_SESSION 31 +#define VTZF_SEND_CMD 33 +#define VTZF_CANCEL_CMD 35 +#define VTZF_MMAP 37 +#define VTZF_MUNMAP 39 +#define VTZF_CLOSE_SESSION 41 +#define VTZF_CLOSE_PTZDEV 43 +#define VTZ_REGISTER_AGENT 45 +#define VTZ_UNREGISTER_AGENT 47 +#define VTZF_WAIT_EVENT 49 +#define VTZF_SEND_EVENT_RESPONSE 51 +#define VTZF_LOAD_SEC 53 + +#define VTZF_GET_TEEOS_VER 55 +#define VTZF_SET_READER_CUR 57 +#define 
VTZF_SET_TLOGCAT_STAT 59 +#define VTZF_GET_TLOGCAT_STAT 61 +#define VTZF_GET_LOG 63 + +#define VTZF_TEST 65 +#define VTZ_NOTHING 67 + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; +} struct_packet_cmd_general; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_general; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; +} struct_packet_cmd_test; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_test; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + uint32_t vmid; + uint32_t flag; +} struct_packet_cmd_open_tzd; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; + int32_t ptzfd; + int32_t vmid; +} struct_packet_rsp_open_tzd; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; +} struct_packet_cmd_close_tzd; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_close_tzd; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; +} struct_packet_cmd_getteever; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; + uint32_t tee_ver; +} struct_packet_rsp_getteever; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + bool istlog; +} struct_packet_cmd_getteeinfo; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; + struct tc_ns_tee_info info; +} struct_packet_rsp_getteeinfo; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + void *phyaddr; + struct agent_ioctl_args args; +} struct_packet_cmd_regagent; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; + struct agent_ioctl_args args; +} struct_packet_rsp_regagent; + +typedef struct { + uint32_t 
packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; +} struct_packet_cmd_unregagent; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_unregagent; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + uint32_t agent_id; +} struct_packet_cmd_event; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + uint32_t index; +} struct_packet_cmd_lateinit; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_lateinit; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + struct tc_ns_client_time tcNsTime; +} struct_packet_cmd_synctime; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_synctime; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + uint8_t cert_buffer[]; +} struct_packet_cmd_login; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; +} struct_packet_cmd_login_non; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_login; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + __s32 cpu_index; + struct load_secfile_ioctl_struct ioctlArg; +} struct_packet_cmd_load_sec; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; + struct load_secfile_ioctl_struct ioctlArg; +} struct_packet_rsp_load_sec; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + __s32 cpu_index; + struct tc_ns_client_context cliContext; +} struct_packet_cmd_session; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; + struct tc_ns_client_context cliContext; +} struct_packet_rsp_session; + +typedef struct { + uint32_t packet_size; + 
uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + int32_t err_flag; + int32_t is_fragment; + uint32_t fragment_block_num; + uint32_t vm_page_size; + uint64_t block_addrs[TEE_PARAM_NUM]; + uint32_t block_size[TEE_PARAM_NUM]; + unsigned long long addrs[TEE_PARAM_NUM]; //used by ref mem mmap + struct tc_ns_client_context cliContext; +} struct_packet_cmd_send_cmd; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; + struct tc_ns_client_context cliContext; +} struct_packet_rsp_send_cmd; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + __s32 cpu_index; + struct tc_ns_client_context cliContext; + pid_t pid; +} struct_packet_cmd_cancel_cmd; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; + struct tc_ns_client_context cliContext; +} struct_packet_rsp_cancel_cmd; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; + uint64_t buffer; + uint32_t size; + uint32_t offset; +} struct_packet_cmd_mmap; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_mmap; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; +} struct_packet_cmd_nothing; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_nothing; + +#endif \ No newline at end of file diff --git a/trustzone-awared-vm/VM/vtzdriver/inc/tc_ns_client.h b/trustzone-awared-vm/VM/vtzdriver/inc/tc_ns_client.h new file mode 100644 index 0000000000000000000000000000000000000000..215ece0a3bff1c4744acfb0a13a96867535e77d7 --- /dev/null +++ b/trustzone-awared-vm/VM/vtzdriver/inc/tc_ns_client.h @@ -0,0 +1,216 @@ +/* + * tc_ns_client.h + * + * data structure declaration for nonsecure world + * + * Copyright (c) 2012-2022 Huawei Technologies Co., Ltd. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef TC_NS_CLIENT_H +#define TC_NS_CLIENT_H + +#include +#include + +#define UUID_LEN 16 +#define PARAM_NUM 4 +#define ADDR_TRANS_NUM 32 + +#define teec_param_types(param0_type, param1_type, param2_type, param3_type) \ + ((param3_type) << 12 | (param2_type) << 8 | \ + (param1_type) << 4 | (param0_type)) + +#define teec_param_type_get(param_types, index) \ + (((param_types) >> ((index) << 2)) & 0x0F) + +#ifndef ZERO_SIZE_PTR +#define ZERO_SIZE_PTR ((void *)16) +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= (unsigned long)ZERO_SIZE_PTR) +#endif + +#if (KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE) +#define mm_sem_lock(mm) (mm)->mmap_lock +#else +#define mm_sem_lock(mm) (mm)->mmap_sem +#endif + +struct tc_ns_client_login { + __u32 method; + __u32 mdata; +}; + +union tc_ns_client_param { + struct { + __u32 buffer; + __u32 buffer_h_addr; + __u32 offset; + __u32 h_offset; + __u32 size_addr; + __u32 size_h_addr; + } memref; + struct { + __u32 a_addr; + __u32 a_h_addr; + __u32 b_addr; + __u32 b_h_addr; + } value; +}; + +struct tc_ns_client_return { + int code; + __u32 origin; +}; + +struct tc_ns_client_context { + unsigned char uuid[UUID_LEN]; + __u32 session_id; + __u32 cmd_id; + struct tc_ns_client_return returns; + struct tc_ns_client_login login; + union tc_ns_client_param params[PARAM_NUM]; + __u32 param_types; + __u8 started; + __u32 calling_pid; + unsigned int file_size; + union { + char *file_buffer; + struct { + uint32_t file_addr; + uint32_t 
file_h_addr; + } memref; + }; +}; + +struct tc_ns_client_time { + uint32_t seconds; + uint32_t millis; +}; + +struct tc_ns_tee_info { + uint16_t tzdriver_version_major; + uint16_t tzdriver_version_minor; + uint32_t reserved[15]; +}; + +enum secfile_type_t { + LOAD_TA = 0, + LOAD_SERVICE, + LOAD_LIB, + LOAD_DYNAMIC_DRV, + LOAD_PATCH, + LOAD_TYPE_MAX, +}; + +struct sec_file_info { + enum secfile_type_t secfile_type; + uint32_t file_size; + int32_t sec_load_err; +}; + +struct load_secfile_ioctl_struct { + struct sec_file_info sec_file_info; + unsigned char uuid[UUID_LEN]; + union { + char *file_buffer; + struct { + uint32_t file_addr; + uint32_t file_h_addr; + } memref; + }; +}__attribute__((packed)); + +struct agent_ioctl_args { + uint32_t id; + uint32_t buffer_size; + union { + void *buffer; + unsigned long long addr; + }; +}; + +struct tc_ns_client_crl { + union { + uint8_t *buffer; + struct { + uint32_t buffer_addr; + uint32_t buffer_h_addr; + } memref; + }; + uint32_t size; +}; + +#ifdef CONFIG_LOG_POOL_ENABLE +struct tc_ns_log_pool { + uint64_t addr; + uint64_t size; +}; +#endif + +#define MAX_SHA_256_SZ 32 + +#define TC_NS_CLIENT_IOCTL_SES_OPEN_REQ \ + _IOW(TC_NS_CLIENT_IOC_MAGIC, 1, struct tc_ns_client_context) +#define TC_NS_CLIENT_IOCTL_SES_CLOSE_REQ \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 2, struct tc_ns_client_context) +#define TC_NS_CLIENT_IOCTL_SEND_CMD_REQ \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 3, struct tc_ns_client_context) +#define TC_NS_CLIENT_IOCTL_SHRD_MEM_RELEASE \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 4, unsigned int) +#define TC_NS_CLIENT_IOCTL_WAIT_EVENT \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 5, unsigned int) +#define TC_NS_CLIENT_IOCTL_SEND_EVENT_RESPONSE \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 6, unsigned int) +#define TC_NS_CLIENT_IOCTL_REGISTER_AGENT \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 7, struct agent_ioctl_args) +#define TC_NS_CLIENT_IOCTL_UNREGISTER_AGENT \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 8, unsigned int) +#define TC_NS_CLIENT_IOCTL_LOAD_APP_REQ \ + 
_IOWR(TC_NS_CLIENT_IOC_MAGIC, 9, struct load_secfile_ioctl_struct) +#define TC_NS_CLIENT_IOCTL_NEED_LOAD_APP \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 10, struct tc_ns_client_context) +#define TC_NS_CLIENT_IOCTL_ALLOC_EXCEPTING_MEM \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 12, unsigned int) +#define TC_NS_CLIENT_IOCTL_CANCEL_CMD_REQ \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 13, struct tc_ns_client_context) +#define TC_NS_CLIENT_IOCTL_LOGIN \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 14, int) +#define TC_NS_CLIENT_IOCTL_TUI_EVENT \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 16, int) +#define TC_NS_CLIENT_IOCTL_SYC_SYS_TIME \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 17, struct tc_ns_client_time) +#define TC_NS_CLIENT_IOCTL_SET_NATIVECA_IDENTITY \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 18, int) +#define TC_NS_CLIENT_IOCTL_LOAD_TTF_FILE_AND_NOTCH_HEIGHT \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 19, unsigned int) +#define TC_NS_CLIENT_IOCTL_LATEINIT \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 20, unsigned int) +#define TC_NS_CLIENT_IOCTL_GET_TEE_VERSION \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 21, unsigned int) +#define TC_NS_CLIENT_IOCTL_UPDATE_TA_CRL\ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 22, struct tc_ns_client_crl) +#ifdef CONFIG_LOG_POOL_ENABLE +#define TC_NS_CLIENT_IOCTL_GET_LOG_POOL \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 23, struct tc_ns_log_pool) +#endif +#ifdef CONFIG_TEE_TELEPORT_SUPPORT +#define TC_NS_CLIENT_IOCTL_PORTAL_REGISTER \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 24, struct agent_ioctl_args) +#define TC_NS_CLIENT_IOCTL_PORTAL_WORK \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 25, struct agent_ioctl_args) +#endif +#define TC_NS_CLIENT_IOCTL_GET_TEE_INFO \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 26, struct tc_ns_tee_info) + +#define TC_NS_CLIENT_IOCTL_CHECK_CCOS \ + _IOWR(TC_NS_CLIENT_IOC_MAGIC, 32, unsigned int) +#endif + diff --git a/trustzone-awared-vm/VM/vtzdriver/inc/tc_ns_log.h b/trustzone-awared-vm/VM/vtzdriver/inc/tc_ns_log.h new file mode 100644 index 0000000000000000000000000000000000000000..b159858d9d6e0a344fd709297aea1be48a7eb9ae --- /dev/null +++ 
b/trustzone-awared-vm/VM/vtzdriver/inc/tc_ns_log.h @@ -0,0 +1,69 @@ +/* + * tc_ns_log.h + * + * log func declaration + * + * Copyright (c) 2012-2022 Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef TC_NS_LOG_H +#define TC_NS_LOG_H + +#include +#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE) +#include +#endif +#include +enum { + TZ_DEBUG_VERBOSE = 0, + TZ_DEBUG_DEBUG, + TZ_DEBUG_INFO, + TZ_DEBUG_WARN, + TZ_DEBUG_ERROR, +}; +#define MOD_TEE "tzdriver" + +#define TEE_LOG_MASK 2 + +#define tlogv(fmt, args...) \ +do { \ + if (TZ_DEBUG_VERBOSE >= TEE_LOG_MASK) \ + pr_info("[%s] (%i, %s)%s: " fmt, MOD_TEE, current->pid, current->comm, __func__, ## args); \ +} while (0) + + +#define tlogd(fmt, args...) \ +do { \ + if (TZ_DEBUG_DEBUG >= TEE_LOG_MASK) \ + pr_info("[%s] (%i, %s)%s: " fmt, MOD_TEE, current->pid, current->comm, __func__, ## args); \ +} while (0) + + +#define tlogi(fmt, args...) \ +do { \ + if (TZ_DEBUG_INFO >= TEE_LOG_MASK) \ + pr_info("[%s] (%i, %s)%s: " fmt, MOD_TEE, current->pid, current->comm, __func__, ## args); \ +} while (0) + + +#define tlogw(fmt, args...) \ +do { \ + if (TZ_DEBUG_WARN >= TEE_LOG_MASK) \ + pr_warn("[%s] (%i, %s)%s: " fmt, MOD_TEE, current->pid, current->comm, __func__, ## args); \ +} while (0) + + +#define tloge(fmt, args...) 
\ + pr_err("[%s] (%i, %s)%s: " fmt, MOD_TEE, current->pid, current->comm, __func__, ## args) + +#endif + diff --git a/trustzone-awared-vm/VM/vtzdriver/inc/teek_client_constants.h b/trustzone-awared-vm/VM/vtzdriver/inc/teek_client_constants.h new file mode 100644 index 0000000000000000000000000000000000000000..6b0b32ac0854a06bc1825538d5f4710ccd4e8ce7 --- /dev/null +++ b/trustzone-awared-vm/VM/vtzdriver/inc/teek_client_constants.h @@ -0,0 +1,211 @@ +/* + * teek_client_constants.h + * + * macro declaration for libteec interface for kernel CA. + * + * Copyright (c) 2012-2022 Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef TEEK_CLIENT_CONSTANTS_H +#define TEEK_CLIENT_CONSTANTS_H + +enum global_service_cmd_id { + GLOBAL_CMD_ID_INVALID = 0x0, + GLOBAL_CMD_ID_BOOT_ACK = 0x1, + GLOBAL_CMD_ID_OPEN_SESSION = 0x2, + GLOBAL_CMD_ID_CLOSE_SESSION = 0x3, + GLOBAL_CMD_ID_LOAD_SECURE_APP = 0x4, + GLOBAL_CMD_ID_NEED_LOAD_APP = 0x5, + GLOBAL_CMD_ID_REGISTER_AGENT = 0x6, + GLOBAL_CMD_ID_UNREGISTER_AGENT = 0x7, + GLOBAL_CMD_ID_REGISTER_NOTIFY_MEMORY = 0x8, + GLOBAL_CMD_ID_UNREGISTER_NOTIFY_MEMORY = 0x9, + GLOBAL_CMD_ID_INIT_CONTENT_PATH = 0xa, + GLOBAL_CMD_ID_TERMINATE_CONTENT_PATH = 0xb, + GLOBAL_CMD_ID_ALLOC_EXCEPTION_MEM = 0xc, + GLOBAL_CMD_ID_TEE_TIME = 0xd, + GLOBAL_CMD_ID_TEE_INFO = 0xe, + GLOBAL_CMD_ID_REGISTER_LOG_MEM = 0xf, + GLOBAL_CMD_ID_KILL_TASK = 0x10, + GLOBAL_CMD_ID_TUI_EXCEPTION = 0x11, + GLOBAL_CMD_ID_ADJUST_TIME = 0x12, + GLOBAL_CMD_ID_SET_CA_HASH = 0x13, + /* set the Android's build version */ + GLOBAL_CMD_ID_SET_BUILD_VERSION = 0x14, + GLOBAL_CMD_ID_REGISTER_TTF_MEM = 0x15, + /* get session key for encrypting dialog */ + GLOBAL_CMD_ID_GET_SESSION_SECURE_PARAMS = 0x16, + GLOBAL_CMD_ID_REGISTER_MAILBOX = 0x17, + GLOBAL_CMD_ID_REGISTER_UNUSUAL_TTF_MEM = 0x18, + GLOBAL_CMD_ID_REGISTER_ION_MEM = 0x19, + GLOBAL_CMD_ID_DUMP_MEMINFO = 0x1a, + /* this cmd will be used to service no ca handle cmd */ + GLOBAL_CMD_ID_SET_SERVE_CMD = 0x1b, + GLOBAL_CMD_ID_ADD_DYNAMIC_ION = 0x1c, + GLOBAL_CMD_ID_DEL_DYNAMIC_ION = 0x1d, + GLOBAL_CMD_ID_RELEASE_ION_SRV = 0x1e, + /* this cmd for tui to get notch_size */ + GLOBAL_CMD_ID_TUI_NOTCH = 0x1f, + GLOBAL_CMD_ID_LATE_INIT = 0x20, + /* this cmd for tui to get information of foldable screen */ + GLOBAL_CMD_ID_TUI_FOLD = 0x21, + GLOBAL_CMD_ID_GET_TEE_VERSION = 0x22, + GLOBAL_CMD_ID_REGISTER_RESMEM = 0x24, + GLOBAL_CMD_ID_DUMP_SRV_SESS = 0x25, + GLOBAL_CMD_ID_TRACE_ENABLE = 0x26, +#ifdef CONFIG_TEE_TELEPORT_SUPPORT + GLOBAL_CMD_ID_PORTAL_WORK = 0x2b, +#endif + GLOBAL_CMD_ID_REGISTER_HOST_NSID = 0x2d, + GLOBAL_CMD_ID_UNKNOWN = 0x7FFFFFFE, + 
GLOBAL_CMD_ID_MAX = 0x7FFFFFFF +}; + +enum teec_result { + TEEC_SUCCESS = 0x0, + TEEC_ERROR_INVALID_CMD = 0x1, + TEEC_ERROR_SERVICE_NOT_EXIST = 0x2, + TEEC_ERROR_SESSION_NOT_EXIST = 0x3, + TEEC_ERROR_SESSION_MAXIMUM, + TEEC_ERROR_REGISTER_EXIST_SERVICE, + TEEC_ERROR_TAGET_DEAD_FATAL, + TEEC_ERROR_READ_DATA, + TEEC_ERROR_WRITE_DATA, + TEEC_ERROR_TRUNCATE_OBJECT, + TEEC_ERROR_SEEK_DATA, + TEEC_ERROR_RENAME_OBJECT, + TEEC_ERROR_TRUSTED_APP_LOAD_ERROR, + TEEC_ERROR_GENERIC = 0xFFFF0000, + TEEC_ERROR_ACCESS_DENIED = 0xFFFF0001, + TEEC_ERROR_CANCEL = 0xFFFF0002, + TEEC_ERROR_ACCESS_CONFLICT = 0xFFFF0003, + TEEC_ERROR_EXCESS_DATA = 0xFFFF0004, + TEEC_ERROR_BAD_FORMAT = 0xFFFF0005, + TEEC_ERROR_BAD_PARAMETERS = 0xFFFF0006, + TEEC_ERROR_BAD_STATE = 0xFFFF0007, + TEEC_ERROR_ITEM_NOT_FOUND = 0xFFFF0008, + TEEC_ERROR_NOT_IMPLEMENTED = 0xFFFF0009, + TEEC_ERROR_NOT_SUPPORTED = 0xFFFF000A, + TEEC_ERROR_NO_DATA = 0xFFFF000B, + TEEC_ERROR_OUT_OF_MEMORY = 0xFFFF000C, + TEEC_ERROR_BUSY = 0xFFFF000D, + TEEC_ERROR_COMMUNICATION = 0xFFFF000E, + TEEC_ERROR_SECURITY = 0xFFFF000F, + TEEC_ERROR_SHORT_BUFFER = 0xFFFF0010, + TEEC_PENDING = 0xFFFF2000, + TEEC_PENDING2 = 0xFFFF2001, + TEE_ERROR_TAGET_DEAD = 0xFFFF3024, + TEE_ERROR_GT_DEAD = 0xFFFF3124, + TEEC_ERROR_MAC_INVALID = 0xFFFF3071, + TEEC_CLIENT_INTR = 0xFFFF4000, + TEEC_ERROR_TUI_IN_USE = 0xFFFF7110, + TEEC_ERROR_TUI_SWITCH_CHANNAL, + TEEC_ERROR_TUI_CFG_DRIVER, + TEEC_ERROR_TUI_INVALID_EVENT, + TEEC_ERROR_TUI_POLL_EVENT, + TEEC_ERROR_TUI_CANCELED, + TEEC_ERROR_TUI_EXIT, + TEEC_ERROR_TUI_NOT_AVAILABLE, + TEEC_ERROR_SEC_FLASH_NOT_AVAILABLE, + TEEC_ERROR_CA_AUTH_FAIL = 0xFFFFCFE5, + TEE_ERROR_AUDIT_FAIL = 0xFFFF9112, + TEE_ERROR_IS_DEAD = 0xFFFFABAB, +}; + +enum TEEC_ReturnCodeOrigin { + TEEC_ORIGIN_API = 0x1, + TEEC_ORIGIN_COMMS = 0x2, + TEEC_ORIGIN_TEE = 0x3, + TEEC_ORIGIN_TRUSTED_APP = 0x4, +}; + +enum TEEC_SharedMemCtl { + TEEC_MEM_INPUT = 0x1, + TEEC_MEM_OUTPUT = 0x2, + TEEC_MEM_INOUT = 0x3, +}; + +enum TEEC_ParamType { + TEEC_NONE 
= 0x0, + TEEC_VALUE_INPUT = 0x01, + TEEC_VALUE_OUTPUT = 0x02, + TEEC_VALUE_INOUT = 0x03, + TEEC_MEMREF_TEMP_INPUT = 0x05, + TEEC_MEMREF_TEMP_OUTPUT = 0x06, + TEEC_MEMREF_TEMP_INOUT = 0x07, + TEEC_ION_INPUT = 0x08, + TEEC_ION_SGLIST_INPUT = 0x09, + TEEC_MEMREF_SHARED_INOUT = 0x0a, + TEEC_MEMREF_WHOLE = 0xc, + TEEC_MEMREF_PARTIAL_INPUT = 0xd, + TEEC_MEMREF_PARTIAL_OUTPUT = 0xe, + TEEC_MEMREF_PARTIAL_INOUT = 0xf +}; + +enum TEE_ParamType { + TEE_PARAM_TYPE_NONE = 0x0, + TEE_PARAM_TYPE_VALUE_INPUT = 0x1, + TEE_PARAM_TYPE_VALUE_OUTPUT = 0x2, + TEE_PARAM_TYPE_VALUE_INOUT = 0x3, + TEE_PARAM_TYPE_MEMREF_INPUT = 0x5, + TEE_PARAM_TYPE_MEMREF_OUTPUT = 0x6, + TEE_PARAM_TYPE_MEMREF_INOUT = 0x7, + TEE_PARAM_TYPE_ION_INPUT = 0x8, + TEE_PARAM_TYPE_ION_SGLIST_INPUT = 0x9, + TEE_PARAM_TYPE_MEMREF_SHARED_INOUT = 0x0a, + TEE_PARAM_TYPE_RESMEM_INPUT = 0xc, + TEE_PARAM_TYPE_RESMEM_OUTPUT = 0xd, + TEE_PARAM_TYPE_RESMEM_INOUT = 0xe +}; + +enum TEEC_LoginMethod { + TEEC_LOGIN_PUBLIC = 0x0, + TEEC_LOGIN_USER, + TEEC_LOGIN_GROUP, + TEEC_LOGIN_APPLICATION = 0x4, + TEEC_LOGIN_USER_APPLICATION = 0x5, + TEEC_LOGIN_GROUP_APPLICATION = 0x6, + TEEC_LOGIN_IDENTIFY = 0x7, + TEEK_LOGIN_IDENTIFY = 0x80000001, +}; + +/* Add event id's name in 'view_state[]' in same order */ +enum tee_event_id { + INVOKE_CMD_START, + INVOKE_CMD_END, + SMC_SEND, + SMC_DONE, + SMC_IN, + SMC_OUT, + SMC_SLEEP, + SMC_PREEMPT, + GTASK_GET_CMD, + GTASK_PUT_CMD, + GTASK_REQ_TA, + GTASK_RESP_TA, + SPI_WAKEUP, + SCHED_IN, + SCHED_OUT, + INTERRUPT_HANDLE_SPI_START, + INTERRUPT_HANDLE_SPI_REE_RESPONSE, + INTERRUPT_HANDLE_SPI_REE_MISS, + INTERRUPT_HANDLE_SPI_REE_SCHEDULED, + INTERRUPT_HANDLE_SPI_END, + INTERRUPT_HANDLE_START, + INTERRUPT_HANDLE_END, + TEE_EVENT_MAX +}; + +#define TZ_WQ_MAX_ACTIVE 1 +#endif + diff --git a/trustzone-awared-vm/VM/vtzdriver/inc/teek_ns_client.h b/trustzone-awared-vm/VM/vtzdriver/inc/teek_ns_client.h new file mode 100644 index 
0000000000000000000000000000000000000000..d26ab509186e7f9fb65a72f565f1f4001b47dce4 --- /dev/null +++ b/trustzone-awared-vm/VM/vtzdriver/inc/teek_ns_client.h @@ -0,0 +1,256 @@ +/* + * teek_ns_client.h + * + * define structures and IOCTLs. + * + * Copyright (c) 2012-2022 Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef TEEK_NS_CLIENT_H +#define TEEK_NS_CLIENT_H + +#include +#include +#include +#include +#include "tc_ns_client.h" +#include "tc_ns_log.h" + +#define TC_NS_CLIENT_IOC_MAGIC 't' +#define TC_NS_CLIENT_DEV "tc_ns_client" +#define TC_PRIV_DEV "tc_private" +#define TC_NS_CVM_DEV "tc_ns_cvm" +#define TC_NS_CLIENT_DEV_NAME "/dev/tc_ns_client" + +#define EXCEPTION_MEM_SIZE (8*1024) /* mem for exception handling */ + +#define TSP_REQUEST 0xB2000008 +#define TSP_RESPONSE 0xB2000009 + +#define TSP_REE_SIQ 0xB200000A +#define TSP_CRASH 0xB200000B + +#ifdef CONFIG_TEE_UPGRADE +#define TSP_REBOOT 0xB2000012 +#define TSP_CPU_ON 0xB2000013 +#define TSP_REBOOT_DONE 0xB2000015 +#else +#define TSP_REBOOT 0xB200000E +#define TSP_CPU_ON 0xB200000F +#define TSP_REBOOT_DONE 0xB2000010 +#endif + +#define TSP_PREEMPTED 0xB2000005 +#define TC_CALL_GLOBAL 0x01 +#define TC_CALL_SYNC 0x02 +#define TC_CALL_LOGIN 0x04 +#define TEE_REQ_FROM_USER_MODE 0U +#define TEE_REQ_FROM_KERNEL_MODE 1U +#define TEE_PARAM_NUM 4 +#define VMALLOC_TYPE 0 +#define RESERVED_TYPE 1 + +/* Max sizes for login info buffer comming from teecd */ +#define MAX_PACKAGE_NAME_LEN 255 +/* The apk certificate format is 
as follows: + * modulus_size(4 bytes) + modulus buffer(512 bytes) + * + exponent size(4 bytes) + exponent buffer(1 bytes) + */ +#define MAX_PUBKEY_LEN 1024 + +struct tc_ns_dev_list { + struct mutex dev_lock; /* for dev_file_list */ + struct list_head dev_file_list; +}; + +struct tc_uuid { + uint32_t time_low; + uint16_t time_mid; + uint16_t timehi_and_version; + uint8_t clockseq_and_node[8]; /* clock len is 8 */ +}; + +#define INVALID_MAP_ADDR ((void *)-1) +struct tc_ns_shared_mem { + void *kernel_addr; + void *user_addr; + void *user_addr_ca; /* for ca alloc share mem */ + unsigned int len; + int mem_type; + struct list_head head; + atomic_t usage; + atomic_t offset; +}; + +struct tc_ns_service { + unsigned char uuid[UUID_LEN]; + struct mutex session_lock; /* for session_list */ + struct list_head session_list; + struct list_head head; + struct mutex operation_lock; /* for session's open/close */ + atomic_t usage; + unsigned int nsid; +}; + +#define SERVICES_MAX_COUNT 32 /* service limit can opened on 1 fd */ +struct tc_ns_dev_file { + unsigned int dev_file_id; + struct mutex service_lock; /* for service_ref[], services[] */ + uint8_t service_ref[SERVICES_MAX_COUNT]; /* a judge if set services[i]=NULL */ + struct tc_ns_service *services[SERVICES_MAX_COUNT]; + struct mutex shared_mem_lock; /* for shared_mem_list */ + struct list_head shared_mem_list; + struct list_head head; + /* Device is linked to call from kernel */ + uint8_t kernel_api; + /* client login info provided by teecd, can be either package name and public + * key or uid(for non android services/daemons) + * login information can only be set once, dont' allow subsequent calls + */ + bool login_setup; + struct mutex login_setup_lock; /* for login_setup */ +#ifdef CONFIG_AUTH_HASH + bool cainfo_hash_setup; + struct mutex cainfo_hash_setup_lock; +#endif + uint32_t pkg_name_len; + uint8_t pkg_name[MAX_PACKAGE_NAME_LEN]; + uint32_t pub_key_len; + uint8_t pub_key[MAX_PUBKEY_LEN]; + int load_app_flag; +#ifdef 
CONFIG_CONFIDENTIAL_CONTAINER + uint32_t nsid; +#endif + struct completion close_comp; /* for kthread close unclosed session */ +#ifdef CONFIG_TEE_TELEPORT_SUPPORT + bool portal_enabled; +#endif +}; + +union tc_ns_parameter { + struct { + unsigned int buffer; + unsigned int size; + } memref; + struct { + unsigned int a; + unsigned int b; + } value; +}; + +struct tc_ns_login { + unsigned int method; + unsigned int mdata; +}; + +struct tc_ns_operation { + unsigned int paramtypes; + union tc_ns_parameter params[TEE_PARAM_NUM]; + unsigned int buffer_h_addr[TEE_PARAM_NUM]; + struct tc_ns_shared_mem *sharemem[TEE_PARAM_NUM]; + void *mb_buffer[TEE_PARAM_NUM]; +}; + +struct tc_ns_temp_buf { + void *temp_buffer; + unsigned int size; +}; + +enum smc_cmd_type { + CMD_TYPE_GLOBAL, + CMD_TYPE_TA, + CMD_TYPE_TA_AGENT, + CMD_TYPE_TA2TA_AGENT, /* compatible with TA2TA2TA->AGENT etc. */ + CMD_TYPE_BUILDIN_AGENT, + CMD_TYPE_RELEASE_AGENT, /* only for release agent */ +}; + +struct tc_ns_smc_cmd { + uint8_t uuid[sizeof(struct tc_uuid)]; + unsigned int cmd_type; + unsigned int cmd_id; + unsigned int dev_file_id; + unsigned int context_id; + unsigned int agent_id; + unsigned int operation_phys; + unsigned int operation_h_phys; + unsigned int login_method; + unsigned int login_data_phy; + unsigned int login_data_h_addr; + unsigned int login_data_len; + unsigned int err_origin; + int ret_val; + unsigned int event_nr; + unsigned int uid; + unsigned int ca_pid; /* pid */ + unsigned int pid; /* tgid */ + unsigned int nsid; + unsigned int eventindex; /* tee audit event index for upload */ + bool started; +} __attribute__((__packed__)); + +/* + * @brief + */ +struct tc_wait_data { + wait_queue_head_t send_cmd_wq; + int send_wait_flag; +}; + +#define NUM_OF_SO 1 +#ifdef CONFIG_CMS_CAHASH_AUTH +#define KIND_OF_SO 1 +#else +#define KIND_OF_SO 2 +#endif +struct tc_ns_session { + unsigned int session_id; + struct list_head head; + struct tc_wait_data wait_data; + struct mutex ta_session_lock; /* 
for open/close/invoke on 1 session */ + struct tc_ns_dev_file *owner; + uint8_t auth_hash_buf[MAX_SHA_256_SZ * NUM_OF_SO + MAX_SHA_256_SZ]; + atomic_t usage; +}; + +struct mb_cmd_pack { + struct tc_ns_operation operation; + unsigned char login_data[MAX_SHA_256_SZ * NUM_OF_SO + MAX_SHA_256_SZ]; +}; + +struct load_img_params { + struct tc_ns_dev_file *dev_file; + const char *file_buffer; + unsigned int file_size; + struct mb_cmd_pack *mb_pack; + char *mb_load_mem; + struct tc_uuid *uuid_return; + unsigned int mb_load_size; +}; + +struct tc_call_params { + struct tc_ns_dev_file *dev; + struct tc_ns_client_context *context; + struct tc_ns_session *sess; + uint8_t flags; +}; + +struct tc_op_params { + struct mb_cmd_pack *mb_pack; + struct tc_ns_smc_cmd *smc_cmd; + struct tc_ns_temp_buf local_tmpbuf[TEE_PARAM_NUM]; + uint32_t trans_paramtype[TEE_PARAM_NUM]; + bool op_inited; +}; + +#endif + diff --git a/trustzone-awared-vm/VM/vtzdriver/process_data.c b/trustzone-awared-vm/VM/vtzdriver/process_data.c new file mode 100644 index 0000000000000000000000000000000000000000..e553c96098c8e89be3b42e4d44cfca48fbfa0aaf --- /dev/null +++ b/trustzone-awared-vm/VM/vtzdriver/process_data.c @@ -0,0 +1,43 @@ +#include +#include +#include "serialport.h" +#include "process_data.h" +#include "comm_structs.h" +#include "tlogger.h" + +void *malloc_copy(void *buf, int buf_len , int size, int *poffset) +{ + void *res; + int offset = *poffset; + if (buf_len < offset + size || size < 4) { + memmove_s(buf, buf_len, buf + offset, buf_len - offset); + *poffset = buf_len - offset; + return NULL; + } + *poffset = offset + size; + res = buf + offset; + return res; +} + +void *get_packet_item(void *buf, int buf_len, int *poffset, int *packet_sizep) +{ + uint32_t packet_size = 0; + void *res = NULL; + + if (buf_len == *poffset) { + *poffset = 0; + return NULL; + } + if (buf_len < *poffset + 4) { + return malloc_copy(buf, buf_len, buf_len - *poffset, poffset); + } + packet_size = *(uint32_t *)(buf + 
*poffset); + if (packet_size > SERIAL_PORT_BUF_LEN) { + tloge("packet_size err, size = %u, buf_len = %d, offset = %d\n", packet_size, buf_len, *poffset); + return NULL; + } + + res = malloc_copy(buf, buf_len, packet_size, poffset); + *packet_sizep = packet_size; + return res; +} \ No newline at end of file diff --git a/trustzone-awared-vm/VM/vtzdriver/process_data.h b/trustzone-awared-vm/VM/vtzdriver/process_data.h new file mode 100644 index 0000000000000000000000000000000000000000..f7828e3e8d66b544628ea5c51165d59928cbfecd --- /dev/null +++ b/trustzone-awared-vm/VM/vtzdriver/process_data.h @@ -0,0 +1,6 @@ +#ifndef PROCESS_DATA_H +#define PROCESS_DATA_H + +void *get_packet_item(void *buf, int buf_len, int *poffset, int *psize); + +#endif \ No newline at end of file diff --git a/trustzone-awared-vm/VM/vtzdriver/reserved_shm.c b/trustzone-awared-vm/VM/vtzdriver/reserved_shm.c new file mode 100644 index 0000000000000000000000000000000000000000..50a3f3189863ab3e85fd7dbf3eee853e725bde5a --- /dev/null +++ b/trustzone-awared-vm/VM/vtzdriver/reserved_shm.c @@ -0,0 +1,116 @@ +#include "reserved_shm.h" +#include +#include +#include +#include +#include "tc_ns_log.h" + +struct reserved_shm_list g_res_shm_list; +struct mutex g_lock; +size_t g_alloc_size; +size_t g_relese_size; + +void put_alloc(size_t size) +{ + mutex_lock(&g_lock); + g_alloc_size += size; + mutex_unlock(&g_lock); +} + +void put_relese(size_t size) +{ + mutex_lock(&g_lock); + g_relese_size += size; + mutex_unlock(&g_lock); +} + +void init_res_shm_list(void) +{ + INIT_LIST_HEAD(&g_res_shm_list.head); + mutex_init(&g_res_shm_list.lock); + mutex_init(&g_lock); + g_alloc_size = 0; + g_relese_size = 0; +} + +void destroy_res_shm_list(void) +{ + struct reserved_shm *shm = NULL; + struct reserved_shm *temp = NULL; + mutex_lock(&g_res_shm_list.lock); + list_for_each_entry_safe(shm, temp, &g_res_shm_list.head, head) { + if (shm->kernel_addr) + kfree(shm->kernel_addr); + list_del(&shm->head); + kfree(shm); + } + 
mutex_unlock(&g_res_shm_list.lock); + mutex_destroy(&g_res_shm_list.lock); +} + +void *alloc_res_shm(size_t len) +{ + size_t size = 0; + struct reserved_shm *shm = NULL; + struct reserved_shm *temp = NULL; + struct reserved_shm *result = NULL; + + mutex_lock(&g_res_shm_list.lock); + list_for_each_entry_safe(shm, temp, &g_res_shm_list.head, head) { + if (!shm->using && shm->buf_len >= len) { + shm->using = 1; + result = shm; + break; + } + } + mutex_unlock(&g_res_shm_list.lock); + + if (result) { + return result->kernel_addr; + } + size = ALIGN(len, PAGE_SIZE); + if (size > MAILBOX_POOL_SIZE) { + tloge("vtzf alloc sharemem buffer size %zu is too large \n", len); + return NULL; + } + result = kzalloc(sizeof(struct reserved_shm), GFP_KERNEL); + if (!result) { + tloge("failed to alloc mem for struct reserved_shm\n"); + return NULL; + } + result->kernel_addr = kzalloc(size, GFP_KERNEL); + if (!result->kernel_addr) { + tloge("failed to alloc mem for struct reserved_shm buffer\n"); + kfree(result); + return NULL; + } + result->using = 1; + result->buf_len = size; + INIT_LIST_HEAD(&result->head); + mutex_lock(&g_res_shm_list.lock); + list_add_tail(&result->head, &g_res_shm_list.head); + mutex_unlock(&g_res_shm_list.lock); + return result->kernel_addr; +} + +void dealloc_res_shm(void *kernel_buffer) +{ + int bfind= 0; + struct reserved_shm *shm = NULL; + struct reserved_shm *temp = NULL; + mutex_lock(&g_res_shm_list.lock); + list_for_each_entry_safe(shm, temp, &g_res_shm_list.head, head) { + if (shm->kernel_addr == kernel_buffer) { + shm->using = 0; + memset_s(shm->kernel_addr, shm->buf_len, 0, shm->buf_len); + bfind = 1; + list_del(&shm->head); + list_add_tail(&shm->head, &g_res_shm_list.head); + tlogd("dealloc res shm \n"); + break; + } + } + mutex_unlock(&g_res_shm_list.lock); + if (!bfind) + tloge("can't find res mem\n"); +} \ No newline at end of file diff --git a/trustzone-awared-vm/VM/vtzdriver/reserved_shm.h b/trustzone-awared-vm/VM/vtzdriver/reserved_shm.h new file 
mode 100644 index 0000000000000000000000000000000000000000..3ac5fd43a50816333c9ace261d245c3196642190 --- /dev/null +++ b/trustzone-awared-vm/VM/vtzdriver/reserved_shm.h @@ -0,0 +1,37 @@ +#ifndef RESERVED_SHM_H +#define RESERVED_SHM_H + +#include +#include + +#define MAILBOX_POOL_SIZE SZ_4M + +struct reserved_shm_list +{ + struct mutex lock; + struct list_head head; +}; + +struct vtzf_shared_mem { + void *kernel_addr; + void *user_addr; + void *phy_addr; + void *user_addr_host; + unsigned int len; + struct list_head head; + atomic_t offset; +}; + +struct reserved_shm +{ + void *kernel_addr; + size_t buf_len; + struct list_head head; + int using; +}; + +void init_res_shm_list(void); +void destroy_res_shm_list(void); +void *alloc_res_shm(size_t len); +void dealloc_res_shm(void *kernel_buffer); +#endif \ No newline at end of file diff --git a/trustzone-awared-vm/VM/vtzdriver/serialport.c b/trustzone-awared-vm/VM/vtzdriver/serialport.c new file mode 100644 index 0000000000000000000000000000000000000000..663af409ce3c60af4cc84439754b67caacc7bfc2 --- /dev/null +++ b/trustzone-awared-vm/VM/vtzdriver/serialport.c @@ -0,0 +1,783 @@ +#include +#include +#include +#include +#include +#include +#include "serialport.h" +#include "comm_structs.h" +#include "process_data.h" +#include "block_pages.h" +#include "tc_ns_log.h" +#include "tlogger.h" + +#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 0, 0) +#include +struct timespec64 start, end; +#else +#include +struct timeval start, end; +#endif + +#define SEQ_NUM_AGENT_MAX 65536u +extern int g_log_ret_flag; +extern wait_queue_head_t g_log_wait_event_wq; +uint32_t g_seq_num_normal; +uint32_t g_seq_num_fs_agent; +uint32_t g_seq_num_sec_agent; +uint32_t g_seq_num_misc_agent; +struct mutex g_seq_lock; +struct vtzf_serial_port_list g_serial_port_list; +struct vtzf_event_data_list g_event_data_list; +struct vtzf_wr_data_list g_wr_data_list; +int g_destroy_rd_thread = 0; +int g_destroy_wr_thread = 0; +struct vtzf_serial_port_file 
*g_serial_port_file; + +void dump_time(void) +{ + uint32_t cost = 0; +#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 0, 0) + cost = (1000000 * end.tv_sec + end.tv_nsec/1000) - (1000000 * start.tv_sec + start.tv_nsec/1000); +#else + cost = (1000000 * end.tv_sec + end.tv_usec) - (1000000 * start.tv_sec + start.tv_usec); +#endif + tlogi("time cost = %u us\n", cost); +} + +uint32_t get_seq_num(int agent_id) +{ + uint32_t ret; + if (agent_id == AGENT_FS_ID) { + return 10; + } else if (agent_id == SECFILE_LOAD_AGENT_ID) { + return 20; + } else if (agent_id ==AGENT_MISC_ID) { + return 30; + } + mutex_lock(&g_seq_lock); + g_seq_num_normal = (g_seq_num_normal + 2u) % 0xfffffff0; + if (g_seq_num_normal < SEQ_NUM_AGENT_MAX) + g_seq_num_normal = SEQ_NUM_AGENT_MAX; + ret = g_seq_num_normal; + mutex_unlock(&g_seq_lock); + return ret; +} + +void seq_num_init(void) +{ + mutex_init(&g_seq_lock); + g_seq_num_normal = SEQ_NUM_AGENT_MAX; + g_seq_num_fs_agent = 10; + g_seq_num_sec_agent = 20; + g_seq_num_misc_agent = 30; +} + +struct vtzf_serial_port_list *get_serial_port_list(void) +{ + return &g_serial_port_list; +} + +static inline void rd_increment(struct vtzf_serial_port_file *file) { + if (!file) + return; + mutex_lock(&file->rd_flag_lock); + file->rd_flag += 1; + tlogd("increment rd wait flag = %d \n", file->rd_flag); + mutex_unlock(&file->rd_flag_lock); +} + +static inline void rd_decrement(struct vtzf_serial_port_file *file) { + if (!file) + return; + mutex_lock(&file->rd_flag_lock); + file->rd_flag -= 1 ; + if (file->rd_flag < 0) + file->rd_flag = 0; + tlogd("decrement rd wait flag = %d \n", file->rd_flag); + mutex_unlock(&file->rd_flag_lock); +} + +static inline void wr_increment(struct vtzf_serial_port_file *file) { + if (!file) + return; + mutex_lock(&file->wr_flag_lock); + file->wr_flag += 1; + tlogd("increment wr wait flag = %d \n", file->wr_flag); + mutex_unlock(&file->wr_flag_lock); +} + +static inline void wr_decrement(struct vtzf_serial_port_file *file) { + if (!file) + 
return; + mutex_lock(&file->wr_flag_lock); + file->wr_flag -= 1 ; + if (file->wr_flag < 0) + file->wr_flag = 0; + tlogd("decrement wr wait flag = %d \n", file->wr_flag); + mutex_unlock(&file->wr_flag_lock); +} + +static void wake_tlog_thrd(void) +{ + struct vhc_event_data *event_data; + struct vhc_event_data *tmp; + spin_lock(&g_event_data_list.spinlock); + list_for_each_entry_safe(event_data, tmp, &g_event_data_list.head, head) { + if (event_data->seq_num == 41) { + event_data->rd_ret = -EAGAIN; + event_data->ret_flag = 1; + tlogi("wake event\n"); + wake_up(&event_data->wait_event_wq); + break; + } + } + spin_unlock(&g_event_data_list.spinlock); +} + +static inline void wake_up_log_thread(void) +{ + struct vtzf_serial_port_file *file = g_serial_port_file; + file->log_flag = 1; + wake_up(&file->log_wait_event_wq); + wake_tlog_thrd(); +} + +static inline void wake_up_rd_thread(void) +{ + struct vtzf_serial_port_file *file = g_serial_port_file; + rd_increment(file); + wake_up(&file->rd_wait_event_wq); +} + +static inline void wake_up_wr_thread(void) +{ + struct vtzf_serial_port_file *file = g_serial_port_file; + wr_increment(file); + wake_up(&file->wr_wait_event_wq); +} + +static void destroy_data_list(void) +{ + struct vhc_event_data * event_data; + struct vhc_event_data * temp; + struct wr_data * write_data; + struct wr_data * n; + spin_lock(&g_event_data_list.spinlock); + list_for_each_entry_safe(event_data, temp, &g_event_data_list.head, head) { + if (event_data) { + list_del(&event_data->head); + kfree(event_data); + } + } + spin_unlock(&g_event_data_list.spinlock); + + spin_lock(&g_wr_data_list.spinlock); + list_for_each_entry_safe(write_data, n, &g_wr_data_list.head, head) { + if (write_data) { + list_del(&write_data->head); + if (write_data->wr_buf) { + kfree(write_data->wr_buf); + } + kfree(write_data); + } + } + spin_unlock(&g_wr_data_list.spinlock); +} + +void free_serial_port_list(void) +{ + struct vtzf_serial_port_file *dev_file = NULL; + struct 
vtzf_serial_port_file *tmp = NULL; + struct_packet_cmd_nothing packet_cmd = {0}; + struct_packet_rsp_nothing packet_rsp = {0}; + uint32_t seq_num = get_seq_num(0); + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.seq_num = seq_num; + packet_cmd.cmd = VTZ_NOTHING; + g_destroy_rd_thread = 1; + + mutex_lock(&g_serial_port_list.lock); + /*In fact, there is only one serial port.*/ + list_for_each_entry_safe(dev_file, tmp, &g_serial_port_list.head, head) { + list_del(&dev_file->head); + if (dev_file->log_thread){ + tlogd("before kthread_stop log\n"); + wake_up_log_thread(); + tlogd("after kthread_stop log\n"); + } + if (dev_file->rd_thread){ + tlogd("before kthread_stop rd\n"); + (void)send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num); + tlogd("after kthread_stop rd\n"); + } + if (dev_file->wr_thread){ + tlogd("before kthread_stop wr\n"); + g_destroy_wr_thread = 1; + wake_up_wr_thread(); + tlogd("after kthread_stop wr\n"); + } + if (dev_file->rd_thread_name) + kfree(dev_file->rd_thread_name); + if (dev_file->wr_thread_name) + kfree(dev_file->wr_thread_name); + if (dev_file->filep) + filp_close(dev_file->filep, NULL); + if (dev_file->buffer) + kfree(dev_file->buffer); + mutex_destroy(&dev_file->lock); + mutex_destroy(&dev_file->wr_flag_lock); + mutex_destroy(&dev_file->wr_flag_lock); + kfree(dev_file); + } + mutex_unlock(&g_serial_port_list.lock); + mutex_destroy(&g_serial_port_list.lock); + destroy_data_list(); +} + +void put_event_data(void *packet, int packet_size, uint32_t seq_num) +{ + struct vhc_event_data *event_data; + struct vhc_event_data *tmp; + if (!packet) + return; + spin_lock(&g_event_data_list.spinlock); + list_for_each_entry_safe(event_data, tmp, &g_event_data_list.head, head) { + if (event_data->seq_num == seq_num) { + if (memcpy_s(event_data->rd_buf, event_data->size_rd_buf, packet, packet_size) != 0) { + tloge("memcpy_s failed\n"); + } + event_data->rd_ret = 0; + event_data->ret_flag = 1; + 
wake_up(&event_data->wait_event_wq); + break; + } + } + spin_unlock(&g_event_data_list.spinlock); + return; +} + +int rd_thread_func(void *arg) +{ + struct vtzf_serial_port_file *file = (struct vtzf_serial_port_file *)arg; + loff_t off = 0; + int ssize_ret = 0; + int ret = 0; + uint32_t seq_num; + int buf_len = 0; + int offset = 0; +#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 0, 0) + struct vtz_buf_struct vtz_buf = {0}; +#endif + struct file *fp_serialport = NULL; + fp_serialport = file->filep; + while (!kthread_should_stop()) { + ret = wait_event_interruptible(file->rd_wait_event_wq, file->rd_flag); + if (ret != 0) { + tloge("rd thread wait event interruptible failed!\n"); + ret = -EINTR; + } + if (g_destroy_rd_thread) + break; + off = 0; +#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 0, 0) + vtz_buf.buf = file->buffer + file->offset; + vtz_buf.buf_size = SERIAL_PORT_BUF_LEN - file->offset; + ssize_ret = fp_serialport->f_op->unlocked_ioctl(fp_serialport, + TC_NS_CLIENT_IOCTL_READ_REQ, (unsigned long)(&vtz_buf)); +#else + ssize_ret = kernel_read(file->filep, file->buffer + file->offset, + SERIAL_PORT_BUF_LEN - file->offset, &off); +#endif + tlogd("kernel_read, ret value = %d, offset = %ld \n", (int)ssize_ret, (long)off); + if (ssize_ret <= 0) { + tloge("kernel_read failed, ret = %d \n", (int)ssize_ret); + wake_tlog_thrd(); + rd_decrement(file); + continue; + } + if (file->log_flag == 2) { + wake_tlog_thrd(); + file->log_flag = 0; + } + + buf_len = ssize_ret + file->offset; + tlogd("buf_len = %d\n", buf_len); + /* + * The data may span across multiple packets, and the last packet may even be incomplete. + * Each time, complete packets are extracted from the buffer, and the last incomplete + * packet is moved to the starting position of the buffer. The offset is recorded simultaneously, + * and the next data is filled into the buffer starting from that offset. Each round of + * processing begins from the starting position of the buffer, so the offset is set to 0. 
+ */ + offset = 0; + while(1) { + void *packet = NULL; + int packet_size = 0; + packet = get_packet_item(file->buffer, buf_len, &offset, &packet_size); + if (packet == NULL) { + break; + } + rd_decrement(file); + seq_num = *(int *)(packet + 4); + put_event_data(packet, packet_size, seq_num); + } + file->offset = offset; + tlogd("offset = %d\n", offset); + schedule(); + } + return 0; +} + +struct wr_data *get_wr_data(void) +{ + struct wr_data *write_data = NULL; + struct wr_data *tmp = NULL; + spin_lock(&g_wr_data_list.spinlock); + if (!list_empty(&g_wr_data_list.head)){ + list_for_each_entry_safe(write_data, tmp, &g_wr_data_list.head, head) { + if (write_data->wr_buf){ + list_del(&write_data->head); + break; + } + } + } + spin_unlock(&g_wr_data_list.spinlock); + if (write_data && !write_data->wr_buf) { + tloge("write_data->wr_buf NULL\n"); + return NULL; + } + return write_data; +} + +void destroy_wr_data(struct wr_data *write_data) +{ + if (!write_data) + return; + if (write_data->wr_buf) + kfree(write_data->wr_buf); + kfree(write_data); +} + +static int do_write(struct file *fp_serialport, void *buf, uint32_t buf_size) +{ + int ret = 0; +#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 0, 0) + struct vtz_buf_struct vtz_buf = {0}; +#else + loff_t off =0; +#endif + + if (!fp_serialport || !buf || buf_size > 32 * 1024) + return -EINVAL; + +#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 0, 0) + if (!fp_serialport->f_op->unlocked_ioctl) { + tloge("kernel version > 5.0.0, f_op->unlocked_ioctl is NULL, check virtio-console\n"); + return -EINVAL; + } + vtz_buf.buf = buf; + vtz_buf.buf_size = buf_size; + ret = fp_serialport->f_op->unlocked_ioctl(fp_serialport, + TC_NS_CLIENT_IOCTL_WRITE_REQ, (unsigned long)(&vtz_buf)); +#else + ret = kernel_write(fp_serialport, buf, buf_size, &off); +#endif + return ret < 0 ? 
ret : 0; +} + +#define BLOCK_MTU 1024 +#define MASK (BLOCK_MTU - 1) +#define FIRST_FRAG_LEN (sizeof(struct_packet_cmd_send_cmd) + BLOCK_MTU * sizeof(struct page_block)) +#define MID_FRAG_LEN (BLOCK_MTU * sizeof(struct page_block)) + +int alloc_packet_space(void **fragments, uint32_t nums, uint32_t last_size) +{ + int i = 0; + uint32_t buf_size = 0; + for (i = 0; i < nums; i++) { + if (i == 0 ) + buf_size = FIRST_FRAG_LEN; + else if (i == nums - 1) + buf_size = last_size; + else + buf_size = MID_FRAG_LEN; + fragments[i] = kzalloc(buf_size, GFP_KERNEL); + if (!fragments[i]) { + goto err; + } + } + return 0; +err: + for (i = 0; i< nums; i++) { + if (fragments[i]) + kfree(fragments[i]); + } + return -ENOMEM; +} + +#define FRAG_FLAG 0xAEAE + +int memcpy_packet(void **fragments, uint32_t nums, void *wr_buf, uint32_t last_size) +{ + int i = 0; + int offset = 0; + if (memcpy_s(fragments[0], FIRST_FRAG_LEN, wr_buf, FIRST_FRAG_LEN)) + return -EFAULT; + offset += FIRST_FRAG_LEN; + ((struct_packet_cmd_send_cmd *)fragments[0])->fragment_block_num = BLOCK_MTU; + for (i = 1; i < nums - 1; i++) { + if (memcpy_s(fragments[i], MID_FRAG_LEN, wr_buf + offset, MID_FRAG_LEN)) + return -EFAULT; + offset += MID_FRAG_LEN; + ((struct page_block *)fragments[i])->frag_flag = FRAG_FLAG; + } + if (memcpy_s(fragments[nums - 1], last_size, wr_buf + offset, last_size)) + return -EFAULT; + ((struct page_block *)fragments[nums - 1])->frag_flag = FRAG_FLAG; + return 0; +} + +static int write_split(struct file *fp_serialport, void *wr_buf) +{ + int ret = 0; + int i = 0; + void **fragments; + int blocks_num = 0; + int fragms_num = 0; + int last_num = 0; + uint32_t last_size = 0; + blocks_num = ((struct_packet_cmd_send_cmd *)wr_buf)->fragment_block_num; + fragms_num = blocks_num / BLOCK_MTU; + last_num = blocks_num & MASK; + if (last_num) + fragms_num++; + else + last_num = BLOCK_MTU; + fragments = (void **)kzalloc(sizeof(void *) * fragms_num, GFP_KERNEL); + if (!fragments) { + return -ENOMEM; + } + 
last_size = last_num * sizeof(struct page_block); + if (alloc_packet_space(fragments, fragms_num, last_size)) { + kfree(fragments); + return -EFAULT; + } + + if (memcpy_packet(fragments, fragms_num, wr_buf, last_size)) { + ret = -EFAULT; + goto free; + } + + for (i = 0; i < fragms_num; i++) { + int buf_size = 0; + if (i == 0) + buf_size = FIRST_FRAG_LEN; + else if (i == fragms_num -1) + buf_size = last_size; + else + buf_size = MID_FRAG_LEN; + do_write(fp_serialport, fragments[i], buf_size); + } + +free: + for (i = 0; i < fragms_num; i++) { + if (fragments[i]) + kfree(fragments[i]); + } + if (fragments) + kfree(fragments); + + return ret < 0 ? ret : 0; +} + +static inline bool require_split(void *buf) +{ + struct_packet_cmd_general *packet_cmd = (struct_packet_cmd_general *)buf; + if (packet_cmd->cmd == VTZF_SEND_CMD && + packet_cmd->packet_size - sizeof(struct_packet_cmd_send_cmd) > BLOCK_MTU * sizeof(struct page_block)) { + return true; + } + return false; +} + +static int safe_write(struct wr_data *write_data, struct file *fp_serialport) +{ + int ret = 0; + if (!write_data || !write_data->wr_buf || + write_data->size_wr_buf < 3 * sizeof(uint32_t)) + return -EINVAL; + if (require_split(write_data->wr_buf)) { + if (write_split(fp_serialport, write_data->wr_buf)) + return -EFAULT; + } else { + ret = do_write(fp_serialport, write_data->wr_buf, write_data->size_wr_buf); + } + return ret; +} + +int wr_thread_func(void *arg) +{ + struct vtzf_serial_port_file *file = (struct vtzf_serial_port_file *)arg; + struct file *fp_serialport = NULL; + int ret = 0; + struct wr_data *write_data = NULL; + if (!file || !file->filep) + return -EFAULT; + fp_serialport = file->filep; + + while (!kthread_should_stop()) { + ret = wait_event_interruptible(file->wr_wait_event_wq, file->wr_flag); + if (ret != 0) { + tloge("wr thread wait event interruptible failed!\n"); + ret = -EINTR; + } + if (g_destroy_wr_thread) + break; + write_data = get_wr_data(); + wr_decrement(file); + 
if(!write_data) + continue; + ret = safe_write(write_data, fp_serialport); + if (ret < 0) + tloge("write failed ret = %d\n", ret); + wake_up_rd_thread(); + destroy_wr_data(write_data); + schedule(); + } + return 0; +} + +int create_thread(int pos, struct vtzf_serial_port_file *file) +{ + struct task_struct *tmp_thread; + char *thread_name = kzalloc(32, GFP_KERNEL); + if (!thread_name) { + tloge("Failed to allocate memory for thread name\n"); + return -ENOMEM; + } + + (void)snprintf(thread_name, 32, "vtz_rd_thread_%d", pos); + file->rd_thread_name = thread_name; + + thread_name = kzalloc(32, GFP_KERNEL); + if (!thread_name) { + tloge("Failed to allocate memory for thread name\n"); + kfree(file->rd_thread_name); + file->rd_thread_name = NULL; + return -ENOMEM; + } + (void)snprintf(thread_name, 32, "vtz_wr_thread_%d", pos); + file->wr_thread_name = thread_name; + + thread_name = kzalloc(32, GFP_KERNEL); + if (!thread_name) { + tloge("Failed to allocate memory for thread name\n"); + kfree(file->rd_thread_name); + file->rd_thread_name = NULL; + return -ENOMEM; + } + (void)snprintf(thread_name, 32, "vtz_log_thread_%d", pos); + file->log_thread_name = thread_name; + + tmp_thread = kthread_run(rd_thread_func, file, file->rd_thread_name); + if (tmp_thread) { + file->rd_thread = tmp_thread; + tlogi("Kernel thread created successfully\n"); + } else { + tloge("Failed to create kernel thread\n"); + return -EFAULT; + } + + tmp_thread = kthread_run(wr_thread_func, file, file->wr_thread_name); + if (tmp_thread) { + file->wr_thread = tmp_thread; + tlogi("Kernel thread created successfully\n"); + } else { + tloge("Failed to create kernel thread\n"); + return -EFAULT; + } + + tmp_thread = kthread_run(log_thread_func, file, file->log_thread_name); + if (tmp_thread) { + file->log_thread = tmp_thread; + tlogi("Kernel thread created successfully\n"); + } else { + tloge("Failed to create kernel thread\n"); + return -EFAULT; + } + + return 0; +} + +int serial_port_init(void) +{ + int ret = 
0; + int i; + int size_written; + struct file *file; + struct vtzf_serial_port_file *serial_port_file = NULL; + void *buffer = NULL; + char device_path[256]; + + void *threads = kzalloc(sizeof(struct task_struct) * SERIAL_PORT_NUM, GFP_KERNEL); + if (!threads) { + tloge("Failed to allocate memory for threads\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(&g_serial_port_list.head); + mutex_init(&g_serial_port_list.lock); + INIT_LIST_HEAD(&g_event_data_list.head); + spin_lock_init(&g_event_data_list.spinlock); + + INIT_LIST_HEAD(&g_wr_data_list.head); + spin_lock_init(&g_wr_data_list.spinlock); + + for (i = 0; i < SERIAL_PORT_NUM; i++) { + size_written = snprintf(device_path, sizeof(device_path), "%s%d", VTZF_SERIALPORT, i); + serial_port_file = kzalloc(sizeof(*serial_port_file), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)serial_port_file)) { + tloge("alloc serial_port_file failed\n"); + ret = -ENOMEM; + goto err; + } + buffer = kzalloc(SERIAL_PORT_BUF_LEN, GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)buffer)) { + tloge("alloc serial_port_file failed\n"); + ret = -ENOMEM; + kfree(serial_port_file); + goto err; + } + file = filp_open(device_path, O_RDWR, 0); + if (IS_ERR(file)) { + tloge("open serial_pore failed \n"); + ret = -EFAULT; + kfree(serial_port_file); + kfree(buffer); + goto err; + } + serial_port_file->filep = file; + serial_port_file->buffer = buffer; + serial_port_file->rd_flag = 0; + serial_port_file->wr_flag = 0; + serial_port_file->log_flag = 0; + mutex_init(&serial_port_file->lock); + mutex_init(&serial_port_file->rd_flag_lock); + mutex_init(&serial_port_file->wr_flag_lock); + init_waitqueue_head(&(serial_port_file->rd_wait_event_wq)); + init_waitqueue_head(&(serial_port_file->wr_wait_event_wq)); + init_waitqueue_head(&(serial_port_file->log_wait_event_wq)); + list_add_tail(&serial_port_file->head, &g_serial_port_list.head); + g_serial_port_file = serial_port_file; +#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 0, 0) + 
if (!file->f_op->unlocked_ioctl) { + tloge("waring! file->f_op->unlocked_ioctl undefine!\n"); + ret = -EFAULT; + goto err; + } +#endif + if (create_thread(i, serial_port_file)) + goto err; + } + tlogi(" open serial port success\n"); + return 0; +err: + free_serial_port_list(); + return ret; +} + +int creat_wr_data(void *wr_buf, size_t buf_size) +{ + struct wr_data * write_data; + if (!wr_buf) + return -EINVAL; + write_data = kzalloc(sizeof(struct wr_data), GFP_KERNEL); + if (!write_data) { + tloge("alloc wr write_data failed\n"); + return -ENOMEM; + } + write_data->size_wr_buf = buf_size; + write_data->wr_buf = kzalloc(buf_size, GFP_KERNEL); + if (!write_data->wr_buf) { + tloge("alloc failed\n"); + kfree(write_data); + return -ENOMEM; + } + if (memcpy_s(write_data->wr_buf, buf_size, wr_buf, buf_size) != 0) { + tloge("memcpy_s write_data->wr_buf failed\n"); + kfree(write_data->wr_buf); + kfree(write_data); + return -EFAULT; + } + INIT_LIST_HEAD(&(write_data->head)); + spin_lock(&g_wr_data_list.spinlock); + list_add_tail(&write_data->head, &g_wr_data_list.head); + spin_unlock(&g_wr_data_list.spinlock); + return 0; +} + +struct vhc_event_data *creat_event_data(void *rd_buf, size_t size_rd_buf, int seq_num) +{ + struct vhc_event_data * event_data = kzalloc(sizeof(struct vhc_event_data), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)event_data)) { + tloge("alloc event_data failed\n"); + return NULL; + } + event_data->seq_num = seq_num + 1; + event_data->rd_buf = rd_buf; + event_data->size_rd_buf = size_rd_buf; + event_data->ret_flag = 0; + event_data->rd_ret = 0; + INIT_LIST_HEAD(&(event_data->head)); + init_waitqueue_head(&(event_data->wait_event_wq)); + + spin_lock(&g_event_data_list.spinlock); + list_add_tail(&event_data->head, &g_event_data_list.head); + spin_unlock(&g_event_data_list.spinlock); + return event_data; +} + +void destroy_event_data(struct vhc_event_data *event_data) +{ + if (event_data == NULL) + return ; + + 
spin_lock(&g_event_data_list.spinlock); + list_del(&event_data->head); + spin_unlock(&g_event_data_list.spinlock); + + kfree(event_data); + return ; +} + +int send_to_proxy(void * wrt_buf, size_t size_wrt_buf, void * rd_buf, size_t size_rd_buf, uint32_t seq_num) +{ + int ret = -1; + struct vhc_event_data *event_data; + + ret = creat_wr_data(wrt_buf, size_wrt_buf); + if (ret != 0) { + tloge("creat_wr_data failed\n"); + return ret; + } + + event_data = creat_event_data(rd_buf, size_rd_buf, seq_num); + if (event_data == NULL) + goto err; + wake_up_wr_thread(); + + ret = wait_event_interruptible(event_data->wait_event_wq, + event_data->ret_flag); + if (ret != 0) { + tloge("wait event interruptible failed!, ret = %d\n", ret); + ret = -EINTR; + } else { + ret = event_data->rd_ret; + } + destroy_event_data(event_data); + return ret; + +err: + return ret; +} \ No newline at end of file diff --git a/trustzone-awared-vm/VM/vtzdriver/serialport.h b/trustzone-awared-vm/VM/vtzdriver/serialport.h new file mode 100644 index 0000000000000000000000000000000000000000..8fc011832a92378543720dd83b5069987ca65bde --- /dev/null +++ b/trustzone-awared-vm/VM/vtzdriver/serialport.h @@ -0,0 +1,90 @@ +#ifndef SERIAL_H +#define SERIAL_H + +#include +#include +#include + +#define SERIAL_PORT_BUF_LEN 1024*128 +#define VTZF_SERIALPORT "/dev/virtio-ports/vtzf_serialport" +#define SERIAL_PORT_NUM 1 + +#define AGENT_FS_ID 0x46536673 +#define SECFILE_LOAD_AGENT_ID 0x4c4f4144 +#define AGENT_MISC_ID 0x4d495343 + +#define VTZ_IOC_MAGIC 'v' +#define TC_NS_CLIENT_IOCTL_READ_REQ \ + _IOWR(VTZ_IOC_MAGIC, 1, struct vtz_buf_struct) +#define TC_NS_CLIENT_IOCTL_WRITE_REQ \ + _IOWR(VTZ_IOC_MAGIC, 2, struct vtz_buf_struct) + +struct vtz_buf_struct{ + uint32_t buf_size; + void * buf; +}; + +struct vtzf_serial_port_list { + struct mutex lock; + struct list_head head; +}; + +struct vtzf_event_data_list { + spinlock_t spinlock; + struct list_head head; +}; + +struct vtzf_wr_data_list { + spinlock_t spinlock; + struct 
list_head head; +}; + +struct vtzf_serial_port_file +{ + struct file *filep; + struct list_head head; + struct mutex lock; + wait_queue_head_t rd_wait_event_wq; + wait_queue_head_t wr_wait_event_wq; + wait_queue_head_t log_wait_event_wq; + int rd_flag; + int wr_flag; + int log_flag; + struct mutex rd_flag_lock; + struct mutex wr_flag_lock; + struct task_struct * rd_thread; + struct task_struct * wr_thread; + struct task_struct * log_thread; + char *rd_thread_name; + char *wr_thread_name; + char *log_thread_name; + bool using; + void *buffer; + int buf_size; + int offset; +}; + +struct wr_data +{ + struct list_head head; + void *wr_buf; + size_t size_wr_buf; +}; + +struct vhc_event_data { + struct list_head head; + wait_queue_head_t wait_event_wq; + int ret_flag; + uint32_t seq_num; + void *rd_buf; + size_t size_rd_buf; + int rd_ret; +}; + +int serial_port_init(void); +void free_serial_port_list(void); +void seq_num_init(void); +uint32_t get_seq_num(int agent_id); +struct vtzf_serial_port_list *get_serial_port_list(void); +int send_to_proxy(void * wrt_buf, size_t size_wrt_buf, void * rd_buf, size_t size_rd_buf, uint32_t seq_num); +#endif \ No newline at end of file diff --git a/trustzone-awared-vm/VM/vtzdriver/tee_info.c b/trustzone-awared-vm/VM/vtzdriver/tee_info.c new file mode 100644 index 0000000000000000000000000000000000000000..2c85f076ca4008aaf99acec7690eb823f561a583 --- /dev/null +++ b/trustzone-awared-vm/VM/vtzdriver/tee_info.c @@ -0,0 +1,35 @@ +#include "tee_info.h" +#include "tc_ns_log.h" +#include "comm_structs.h" +#include "serialport.h" + +int tc_ns_get_tee_info(int ptzfd, void __user *argp, bool flag) +{ + int ret; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_getteeinfo packet_cmd = {0}; + struct_packet_rsp_getteeinfo packet_rsp = {0}; + + if (!argp || ptzfd <= 0) { + tloge("invalid params\n"); + return -EINVAL; + } + + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.cmd = VTZF_GET_TEE_INFO; + packet_cmd.seq_num = seq_num; + 
packet_cmd.ptzfd = ptzfd; + packet_cmd.istlog = flag; + + if (send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num)) { + ret = -EFAULT; + goto END; + } else { + ret = packet_rsp.ret; + if (copy_to_user(argp, &packet_rsp.info, sizeof(struct tc_ns_tee_info)) != 0) + ret = -EFAULT; + } + +END: + return ret; +} \ No newline at end of file diff --git a/trustzone-awared-vm/VM/vtzdriver/tee_info.h b/trustzone-awared-vm/VM/vtzdriver/tee_info.h new file mode 100644 index 0000000000000000000000000000000000000000..e31b5456eb6cafa85b212d8007a679dd06578af4 --- /dev/null +++ b/trustzone-awared-vm/VM/vtzdriver/tee_info.h @@ -0,0 +1,8 @@ +#ifndef TEE_INFO_H +#define TEE_INFO_H + +#include + +int tc_ns_get_tee_info(int ptzfd, void __user *argp, bool flag); + +#endif \ No newline at end of file diff --git a/trustzone-awared-vm/VM/vtzdriver/tlogger.c b/trustzone-awared-vm/VM/vtzdriver/tlogger.c new file mode 100644 index 0000000000000000000000000000000000000000..eaddd7d120869e8605b306b2aff886438df993a2 --- /dev/null +++ b/trustzone-awared-vm/VM/vtzdriver/tlogger.c @@ -0,0 +1,870 @@ +#include "tlogger.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "comm_structs.h" +#include "serialport.h" +#include "tee_info.h" +#include "teek_client_constants.h" +#include "tc_ns_client.h" +#include "teek_ns_client.h" +#include + +#ifndef CONFIG_TEE_LOG_ACHIVE_PATH +#define CONFIG_TEE_LOG_ACHIVE_PATH "/var/log/tee/last_teemsg" +#endif +#define MAX_LOG__FILE_SIZE 1024 * 1024 * 32 +static struct log_buffer *g_log_buffer = NULL; +static struct tlogger_reader *g_thread_reader = NULL; +static int g_buff_len = 0; +static LIST_HEAD(m_log_list); +static uint32_t g_log_mem_len = 0; +static uint32_t g_tlogcat_count = 0; +static struct tlogger_log *g_log; +static struct mutex g_reader_group_mutex; +static LIST_HEAD(g_reader_group_list); + +static int g_vmid = 0; + +static struct 
tlogger_log *get_reader_log(const struct file *file) +{ + struct tlogger_reader *reader = NULL; + + reader = file->private_data; + if (!reader) + return NULL; + + return reader->log; +} + +static struct tlogger_group *get_tlogger_group(void) +{ + struct tlogger_group *group = NULL; +#ifdef CONFIG_CONFIDENTIAL_CONTAINER + uint32_t nsid = task_active_pid_ns(current)->ns.inum; +#else + uint32_t nsid = PROC_PID_INIT_INO; +#endif + + list_for_each_entry(group, &g_reader_group_list, node) { + if (group->nsid == nsid) + return group; + } + + return NULL; +} + +static struct tlogger_log *get_tlogger_log_by_minor(int minor) +{ + struct tlogger_log *log = NULL; + + list_for_each_entry(log, &m_log_list, logs) { + if (log->misc_device.minor == minor) + return log; + } + + return NULL; +} + +static void init_tlogger_reader(struct tlogger_reader *reader, struct tlogger_log *log, struct tlogger_group *group) +{ + reader->log = log; + reader->group = group; + + get_task_struct(current); + reader->pid = get_task_pid(current, PIDTYPE_PID); + put_task_struct(current); + + reader->r_all = true; + reader->r_off = 0; + reader->r_loops = 0; + reader->r_sn = 0; + reader->r_failtimes = 0; + reader->r_is_tlogf = 0; + reader->r_from_cur = 0; + + INIT_LIST_HEAD(&reader->list); + init_waitqueue_head(&reader->wait_queue_head); +} + +static void init_tlogger_group(struct tlogger_group *group) +{ + group->reader_cnt = 1; +#ifdef CONFIG_CONFIDENTIAL_CONTAINER + group->nsid = task_active_pid_ns(current)->ns.inum; +#else + group->nsid = PROC_PID_INIT_INO; +#endif + group->tlogf_stat = 0; +} + +#ifndef NSID_FILE_PATH +#define NSID_FILE_PATH "/tmp/qca_nsid" +#endif + +static int store_nsid_file(int vmid) +{ + struct file *filep = NULL; + char str[32]={0}; + loff_t pos = 0; + filep = filp_open(NSID_FILE_PATH, O_CREAT | O_RDWR | O_TRUNC, OPEN_FILE_MODE); + if (!filep || IS_ERR(filep)) { + tloge("open last teemsg file err %ld\n", PTR_ERR(filep)); + return -1; + } + (void)snprintf(str, 32, "%d", vmid); + 
(void)kernel_write(filep, str, strlen(str), &pos); + filp_close(filep, NULL); + return 0; +} + +static int open_tzdriver_tlogger(struct tlogger_reader *dev_file, uint32_t flag, bool isthd) +{ + int ret = 0; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_open_tzd packet_cmd = {0}; + struct_packet_rsp_open_tzd packet_rsp = {0}; + + if (!dev_file) { + tloge("invalid params\n"); + return -EINVAL; + } + + dev_file->ptzfd = -1; + if (isthd) + seq_num = 40; + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.seq_num = seq_num; + packet_cmd.cmd = VTZF_OPEN_TZD; + packet_cmd.vmid = 0; + /* if flag==0, open tc_ns_client; if flag==1, open tc_private */ + packet_cmd.flag = flag; + tlogi("start open tlogger\n"); + ret = send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num); + if (!ret) { + ret = packet_rsp.ret; + if (ret) { + tloge("open TZdriver failed ret is %d\n", ret); + goto END; + } + dev_file->ptzfd = packet_rsp.ptzfd; + if (!g_vmid) { + store_nsid_file(packet_rsp.vmid); + } + } else { + tloge("send to proxy failed ret is %d\n", ret); + } + tlogi("try open tlogger, ret = %d\n", ret); +END: + return ret; +} + +static int process_tlogger_open(struct inode *inode, + struct file *file) +{ + struct tlogger_log *log = NULL; + int ret; + struct tlogger_reader *reader = NULL; + struct tlogger_group *group = NULL; + + tlogd("open logger open ++\n"); + /* not support seek */ + ret = nonseekable_open(inode, file); + if (ret != 0) + return ret; + + tlogd("Before get log from minor\n"); + log = get_tlogger_log_by_minor(MINOR(inode->i_rdev)); + if (!log) + return -ENODEV; + + mutex_lock(&g_reader_group_mutex); + group = get_tlogger_group(); + if (group == NULL) { + group = kzalloc(sizeof(*group), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)group)) { + mutex_unlock(&g_reader_group_mutex); + return -ENOMEM; + } + init_tlogger_group(group); + list_add_tail(&group->node, &g_reader_group_list); + } else { + 
group->reader_cnt++; + } + mutex_unlock(&g_reader_group_mutex); + + reader = kmalloc(sizeof(*reader), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)reader)) { + mutex_lock(&g_reader_group_mutex); + if (--group->reader_cnt == 0) { + list_del(&group->node); + kfree(group); + } + mutex_unlock(&g_reader_group_mutex); + return -ENOMEM; + } + init_tlogger_reader(reader, log, group); + + mutex_lock(&log->mutex_log_chnl); + list_add_tail(&reader->list, &log->readers); + g_tlogcat_count++; + mutex_unlock(&log->mutex_log_chnl); + + file->private_data = reader; + (void)open_tzdriver_tlogger(reader, TLOG_DEV_FLAG, false); + tlogd("tlogcat count %u\n", g_tlogcat_count); + return 0; +} + +static int close_tzdriver_tlogger(struct tlogger_reader *dev_file) +{ + int ret = 0; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_close_tzd packet_cmd = {0}; + struct_packet_rsp_close_tzd packet_rsp = {0}; + + if (!dev_file || dev_file->ptzfd <= 0) { + tloge("invalid params\n"); + return -EINVAL; + } + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.seq_num = seq_num; + packet_cmd.cmd = VTZF_CLOSE_TZD; + packet_cmd.ptzfd = dev_file->ptzfd; + tlogd("close ptzfd = %d\n", dev_file->ptzfd); + + ret = send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num); + if (!ret) { + ret = packet_rsp.ret; + if (ret) { + tloge("close TZdriver failed ret is %d\n", ret); + goto END; + } + } else { + tloge("send to proxy failed ret is %d\n", ret); + } +END: + return ret; +} + +static int process_tlogger_release(struct inode *ignored, + struct file *file) +{ + struct tlogger_reader *reader = NULL; + struct tlogger_log *log = NULL; + struct tlogger_group *group = NULL; + + (void)ignored; + + tlogd("logger_release ++\n"); + + if (!file) + return -1; + + reader = file->private_data; + if (!reader) { + tloge("reader is null\n"); + return -1; + } + + log = reader->log; + if (!log) { + tloge("log is null\n"); + return -1; + } + + 
mutex_lock(&log->mutex_log_chnl); + list_del(&reader->list); + if (g_tlogcat_count >= 1) + g_tlogcat_count--; + mutex_unlock(&log->mutex_log_chnl); + + group = reader->group; + if (group != NULL) { + mutex_lock(&g_reader_group_mutex); + if (reader->r_is_tlogf != 0) + group->tlogf_stat = 0; + if (--group->reader_cnt == 0) { + list_del(&group->node); + kfree(group); + } + mutex_unlock(&g_reader_group_mutex); + } + (void)close_tzdriver_tlogger(reader); + kfree(reader); + tlogd("tlogcat count %u\n", g_tlogcat_count); + return 0; +} + +static int get_log_from_host(struct tlogger_reader *dev_file) +{ + int ret; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_get_log packet_cmd = {0}; + struct_packet_rsp_get_log *packet_rsp = {0}; + packet_rsp= kzalloc(sizeof(*packet_rsp) + LOG_BUFFER_LEN, GFP_KERNEL); + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.seq_num = seq_num; + packet_cmd.cmd = VTZF_GET_LOG; + packet_cmd.ptzfd = dev_file->ptzfd; + if (send_to_proxy(&packet_cmd, sizeof(packet_cmd), packet_rsp, + sizeof(*packet_rsp) + LOG_BUFFER_LEN, seq_num)) { + tloge("send to proxy failed\n"); + return -EFAULT; + } else { + ret = packet_rsp->ret; + g_buff_len = packet_rsp->length; + memcpy_s(g_log_buffer, packet_rsp->length, packet_rsp->buffer, packet_rsp->length); + } + kfree((void *)(packet_rsp)); + return ret; +} + +static ssize_t process_tlogger_read(struct file *file, + char __user *buf, size_t count, loff_t *pos) +{ + int ret = g_buff_len; + (void)buf; + (void)count; + (void)pos; + if (copy_to_user(buf, (void *)g_log_buffer, g_buff_len) != 0) + tloge("copy failed, item len %u\n", g_buff_len); + g_buff_len = 0; + return ret; +} + +static unsigned int process_tlogger_poll(struct file *file, + poll_table *wait) +{ + struct tlogger_reader *reader = NULL; + struct tlogger_log *log = NULL; + struct log_buffer *buffer = NULL; + uint32_t ret = POLLOUT | POLLWRNORM; + + tlogd("logger_poll ++\n"); + if (!file) { + tloge("file is null\n"); + return ret; + } + + 
reader = file->private_data; + if (!reader) { + tloge("the private data is null\n"); + return ret; + } + + log = reader->log; + if (!log) { + tloge("log is null\n"); + return ret; + } + + buffer = (struct log_buffer *)log->buffer_info; + if (!buffer) { + tloge("buffer is null\n"); + return ret; + } + + (void)get_log_from_host(reader); + ret |= POLLIN | POLLRDNORM; + tlogd("before return \n"); + return ret; +} + +static int check_user_arg(unsigned long arg, size_t arg_len) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 18) || \ + LINUX_VERSION_CODE == KERNEL_VERSION(4, 19, 71)) + return (int)access_ok(VERIFY_READ, + (void __user *)(uintptr_t)arg, arg_len); +#else + return (int)access_ok((void __user *)(uintptr_t)arg, arg_len); +#endif +} + +static int get_teeos_version(struct tlogger_reader *dev_file, + uint32_t cmd, unsigned long arg) +{ + int ret; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_get_ver packet_cmd = {0}; + struct_packet_rsp_get_ver packet_rsp = {0}; + + if ((_IOC_DIR(cmd) & _IOC_READ) == 0) { + tloge("check get version cmd failed\n"); + return -1; + } + + ret = check_user_arg(arg, + sizeof(g_log_buffer->flag.version_info)); + if (ret == 0) { + tloge("check version info arg failed\n"); + return -1; + } + + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.seq_num = seq_num; + packet_cmd.cmd = VTZF_GET_TEEOS_VER; + packet_cmd.ptzfd = dev_file->ptzfd; + + if (send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num)) { + tloge("sen to proxy failed\n"); + return -EFAULT; + } else { + ret = packet_rsp.ret; + if (copy_to_user((void __user *)(uintptr_t)arg, + (void *)packet_rsp.version_info, + sizeof(packet_rsp.version_info)) != 0) { + tloge("version info copy failed\n"); + return -1; + } + } + return ret; +} + +#define SET_READ_POS 1U +static int set_reader_cur_pos(const struct file *file) +{ + int ret = 0; + struct tlogger_reader *dev_file = NULL; + uint32_t seq_num = get_seq_num(0); + 
struct_packet_cmd_set_reader_cur packet_cmd = {0}; + struct_packet_rsp_set_reader_cur packet_rsp = {0}; + + if (!file || !file->private_data) + return -EINVAL; + dev_file = file->private_data; + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.seq_num = seq_num; + packet_cmd.cmd = VTZF_SET_READER_CUR; + packet_cmd.ptzfd = dev_file->ptzfd; + + if (send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num)) { + tloge("sen to proxy failed\n"); + return -EFAULT; + } else { + ret = packet_rsp.ret; + } + return ret; +} + +static int set_tlogcat_f_stat(const struct file *file) +{ + int ret = 0; + struct tlogger_reader *dev_file = NULL; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_set_tlogcat_stat packet_cmd = {0}; + struct_packet_rsp_set_tlogcat_stat packet_rsp = {0}; + + if (!file || !file->private_data) + return -EINVAL; + + dev_file = file->private_data; + + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.seq_num = seq_num; + packet_cmd.cmd = VTZF_SET_TLOGCAT_STAT; + packet_cmd.ptzfd = dev_file->ptzfd; + + if (send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num)) { + tloge("sen to proxy failed\n"); + return -EFAULT; + } else { + ret = packet_rsp.ret; + } + return ret; +} + +static int get_tlogcat_f_stat(const struct file *file) +{ + struct tlogger_reader *dev_file = NULL; + int tlogf_stat = 0; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_get_tlogcat_stat packet_cmd = {0}; + struct_packet_rsp_get_tlogcat_stat packet_rsp = {0}; + + if (!file || !file->private_data) + return tlogf_stat; + + dev_file = file->private_data; + + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.seq_num = seq_num; + packet_cmd.cmd = VTZF_GET_TLOGCAT_STAT; + packet_cmd.ptzfd = dev_file->ptzfd; + if (send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num)) { + tloge("sen to proxy failed\n"); + return -EFAULT; + } else { + tlogf_stat = 
packet_rsp.ret; + } + return tlogf_stat; +} + +static long process_tlogger_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct tlogger_log *log = NULL; + long ret = -EINVAL; + struct tlogger_reader *dev_file = NULL; + if (!file) + return -1; + + log = get_reader_log(file); + if (!log) { + tloge("log is null\n"); + return -1; + } + dev_file = file->private_data; + if (dev_file == NULL) { + return -1; + } + + tlogd("logger_ioctl start ++\n"); + mutex_lock(&log->mutex_info); + + switch (cmd) { + case TEELOGGER_GET_VERSION: + if (get_teeos_version(dev_file, cmd, arg) == 0) + ret = 0; + break; + case TEELOGGER_SET_READERPOS_CUR: + (void)set_reader_cur_pos(file); + ret = 0; + break; + case TEELOGGER_SET_TLOGCAT_STAT: + (void)set_tlogcat_f_stat(file); + ret = 0; + break; + case TEELOGGER_GET_TLOGCAT_STAT: + ret = get_tlogcat_f_stat(file); + break; + case TEELOGGER_GET_TEE_INFO: + ret = tc_ns_get_tee_info(dev_file->ptzfd, (void *)(uintptr_t)arg, true); + break; + default: + tloge("ioctl error default\n"); + break; + } + + mutex_unlock(&log->mutex_info); + return ret; +} + +#ifdef CONFIG_COMPAT +static long process_tlogger_compat_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + tlogd("logger_compat_ioctl ++\n"); + arg = (unsigned long)(uintptr_t)compat_ptr(arg); + return process_tlogger_ioctl(file, cmd, arg); +} +#endif + +static const struct file_operations g_logger_fops = { + .owner = THIS_MODULE, + .read = process_tlogger_read, + .poll = process_tlogger_poll, + .unlocked_ioctl = process_tlogger_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = process_tlogger_compat_ioctl, +#endif + .open = process_tlogger_open, + .release = process_tlogger_release, +}; + +static int open_msg_file(struct file **file, + const char *file_path, uint32_t file_path_len) +{ + struct file *filep = NULL; + loff_t file_size; + (void)file_path_len; + + filep = filp_open(file_path, O_CREAT | O_RDWR | O_APPEND, OPEN_FILE_MODE); + if (!filep || IS_ERR(filep)) 
{ + tloge("open last teemsg file err %ld\n", PTR_ERR(filep)); + return -1; + } + + file_size = i_size_read(file_inode(filep)); + + if (file_size > MAX_LOG__FILE_SIZE) { + vfs_truncate(&filep->f_path, 0); + pr_info("Log file %s truncated.\n", file_path); + } + + *file = filep; + return 0; +} + +#ifdef WRITE_VERSION +static int write_version_to_msg(struct file *filep, + loff_t *pos) +{ + ssize_t write_len; + /* first write tee versino info */ + write_len = kernel_write(filep, g_log_buffer->flag.version_info, + strlen(g_log_buffer->flag.version_info), pos); + if (write_len < 0) { + tloge("Failed to write to last teemsg version\n"); + return -1; + } + + tlogd("Succeed to Write to last teemsg version, len=%zd\n", write_len); + return 0; +} +#endif + +static bool check_log_item_validite(const struct log_item *item, + uint32_t item_max_size) +{ + bool con = (item && (item->magic == LOG_ITEM_MAGIC) && + (item->buffer_len > 0) && + (item->real_len > 0) && + (item->buffer_len % LOG_ITEM_LEN_ALIGN == 0) && + (item->real_len <= item->buffer_len) && + ((item->buffer_len - item->real_len) < LOG_ITEM_LEN_ALIGN) && + (item->buffer_len + sizeof(*item) <= item_max_size)); + + return con; +} + +static struct log_item *msg_get_next(unsigned char *buffer_start, + uint32_t read_pos, uint32_t scope_len, uint32_t max_len) +{ + uint32_t i = 0; + struct log_item *item = NULL; + uint32_t item_max_size; + uint32_t len; + + while (i <= scope_len && + ((read_pos + i + sizeof(*item)) < max_len)) { + len = (uint32_t)(scope_len - i); + item_max_size = + ((len > LOG_ITEM_MAX_LEN) ? 
LOG_ITEM_MAX_LEN : len); + item = (struct log_item *)(buffer_start + read_pos + i); + + if (check_log_item_validite(item, item_max_size)) { + if ((read_pos + i + sizeof(*item) + + item->buffer_len) > max_len) { + tloge("check item len error\n"); + return NULL; + } + + return item; + } + + i += LOG_ITEM_LEN_ALIGN; + item = NULL; + } + + return NULL; +} + +static int write_part_log_to_msg( unsigned char *buffer, + uint32_t buffer_max_len, loff_t *pos, + uint32_t read_off, uint32_t read_off_end) +{ + struct file *filep = NULL; + struct log_item *next_item = NULL; + uint32_t item_len; + uint32_t total_len = 0; + ssize_t write_len; + int ret = 0; + ret = open_msg_file(&filep, CONFIG_TEE_LOG_ACHIVE_PATH, sizeof(CONFIG_TEE_LOG_ACHIVE_PATH)); + if (ret != 0) + return 0; + + next_item = msg_get_next(buffer, read_off, + LOG_ITEM_MAX_LEN, buffer_max_len); + + while (next_item && read_off <= read_off_end) { + item_len = next_item->buffer_len + sizeof(*next_item); + write_len = kernel_write(filep, next_item->log_buffer, + next_item->real_len, pos); + if (write_len < 0) { + tloge("Failed to write last teemsg %zd\n", write_len); + ret = -1; + goto END; + } + + tlogd("Succeed to Write last teemsg, len=%zd\n", write_len); + total_len += item_len; + read_off = (unsigned char *)next_item - buffer + item_len; + if (total_len >= buffer_max_len) + break; + + next_item = msg_get_next(buffer, read_off, + LOG_ITEM_MAX_LEN, buffer_max_len); + } +END: + filp_close(filep, NULL); + return ret; +} + +int log_thread_func(void *arg) +{ + struct vtzf_serial_port_file *dev_file = (struct vtzf_serial_port_file *)arg; + int ret = 0; + uint32_t seq_num = 40; + loff_t pos = 0; + struct_packet_cmd_get_log packet_cmd; + struct_packet_rsp_get_log *packet_rsp; + if (!dev_file) + return -EINVAL; + g_thread_reader = kmalloc(sizeof(*g_thread_reader), GFP_KERNEL); + if (!g_thread_reader) { + return -ENOMEM; + } + packet_rsp= kzalloc(sizeof(*packet_rsp) + LOG_BUFFER_LEN, GFP_KERNEL); + if (!packet_rsp) { + 
kfree(g_thread_reader); + return -ENOMEM; + } + + ret = -EAGAIN; + while(!kthread_should_stop() && !dev_file->log_flag){ + if (ret == -EAGAIN) { + dev_file->log_flag = 2; + while(!kthread_should_stop() && open_tzdriver_tlogger(g_thread_reader, TLOG_DEV_THD_FLAG, true)){ + tlogd("will sleep 10s\n"); + (void)wait_event_interruptible_timeout(dev_file->log_wait_event_wq, dev_file->log_flag, 10 * HZ); + tlogd("after sleep 10s\n"); + } + } else if (!ret) { + write_part_log_to_msg(packet_rsp->buffer, packet_rsp->length, &pos, 0, packet_rsp->length); + tlogd("write tlog \n"); + } + if (kthread_should_stop() || dev_file->log_flag) + break; + tlogd("g_thread_reader->ptzfd = %d\n", g_thread_reader->ptzfd); + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.seq_num = seq_num; + packet_cmd.cmd = VTZF_GET_LOG; + packet_cmd.ptzfd = g_thread_reader->ptzfd; + ret = send_to_proxy(&packet_cmd, sizeof(packet_cmd), packet_rsp, + sizeof(*packet_rsp) + LOG_BUFFER_LEN, seq_num); + schedule(); + } + kfree((void *)(packet_rsp)); + kfree(g_thread_reader); + return 0; +} + +static int register_device(const char *log_name, + uintptr_t addr, int size) +{ + int ret; + struct tlogger_log *log = NULL; + unsigned char *buffer = (unsigned char *)addr; + (void)size; + + log = kzalloc(sizeof(*log), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)log)) { + tloge("kzalloc is failed\n"); + return -ENOMEM; + } + log->buffer_info = buffer; + log->misc_device.minor = MISC_DYNAMIC_MINOR; + log->misc_device.name = kstrdup(log_name, GFP_KERNEL); + if (!log->misc_device.name) { + ret = -ENOMEM; + tloge("kstrdup is failed\n"); + goto out_free_log; + } + log->misc_device.fops = &g_logger_fops; + log->misc_device.parent = NULL; + + INIT_LIST_HEAD(&log->readers); + mutex_init(&log->mutex_info); + mutex_init(&log->mutex_log_chnl); + INIT_LIST_HEAD(&log->logs); + list_add_tail(&log->logs, &m_log_list); + + /* register misc device for this log */ + ret = misc_register(&log->misc_device); + if 
(unlikely(ret)) { + tloge("failed to register misc device:%s\n", + log->misc_device.name); + goto out_free_log; + } + g_log = log; + return 0; + +out_free_log: + if (log->misc_device.name) + kfree(log->misc_device.name); + + kfree(log); + return ret; +} + +static int alloc_log_mem(void) +{ + int ret = 0; + void *tmp = kzalloc(TEMP_LOG_MEM_SIZE, GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)tmp)) { + tloge("alloc serial_port_file failed\n"); + ret = -ENOMEM; + goto END; + } + g_log_buffer = (struct log_buffer *)tmp; + g_log_mem_len = TEMP_LOG_MEM_SIZE; + g_log_buffer->flag.max_len = g_log_mem_len - sizeof(*g_log_buffer); +END: + return ret; +} + +int register_tloger_device(void) +{ + int ret; + ret = alloc_log_mem(); + if (ret) + return ret; + ret = register_device(LOGGER_LOG_TEEOS, (uintptr_t)g_log_buffer, + g_log_mem_len); + if (ret != 0) { + kfree((void *)g_log_buffer); + g_log_buffer = NULL; + g_log_mem_len = 0; + } + return ret; +} + +static void unregister_tlogger(void) +{ + struct tlogger_log *current_log = NULL; + struct tlogger_log *next_log = NULL; + + list_for_each_entry_safe(current_log, next_log, &m_log_list, logs) { + /* we have to delete all the entry inside m_log_list */ + misc_deregister(¤t_log->misc_device); + kfree(current_log->misc_device.name); + list_del(¤t_log->logs); + kfree(current_log); + } + + kfree((void *)g_log_buffer); + g_log_buffer = NULL; + g_log_mem_len = 0; +} + +void tlogger_exit(void) +{ + unregister_tlogger(); +} + +int tlogger_init(void) +{ + return register_tloger_device(); +} \ No newline at end of file diff --git a/trustzone-awared-vm/VM/vtzdriver/tlogger.h b/trustzone-awared-vm/VM/vtzdriver/tlogger.h new file mode 100644 index 0000000000000000000000000000000000000000..21ad016b0ce213ebb52abe8568a02c28819f61e3 --- /dev/null +++ b/trustzone-awared-vm/VM/vtzdriver/tlogger.h @@ -0,0 +1,215 @@ +/* + * tlogger.h + * + * TEE Logging Subsystem, read the tee os log from rdr memory + */ +#ifndef TLOGGER_H +#define 
TLOGGER_H + +#include +#include +#include "tc_ns_client.h" + +#define OPEN_FILE_MODE 0640U +#define ROOT_UID 0 +#define ROOT_GID 0 +#define SYSTEM_GID 1000 +#ifdef LAST_TEE_MSG_ROOT_GID +#define FILE_CHOWN_GID 0 +#else +/* system gid for last_teemsg file sys chown */ +#define FILE_CHOWN_GID 1000 +#endif + +#define UINT64_MAX (uint64_t)(~((uint64_t)0)) /* 0xFFFFFFFFFFFFFFFF */ + +/* for log item ----------------------------------- */ +#define LOG_ITEM_MAGIC 0x5A5A +#define LOG_ITEM_LEN_ALIGN 64 +#define LOG_ITEM_MAX_LEN 1024 +#define LOG_READ_STATUS_ERROR 0x000FFFF + +/* =================================================== */ +#define LOGGER_LOG_TEEOS "teelog" /* tee os log */ +#define LOGGERIOCTL 0xBE /* for ioctl */ + +#define DUMP_START_MAGIC "Dump SPI notification" +#define DUMP_END_MAGIC "Dump task states END" + +#define GET_VERSION_BASE 5 +#define SET_READERPOS_CUR_BASE 6 +#define SET_TLOGCAT_STAT_BASE 7 +#define GET_TLOGCAT_STAT_BASE 8 +#define GET_TEE_INFO_BASE 9 + +/* get tee verison */ +#define MAX_TEE_VERSION_LEN 256 +#define TEELOGGER_GET_VERSION \ + _IOR(LOGGERIOCTL, GET_VERSION_BASE, char[MAX_TEE_VERSION_LEN]) +/* set the log reader pos to current pos */ +#define TEELOGGER_SET_READERPOS_CUR \ + _IO(LOGGERIOCTL, SET_READERPOS_CUR_BASE) +#define TEELOGGER_SET_TLOGCAT_STAT \ + _IO(LOGGERIOCTL, SET_TLOGCAT_STAT_BASE) +#define TEELOGGER_GET_TLOGCAT_STAT \ + _IO(LOGGERIOCTL, GET_TLOGCAT_STAT_BASE) +#define TEELOGGER_GET_TEE_INFO \ + _IOR(LOGGERIOCTL, GET_TEE_INFO_BASE, struct tc_ns_tee_info) + +#define NEVER_USED_LEN 28U +#define LOG_ITEM_RESERVED_LEN 1U + +/* 64 byte head + user log */ +struct log_item { + unsigned char never_used[NEVER_USED_LEN]; + unsigned int nsid; + unsigned short magic; + unsigned short reserved0; + uint32_t serial_no; + unsigned short real_len; /* log real len */ + unsigned short buffer_len; /* log buffer's len, multiple of 32 bytes */ + unsigned char uuid[UUID_LEN]; + unsigned char log_source_type; + unsigned char 
reserved[LOG_ITEM_RESERVED_LEN]; + unsigned char log_level; + unsigned char new_line; /* '\n' char, easy viewing log in bbox.bin file */ + unsigned char log_buffer[]; +}; + +/* --- for log mem --------------------------------- */ +#define TEMP_LOG_MEM_SIZE (64 * SZ_1K) + +#define LOG_BUFFER_RESERVED_LEN 11U +#define VERSION_INFO_LEN 256U + +#define LOG_BUFFER_LEN 2000 + +/* + * Log's buffer flag info, size: 64 bytes head + 156 bytes's version info. + * For filed description: + * last_pos : current log's end position, last log's start position. + * write_loops: Write cyclically. Init value is 0, when memory is used + * up, the value add 1. + */ +struct log_buffer_flag { + uint32_t reserved0; + uint32_t last_pos; + uint32_t write_loops; + uint32_t log_level; + /* [0] is magic failed, [1] is serial_no failed, used fior log retention feature */ + uint32_t reserved[LOG_BUFFER_RESERVED_LEN]; + uint32_t max_len; + unsigned char version_info[VERSION_INFO_LEN]; +}; + +struct log_buffer { + struct log_buffer_flag flag; + unsigned char buffer_start[]; +}; + +struct tlogger_log { + unsigned char *buffer_info; /* ring buffer info */ + struct mutex mutex_info; /* this mutex protects buffer_info */ + struct list_head logs; /* log channels list */ + struct mutex mutex_log_chnl; /* this mutex protects log channels */ + struct miscdevice misc_device; /* misc device log */ + struct list_head readers; /* log's readers */ +}; + +struct tlogger_group { + struct list_head node; + uint32_t nsid; + volatile uint32_t reader_cnt; + volatile uint32_t tlogf_stat; +}; + +struct tlogger_reader { + struct tlogger_log *log; /* tlogger_log info data */ + struct tlogger_group *group; /* tlogger_group info data */ + struct pid *pid; /* current process pid */ + struct list_head list; /* log entry in tlogger_log's list */ + wait_queue_head_t wait_queue_head; /* wait queue head for reader */ + /* Current reading position, start position of next read again */ + uint32_t r_off; + uint32_t r_loops; + 
uint32_t r_sn; + uint32_t r_failtimes; + uint32_t r_from_cur; + uint32_t r_is_tlogf; + bool r_all; /* whether this reader can read all entries */ + uint32_t r_ver; + int32_t ptzfd; +}; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; +} struct_packet_cmd_get_ver; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; + unsigned char version_info[VERSION_INFO_LEN]; +} struct_packet_rsp_get_ver; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; +} struct_packet_cmd_set_reader_cur; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_set_reader_cur; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; +} struct_packet_cmd_set_tlogcat_stat; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_set_tlogcat_stat; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; +} struct_packet_cmd_get_tlogcat_stat; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; +} struct_packet_rsp_get_tlogcat_stat; + +typedef struct { + uint32_t packet_size; + uint32_t cmd; + uint32_t seq_num; + int32_t ptzfd; +} struct_packet_cmd_get_log; + +typedef struct { + uint32_t packet_size; + uint32_t seq_num; + uint32_t ret; + int length; + char buffer[]; +} struct_packet_rsp_get_log; + +int tlogger_init(void); +void tlogger_exit(void); +int log_thread_func(void *arg); +#endif \ No newline at end of file diff --git a/trustzone-awared-vm/VM/vtzdriver/vtzf.c b/trustzone-awared-vm/VM/vtzdriver/vtzf.c new file mode 100644 index 0000000000000000000000000000000000000000..6ffb1b2d9f221636ffc36fde1c61692eee86c465 --- /dev/null +++ b/trustzone-awared-vm/VM/vtzdriver/vtzf.c @@ -0,0 +1,1862 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include +#include +#include "vtzf.h" +#include "tlogger.h" +#include "serialport.h" +#include "tee_info.h" +#include "comm_structs.h" +#include "reserved_shm.h" +#include "securec.h" +#include "tc_ns_client.h" +#include "tc_ns_log.h" +#include "teek_client_constants.h" +#include "block_pages.h" + +#define CONFIG_CONFIDENTIAL_CONTAINER + +#define PRINTF_SIZE 16 +void dump_buff(const char *buffer, size_t bufLen) { + size_t i; + if (buffer == NULL || bufLen == 0) { + return; + } + tlogd("--------------------------------------------------\n"); + tlogd("bufLen = %d\n", (int)bufLen); + for (i = 0; i < bufLen; i++) { + if (i % PRINTF_SIZE == 0 && i != 0) { + tlogd("\n"); + } + tlogd("%02x ", *(buffer + i)); + } + tlogd("\n--------------------------------------------------\n"); + return; +} + +static struct class *g_driver_class; +static struct device_node *g_dev_node; + +struct dev_node g_tc_client; +struct dev_node g_tc_private; +#if defined(CONFIG_CONFIDENTIAL_CONTAINER) || defined(CONFIG_TEE_TELEPORT_SUPPORT) +struct dev_node g_tc_cvm; +#endif +/* dev node list and itself has mutex to avoid race */ +struct vtzf_dev_list g_tc_ns_dev_list; + +static unsigned int g_device_file_cnt = 1; +static DEFINE_MUTEX(g_device_file_cnt_lock); + +#define MAX_AGENTS_NUM 10 +struct agent_buf g_agents_buf[MAX_AGENTS_NUM] = {0}; + +static struct vm_operations_struct g_shared_remap_vm_ops = { + .open = shared_vma_open, + .close = shared_vma_close, +}; + +static const struct file_operations g_tc_ns_client_fops = { + .owner = THIS_MODULE, + .open = vtzf_client_open, + .release = vtzf_close, + .unlocked_ioctl = tc_client_ioctl, + .mmap = vtzf_mmap, +#ifdef CONFIG_COMPAT + .compat_ioctl = tc_compat_client_ioctl, +#endif +}; + +static const struct file_operations g_teecd_fops = { + .owner = THIS_MODULE, + .open = vtzf_private_open, + .release = vtzf_close, + .unlocked_ioctl = tc_private_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = tc_compat_private_ioctl, 
+#endif +}; + +static const struct file_operations g_cvm_fops = { + .owner = THIS_MODULE, + .open = vtzf_cvm_open, + .release = vtzf_close, + .unlocked_ioctl = tc_cvm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = tc_compat_cvm_ioctl, +#endif +}; + +const struct file_operations *get_cvm_fops(void) +{ + return &g_cvm_fops; +} + +struct vtzf_dev_list *get_dev_list(void) +{ + return &g_tc_ns_dev_list; +} + +static int create_dev_node(struct dev_node *node) +{ + int ret; + if (!node || !(node->node_name)) { + tloge("node or member is null\n"); + return -EFAULT; + } + if (alloc_chrdev_region(&(node->devt), 0, 1, + node->node_name) != 0) { + tloge("alloc chrdev region failed"); + ret = -EFAULT; + return ret; + } + node->class_dev = device_create(node->driver_class, NULL, node->devt, + NULL, node->node_name); + if (IS_ERR_OR_NULL(node->class_dev)) { + tloge("class device create failed"); + ret = -ENOMEM; + goto chrdev_region_unregister; + } + node->class_dev->of_node = g_dev_node; + + cdev_init(&(node->char_dev), node->fops); + (node->char_dev).owner = THIS_MODULE; + + return 0; + +chrdev_region_unregister: + unregister_chrdev_region(node->devt, 1); + return ret; +} + +static int init_dev_node(struct dev_node *node, char *node_name, + struct class *driver_class, const struct file_operations *fops) +{ + int ret = -1; + if (!node) { + tloge("node is NULL\n"); + return ret; + } + node->node_name = node_name; + node->driver_class = driver_class; + node->fops = fops; + + ret = create_dev_node(node); + return ret; +} + +static void destory_dev_node(struct dev_node *node, struct class *driver_class) +{ + device_destroy(driver_class, node->devt); + unregister_chrdev_region(node->devt, 1); + return; +} + +static int tc_ns_client_init(void) +{ + int ret; + + g_driver_class = class_create(THIS_MODULE, TC_NS_CLIENT_DEV); + if (IS_ERR_OR_NULL(g_driver_class)) { + tloge("class create failed"); + ret = -ENOMEM; + return ret; + } + + ret = init_dev_node(&g_tc_client, TC_NS_CLIENT_DEV, 
g_driver_class, &g_tc_ns_client_fops); + if (ret != 0) { + class_destroy(g_driver_class); + return ret; + } + ret = init_dev_node(&g_tc_private, TC_PRIV_DEV, g_driver_class, &g_teecd_fops); + if (ret != 0) { + destory_dev_node(&g_tc_client, g_driver_class); + class_destroy(g_driver_class); + return ret; + } +#if defined(CONFIG_CONFIDENTIAL_CONTAINER) || defined(CONFIG_TEE_TELEPORT_SUPPORT) + ret = init_dev_node(&g_tc_cvm, TC_NS_CVM_DEV, g_driver_class, get_cvm_fops()); + if (ret != 0) { + destory_dev_node(&g_tc_private, g_driver_class); + destory_dev_node(&g_tc_client, g_driver_class); + class_destroy(g_driver_class); + return ret; + } +#endif + INIT_LIST_HEAD(&g_tc_ns_dev_list.dev_file_list); + mutex_init(&g_tc_ns_dev_list.dev_lock); + return ret; +} + +static int enable_dev_nodes(void) +{ + int ret; + + ret = cdev_add(&(g_tc_private.char_dev), + MKDEV(MAJOR(g_tc_private.devt), 0), 1); + if (ret < 0) { + tloge("cdev add failed %d", ret); + return ret; + } + + ret = cdev_add(&(g_tc_client.char_dev), + MKDEV(MAJOR(g_tc_client.devt), 0), 1); + if (ret < 0) { + tloge("cdev add failed %d", ret); + cdev_del(&(g_tc_private.char_dev)); + return ret; + } + +#if defined(CONFIG_CONFIDENTIAL_CONTAINER) || defined(CONFIG_TEE_TELEPORT_SUPPORT) + ret = cdev_add(&(g_tc_cvm.char_dev), + MKDEV(MAJOR(g_tc_cvm.devt), 0), 1); + if (ret < 0) { + tloge("cdev add failed %d", ret); + cdev_del(&(g_tc_client.char_dev)); + cdev_del(&(g_tc_private.char_dev)); + return ret; + } +#endif + return 0; +} + +static int __init vtzf_init(void) +{ + int ret; + ret = tc_ns_client_init(); + if (ret != 0) + return ret; + init_res_shm_list(); + ret = tlogger_init(); + if (ret != 0) { + tloge("tlogger init failed\n"); + goto exit_tlogger; + } + ret = enable_dev_nodes(); + if (ret != 0) { + tloge("enable dev nodes failed\n"); + goto class_device_destroy; + } + ret = serial_port_init(); + if (ret != 0) { + goto class_device_destroy; + } + seq_num_init(); + return 0; + +class_device_destroy: +#if 
defined(CONFIG_CONFIDENTIAL_CONTAINER) || defined(CONFIG_TEE_TELEPORT_SUPPORT) + destory_dev_node(&g_tc_cvm, g_driver_class); +#endif + destory_dev_node(&g_tc_client, g_driver_class); + destory_dev_node(&g_tc_private, g_driver_class); + class_destroy(g_driver_class); +exit_tlogger: + tlogger_exit(); + return ret; +} + +static void free_dev_list(void) +{ + struct vtzf_dev_file *dev_file = NULL, *temp = NULL; + + mutex_lock(&g_tc_ns_dev_list.dev_lock); + list_for_each_entry_safe(dev_file, temp, &g_tc_ns_dev_list.dev_file_list, head) { + list_del(&dev_file->head); + kfree(dev_file); + } + mutex_unlock(&g_tc_ns_dev_list.dev_lock); +} + +static void __exit vtzf_exit(void) +{ +#if defined(CONFIG_CONFIDENTIAL_CONTAINER) || defined(CONFIG_TEE_TELEPORT_SUPPORT) + cdev_del(&(g_tc_cvm.char_dev)); +#endif + cdev_del(&(g_tc_private.char_dev)); + cdev_del(&(g_tc_client.char_dev)); +#if defined(CONFIG_CONFIDENTIAL_CONTAINER) || defined(CONFIG_TEE_TELEPORT_SUPPORT) + destory_dev_node(&g_tc_cvm, g_driver_class); +#endif + + destory_dev_node(&g_tc_client, g_driver_class); + destory_dev_node(&g_tc_private, g_driver_class); + class_destroy(g_driver_class); + tlogi("class_destroy success\n"); + tlogger_exit(); + tlogi("tlogger_exit success\n"); + free_dev_list(); + tlogi("free_dev_list success\n"); + free_serial_port_list(); + tlogi("free_serial_port_list success\n"); + destroy_res_shm_list(); + tlogi("destroy_res_shm_list success\n"); +} + +int tc_ns_client_open(struct vtzf_dev_file **dev_file, uint32_t flag) +{ + struct vtzf_dev_file *dev = NULL; + tlogd("vtzf open \n"); + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)dev)) { + tloge("vtzf_dev_file malloc failed\n"); + return -ENOMEM; + } + + mutex_lock(&g_tc_ns_dev_list.dev_lock); + list_add_tail(&dev->head, &g_tc_ns_dev_list.dev_file_list); + mutex_unlock(&g_tc_ns_dev_list.dev_lock); + + mutex_lock(&g_device_file_cnt_lock); + dev->dev_file_id = g_device_file_cnt; + g_device_file_cnt++; + 
mutex_unlock(&g_device_file_cnt_lock); + + INIT_LIST_HEAD(&dev->shared_mem_list); + mutex_init(&dev->shared_mem_lock); + + (void)open_tzdriver(dev, flag); + + *dev_file = dev; + return 0; +} + +static int vtzf_client_open(struct inode *inode, struct file *file) +{ + int ret; + struct vtzf_dev_file *dev_file = NULL; + (void)inode; + file->private_data = NULL; + ret = tc_ns_client_open(&dev_file, TC_NS_CLIENT_DEV_FLAG); + if (!ret) + file->private_data = dev_file; + + return 0; +} + +static int vtzf_private_open(struct inode *inode, struct file *file) +{ + struct vtzf_dev_file *dev_file = NULL; + + dev_file = kzalloc(sizeof(*dev_file), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)dev_file)) { + tloge("vtzf_dev_file malloc failed\n"); + return -ENOMEM; + } + + mutex_lock(&g_tc_ns_dev_list.dev_lock); + list_add_tail(&dev_file->head, &g_tc_ns_dev_list.dev_file_list); + mutex_unlock(&g_tc_ns_dev_list.dev_lock); + + mutex_lock(&g_device_file_cnt_lock); + dev_file->dev_file_id = g_device_file_cnt; + g_device_file_cnt++; + mutex_unlock(&g_device_file_cnt_lock); + + INIT_LIST_HEAD(&dev_file->shared_mem_list); + mutex_init(&dev_file->shared_mem_lock); + + file->private_data = dev_file; + + (void)open_tzdriver(dev_file, TC_PRIVATE_DEV_FLAG); + return 0; +} + +static int vtzf_cvm_open(struct inode *inode, struct file *file) +{ + int ret = -1; + struct vtzf_dev_file *dev = NULL; + (void)inode; + + file->private_data = NULL; + ret = tc_ns_client_open(&dev, TC_CVM_DEV_FLAG); + if (ret == 0) + file->private_data = dev; + return ret; +} + +static int open_tzdriver(struct vtzf_dev_file *dev_file, uint32_t flag) +{ + int ret = 0; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_open_tzd packet_cmd = {0}; + struct_packet_rsp_open_tzd packet_rsp = {0}; + packet_cmd.packet_size = sizeof(packet_cmd); + if (!dev_file) { + tloge("invalid params\n"); + return -EINVAL; + } + + dev_file->ptzfd = -1; + packet_cmd.seq_num = seq_num; + packet_cmd.cmd = VTZF_OPEN_TZD; + 
packet_cmd.vmid = 0; + /* if flag==0, open tc_ns_client; if flag==1, open tc_private */ + packet_cmd.flag = flag; + + ret = send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num); + if (!ret) { + ret = packet_rsp.ret; + if (ret) { + tloge("open TZdriver failed ret is %d\n", ret); + goto END; + } + dev_file->ptzfd = packet_rsp.ptzfd; + } else { + tloge("send to proxy failed ret is %d\n", ret); + } +END: + return ret; +} + +int vtzf_close(struct inode *inode, struct file *file) +{ + int ret = 0; + struct vtzf_dev_file *dev_file = file->private_data; + + mutex_destroy(&dev_file->shared_mem_lock); + + mutex_lock(&g_tc_ns_dev_list.dev_lock); + list_del(&dev_file->head); + mutex_unlock(&g_tc_ns_dev_list.dev_lock); + + (void)close_tzdriver(dev_file); + + kfree(dev_file); + mutex_lock(&g_device_file_cnt_lock); + g_device_file_cnt--; + mutex_unlock(&g_device_file_cnt_lock); + + file->private_data = NULL; + return ret; +} + +static int close_tzdriver(struct vtzf_dev_file *dev_file) +{ + int ret = 0; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_close_tzd packet_cmd = {0}; + struct_packet_rsp_close_tzd packet_rsp = {0}; + packet_cmd.packet_size = sizeof(packet_cmd); + + if (!dev_file || dev_file->ptzfd <= 0) { + tloge("invalid params\n"); + return -EINVAL; + } + + packet_cmd.seq_num = seq_num; + packet_cmd.cmd = VTZF_CLOSE_TZD; + packet_cmd.ptzfd = dev_file->ptzfd; + tlogd("close ptzfd = %d\n", dev_file->ptzfd); + + ret = send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num); + if (!ret) { + ret = packet_rsp.ret; + if (ret) { + tloge("close TZdriver failed ret is %d\n", ret); + goto END; + } + } else { + tloge("send to proxy failed ret is %d\n", ret); + } +END: + return ret; +} + +void shared_vma_open(struct vm_area_struct *vma) +{ + (void)vma; +} + +void shared_vma_close(struct vm_area_struct *vma) +{ + struct vtzf_dev_file *dev_file = NULL; + struct vtzf_shared_mem *shared_mem = NULL; + struct 
vtzf_shared_mem *shared_mem_temp = NULL; + bool find = false; + + if (!vma) { + tloge("virtual memory area is null \n"); + return; + } + dev_file = vma->vm_private_data; + if (!dev_file) { + tloge("virtual memory area private data is null \n"); + return; + } + + mutex_lock(&dev_file->shared_mem_lock); + list_for_each_entry_safe(shared_mem, shared_mem_temp, + &dev_file->shared_mem_list, head) { + if (shared_mem) { + if (shared_mem->user_addr == + (void *)(uintptr_t)vma->vm_start) { + shared_mem->user_addr = NULL; + list_del(&shared_mem->head); + if (shared_mem->kernel_addr) { + dealloc_res_shm(shared_mem->kernel_addr); + shared_mem->kernel_addr = NULL; + } + kfree(shared_mem); + find = true; + break; + } + } + } + mutex_unlock(&dev_file->shared_mem_lock); + if (find) { + (void)proxy_mmap(dev_file, (void *)(uintptr_t)vma->vm_start, 0 ,0, true); + } +} + +static int proxy_mmap(struct vtzf_dev_file *dev_file, + void * user_buffer, uint32_t buffer_size, + uint32_t pgoff, uint8_t unmap) +{ + int ret = 0; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_mmap packet_cmd = {0}; + struct_packet_rsp_mmap packet_rsp = {0}; + + if (!dev_file || dev_file->ptzfd <= 0) { + tloge("invalid params\n"); + return -EINVAL; + } + + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.cmd = unmap ? 
VTZF_MUNMAP : VTZF_MMAP; + packet_cmd.seq_num = seq_num; + packet_cmd.ptzfd = dev_file->ptzfd; + packet_cmd.buffer = (uint64_t)user_buffer; + packet_cmd.size = buffer_size; + packet_cmd.offset = pgoff; + + if (send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num)) { + ret = -EFAULT; + goto END; + } else { + ret = packet_rsp.ret; + } +END: + return ret; +} + +static int vtzf_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct vtzf_dev_file *dev_file = NULL; + struct vtzf_shared_mem *shared_mem = NULL; + void *addr = NULL; + size_t len; + + if (!filp || !vma || !filp->private_data) { + tloge("vtzf invalid args for mmap \n"); + return -EINVAL; + } + dev_file = filp->private_data; + + shared_mem = kmalloc(sizeof(*shared_mem), GFP_KERNEL | __GFP_ZERO); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)shared_mem)) { + tloge("vtzf shared_mem kmalloc failed \n"); + return -ENOMEM; + } + + len = vma->vm_end - vma->vm_start; + len = ALIGN(len, 1 << PAGE_SHIFT); + if (len > MAILBOX_POOL_SIZE) { + tloge("vtzf alloc sharemem buffer size %zu is too large \n", len); + kfree(shared_mem); + return -EINVAL; + } + + addr = alloc_res_shm(len); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)addr)) { + tloge("kmalloc shared_mem buffer failed \n"); + return -ENOMEM; + } + + shared_mem->kernel_addr = addr; + shared_mem->len = len; + shared_mem->user_addr =(void *)vma->vm_start; + shared_mem->phy_addr = (void *)virt_to_phys(addr); + + tlogv("shared_mem user virtual address = 0x%016llx \n", (uint64_t)shared_mem->user_addr); + tlogv("shared_mem kernel virtual address = 0x%016llx \n", (uint64_t)shared_mem->kernel_addr); + tlogv("shared_mem physical address = 0x%016llx \n", (uint64_t)shared_mem->phy_addr); + tlogv("shared_mem allocated buffer len = 0x%08x, %d \n", (int)len, (int)len); + + vma->vm_flags |= VM_USERMAP; + + if (remap_pfn_range(vma, vma->vm_start, + virt_to_phys(addr)>>PAGE_SHIFT, len, vma->vm_page_prot)) { + tloge("shared_mem 
buffer remap failed \n"); + return -EAGAIN; + } + + vma->vm_flags |= VM_DONTCOPY; + vma->vm_ops = &g_shared_remap_vm_ops; + shared_vma_open(vma); + vma->vm_private_data = (void *)dev_file; + + shared_mem->user_addr = (void *)(uintptr_t)vma->vm_start; + atomic_set(&shared_mem->offset, vma->vm_pgoff); + mutex_lock(&dev_file->shared_mem_lock); + list_add_tail(&shared_mem->head, &dev_file->shared_mem_list); + mutex_unlock(&dev_file->shared_mem_lock); + + (void)proxy_mmap(filp->private_data, shared_mem->user_addr, + shared_mem->len, (uint32_t)atomic_read(&shared_mem->offset), false); + return 0; +} + +#define INPUT 0 +#define OUTPUT 1 +#define INOUT 2 + +static inline bool is_input_type(int dir) +{ + if (dir == INPUT || dir == INOUT) + return true; + + return false; +} + +static inline bool is_output_type(int dir) +{ + if (dir == OUTPUT || dir == INOUT) + return true; + + return false; +} + +static inline bool teec_value_type(unsigned int type, int dir) +{ + return ((is_input_type(dir) && type == TEEC_VALUE_INPUT) || + (is_output_type(dir) && type == TEEC_VALUE_OUTPUT) || + type == TEEC_VALUE_INOUT) ? true : false; +} + +static inline bool teec_tmpmem_type(unsigned int type, int dir) +{ + return ((is_input_type(dir) && type == TEEC_MEMREF_TEMP_INPUT) || + (is_output_type(dir) && type == TEEC_MEMREF_TEMP_OUTPUT) || + type == TEEC_MEMREF_TEMP_INOUT) ? true : false; +} + +static inline bool teec_memref_type(unsigned int type, int dir) +{ + return ((is_input_type(dir) && type == TEEC_MEMREF_PARTIAL_INPUT) || + (is_output_type(dir) && type == TEEC_MEMREF_PARTIAL_OUTPUT) || + type == TEEC_MEMREF_PARTIAL_INOUT) ? 
true : false; +} + +static long tc_client_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret = -EFAULT; + void *argp = (void __user *)(uintptr_t)arg; + + switch (cmd) { + case TC_NS_CLIENT_IOCTL_SES_OPEN_REQ: + case TC_NS_CLIENT_IOCTL_SES_CLOSE_REQ: + case TC_NS_CLIENT_IOCTL_SEND_CMD_REQ: + ret = tc_client_session_ioctl(file->private_data, cmd, arg); + break; + case TC_NS_CLIENT_IOCTL_CANCEL_CMD_REQ: + /* TZdriver don't support send cancel cmd now */ + ret = tc_ns_send_cancel_cmd(file->private_data, argp); + break; + case TC_NS_CLIENT_IOCTL_LOGIN: + ret = tc_ns_client_login_func(file->private_data, argp); + break; + case TC_NS_CLIENT_IOCTL_LOAD_APP_REQ: + ret = public_ioctl(file, cmd, arg, true); + break; + default: + tlogd(" default\n"); + break; + } + + tlogd("tc client ioctl ret = 0x%x\n", ret); + return (long)ret; +} + +static int tc_ns_open_session(struct vtzf_dev_file *dev_file, + struct tc_ns_client_context *clicontext) +{ + int ret; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_session packet_cmd = {0}; + struct_packet_rsp_session packet_rsp = {0}; + size_t file_size = 0; + char *buffer = NULL; + char *tmp_buffer = NULL; + + if (!clicontext || !dev_file || dev_file->ptzfd <= 0) { + tloge("invalid params\n"); + return -EINVAL; + } + + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.seq_num = seq_num; + packet_cmd.cmd = VTZF_OPEN_SESSION; + packet_cmd.ptzfd = dev_file->ptzfd; + packet_cmd.cliContext = *clicontext; + + file_size = (size_t)packet_cmd.cliContext.file_size; + tlogd("file_size = %lu \n", file_size); + buffer = (char *)alloc_res_shm(file_size); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)buffer)) { + tloge("vtzf_dev_file malloc failed\n"); + return -ENOMEM; + } + + tmp_buffer = packet_cmd.cliContext.file_buffer; + tlogd("buffer addr = %016llx ,tmp_buffer =%016llx \n", + (unsigned long long)clicontext->file_buffer, (unsigned long long)tmp_buffer); + packet_cmd.cliContext.file_buffer = (char 
*)virt_to_phys(buffer); + + if (copy_from_user(buffer, (const void __user *)tmp_buffer, file_size)) { + tloge("file buf get failed \n"); + ret = -EFAULT; + goto END; + } + + ret = send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num); + if (!ret) { + ret = packet_rsp.ret; + tlogd(" opensession ret =%d \n", ret); + if (!ret) { + packet_rsp.cliContext.file_buffer = tmp_buffer; + *clicontext = packet_rsp.cliContext; + } else { + tloge("open session failed ret is %d\n", ret); + clicontext->returns = packet_rsp.cliContext.returns; + } + } else { + tloge("send to proxy failed ret is %d\n", ret); + } +END: + dealloc_res_shm(buffer); + return ret; +} + +static int tc_ns_close_session(struct vtzf_dev_file *dev_file, void __user *argp) +{ + int ret; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_session packet_cmd = {0}; + struct_packet_rsp_general packet_rsp = {0}; + + if (!argp || !dev_file || dev_file->ptzfd <= 0) { + tloge("invalid params\n"); + return -EINVAL; + } + + if (copy_from_user(&packet_cmd.cliContext, argp, sizeof(packet_cmd.cliContext)) != 0) { + tloge("copy from user failed\n"); + return -ENOMEM; + } + + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.seq_num = seq_num; + packet_cmd.cmd = VTZF_CLOSE_SESSION; + packet_cmd.ptzfd = dev_file->ptzfd; + + ret = send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num); + if (!ret) { + ret = packet_rsp.ret; + if (ret) { + tloge("close session failed ret is %d\n", ret); + } + } else if(ret != -EINTR) { + tloge("send to proxy failed ret is %d\n", ret); + } + + return ret; +} + +static int alloc_for_tmp_mem(struct tc_ns_client_context *clicontext, + int index, uintptr_t addrs[][3]) +{ + uint32_t buf_size; + uintptr_t buffer; + uintptr_t user_buf_size, user_buf; + + user_buf = (uintptr_t)(clicontext->params[index].memref.buffer + | (uint64_t)clicontext->params[index].memref.buffer_h_addr << ADDR_TRANS_NUM); + user_buf_size = 
(uintptr_t)(clicontext->params[index].memref.size_addr + | (uint64_t)clicontext->params[index].memref.size_h_addr << ADDR_TRANS_NUM); + + if (copy_from_user(&buf_size, (void *)user_buf_size, sizeof(uint32_t)) != 0) { + tloge("copy from user failed\n"); + return -EFAULT; + } + + tlogd(" buffer size = %u\n", buf_size); + buffer = (uintptr_t)alloc_res_shm(buf_size); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)buffer)) { + tloge("buffer malloc failed\n"); + return -ENOMEM; + } + if (copy_from_user((void *)buffer, (void *)user_buf, buf_size) != 0) { + tloge("copy from user failed\n"); + dealloc_res_shm((void *)buffer); + return -EFAULT; + } + + addrs[index][1] = buffer; + buffer = (uintptr_t)virt_to_phys((void *)buffer); + clicontext->params[index].memref.buffer = (unsigned int)(uintptr_t)buffer; + clicontext->params[index].memref.buffer_h_addr = ((unsigned long long)(uintptr_t)buffer) >> ADDR_TRANS_NUM; + clicontext->params[index].memref.size_addr = buf_size; + return 0; +} + +static int alloc_for_val_mem(struct tc_ns_client_context *clicontext, + int index, uintptr_t addrs[][3]) +{ + uint32_t val_a, val_b; + uintptr_t user_val_a, user_val_b; + + user_val_a = (uintptr_t)(clicontext->params[index].value.a_addr + | (uint64_t)clicontext->params[index].value.a_h_addr << ADDR_TRANS_NUM); + user_val_b = (uintptr_t)(clicontext->params[index].value.b_addr + | (uint64_t)clicontext->params[index].value.b_h_addr << ADDR_TRANS_NUM); + if (copy_from_user(&val_a, (void *)user_val_a, sizeof(uint32_t)) != 0) { + tloge("copy from user failed\n"); + return -EFAULT; + } + if (copy_from_user(&val_b, (void *)user_val_b, sizeof(uint32_t)) != 0) { + tloge("copy from user failed\n"); + return -EFAULT; + } + + clicontext->params[index].value.a_addr = val_a; + clicontext->params[index].value.b_addr = val_b; + return 0; +} + +static int alloc_for_ref_mem(struct vtzf_dev_file *dev_file, + struct_packet_cmd_send_cmd *packet_cmd, int index, uintptr_t addrs[][3]) +{ + uintptr_t user_size_addr; 
+ struct tc_ns_client_context *clicontext = &packet_cmd->cliContext; + bool b_found = false; + struct vtzf_shared_mem *shared_mem = NULL; + struct vtzf_shared_mem *shared_mem_temp = NULL; + void *user_buffer = NULL; + uintptr_t phy_buffer; + uint32_t buf_size; + + user_size_addr = (uintptr_t)(clicontext->params[index].memref.size_addr + | (uint64_t)clicontext->params[index].memref.size_h_addr << ADDR_TRANS_NUM); + user_buffer = (void *)(clicontext->params[index].memref.buffer + | (uint64_t)clicontext->params[index].memref.buffer_h_addr << ADDR_TRANS_NUM); + + if (copy_from_user(&buf_size, (void *)user_size_addr, sizeof(uint32_t)) != 0) { + tloge("copy from user failed\n"); + return -EFAULT; + } + + mutex_lock(&dev_file->shared_mem_lock); + list_for_each_entry_safe(shared_mem, shared_mem_temp, &dev_file->shared_mem_list, head) { + if (shared_mem) { + if (shared_mem->user_addr == user_buffer) { + tlogv("found the mapped shared_mem for cliContext.params[index].memref.buffer\n"); + phy_buffer = (uintptr_t)shared_mem->phy_addr; + clicontext->params[index].memref.buffer = + (unsigned int)(uintptr_t)phy_buffer; + clicontext->params[index].memref.buffer_h_addr = + ((unsigned long long)(uintptr_t)phy_buffer) >> ADDR_TRANS_NUM; + + packet_cmd->addrs[index] = (unsigned long long)user_buffer; + b_found = true; + break; + } + } + } + mutex_unlock(&dev_file->shared_mem_lock); + if (!b_found) { + tloge("can't found the mapped shared_mem for cliContext.params[index].memref.buffer \n"); + return -EFAULT; + } + clicontext->params[index].memref.size_addr = buf_size; + return 0; +} + +static int check_buffer_for_sharedmem(uint32_t *buffer_size, + struct_packet_cmd_send_cmd *packet_cmd, int index) +{ + uintptr_t user_size_addr; + void *user_buffer = NULL; + struct tc_ns_client_context *clicontext = &packet_cmd->cliContext; + user_size_addr = (uintptr_t)(clicontext->params[index].memref.size_addr + | (uint64_t)clicontext->params[index].memref.size_h_addr << ADDR_TRANS_NUM); + 
user_buffer = (void *)(clicontext->params[index].memref.buffer + | (uint64_t)clicontext->params[index].memref.buffer_h_addr << ADDR_TRANS_NUM); + + if (copy_from_user(buffer_size, (void *)user_size_addr, sizeof(uint32_t)) != 0) { + tloge("copy from user failed\n"); + return -EFAULT; + } + + if (*buffer_size == 0 || *buffer_size > SZ_256M) { + tloge("invalid buffer size\n"); + return -ENOMEM; + } + + if ((packet_cmd->cliContext.params[index].memref.offset >= SZ_256M) || + (UINT64_MAX - (uint64_t)user_buffer <= packet_cmd->cliContext.params[index].memref.offset)) { + tloge("invalid buff or offset\n"); + return -EFAULT; + } + return 0; +} + +static int alloc_for_share_mem(struct vtzf_dev_file *dev_file, + struct_packet_cmd_send_cmd *packet_cmd, int index, uintptr_t addrs[][3]) +{ + struct tc_ns_client_context *clicontext = &packet_cmd->cliContext; + void *user_buffer = NULL; + uint32_t user_buf_size = 0; + void *block_buf = NULL; + uint32_t block_buf_size = 0; + void *pages_buf = NULL; + uint32_t pages_buf_size = 0; + int block_count; + uint32_t offset; + + user_buffer = (void *)(clicontext->params[index].memref.buffer + | (uint64_t)clicontext->params[index].memref.buffer_h_addr << ADDR_TRANS_NUM); + tlogd(" user_buffer = %p \n", user_buffer); + if (check_buffer_for_sharedmem(&user_buf_size, packet_cmd, index)) + return -EINVAL; + + tlogd("share mem buf size = %u\n", user_buf_size); + if (get_page_block(user_buffer, user_buf_size, &block_buf, &block_buf_size, &block_count, &pages_buf, &pages_buf_size) != 0) { + tloge("get_page_block failed \n"); + return -EFAULT; + } + tlogd("alloc for share mem \n"); + tlogd("block_buf = %llx \n", (uint64_t)block_buf); + tlogd("block_buf_size = %u \n", block_buf_size); + //dump_page_blocks(block_count, (uint64_t)block_buf); + addrs[index][1] = (uintptr_t)pages_buf; + addrs[index][0] = (uintptr_t)pages_buf_size; + packet_cmd->block_addrs[index] = (uint64_t)block_buf; + packet_cmd->block_size[index] = block_buf_size; + 
packet_cmd->vm_page_size = PAGE_SIZE; + clicontext->params[index].memref.size_addr = user_buf_size; + offset = ((uint32_t)(uintptr_t)user_buffer) & (~PAGE_MASK); + /*memref.h_offset 保存首个PAGE内部的偏移, memref.offset用户buffer的偏移*/ + clicontext->params[index].memref.h_offset = offset; + tlogd("clicontext->params[index].memref.h_offset = %u, 0x%x\n", + clicontext->params[index].memref.h_offset, clicontext->params[index].memref.h_offset); + return 0; +} + +static int alloc_for_params(struct vtzf_dev_file *dev_file, + struct_packet_cmd_send_cmd *packet_cmd, uintptr_t addrs[][3]) +{ + int ret; + int index; + uint32_t param_type; + bool checkValue; + for (index = 0; index < TEE_PARAM_NUM; index++) { + param_type = teec_param_type_get(packet_cmd->cliContext.param_types, index); + checkValue = (param_type == TEEC_ION_INPUT || param_type == TEEC_ION_SGLIST_INPUT); + tlogd("param %u type is %x\n", index, param_type); + if (teec_tmpmem_type(param_type, INOUT)) + ret = alloc_for_tmp_mem(&packet_cmd->cliContext, index, addrs); + else if (teec_memref_type(param_type, INOUT)) + ret = alloc_for_ref_mem(dev_file , packet_cmd, index, addrs); + else if (teec_value_type(param_type, INOUT) || checkValue) + ret = alloc_for_val_mem(&packet_cmd->cliContext, index, addrs); + else if (param_type == TEEC_MEMREF_SHARED_INOUT) + ret = alloc_for_share_mem(dev_file , packet_cmd, index, addrs); + else + tlogd("param type = TEEC_NONE\n"); + if (ret != 0) { + goto ERR; + } + } + + return 0; +ERR: + return ret; + +} + +static void update_free_params(struct tc_ns_client_context *clicontext, + struct tc_ns_client_context *context, uintptr_t addrs[4][3]) +{ + int ret = 0; + int index; + uint32_t param_type; + bool checkValue; + uintptr_t buf; + uintptr_t user_addr_size, user_addr_buf; + uintptr_t user_addr_val_a, user_addr_val_b; + uint32_t buf_size; + uint32_t val_a, val_b; + void *pages_buf = NULL; + uint32_t pages_buf_size = 0; + for (index = 0; index < TEE_PARAM_NUM; index++) { + param_type = 
teec_param_type_get(clicontext->param_types, index); + checkValue = (param_type == TEEC_ION_INPUT || param_type == TEEC_ION_SGLIST_INPUT); + if (teec_tmpmem_type(param_type, INOUT)) { + buf_size = clicontext->params[index].memref.size_addr; + buf = addrs[index][1]; + + user_addr_size = (uintptr_t)(context->params[index].memref.size_addr + | (uint64_t)context->params[index].memref.size_h_addr << ADDR_TRANS_NUM); + user_addr_buf = (uintptr_t)(context->params[index].memref.buffer + | (uint64_t)context->params[index].memref.buffer_h_addr << ADDR_TRANS_NUM); + + if (copy_to_user((void *)user_addr_size, &buf_size, sizeof(uint32_t)) != 0) + ret = -EFAULT; + if (copy_to_user((void *)user_addr_buf, (void *)buf, buf_size) != 0) + ret = -EFAULT; + dealloc_res_shm((void *)buf); + + } else if(teec_memref_type(param_type, INOUT)) { + buf_size = clicontext->params[index].memref.size_addr; + + user_addr_size = (uintptr_t)(context->params[index].memref.size_addr + | (uint64_t)context->params[index].memref.size_h_addr << ADDR_TRANS_NUM); + if (copy_to_user((void *)user_addr_size, &buf_size, sizeof(uint32_t)) != 0) + ret = -EFAULT; + } else if(teec_value_type(param_type, INOUT) || checkValue) { + val_a = clicontext->params[index].value.a_addr; + val_b = clicontext->params[index].value.b_addr; + + user_addr_val_a = (uintptr_t)(context->params[index].value.a_addr + | (uint64_t)context->params[index].value.a_h_addr << ADDR_TRANS_NUM); + user_addr_val_b = (uintptr_t)(context->params[index].value.b_addr + | (uint64_t)context->params[index].value.b_h_addr << ADDR_TRANS_NUM); + + if (copy_to_user((void *)user_addr_val_a, &val_a, sizeof(uint32_t)) != 0) + ret = -EFAULT; + if (copy_to_user((void *)user_addr_val_b, &val_b, sizeof(uint32_t)) != 0) + ret = -EFAULT; + } else if (param_type == TEEC_MEMREF_SHARED_INOUT){ + pages_buf = (void *)addrs[index][1]; + pages_buf_size = (uint32_t)addrs[index][0]; + release_shared_mem_page((uint64_t)pages_buf, pages_buf_size); + } else { + /* nothing */ + } 
+ + if (ret) { + tloge(" ret =%d \n", ret); + } + } +} + +static void free_for_params(struct tc_ns_client_context *clicontext, + uintptr_t addrs[4][3]) +{ + int index; + uint32_t param_type; + uintptr_t buf; + + void *pages_buf = NULL; + uint32_t pages_buf_size = 0; + for (index = 0; index < TEE_PARAM_NUM; index++) { + param_type = teec_param_type_get(clicontext->param_types, index); + if (teec_tmpmem_type(param_type, INOUT) && addrs[index][1]) { + buf = addrs[index][1]; + dealloc_res_shm((void *)buf); + }else if (param_type == TEEC_MEMREF_SHARED_INOUT){ + pages_buf = (void *)addrs[index][1]; + pages_buf_size = (uint32_t)addrs[index][0]; + release_shared_mem_page((uint64_t)pages_buf, pages_buf_size); + } else { + /* nothing */ + } + } +} + +static int tc_ns_send_cmd(struct vtzf_dev_file *dev_file, + struct tc_ns_client_context *context) +{ + int ret = -EINVAL; + int i = 0; + uint32_t offset =0; + uint32_t total_buf_size = 0; + void *cmd_buf =NULL; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_send_cmd packet_cmd = {0}; + struct_packet_rsp_send_cmd packet_rsp = {0}; + uintptr_t addrs[4][3]; + if (!dev_file || !context || dev_file->ptzfd <= 0) { + tloge("invalid dev_file or context\n"); + return -EINVAL; + } + + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.seq_num = seq_num; + packet_cmd.cmd = VTZF_SEND_CMD; + packet_cmd.ptzfd = dev_file->ptzfd; + packet_cmd.cliContext = *context; + + ret = alloc_for_params(dev_file, &packet_cmd, addrs); + if (ret) { + tloge("alloc for params failed \n"); + return ret; + } + for (i = 0;i < TEE_PARAM_NUM; i++) { + total_buf_size += packet_cmd.block_size[i]; + } + packet_cmd.fragment_block_num = total_buf_size / sizeof(struct page_block); + total_buf_size += sizeof(packet_cmd); + packet_cmd.packet_size = total_buf_size; + cmd_buf = kzalloc(total_buf_size, GFP_KERNEL); + if (!cmd_buf) { + tloge("cmd_buf malloc failed\n"); + ret = -ENOMEM; + goto err2; + } + + if (memcpy_s(cmd_buf, sizeof(packet_cmd), &packet_cmd, 
sizeof(packet_cmd)) != 0) { + ret = -EFAULT; + goto err1; + } + offset = sizeof(packet_cmd); + + for (i = 0; i < TEE_PARAM_NUM; i++) { + if (packet_cmd.block_size[i] != 0 && + memcpy_s(cmd_buf + offset, packet_cmd.block_size[i], + (void *)packet_cmd.block_addrs[i], packet_cmd.block_size[i]) != 0) { + ret = -EFAULT; + goto err1; + } + offset += packet_cmd.block_size[i]; + } + + ret = send_to_proxy(cmd_buf, total_buf_size, &packet_rsp, sizeof(packet_rsp), seq_num); + if (!ret) { + ret = packet_rsp.ret; + if (ret) { + tloge("invoke cmd failed ret is %d\n", ret); + context->returns = packet_rsp.cliContext.returns; + free_for_params(&packet_cmd.cliContext, addrs); + } else { + context->returns = packet_rsp.cliContext.returns; + update_free_params(&packet_rsp.cliContext, context, addrs); + } + } else { + tloge("send to proxy failed ret is %d\n", ret); + free_for_params(&packet_cmd.cliContext, addrs); + } + + kfree(cmd_buf); + for (i = 0; i < TEE_PARAM_NUM; i++) { + if (packet_cmd.block_size[i] != 0 && packet_cmd.block_addrs[i]) { + kfree((void *)packet_cmd.block_addrs[i]); + } + } + return ret; +err1: + kfree(cmd_buf); + for (i = 0; i < TEE_PARAM_NUM; i++) { + if (packet_cmd.block_size[i] != 0 && packet_cmd.block_addrs[i]) { + kfree((void *)packet_cmd.block_addrs[i]); + } + } +err2: + free_for_params(&packet_cmd.cliContext, addrs); + return ret; +} + +static int ioctl_session_send_cmd(struct vtzf_dev_file *dev_file, + struct tc_ns_client_context *context, void *argp) +{ + int ret; + ret = tc_ns_send_cmd(dev_file, context); + if (ret != 0) + tloge("send cmd failed ret is %d\n", ret); + if (copy_to_user(argp, context, sizeof(*context)) != 0) { + if (ret == 0) + ret = -EFAULT; + } + return ret; +} + +int tc_client_session_ioctl(struct vtzf_dev_file *dev_file, unsigned int cmd, + unsigned long arg) +{ + int ret = -EINVAL; + void *argp = (void __user *)(uintptr_t)arg; + struct tc_ns_client_context context; + + if (!argp || !dev_file || dev_file->ptzfd <= 0) { + tloge("invalid 
params\n"); + return -EINVAL; + } + if (copy_from_user(&context, argp, sizeof(context)) != 0) { + tloge("copy from user failed\n"); + return -EFAULT; + } + + switch (cmd) { + case TC_NS_CLIENT_IOCTL_SES_OPEN_REQ: + ret = tc_ns_open_session(dev_file, &context); + if (copy_to_user(argp, &context, sizeof(context)) != 0 && ret == 0) + ret = -EFAULT; + break; + case TC_NS_CLIENT_IOCTL_SES_CLOSE_REQ: + ret = tc_ns_close_session(dev_file, argp); + break; + case TC_NS_CLIENT_IOCTL_SEND_CMD_REQ: + ret = ioctl_session_send_cmd(dev_file, &context, argp); + break; + default: + tloge("invalid cmd:0x%x!\n", cmd); + return ret; + } + + return ret; +} + +static int tc_ns_send_cancel_cmd(struct vtzf_dev_file *dev_file, void *argp) +{ + int ret; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_cancel_cmd packet_cmd = {0}; + struct_packet_rsp_cancel_cmd packet_rsp = {0}; + + if (!argp || !dev_file || dev_file->ptzfd <= 0) { + tloge("invalid params\n"); + return -EINVAL; + } + + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.seq_num = seq_num; + packet_cmd.cmd = VTZF_CANCEL_CMD; + packet_cmd.ptzfd = dev_file->ptzfd; + + if (copy_from_user(&packet_cmd.cliContext, argp, sizeof(packet_cmd.cliContext)) != 0) { + tloge("copy from user failed\n"); + return -ENOMEM; + } + if (send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num)) { + ret = -EFAULT; + goto END; + } else { + ret = packet_rsp.ret; + if (!ret && copy_to_user(argp, &packet_rsp.cliContext, sizeof(packet_rsp.cliContext)) != 0) + ret = -EFAULT; + } + +END: + return ret; +} + +static int tc_ns_client_login_func(struct vtzf_dev_file *dev_file, + const void __user *buffer) +{ + int ret = 0; + uint32_t seq_num = get_seq_num(0); + uint32_t total_size = 0; + struct_packet_cmd_login_non packet_cmd_non = {0}; + struct_packet_rsp_login packet_rsp = {0}; + struct_packet_cmd_login *packet_cmd = NULL; + + if (!dev_file || dev_file->ptzfd <= 0) { + tloge("invalid params\n"); + return -EINVAL; 
+ } + + if (!buffer) { + packet_cmd_non.packet_size = sizeof(packet_cmd_non); + packet_cmd_non.seq_num = seq_num; + packet_cmd_non.cmd = VTZF_LOG_IN_NHIDL; + packet_cmd_non.ptzfd = dev_file->ptzfd; + if (send_to_proxy(&packet_cmd_non, sizeof(packet_cmd_non), &packet_rsp, sizeof(packet_rsp), seq_num)) { + ret = -EFAULT; + goto END; + } + ret = packet_rsp.ret; + goto END; + } + total_size = sizeof(*packet_cmd) + CERT_BUF_MAX_SIZE; + packet_cmd = kzalloc(total_size, GFP_KERNEL); + if (!packet_cmd) + goto END; + + packet_cmd->packet_size = total_size; + packet_cmd->seq_num = seq_num; + packet_cmd->cmd = VTZF_LOG_IN; + packet_cmd->ptzfd = dev_file->ptzfd; + + if (copy_from_user(packet_cmd->cert_buffer, buffer, CERT_BUF_MAX_SIZE) != 0) { + tloge("copy from user failed\n"); + ret = -EFAULT; + goto END; + } + if (send_to_proxy(packet_cmd, total_size, &packet_rsp, sizeof(packet_rsp), seq_num)) { + ret = -EFAULT; + goto END; + } else { + ret = packet_rsp.ret; + } + +END: + if (packet_cmd) + kfree(packet_cmd); + return ret; +} + +static int tc_ns_get_tee_version(struct vtzf_dev_file *dev_file, void __user *argp) +{ + int ret; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_getteever packet_cmd = {0}; + struct_packet_rsp_getteever packet_rsp = {0}; + + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.cmd = VTZF_GET_TEE_VERSION; + packet_cmd.seq_num = seq_num; + packet_cmd.ptzfd = dev_file->ptzfd; + + /* There is no ptzfd, the TZdriver is opened and close immediately after use. 
*/ + if (send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num)) { + ret = -EFAULT; + goto END; + } else { + ret = packet_rsp.ret; + if (!ret && copy_to_user(argp, &packet_rsp.tee_ver, sizeof(uint32_t)) != 0) + ret = -EFAULT; + } +END: + return ret; +} + +static int tc_ns_late_init(unsigned long arg) +{ + int ret; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_lateinit packet_cmd = {0}; + struct_packet_rsp_lateinit packet_rsp = {0}; + + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.cmd = VTZF_LATE_INIT; + packet_cmd.seq_num = seq_num; + packet_cmd.index = (uint32_t)arg; + + if (send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num)) { + ret = -EFAULT; + goto END; + } else { + ret = packet_rsp.ret; + } + +END: + return ret; +} + +static int sync_system_time_from_user(struct vtzf_dev_file *dev_file, + const struct tc_ns_client_time *user_time) +{ + int ret = 0; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_synctime packet_cmd = {0}; + struct_packet_rsp_synctime packet_rsp ={0}; + struct tc_ns_client_time time = {0}; + + if (!user_time) { + tloge("user time is NULL input buffer\n"); + return -EINVAL; + } + + if (copy_from_user(&packet_cmd.tcNsTime, user_time, sizeof(time))) { + tloge("copy from user failed\n"); + return -EFAULT; + } + + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.cmd = VTZF_SYNC_TIME; + packet_cmd.seq_num = seq_num; + packet_cmd.ptzfd = dev_file->ptzfd; + + /* There is no ptzfd, the TZdriver is opened and close immediately after use */ + if (send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num)) { + ret = -EFAULT; + goto END; + } else { + ret = packet_rsp.ret; + } +END: + return ret; +} + +static bool is_valid_agent(unsigned int buffer_size) +{ + if (buffer_size > SZ_512K) { + tloge("size: %u of user agent's shared mem is invalid\n", buffer_size); + return false; + } + return true; +} + +static unsigned 
long agent_buffer_map(unsigned long phy_buffer, uint32_t size) +{ + struct vm_area_struct *vma = NULL; + unsigned long user_addr; + int ret; + + user_addr = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, 0); + if (IS_ERR_VALUE((uintptr_t)user_addr)) { + tloge("vm mmap failed\n"); + return user_addr; + } + + down_read(&mm_sem_lock(current->mm)); + vma = find_vma(current->mm, user_addr); + if (!vma) { + tloge("user_addr is not valid in vma"); + goto err_out; + } + + ret = remap_pfn_range(vma, user_addr, phy_buffer >> PAGE_SHIFT, size, + vma->vm_page_prot); + if (ret != 0) { + tloge("remap agent buffer failed, err=%d", ret); + goto err_out; + } + + up_read(&mm_sem_lock(current->mm)); + return user_addr; +err_out: + up_read(&mm_sem_lock(current->mm)); + if (vm_munmap(user_addr, size)) + tloge("munmap failed\n"); + return 0; +} + +static int get_agent_buf(struct vtzf_dev_file *dev_file, struct vtzf_shared_mem *shared_mem, + struct_packet_cmd_regagent *packet_cmd, void **bufferp, void **user_addrp, uint32_t agentid) +{ + size_t size = 0; + void *buffer = NULL; + void *user_addr = NULL; + + if (!dev_file || !packet_cmd) + return -EINVAL; + + size = (size_t)packet_cmd->args.buffer_size; + size = ALIGN(size, 1 << PAGE_SHIFT); + buffer = kzalloc(size, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + user_addr = (void *)agent_buffer_map(virt_to_phys(buffer), size); + if (!user_addr) { + return -ENOMEM; + } + shared_mem->kernel_addr = buffer; + shared_mem->user_addr = user_addr; + shared_mem->len = size; + mutex_lock(&dev_file->shared_mem_lock); + + list_add_tail(&shared_mem->head, &dev_file->shared_mem_list); + mutex_unlock(&dev_file->shared_mem_lock); + *bufferp = buffer; + *user_addrp = user_addr; + return 0; +} + +static int ioctl_register_agent(struct vtzf_dev_file *dev_file, void __user *argp) +{ + int ret = 0; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_regagent packet_cmd = {0}; + struct_packet_rsp_regagent packet_rsp = {0}; + 
struct vtzf_shared_mem *shared_mem = NULL; + void *buffer = NULL; + void *user_addr = NULL; + + if (!argp || !dev_file || dev_file->ptzfd <= 0) { + tloge("invalid params\n"); + return -EINVAL; + } + if (copy_from_user(&packet_cmd.args, (void *)(uintptr_t)argp, sizeof(packet_cmd.args)) != 0) { + tloge("copy agent args failed\n"); + return -EFAULT; + } + if (!is_valid_agent(packet_cmd.args.buffer_size)) { + return -EINVAL; + } + + shared_mem = kzalloc(sizeof(*shared_mem), GFP_KERNEL); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)shared_mem)) { + tloge("shared_mem malloc failed\n"); + return -ENOMEM; + } + + if (get_agent_buf(dev_file, shared_mem, &packet_cmd, &buffer, &user_addr, packet_cmd.args.id)) { + kfree(shared_mem); + return -1; + } + + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.cmd = VTZ_REGISTER_AGENT; + packet_cmd.seq_num = seq_num; + packet_cmd.ptzfd = dev_file->ptzfd; + packet_cmd.phyaddr = (void *)virt_to_phys(buffer); + + if (send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num)) { + ret = -EFAULT; + goto END; + } else { + ret = packet_rsp.ret; + if (!ret) { + shared_mem->user_addr_host = packet_rsp.args.buffer; + packet_rsp.args.buffer = user_addr; + if (copy_to_user(argp, &packet_rsp.args, sizeof(packet_rsp.args)) != 0) { + tloge("copy to user failed\n"); + ret = -EFAULT; + } + dev_file->buf = (void *)shared_mem; + } else{ + mutex_lock(&dev_file->shared_mem_lock); + list_del(&shared_mem->head); + mutex_unlock(&dev_file->shared_mem_lock); + if (shared_mem->kernel_addr) + kfree(shared_mem->kernel_addr); + kfree(shared_mem); + dev_file->buf = NULL; + } + tlogd("packet_rsp.ret = %d \n", packet_rsp.ret); + } + +END: + return ret; +} + +static int tc_ns_unregister_agent(struct vtzf_dev_file * dev_file, unsigned int agent_id) +{ + int ret = 0; + uint32_t seq_num = get_seq_num(0); + struct vtzf_shared_mem *shared_mem = NULL; + struct_packet_cmd_unregagent packet_cmd = {0}; + struct_packet_rsp_unregagent 
packet_rsp = {0}; + + if (!agent_id || !dev_file || dev_file->ptzfd <= 0) { + tloge("invalid params\n"); + return -EINVAL; + } + + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.cmd = VTZ_UNREGISTER_AGENT; + packet_cmd.seq_num = seq_num; + packet_cmd.ptzfd = dev_file->ptzfd; + + if (send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num)) { + ret = -EFAULT; + } + + if (dev_file->buf) { + shared_mem = (struct vtzf_shared_mem *)dev_file->buf; + mutex_lock(&dev_file->shared_mem_lock); + list_del(&shared_mem->head); + mutex_unlock(&dev_file->shared_mem_lock); + if (shared_mem->kernel_addr) + kfree(shared_mem->kernel_addr); + kfree(shared_mem); + dev_file->buf = NULL; + } + + return ret; +} + +static int send_wait_event(struct vtzf_dev_file *dev_file, unsigned int agent_id) +{ + int ret = 0; + uint32_t seq_num = get_seq_num(agent_id); + struct_packet_cmd_event packet_cmd = {0}; + struct_packet_rsp_general packet_rsp = {0}; + if (!dev_file || dev_file->ptzfd <= 0) { + tloge("invalid params\n"); + return -EINVAL; + } + + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.cmd = VTZF_WAIT_EVENT; + packet_cmd.seq_num = seq_num; + packet_cmd.ptzfd = dev_file->ptzfd; + packet_cmd.agent_id = agent_id; + + if (send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num)) { + tloge("sen to proxy failed\n"); + return -EFAULT; + } else { + ret = packet_rsp.ret; + } + return ret; +} + +static int send_event_response(struct vtzf_dev_file *dev_file, unsigned int agent_id) +{ + int ret = 0; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_event packet_cmd = {0}; + struct_packet_rsp_general packet_rsp = {0}; + if (!dev_file || dev_file->ptzfd <= 0) { + tloge("invalid params\n"); + return -EINVAL; + } + + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.cmd = VTZF_SEND_EVENT_RESPONSE; + packet_cmd.seq_num = seq_num; + packet_cmd.ptzfd = dev_file->ptzfd; + packet_cmd.agent_id = agent_id; + + if 
(send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num)) { + tloge("sen to proxy failed\n"); + return -EFAULT; + } else { + ret = packet_rsp.ret; + } + return ret; +} + +static int tc_ns_load_secfile(struct vtzf_dev_file *dev_file, + struct load_secfile_ioctl_struct *ioctlArg) +{ + int ret; + uint32_t seq_num = get_seq_num(0); + struct_packet_cmd_load_sec packet_cmd = {0}; + struct_packet_rsp_load_sec packet_rsp = {0}; + size_t file_size = 0; + char *buffer = NULL; + char *tmp_buffer = NULL; + + if (!ioctlArg || !dev_file || dev_file->ptzfd <= 0) { + tloge("invalid params\n"); + return -EINVAL; + } + + packet_cmd.ioctlArg = *ioctlArg; + file_size = (size_t)packet_cmd.ioctlArg.sec_file_info.file_size; + tlogd("file_size = %lu \n", file_size); + + buffer = (char *)alloc_res_shm(file_size); + if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)buffer)) { + tloge("vtzf_dev_file malloc failed\n"); + return -ENOMEM; + } + tmp_buffer = packet_cmd.ioctlArg.file_buffer; + + if (copy_from_user(buffer, (const void __user *)tmp_buffer, file_size)) { + tloge("file buf get failed \n"); + ret = -EFAULT; + goto END; + } + + packet_cmd.packet_size = sizeof(packet_cmd); + packet_cmd.seq_num = seq_num; + packet_cmd.cmd = VTZF_LOAD_SEC; + packet_cmd.ptzfd = dev_file->ptzfd; + packet_cmd.ioctlArg.file_buffer = (char *)virt_to_phys(buffer); + + ret = send_to_proxy(&packet_cmd, sizeof(packet_cmd), &packet_rsp, sizeof(packet_rsp), seq_num); + if (!ret) { + ret = packet_rsp.ret; + if (!ret) { + packet_rsp.ioctlArg.file_buffer = tmp_buffer; + *ioctlArg = packet_rsp.ioctlArg; + } else { + tloge("load_secfile failed ret is %d\n", ret); + } + } else { + tloge("send to proxy failed ret is %d\n", ret); + } +END: + dealloc_res_shm((void *)buffer); + return ret; +} + +static int ioctl_check_is_ccos(void __user *argp) +{ + int ret = 0; + unsigned int check_ccos = 1; + if (!argp) { + tloge("error input parameter\n"); + return -EINVAL; + } + if (copy_to_user(argp, 
&check_ccos, sizeof(unsigned int)) != 0) + ret = -EFAULT; + return ret; +} + +int public_ioctl(const struct file *file, unsigned int cmd, + unsigned long arg, bool is_from_client_node) +{ + int ret = -EINVAL; + void *argp = (void __user *)(uintptr_t)arg; + struct vtzf_dev_file *dev_file = NULL; + struct load_secfile_ioctl_struct ioctlArg; + dev_file = file->private_data; + switch (cmd) { + case TC_NS_CLIENT_IOCTL_WAIT_EVENT: + ret = send_wait_event(dev_file, (unsigned int)arg); + break; + case TC_NS_CLIENT_IOCTL_SEND_EVENT_RESPONSE: + ret = send_event_response(dev_file, (unsigned int)arg); + break; + case TC_NS_CLIENT_IOCTL_REGISTER_AGENT: + ret = ioctl_register_agent(dev_file, (void *)arg); + break; + case TC_NS_CLIENT_IOCTL_UNREGISTER_AGENT: + ret = tc_ns_unregister_agent(dev_file, (unsigned int)arg); + break; + case TC_NS_CLIENT_IOCTL_LOAD_APP_REQ: + if (copy_from_user(&ioctlArg, argp, sizeof(ioctlArg)) != 0) { + tloge("copy from user failed\n"); + return -EFAULT; + } + ret = tc_ns_load_secfile(file->private_data, &ioctlArg); + if (copy_to_user(argp, &ioctlArg, sizeof(ioctlArg)) != 0 && ret == 0) + ret = -EFAULT; + break; + case TC_NS_CLIENT_IOCTL_CHECK_CCOS: + ret = ioctl_check_is_ccos(argp); + break; + default: + tloge("invalid cmd!"); + return ret; + } + return ret; +} + +static long tc_private_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret = -EFAULT; + void *argp = (void __user *)(uintptr_t)arg; + struct vtzf_dev_file *dev_file = file->private_data; + if (!dev_file) { + tloge("invalid params\n"); + return -EINVAL; + } + + switch (cmd) { + case TC_NS_CLIENT_IOCTL_GET_TEE_VERSION: + ret = tc_ns_get_tee_version(file->private_data, argp); + break; + case TC_NS_CLIENT_IOCTL_GET_TEE_INFO: + ret = tc_ns_get_tee_info(dev_file->ptzfd, argp, false); + break; + case TC_NS_CLIENT_IOCTL_SET_NATIVECA_IDENTITY: + break; + case TC_NS_CLIENT_IOCTL_LATEINIT: + ret = tc_ns_late_init(arg); + break; + case TC_NS_CLIENT_IOCTL_SYC_SYS_TIME: + ret = 
sync_system_time_from_user(file->private_data, (struct tc_ns_client_time *)(uintptr_t)arg); + break; + default: + ret = public_ioctl(file, cmd, arg, false); + break; + } + + return ret; +} + +static long tc_cvm_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret = -EFAULT; + void *argp = (void __user *)(uintptr_t)arg; + struct vtzf_dev_file *dev_file = file->private_data; + if (!dev_file) { + tloge("invalid params\n"); + return -EINVAL; + } + + switch (cmd) { + case TC_NS_CLIENT_IOCTL_GET_TEE_INFO: + ret = tc_ns_get_tee_info(dev_file->ptzfd, argp, false); + break; + +#ifdef CONFIG_TEE_TELEPORT_SUPPORT + case TC_NS_CLIENT_IOCTL_PORTAL_REGISTER: + if (check_tee_teleport_auth() == 0) + ret = tee_portal_register(file->private_data, argp); + else + tloge("check tee_teleport path failed\n"); + break; + case TC_NS_CLIENT_IOCTL_PORTAL_WORK: + if (check_tee_teleport_auth() == 0) + ret = tee_portal_work(file->private_data); + else + tloge("check tee_teleport path failed\n"); + break; +#endif + default: + ret = public_ioctl(file, cmd, arg, false); + break; + } + + return ret; +} + +#ifdef CONFIG_COMPAT +long tc_compat_client_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + long ret; + + if (!file) + return -EINVAL; + + ret = tc_client_ioctl(file, cmd, (unsigned long)(uintptr_t)compat_ptr(arg)); + return ret; +} + +long tc_compat_private_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + long ret; + + if (!file) + return -EINVAL; + + ret = tc_private_ioctl(file, cmd, (unsigned long)(uintptr_t)compat_ptr(arg)); + return ret; +} + +long tc_compat_cvm_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + long ret; + + if (!file) + return -EINVAL; + + ret = tc_cvm_ioctl(file, cmd, (unsigned long)(uintptr_t)compat_ptr(arg)); + return ret; +} + +#endif + +MODULE_DESCRIPTION("virtual trustzone frontend driver"); +MODULE_VERSION("1.00"); +MODULE_AUTHOR("TrustCute"); + +module_init(vtzf_init); 
+module_exit(vtzf_exit); + +MODULE_LICENSE("GPL"); \ No newline at end of file diff --git a/trustzone-awared-vm/VM/vtzdriver/vtzf.h b/trustzone-awared-vm/VM/vtzdriver/vtzf.h new file mode 100644 index 0000000000000000000000000000000000000000..5ab93802605b8622818f67568f770e76c90ea0b8 --- /dev/null +++ b/trustzone-awared-vm/VM/vtzdriver/vtzf.h @@ -0,0 +1,108 @@ +#ifndef VTZF_H +#define VTZF_H + +#include +#include +#include "tc_ns_client.h" +#include "teek_ns_client.h" +#include "comm_structs.h" +#include "reserved_shm.h" + +#define VTZF_DEV "vtzf" +#define CONFIG_CONFIDENTIAL_CONTAINER +#ifndef SECURITY_AUTH_ENHANCE +#define SECURITY_AUTH_ENHANCE +#endif + +#ifndef ZERO_SIZE_PTR +#define ZERO_SIZE_PTR ((void *)16) +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= (unsigned long)ZERO_SIZE_PTR) +#endif + +#define INVALID_MAP_ADDR ((void *)-1) +#define MAILBOX_POOL_SIZE SZ_4M + +#define IS_TEMP_MEM(paramType) \ + (((paramType) == TEEC_MEMREF_TEMP_INPUT) || ((paramType) == TEEC_MEMREF_TEMP_OUTPUT) || \ + ((paramType) == TEEC_MEMREF_TEMP_INOUT)) + +#define IS_PARTIAL_MEM(paramType) \ + (((paramType) == TEEC_MEMREF_WHOLE) || ((paramType) == TEEC_MEMREF_PARTIAL_INPUT) || \ + ((paramType) == TEEC_MEMREF_PARTIAL_OUTPUT) || ((paramType) == TEEC_MEMREF_PARTIAL_INOUT)) + +#define IS_VALUE_MEM(paramType) \ + (((paramType) == TEEC_VALUE_INPUT) || ((paramType) == TEEC_VALUE_OUTPUT) || ((paramType) == TEEC_VALUE_INOUT)) + +/* Use during device initialization */ +struct dev_node { + struct class *driver_class; + struct cdev char_dev; + dev_t devt; + struct device *class_dev; + const struct file_operations *fops; + char *node_name; +}; + +/* List of devices that have already been opened*/ +struct vtzf_dev_list { + struct mutex dev_lock; /* for dev_file_list */ + struct list_head dev_file_list; +}; + +struct vtzf_dev_file { + unsigned int dev_file_id; + int32_t ptzfd; + struct list_head head; + struct mutex shared_mem_lock; /* for shared_mem_list */ + struct list_head 
shared_mem_list; + void *buf; +}; + +struct agent_buf +{ + uint32_t id; + uint32_t buf_size; + void *buf; +}; + +int tc_ns_client_open(struct vtzf_dev_file **dev_file, uint32_t flag); +static int vtzf_client_open(struct inode *inode, struct file *file); +static int vtzf_private_open(struct inode *inode, struct file *file); +static int vtzf_cvm_open(struct inode *inode, struct file *file); +int vtzf_close(struct inode *inode, struct file *file); +void shared_vma_open(struct vm_area_struct *vma); +void shared_vma_close(struct vm_area_struct *vma); +static int vtzf_mmap(struct file *filp, struct vm_area_struct *vma); +static long tc_client_ioctl(struct file *file, + unsigned int cmd, unsigned long arg); +static long tc_private_ioctl(struct file *file, + unsigned int cmd, unsigned long arg); +static long tc_cvm_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); +static int public_ioctl(const struct file *file, unsigned int cmd, + unsigned long arg, bool is_from_client_node); +static int open_tzdriver(struct vtzf_dev_file *dev_file, uint32_t flag); +static int close_tzdriver(struct vtzf_dev_file *dev_file); +#ifdef CONFIG_COMPAT +long tc_compat_client_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +long tc_compat_private_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +long tc_compat_cvm_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); +#endif + +static int proxy_mmap(struct vtzf_dev_file *dev_file, void * user_buffer, + uint32_t buffer_size, uint32_t pgoff, uint8_t unmap); + +static int tc_ns_open_session(struct vtzf_dev_file *dev_file, + struct tc_ns_client_context *clicontext); +int tc_client_session_ioctl(struct vtzf_dev_file *dev_file, + unsigned int cmd, unsigned long arg); +static int tc_ns_send_cancel_cmd(struct vtzf_dev_file *dev_file, void *argp); +static int tc_ns_client_login_func(struct vtzf_dev_file *dev_file, + const void __user *buffer); +static int tc_ns_get_tee_version(struct vtzf_dev_file 
*dev_file, + void __user *argp); +static int tc_ns_late_init(unsigned long arg); +static int sync_system_time_from_user(struct vtzf_dev_file *dev_file, + const struct tc_ns_client_time *user_time); +#endif // VTZF_H \ No newline at end of file diff --git a/trustzone-awared-vm/demo/test_src.zip b/trustzone-awared-vm/demo/test_src.zip new file mode 100644 index 0000000000000000000000000000000000000000..0c49b6367d5791c64f7e9e31479b1d42e9b43639 Binary files /dev/null and b/trustzone-awared-vm/demo/test_src.zip differ diff --git "a/trustzone-awared-vm/docs/TrustZone\346\204\237\347\237\245\347\232\204\346\234\272\345\257\206\350\231\232\346\213\237\346\234\272\344\274\227\346\231\272\351\241\271\347\233\256\344\270\200\346\234\237\350\257\246\347\273\206\350\256\276\350\256\241\350\257\264\346\230\216\344\271\246.docx" "b/trustzone-awared-vm/docs/TrustZone\346\204\237\347\237\245\347\232\204\346\234\272\345\257\206\350\231\232\346\213\237\346\234\272\344\274\227\346\231\272\351\241\271\347\233\256\344\270\200\346\234\237\350\257\246\347\273\206\350\256\276\350\256\241\350\257\264\346\230\216\344\271\246.docx" new file mode 100644 index 0000000000000000000000000000000000000000..bb5c71e98489cab4dab7b7101025ef6f3093b259 Binary files /dev/null and "b/trustzone-awared-vm/docs/TrustZone\346\204\237\347\237\245\347\232\204\346\234\272\345\257\206\350\231\232\346\213\237\346\234\272\344\274\227\346\231\272\351\241\271\347\233\256\344\270\200\346\234\237\350\257\246\347\273\206\350\256\276\350\256\241\350\257\264\346\230\216\344\271\246.docx" differ diff --git a/trustzone-awared-vm/docs/deployment.docx b/trustzone-awared-vm/docs/deployment.docx new file mode 100644 index 0000000000000000000000000000000000000000..d8d152762d3ca44aeec28e769932afc9c9b34d84 Binary files /dev/null and b/trustzone-awared-vm/docs/deployment.docx differ diff --git a/trustzone-awared-vm/docs/picture/arch.png b/trustzone-awared-vm/docs/picture/arch.png new file mode 100644 index 
0000000000000000000000000000000000000000..ed8fe983a713f7e34046f69f6a105aeab5afa1a8 Binary files /dev/null and b/trustzone-awared-vm/docs/picture/arch.png differ diff --git "a/trustzone-awared-vm/docs/\346\265\213\350\257\225.docx" "b/trustzone-awared-vm/docs/\346\265\213\350\257\225.docx" new file mode 100644 index 0000000000000000000000000000000000000000..1ebdb3a4f05da9784403476963b66832b3dcedfb Binary files /dev/null and "b/trustzone-awared-vm/docs/\346\265\213\350\257\225.docx" differ