diff --git a/drivers/hck/vendor_hooks.c b/drivers/hck/vendor_hooks.c index d3e3e2befd9f085f44c530e0933d763bb1615a7e..4fd1a37c61e7a375d4d9b3c62da4f4a6f2eb6b05 100644 --- a/drivers/hck/vendor_hooks.c +++ b/drivers/hck/vendor_hooks.c @@ -9,3 +9,4 @@ #define CREATE_LITE_VENDOR_HOOK /* add your lite vendor hook header file here */ #include +#include diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile index 0d768a13e2bb705ff645df6cf1e3d1c7a0055726..db60ec84d94b8fefd66dfaf277a001037ea6cbb6 100644 --- a/drivers/misc/lkdtm/Makefile +++ b/drivers/misc/lkdtm/Makefile @@ -10,6 +10,7 @@ lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o lkdtm-$(CONFIG_LKDTM) += usercopy.o lkdtm-$(CONFIG_LKDTM) += stackleak.o lkdtm-$(CONFIG_LKDTM) += cfi.o +lkdtm-$(CONFIG_LKDTM) += xpm.o KASAN_SANITIZE_stackleak.o := n KCOV_INSTRUMENT_rodata.o := n diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c index 32b3d77368e37d734ad541e1cda80997fa733345..740c9ecde7348698e486980293ddb7a7c6a7428b 100644 --- a/drivers/misc/lkdtm/core.c +++ b/drivers/misc/lkdtm/core.c @@ -174,6 +174,12 @@ static const struct crashtype crashtypes[] = { CRASHTYPE(STACKLEAK_ERASING), CRASHTYPE(CFI_FORWARD_PROTO), CRASHTYPE(DOUBLE_FAULT), +#ifdef CONFIG_XPM_DEBUG + CRASHTYPE(XPM_ELF_CODE_SEGMENT), + CRASHTYPE(XPM_ELF_CODE_SEGMENT_CACHE_SIZE), + CRASHTYPE(XPM_ELF_CODE_SEGMENT_CACHE_DESTROY), + CRASHTYPE(XPM_ELF_CODE_SEGMENT_CACHE_CLEAR), +#endif }; diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h index 6dec4c9b442ff34e9c516e54ee8d4706af7ad0bd..c277bdbd4d0e80cab5ee741e1a86888bcbf410ba 100644 --- a/drivers/misc/lkdtm/lkdtm.h +++ b/drivers/misc/lkdtm/lkdtm.h @@ -102,4 +102,10 @@ void lkdtm_STACKLEAK_ERASING(void); /* cfi.c */ void lkdtm_CFI_FORWARD_PROTO(void); +#ifdef CONFIG_XPM_DEBUG +void lkdtm_XPM_ELF_CODE_SEGMENT(void); +void lkdtm_XPM_ELF_CODE_SEGMENT_CACHE_SIZE(void); +void lkdtm_XPM_ELF_CODE_SEGMENT_CACHE_DESTROY(void); +void lkdtm_XPM_ELF_CODE_SEGMENT_CACHE_CLEAR(void); +#endif 
#endif diff --git a/drivers/misc/lkdtm/xpm.c b/drivers/misc/lkdtm/xpm.c new file mode 100644 index 0000000000000000000000000000000000000000..238299f191d7642ee39aa79c932c53bb6bc1cbfd --- /dev/null +++ b/drivers/misc/lkdtm/xpm.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + */ +#include "lkdtm.h" +#include +#include +#include + +#ifdef CONFIG_XPM_DEBUG +struct exec_file_signature_info; +int get_exec_file_signature_info(struct file*, bool, struct exec_file_signature_info **); +int put_exec_file_signature_info(struct exec_file_signature_info *); +int test_destroy_elf_code_segment_info_cache(void); +void test_rm_elf_code_segment_info_cache(void); +void test_print_elf_code_segment_info(const char *file_path, const struct exec_file_signature_info *file_info); +int test_delete_elf_code_segment_info(struct exec_file_signature_info *code_segment_info); +void test_get_elf_code_segment_info_cache_size(void); + +void lkdtm_XPM_ELF_CODE_SEGMENT(void) +{ + int ret; + struct exec_file_signature_info *file_info = NULL; + struct exec_file_signature_info *file_info_cache = NULL; + char file_path[PATH_MAX] = "/system/bin/dmesg"; + struct file *test_file; + + test_file = filp_open(file_path, O_RDONLY, 0); + if (test_file == NULL) { + pr_info("[%s:%d] filp_open failed\n", __func__, __LINE__); + return; + } + + ret = get_exec_file_signature_info(test_file, false, &file_info); + if (ret < 0) { + filp_close(test_file, 0); + pr_info("[%s:%d] get_exec_file_signature_info failed\n", __func__, __LINE__); + return; + } + + ret = put_exec_file_signature_info(file_info); + if (ret < 0) { + filp_close(test_file, 0); + pr_info("[%s:%d] put_exec_file_signature_info failed\n", __func__, __LINE__); + return; + } + + (void)get_exec_file_signature_info(test_file, false, &file_info_cache); + (void)put_exec_file_signature_info(file_info_cache); + + if (file_info_cache != file_info) + pr_info("[%s:%d] get cache failed!\n", __func__, 
__LINE__); + + ret = test_delete_elf_code_segment_info(file_info); + if (ret < 0) { + filp_close(test_file, 0); + pr_info("[%s:%d] delete_elf_code_segment_info failed\n", __func__, __LINE__); + return; + } + test_print_elf_code_segment_info(file_path, file_info); + filp_close(test_file, 0); +} + +void lkdtm_XPM_ELF_CODE_SEGMENT_CACHE_DESTROY(void) +{ + test_destroy_elf_code_segment_info_cache(); +} + +void lkdtm_XPM_ELF_CODE_SEGMENT_CACHE_CLEAR(void) +{ + test_rm_elf_code_segment_info_cache(); +} + +void lkdtm_XPM_ELF_CODE_SEGMENT_CACHE_SIZE(void) +{ + test_get_elf_code_segment_info_cache_size(); +} +#endif diff --git a/fs/inode.c b/fs/inode.c index 9f49e0bdc2f77bee7207050071577d31e7cdeeab..e56bbfbb0c34145c31ecbba11f38c2d7de30a08c 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include "internal.h" @@ -258,6 +259,7 @@ void __destroy_inode(struct inode *inode) security_inode_free(inode); fsnotify_inode_delete(inode); locks_free_lock_context(inode); + xpm_delete_cache_node_hook(inode); if (!inode->i_nlink) { WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0); atomic_long_dec(&inode->i_sb->s_remove_count); diff --git a/fs/proc/base.c b/fs/proc/base.c index 27145778c144f26473ca8b8ac9d703b9173abe74..96cfc8e0a8c5445d2b26e0d3137b78fc2a21206e 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1576,6 +1576,54 @@ static const struct file_operations proc_pid_sched_group_id_operations = { }; #endif /* CONFIG_SCHED_RTG_DEBUG */ +#ifdef CONFIG_SECURITY_XPM +#define XPM_REGION_LEN 48 +static int xpm_region_open(struct inode *inode, struct file *file) +{ + struct mm_struct *mm = proc_mem_open(inode, PTRACE_MODE_READ); + + if (IS_ERR(mm)) + return PTR_ERR(mm); + + file->private_data = mm; + return 0; +} + +static ssize_t xpm_region_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + struct mm_struct *mm = file->private_data; + char xpm_region[XPM_REGION_LEN] = {0}; + size_t len; + + if 
(!mm) + return 0; + + len = snprintf(xpm_region, XPM_REGION_LEN - 1, "%lx-%lx", + mm->xpm_region.addr_start, + mm->xpm_region.addr_end); + + return simple_read_from_buffer(buf, count, pos, xpm_region, len); +} + +static int xpm_region_release(struct inode *inode, struct file *file) +{ + struct mm_struct *mm = file->private_data; + + if (mm) + mmdrop(mm); + + return 0; +} + +static const struct file_operations proc_xpm_region_operations = { + .open = xpm_region_open, + .read = xpm_region_read, + .llseek = generic_file_llseek, + .release = xpm_region_release, +}; +#endif /* CONFIG_SECURITY_XPM */ + #ifdef CONFIG_SCHED_AUTOGROUP /* * Print out autogroup related information: @@ -3459,6 +3507,9 @@ static const struct pid_entry tgid_base_stuff[] = { #ifdef CONFIG_SCHED_RTG_DEBUG REG("sched_group_id", S_IRUGO|S_IWUGO, proc_pid_sched_group_id_operations), #endif +#ifdef CONFIG_SECURITY_XPM + REG("xpm_region", S_IRUSR|S_IRGRP, proc_xpm_region_operations), +#endif }; static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx) @@ -3794,6 +3845,9 @@ static const struct pid_entry tid_base_stuff[] = { #ifdef CONFIG_SCHED_RTG_DEBUG REG("sched_group_id", S_IRUGO|S_IWUGO, proc_pid_sched_group_id_operations), #endif +#ifdef CONFIG_SECURITY_XPM + REG("xpm_region", S_IRUSR|S_IRGRP, proc_xpm_region_operations), +#endif }; static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx) diff --git a/include/linux/hck/lite_hck_xpm.h b/include/linux/hck/lite_hck_xpm.h new file mode 100644 index 0000000000000000000000000000000000000000..da6934882b03a02bc59d28caf24d90891abc1cd0 --- /dev/null +++ b/include/linux/hck/lite_hck_xpm.h @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + */ + +#ifndef _LITE_HCK_XPM_H +#define _LITE_HCK_XPM_H + +#include "linux/mm_types.h" +#include +#include +#include + +#ifndef CONFIG_HCK +#undef CALL_HCK_LITE_HOOK +#define CALL_HCK_LITE_HOOK(name, args...) 
+#undef REGISTER_HCK_LITE_HOOK +#define REGISTER_HCK_LITE_HOOK(name, probe) +#undef REGISTER_HCK_LITE_DATA_HOOK +#define REGISTER_HCK_LITE_DATA_HOOK(name, probe, data) +#else +DECLARE_HCK_LITE_HOOK(xpm_delete_cache_node_lhck, + TP_PROTO(struct inode *file_node), + TP_ARGS(file_node)); + +DECLARE_HCK_LITE_HOOK(xpm_region_outer_lhck, + TP_PROTO(unsigned long addr_start, unsigned long addr_end, + unsigned long flags, bool *ret), + TP_ARGS(addr_start, addr_end, flags, ret)); + +DECLARE_HCK_LITE_HOOK(xpm_get_unmapped_area_lhck, + TP_PROTO(unsigned long addr, unsigned long len, unsigned long map_flags, + unsigned long unmapped_flags, unsigned long *ret), + TP_ARGS(addr, len, map_flags, unmapped_flags, ret)); + +DECLARE_HCK_LITE_HOOK(xpm_integrity_equal_lhck, + TP_PROTO(struct page *page, struct page *kpage, bool *ret), + TP_ARGS(page, kpage, ret)); + +DECLARE_HCK_LITE_HOOK(xpm_integrity_check_lhck, + TP_PROTO(struct vm_area_struct *vma, unsigned int vflags, + unsigned long addr, struct page *page, vm_fault_t *ret), + TP_ARGS(vma, vflags, addr, page, ret)); + +DECLARE_HCK_LITE_HOOK(xpm_integrity_validate_lhck, + TP_PROTO(struct vm_area_struct *vma, unsigned int vflags, + unsigned long addr, struct page *page, vm_fault_t *ret), + TP_ARGS(vma, vflags, addr, page, ret)); + +DECLARE_HCK_LITE_HOOK(xpm_integrity_update_lhck, + TP_PROTO(struct vm_area_struct *vma, unsigned int vflags, + struct page *page), + TP_ARGS(vma, vflags, page)); +#endif /* CONFIG_HCK */ + +#endif /* _LITE_HCK_XPM_H */ diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index d13631a5e908760eb5b07e065bda9fa14256d892..d7c61d382c4afca33d4327dd69cd695d7a26356b 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -161,6 +161,9 @@ LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd, LSM_HOOK(int, 0, mmap_addr, unsigned long addr) LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot, unsigned long prot, unsigned long flags) +#ifdef 
CONFIG_SECURITY_XPM +LSM_HOOK(int, 0, mmap_region, struct vm_area_struct *vma) +#endif LSM_HOOK(int, 0, file_mprotect, struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot) LSM_HOOK(int, 0, file_lock, struct file *file, unsigned int cmd) diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 64cdf4d7bfb30b2c6363e56d02ef5c9d6b2a5c53..4a97b07338ba8293bf7226eb1bb1477e542037ac 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -534,6 +534,10 @@ * @prot contains the protection that will be applied by the kernel. * @flags contains the operational flags. * Return 0 if permission is granted. + * @mmap_region : + * Check permission for a mmap operation. The @file may be NULL, e,g. + * if mapping anonymous memory. + * @vma contains the memory region to mmap. * @file_mprotect: * Check permissions before changing memory access permissions. * @vma contains the memory region to modify. diff --git a/include/linux/mm.h b/include/linux/mm.h index 8b199766b8837879b247469bed0ea2359cfde71d..9ed1be47c8cb96bbdc0a9c096fe8f5d224e1ed35 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -316,6 +316,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */ #define VM_HIGH_ARCH_BIT_5 37 /* bit only usable on 64-bit architectures */ #define VM_HIGH_ARCH_BIT_6 38 /* bit only usable on 64-bit architectures */ +#define VM_HIGH_ARCH_BIT_7 39 /* bit only usable on 64-bit architectures */ #define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0) #define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1) #define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2) @@ -323,6 +324,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4) #define VM_HIGH_ARCH_5 BIT(VM_HIGH_ARCH_BIT_5) #define VM_HIGH_ARCH_6 BIT(VM_HIGH_ARCH_BIT_6) +#define VM_HIGH_ARCH_7 BIT(VM_HIGH_ARCH_BIT_7) #endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */ #ifdef CONFIG_MEM_PURGEABLE @@ -333,6 
+335,12 @@ extern unsigned int kobjsize(const void *objp); #define VM_USEREXPTE 0 #endif /* CONFIG_MEM_PURGEABLE */ +#ifdef CONFIG_SECURITY_XPM +#define VM_XPM VM_HIGH_ARCH_7 +#else /* CONFIG_SECURITY_XPM */ +#define VM_XPM VM_NONE +#endif /* CONFIG_SECURITY_XPM */ + #ifdef CONFIG_ARCH_HAS_PKEYS # define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0 # define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */ @@ -2659,6 +2667,7 @@ extern unsigned long __must_check vm_mmap(struct file *, unsigned long, struct vm_unmapped_area_info { #define VM_UNMAPPED_AREA_TOPDOWN 1 +#define VM_UNMAPPED_AREA_XPM 2 unsigned long flags; unsigned long length; unsigned long low_limit; diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index d86bc1d2dcc3c7e03076158e5eed317431c835ec..3eb93c4870045ce60d6a096b4fd71bdbbc7a3391 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -16,6 +16,7 @@ #include #include #include +#include #include @@ -603,6 +604,10 @@ struct mm_struct { #ifdef CONFIG_IOMMU_SUPPORT u32 pasid; #endif + +#ifdef CONFIG_SECURITY_XPM + struct xpm_region xpm_region; +#endif } __randomize_layout; /* diff --git a/include/linux/mman.h b/include/linux/mman.h index 629cefc4ecba671682408ccdfe53a0a0726dcebd..3225d2c14f87aa6b8e4d3e40a7685358a1f87b76 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -154,6 +154,7 @@ calc_vm_flag_bits(unsigned long flags) _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) | _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) | _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) | + _calc_vm_trans(flags, MAP_XPM, VM_XPM ) | arch_calc_vm_flag_bits(flags); } diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index dcf83c01f57b07b32ac115e4c5e3228120b953b2..7b3212b93d3e6b2f0ad73d935b4338735e10af77 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -145,6 +145,10 @@ enum pageflags { #endif #ifdef CONFIG_MEM_PURGEABLE PG_purgeable, +#endif +#ifdef CONFIG_SECURITY_XPM + 
PG_xpm_readonly, + PG_xpm_writetainted, #endif __NR_PAGEFLAGS, @@ -350,6 +354,14 @@ __PAGEFLAG(Slab, slab, PF_NO_TAIL) __PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL) PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */ +#ifdef CONFIG_SECURITY_XPM +PAGEFLAG(XPMReadonly, xpm_readonly, PF_HEAD) +PAGEFLAG(XPMWritetainted, xpm_writetainted, PF_HEAD) +#else +PAGEFLAG_FALSE(XPMReadonly) +PAGEFLAG_FALSE(XPMWritetainted) +#endif + /* Xen */ PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND) TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND) @@ -843,11 +855,18 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) * Flags checked when a page is freed. Pages being freed should not have * these flags set. It they are, there is a problem. */ +#ifdef CONFIG_SECURITY_XPM +#define __XPM_PAGE_FLAGS (1UL << PG_xpm_readonly | 1UL << PG_xpm_writetainted) +#else +#define __XPM_PAGE_FLAGS 0 +#endif + #define PAGE_FLAGS_CHECK_AT_FREE \ (1UL << PG_lru | 1UL << PG_locked | \ 1UL << PG_private | 1UL << PG_private_2 | \ 1UL << PG_writeback | 1UL << PG_reserved | \ 1UL << PG_slab | 1UL << PG_active | \ + __XPM_PAGE_FLAGS | \ 1UL << PG_unevictable | __PG_MLOCKED) /* diff --git a/include/linux/security.h b/include/linux/security.h index e9b4b541061477c127cd89d9125ed6856534e10e..f940ab809ad1b3795c2d362d8de2e26785f8a062 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -2002,4 +2002,13 @@ static inline int security_perf_event_write(struct perf_event *event) #endif /* CONFIG_SECURITY */ #endif /* CONFIG_PERF_EVENTS */ +#if IS_ENABLED(CONFIG_SECURITY) && IS_ENABLED(CONFIG_SECURITY_XPM) +extern int security_mmap_region(struct vm_area_struct *vma); +#else +static inline int security_mmap_region(struct vm_area_struct *vma) +{ + return 0; +} +#endif /* CONFIG_SECURITY */ + #endif /* ! 
__LINUX_SECURITY_H */ diff --git a/include/linux/xpm.h b/include/linux/xpm.h new file mode 100644 index 0000000000000000000000000000000000000000..fd7f65bca590ad41e3300dbd4f03acb48f1648b7 --- /dev/null +++ b/include/linux/xpm.h @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + */ + +#ifndef _XPM_H +#define _XPM_H + +#include +#include +#include +#include +#include + +/** + * when inodes are destroyed, the corresponding cache must be destroyed + */ +static inline void xpm_delete_cache_node_hook(struct inode *file_node) +{ + CALL_HCK_LITE_HOOK(xpm_delete_cache_node_lhck, file_node); +} + +/** + * check whether input address range is out of the xpm region + */ +static inline bool xpm_region_outer_hook(unsigned long addr_start, + unsigned long addr_end, unsigned long flags) +{ + bool ret = true; + + CALL_HCK_LITE_HOOK(xpm_region_outer_lhck, addr_start, + addr_end, flags, &ret); + return ret; +} + +/** + * get unmapped area in xpm region + */ +static inline unsigned long xpm_get_unmapped_area_hook(unsigned long addr, + unsigned long len, unsigned long map_flags, + unsigned long unmapped_flags) +{ + unsigned long ret = 0; + + CALL_HCK_LITE_HOOK(xpm_get_unmapped_area_lhck, addr, len, + map_flags, unmapped_flags, &ret); + return ret; +} + +/* + * check the confliction of a page's xpm flags, make sure a process will + * not map any RO page into a writable vma or a WT page into a execuable/XPM + * memory region. 
+ */ +static inline vm_fault_t xpm_integrity_check_hook(struct vm_area_struct *vma, + unsigned int vflags, unsigned long addr, struct page *page) +{ + vm_fault_t ret = 0; + + CALL_HCK_LITE_HOOK(xpm_integrity_check_lhck, vma, vflags, + addr, page, &ret); + return ret; +} + +static inline +vm_fault_t xpm_integrity_validate_hook(struct vm_area_struct *vma, + unsigned int vflags, unsigned long addr, struct page *page) +{ + vm_fault_t ret = 0; + + CALL_HCK_LITE_HOOK(xpm_integrity_validate_lhck, vma, vflags, + addr, page, &ret); + return ret; +} + +static inline +void xpm_integrity_update_hook(struct vm_area_struct *vma, + unsigned int vflags, struct page *page) +{ + CALL_HCK_LITE_HOOK(xpm_integrity_update_lhck, vma, vflags, page); +} + +static inline bool xpm_integrity_equal_hook(struct page *page, + struct page *kpage) +{ + bool ret = true; + + CALL_HCK_LITE_HOOK(xpm_integrity_equal_lhck, page, kpage, &ret); + return ret; +} + +#ifdef CONFIG_ARM64 +#define pte_user_mkexec(oldpte, ptent) \ + ((!pte_user_exec(oldpte) && pte_user_exec(ptent))) +#else +#define pte_user_mkexec(oldpte, ptent) 1 +#endif + +#endif /* _XPM_H */ diff --git a/include/linux/xpm_types.h b/include/linux/xpm_types.h new file mode 100644 index 0000000000000000000000000000000000000000..b56ae6b5b8ee70ca250ec2d0076f9263497a7f4c --- /dev/null +++ b/include/linux/xpm_types.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
+ */ + +#ifndef _XPM_TYPES_H +#define _XPM_TYPES_H + +#include + +struct xpm_region { + unsigned long addr_start; /* start address of xpm region */ + unsigned long addr_end; /* end address of xpm region */ +}; + +#endif /* _XPM_TYPES_H */ \ No newline at end of file diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 2332482f7df748210c509d7a4023966944b41e4b..c452bf5bd5b436277c3d61ca40c4bb08528fe549 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -61,6 +61,12 @@ #define IF_HAVE_PG_PURGEABLE(flag,string) #endif +#ifdef CONFIG_SECURITY_XPM +#define IF_HAVE_PG_XPM_INTEGRITY(flag,string) ,{1UL << flag, string} +#else +#define IF_HAVE_PG_XPM_INTEGRITY(flag,string) +#endif + #ifdef CONFIG_MMU #define IF_HAVE_PG_MLOCK(flag,string) ,{1UL << flag, string} #else @@ -114,6 +120,8 @@ {1UL << PG_swapbacked, "swapbacked" }, \ {1UL << PG_unevictable, "unevictable" } \ IF_HAVE_PG_PURGEABLE(PG_purgeable, "purgeable" ) \ +IF_HAVE_PG_XPM_INTEGRITY(PG_xpm_readonly, "readonly") \ +IF_HAVE_PG_XPM_INTEGRITY(PG_xpm_writetainted, "writetainted") \ IF_HAVE_PG_MLOCK(PG_mlocked, "mlocked" ) \ IF_HAVE_PG_UNCACHED(PG_uncached, "uncached" ) \ IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \ diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h index f94f65d429bea3c26bdcdc3197376916399089e9..da6cf2b78e8da33fb5b19478125655db9f441eb0 100644 --- a/include/uapi/asm-generic/mman-common.h +++ b/include/uapi/asm-generic/mman-common.h @@ -21,6 +21,7 @@ #define MAP_TYPE 0x0f /* Mask for type of mapping */ #define MAP_FIXED 0x10 /* Interpret addr exactly */ #define MAP_ANONYMOUS 0x20 /* don't use a file */ +#define MAP_XPM 0x40 /* xpm control memory */ /* 0x0100 - 0x4000 flags are defined in asm-generic/mman.h */ #define MAP_POPULATE 0x008000 /* populate (prefault) pagetables */ diff --git a/mm/ksm.c b/mm/ksm.c index 25b8362a4f89537abe7c03156d7407d9a18383c1..1ccca1e6099bafa3353a87b9a2b32d280f2457dc 100644 
--- a/mm/ksm.c +++ b/mm/ksm.c @@ -41,6 +41,7 @@ #include #include "internal.h" +#include #ifdef CONFIG_NUMA #define NUMA(x) (x) @@ -1212,6 +1213,10 @@ static int try_to_merge_one_page(struct vm_area_struct *vma, if (!PageAnon(page)) goto out; + /* Check XPM flags */ + if(!xpm_integrity_equal_hook(page, kpage)) + goto out; + /* * We need the page lock to read a stable PageSwapCache in * write_protect_page(). We use trylock_page() instead of diff --git a/mm/memory.c b/mm/memory.c index ea5741b3288f41e9d6e83a7b3c1d72255f2d94a2..d647c1320f4a2f507c8a9f03c68a181d26cb0fbb 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -86,6 +86,7 @@ #include "pgalloc-track.h" #include "internal.h" +#include #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST) #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid. @@ -2942,6 +2943,9 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) */ set_pte_at_notify(mm, vmf->address, vmf->pte, entry); update_mmu_cache(vma, vmf->address, vmf->pte); + + xpm_integrity_update_hook(vma, vmf->flags, new_page); + if (old_page) { /* * Only after switching the pte to the new page may @@ -3036,6 +3040,13 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) pte_unmap_unlock(vmf->pte, vmf->ptl); return VM_FAULT_NOPAGE; } + + if (unlikely(xpm_integrity_validate_hook(vmf->vma, vmf->flags, vmf->address, + vmf->page))) { + pte_unmap_unlock(vmf->pte, vmf->ptl); + return VM_FAULT_SIGSEGV; + } + wp_page_reuse(vmf); return 0; } @@ -3087,6 +3098,14 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf) return tmp; } } else { + + if (unlikely(xpm_integrity_validate_hook(vmf->vma, vmf->flags, vmf->address, + vmf->page))){ + pte_unmap_unlock(vmf->pte, vmf->ptl); + put_page(vmf->page); + return VM_FAULT_SIGSEGV; + } + wp_page_reuse(vmf); lock_page(vmf->page); } @@ -3171,6 +3190,13 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) * it's dark out, and we're wearing sunglasses. Hit it. 
*/ unlock_page(page); + + if (unlikely(xpm_integrity_validate_hook(vmf->vma, vmf->flags, vmf->address, + vmf->page))){ + pte_unmap_unlock(vmf->pte, vmf->ptl); + return VM_FAULT_SIGSEGV; + } + wp_page_reuse(vmf); return VM_FAULT_WRITE; } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == @@ -3486,6 +3512,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) * must be called after the swap_free(), or it will never succeed. */ + + if (unlikely(xpm_integrity_validate_hook(vmf->vma, vmf->flags, vmf->address, + page))){ + ret = VM_FAULT_SIGSEGV; + goto out_nomap; + } + inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS); pte = mk_pte(page, vma->vm_page_prot); @@ -3595,8 +3628,12 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) if (vma->vm_flags & VM_USEREXPTE) { if (do_uxpte_page_fault(vmf, &entry)) goto oom; - else + else{ + if(xpm_integrity_check_hook(vma, vmf->flags, vmf->address, + pte_page(entry))) + return VM_FAULT_SIGSEGV; goto got_page; + } } /* Use the zero-page for reads */ @@ -3673,6 +3710,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) if (vma->vm_flags & VM_PURGEABLE) uxpte_set_present(vma, vmf->address); + if(!pte_special(entry)){ + xpm_integrity_update_hook(vma, vmf->flags, page); + } + set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); /* No need to invalidate - it was non-present before */ @@ -3930,6 +3971,11 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page) return VM_FAULT_NOPAGE; } + /* check the confliction of xpm integrity flags*/ + if (unlikely(xpm_integrity_validate_hook(vmf->vma, vmf->flags, + vmf->address, page))) + return VM_FAULT_SIGSEGV; + flush_icache_page(vma, page); entry = mk_pte(page, vma->vm_page_prot); entry = pte_sw_mkyoung(entry); diff --git a/mm/migrate.c b/mm/migrate.c index cf0e966a8faa62b8d0be1f24e6cf8b5b0e6b844a..7ed4eb406d1240f12d7470f1cea94f277ecec1ed 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -636,6 +636,13 @@ void 
migrate_page_states(struct page *newpage, struct page *page) if (page_is_idle(page)) set_page_idle(newpage); + /* Migrate the page's xpm state */ + if(PageXPMWritetainted(page)) + SetPageXPMWritetainted(newpage); + + if(PageXPMReadonly(page)) + SetPageXPMReadonly(newpage); + /* * Copy NUMA information to the new page, to prevent over-eager * future migrations of this same page. diff --git a/mm/mmap.c b/mm/mmap.c index bccc3235cd61f37f2d30dce0f1a7e1828a8dfdb3..8e47a7d0449719c8671ccbfb5f3f1a75dff3b0fd 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -48,6 +48,7 @@ #include #include #include +#include #include #include @@ -1877,8 +1878,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr, vma_set_anonymous(vma); } - /* Allow architectures to sanity-check the vm_flags */ - if (!arch_validate_flags(vma->vm_flags)) { + /* Allow architectures to sanity-check the vma */ + if (security_mmap_region(vma) || + !arch_validate_flags(vma->vm_flags)) { error = -EINVAL; if (file) goto close_and_free_vma; @@ -2000,8 +2002,9 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info) /* Check if current node has a suitable gap */ if (gap_start > high_limit) return -ENOMEM; - if (gap_end >= low_limit && - gap_end > gap_start && gap_end - gap_start >= length) + if ((gap_end >= low_limit && + gap_end > gap_start && gap_end - gap_start >= length) && + (xpm_region_outer_hook(gap_start, gap_end, info->flags))) goto found; /* Visit right subtree if it looks promising */ @@ -2104,8 +2107,9 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) gap_end = vm_start_gap(vma); if (gap_end < low_limit) return -ENOMEM; - if (gap_start <= high_limit && - gap_end > gap_start && gap_end - gap_start >= length) + if ((gap_start <= high_limit && + gap_end > gap_start && gap_end - gap_start >= length) && + (xpm_region_outer_hook(gap_start, gap_end, info->flags))) goto found; /* Visit left subtree if it looks promising */ @@ -2187,6 +2191,7 @@ unsigned long 
arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { + unsigned long xpm_addr; struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; struct vm_unmapped_area_info info; @@ -2195,6 +2200,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, if (len > mmap_end - mmap_min_addr) return -ENOMEM; + xpm_addr = xpm_get_unmapped_area_hook(addr, len, flags, 0); + if (xpm_addr) + return xpm_addr; + if (flags & MAP_FIXED) return addr; @@ -2203,7 +2212,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma_prev(mm, addr, &prev); if (mmap_end - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma)) && - (!prev || addr >= vm_end_gap(prev))) + (!prev || addr >= vm_end_gap(prev)) && + (xpm_region_outer_hook(addr, addr + len, 0))) return addr; } @@ -2227,6 +2237,7 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { + unsigned long xpm_addr; struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; struct vm_unmapped_area_info info; @@ -2236,6 +2247,11 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, if (len > mmap_end - mmap_min_addr) return -ENOMEM; + xpm_addr = xpm_get_unmapped_area_hook(addr, len, flags, + VM_UNMAPPED_AREA_TOPDOWN); + if (xpm_addr) + return xpm_addr; + if (flags & MAP_FIXED) return addr; @@ -2245,7 +2261,8 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, vma = find_vma_prev(mm, addr, &prev); if (mmap_end - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma)) && - (!prev || addr >= vm_end_gap(prev))) + (!prev || addr >= vm_end_gap(prev)) && + (xpm_region_outer_hook(addr, addr + len, 0))) return addr; } diff --git a/mm/mprotect.c b/mm/mprotect.c index a5ba7333f16389bba61c1a5f1586227e9f245f50..d7131fcffc3b0ddcb3a15fee28a061a2a20769fa 100644 
--- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include "internal.h" @@ -138,6 +139,13 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, !(vma->vm_flags & VM_SOFTDIRTY))) { ptent = pte_mkwrite(ptent); } + + /* if exec added, check xpm integrity before set pte */ + if(pte_user_mkexec(oldpte, ptent) && + unlikely(xpm_integrity_validate_hook(vma, 0, addr, + vm_normal_page(vma, addr, oldpte)))) + continue; + ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent); pages++; } else if (is_swap_pte(oldpte)) { diff --git a/security/Kconfig b/security/Kconfig index 9893c316da8979cb489e1ac83a85a9661de64f2e..43cd1c19a90ade762db74b7fbe09983812dc7c73 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -230,6 +230,7 @@ source "security/loadpin/Kconfig" source "security/yama/Kconfig" source "security/safesetid/Kconfig" source "security/lockdown/Kconfig" +source "security/xpm/Kconfig" source "security/integrity/Kconfig" diff --git a/security/Makefile b/security/Makefile index 3baf435de5411b2d2f5965a75faf327b4b79355b..3f01136d7b1fd4edd53c54ae863ae1c639e30717 100644 --- a/security/Makefile +++ b/security/Makefile @@ -13,6 +13,7 @@ subdir-$(CONFIG_SECURITY_LOADPIN) += loadpin subdir-$(CONFIG_SECURITY_SAFESETID) += safesetid subdir-$(CONFIG_SECURITY_LOCKDOWN_LSM) += lockdown subdir-$(CONFIG_BPF_LSM) += bpf +subdir-$(CONFIG_SECURITY_XPM) += xpm # always enable default capabilities obj-y += commoncap.o @@ -32,6 +33,7 @@ obj-$(CONFIG_SECURITY_SAFESETID) += safesetid/ obj-$(CONFIG_SECURITY_LOCKDOWN_LSM) += lockdown/ obj-$(CONFIG_CGROUPS) += device_cgroup.o obj-$(CONFIG_BPF_LSM) += bpf/ +obj-$(CONFIG_SECURITY_XPM) += xpm/ # Object integrity file lists subdir-$(CONFIG_INTEGRITY) += integrity diff --git a/security/security.c b/security/security.c index 8ea826ea6167e7173130ebaf7ed45af593a3df72..6c5f9a7c6b592e936d024fd62b3d2b67cca67252 100644 --- a/security/security.c +++ b/security/security.c @@ -2575,3 
+2575,10 @@ int security_perf_event_write(struct perf_event *event) return call_int_hook(perf_event_write, 0, event); } #endif /* CONFIG_PERF_EVENTS */ + +#ifdef CONFIG_SECURITY_XPM +int security_mmap_region(struct vm_area_struct *vma) +{ + return call_int_hook(mmap_region, 0, vma); +} +#endif diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h index 7271a0e05966f4a81ebdf231d6bc0d4cdff14325..1c6c59b92ab5c79c73352c7246bce957c01e60dd 100644 --- a/security/selinux/include/classmap.h +++ b/security/selinux/include/classmap.h @@ -250,6 +250,8 @@ struct security_class_mapping secclass_map[] = { { "open", "cpu", "kernel", "tracepoint", "read", "write", NULL } }, { "lockdown", { "integrity", "confidentiality", NULL } }, + { "xpm", + { "exec_no_sign", "exec_anon_mem", NULL } }, { NULL } }; diff --git a/security/xpm/Kconfig b/security/xpm/Kconfig new file mode 100755 index 0000000000000000000000000000000000000000..7a8dffd6ce3901e3673966da1305225d265a118f --- /dev/null +++ b/security/xpm/Kconfig @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# Copyright (c) 2023 Huawei Device Co., Ltd. +# +# Config for the executable permission manager +# + +menu "Executable permission manager" + +config SECURITY_XPM + bool "Enables executable permission manager feature" + default n + help + The Executable Permission Manager (XPM) controls process execution + by inserting control policy into the security hook list, such as execv, + mmap, etc. It can prevent execution of illegally signed + processes. + +config SECURITY_XPM_DEBUG + bool "Enables executable permission manager debug mode" + depends on SECURITY_XPM + default n + help + This option should only be enabled for debug test which can enable + some debug interfaces to obtain detailed information. 
+endmenu +# a blank line must exist at the end of this file \ No newline at end of file diff --git a/security/xpm/Makefile b/security/xpm/Makefile new file mode 100755 index 0000000000000000000000000000000000000000..55e72cebad332e0d05a3920abff1d50667a13d41 --- /dev/null +++ b/security/xpm/Makefile @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Copyright (c) 2023 Huawei Device Co., Ltd. +# +# Makefile for the executable permission manager module +# + +obj-$(CONFIG_SECURITY_XPM) += \ + core/xpm_module.o \ + core/xpm_misc.o \ + core/xpm_hck.o \ + core/xpm_report.o \ + validator/elf_code_segment_info.o \ + validator/exec_signature_info.o + +obj-$(CONFIG_SECURITY_XPM_DEBUG) += \ + core/xpm_debugfs.o + +ccflags-$(CONFIG_SECURITY_XPM) += \ + -I$(srctree)/security/xpm/include \ + -I$(srctree)/security/selinux/include \ + -I$(srctree)/security/selinux + +$(addprefix $(obj)/,$(obj-y)): $(obj)/flask.h + +quiet_cmd_flask = GEN $(obj)/flask.h $(obj)/av_permissions.h + cmd_flask = scripts/selinux/genheaders/genheaders $(obj)/flask.h $(obj)/av_permissions.h + +targets += flask.h av_permissions.h +$(obj)/flask.h: $(srctree)/security/selinux/include/classmap.h FORCE + $(call if_changed,flask) diff --git a/security/xpm/README_zh.md b/security/xpm/README_zh.md new file mode 100644 index 0000000000000000000000000000000000000000..8215bd610a39048fe640c1aba91fed56d8267a52 --- /dev/null +++ b/security/xpm/README_zh.md @@ -0,0 +1,78 @@ +## 背景 + +当前许多应用滥用热更新机制以绕过应用市场审核,在端侧实现包括但不限于窃取用户数据、弹窗广告、静默安装应用、挖矿等恶意行为。这类恶意行为,极大降低了用户产品使用体验,并最终导致大量投诉,引发舆情公关危机。 + +为从源头上堵住恶意应用作恶途径,提高平台的核心竞争力,OH有必要构建强制代码签名机制,具体措施包括: + +1. 应用市场对通过安全审核的代码签名 +2. 端侧强制验证代码签名,拒绝执行不合法的代码 +3.
端侧提供代码完整性保护,防止代码内容在运行时被恶意篡改 + +## XPM(eXecutable Permission Manager)模块 + +XPM模块通过扩展内核能力,为应用的二进制和abc代码提供运行时的管控,强制仅包括合法签名的代码允许分配可执行权限(二进制)/通过方舟运行时检查(abc),相关代码才能被执行(abc代码为解释执行)。XPM主要通过三个功能实现上述能力,具体内容如下: + +### 1.执行权限检查 + +通过hook特定系统调用函数,XPM在代码内存映射操作前,强制验证文件的代码签名合法性,拒绝将未包含合法签名的文件映射到可执行内存(见下图一)。此外,XPM还会通过解析文件头,获取文件的代码段信息,限制应用将通过签名验签的文件中数据内容映射到可执行内存。 + +![执行权限检查](figures/xpm_check.png) + +### 2.XPM验签地址区 + +在HAP应用被拉起时,XPM会在进程的地址空间内保留一段验签地址范围,任何尝试被映射到该地址范围的文件都会被校验代码签名,无签名或签名不合法的文件映射操作将会失败(如下图二)。此外,运行时虚拟机在解释执行字节码时通过检查相应内容是否处于该验签地址区域判断代码是否合法。 + +![验签地址区](figures/abc_check.png) + +### 3.代码完整性保护 + +为阻止应用在运行时通过映射代码到可写内存区,篡改已完成校验的代码内容,XPM基于写污点标记的代码执行权限冲突检查,为代码提供运行时的完整性保护。 + +![Integrity](figures/integrity_check.png) + +新增两个页标识,只读代码页被标记为readonly,任何被映射到写内存区域的页都会被标记为writetainted,并且整个生命周期都不会被消除,直到该页被回收。当writetainted的页被映射为只读代码(需标记为readonly),或者readonly的页被映射到可写区页(需标记为writetainted),将产生xpm标识冲突,此时将阻止页映射,禁止该访问发生。 + +## 目录 + +XPM执行权限管控的主要代码目录结构如下: + +``` +# 代码路径 /kernel/linux/common_modules/xpm +├── include # XPM 头文件 +├── src +│ ├── core # XPM 管控代码 +│ └── validator # XPM 代码签名检查和代码解析模块 +├── figures # ReadMe 内嵌图例 +├── Kconfig +├── Makefile +``` + +## XPM配置指导 + +1. XPM使能 + `CONFIG_SECURITY_XPM=y` + +2. XPM禁用 + `CONFIG_SECURITY_XPM=n` + +3. XPM调试信息 + `CONFIG_SECURITY_XPM_DEBUG=y` + +## 管控规则说明 + +针对当前不同应用的运行需求,通过selinux对相应的应用做标签化处理(exec_no_sign & execmem_anon),实施不同的管控策略,具体如下: + +1. 普通应用类:强制检查二进制可执行文件和abc字节码的合法代码签名,限制申请匿名可执行内存 +2. webview类:强制二进制可执行文件和abc字节码的合法代码签名,不限制匿名可执行内存的申请,允许拥有JIT能力 +3. 调测类:二进制可执行文件和abc字节码不包含合法代码签名,不限制匿名可执行内存的申请,允许拥有JIT能力 +4.
沙箱类:不限制二进制可执行文件和abc字节码包含合法代码签名,不限制匿名可执行内存的申请,允许拥有JIT能力 + +## 相关仓 + +[内核子系统](https://gitee.com/openharmony/docs/blob/master/zh-cn/readme/%E5%86%85%E6%A0%B8%E5%AD%90%E7%B3%BB%E7%BB%9F.md) + +[kernel_linux_5.10](https://gitee.com/openharmony/kernel_linux_5.10) + +[kernel_linux_config](https://gitee.com/openharmony/kernel_linux_config) + +[kernel_linux_build](https://gitee.com/openharmony/kernel_linux_build) diff --git a/security/xpm/apply_xpm.sh b/security/xpm/apply_xpm.sh new file mode 100755 index 0000000000000000000000000000000000000000..e8947a93534ad7d7b6734d7dcdb44766b8700f72 --- /dev/null +++ b/security/xpm/apply_xpm.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2022 Huawei Device Co., Ltd. +# + +set -e + +OHOS_SOURCE_ROOT=$1 +KERNEL_BUILD_ROOT=$2 +PRODUCT_NAME=$3 +KERNEL_VERSION=$4 +XPM_SOURCE_ROOT=$OHOS_SOURCE_ROOT/kernel/linux/common_modules/xpm + +function main() +{ + pushd . + + if [ ! -d "$KERNEL_BUILD_ROOT/security/xpm" ]; then + mkdir -p "$KERNEL_BUILD_ROOT/security/xpm" + fi + + cd $KERNEL_BUILD_ROOT/security/xpm + ln -s -f $(realpath --relative-to=$KERNEL_BUILD_ROOT/security/xpm/ $XPM_SOURCE_ROOT)/* ./ + + popd +} + +main diff --git a/security/xpm/core/xpm_debugfs.c b/security/xpm/core/xpm_debugfs.c new file mode 100755 index 0000000000000000000000000000000000000000..7902b5074cdea72b642ab39274ee2490e664e0f7 --- /dev/null +++ b/security/xpm/core/xpm_debugfs.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2023 Huawei Device Co., Ltd.
+ */ + +#include +#include "xpm_log.h" +#include "xpm_debugfs.h" + +extern uint8_t xpm_mode; +static struct dentry *xpm_dir; + +int xpm_debugfs_init(void) +{ + xpm_dir = debugfs_create_dir("xpm", NULL); + if (!xpm_dir) { + xpm_log_error("create xpm debugfs dir failed"); + return -EINVAL; + } + + debugfs_create_u8("xpm_mode", 0600, xpm_dir, &xpm_mode); + + return 0; +} + +void xpm_debugfs_exit(void) +{ + debugfs_remove_recursive(xpm_dir); +} diff --git a/security/xpm/core/xpm_hck.c b/security/xpm/core/xpm_hck.c new file mode 100644 index 0000000000000000000000000000000000000000..b64e18c19873ca50d7e80bcaa5baf82c905f9583 --- /dev/null +++ b/security/xpm/core/xpm_hck.c @@ -0,0 +1,343 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "avc.h" +#include "objsec.h" +#include "xpm_hck.h" +#include "xpm_log.h" +#include "xpm_report.h" +#include "exec_signature_info.h" + +uint8_t xpm_mode = XPM_PERMISSIVE_MODE; + +static int xpm_value(int value) +{ + return (xpm_mode == XPM_PERMISSIVE_MODE ? 
0 : value); +} + +static bool xpm_is_anonymous_vma(struct vm_area_struct *vma) +{ + return vma_is_anonymous(vma) || vma_is_shmem(vma); +} + +static int xpm_avc_has_perm(u16 tclass, u32 requested) +{ + struct av_decision avd; + u32 sid = current_sid(); + int rc, rc2; + + rc = avc_has_perm_noaudit(&selinux_state, sid, sid, tclass, requested, + AVC_STRICT, &avd); + rc2 = avc_audit(&selinux_state, sid, sid, tclass, requested, &avd, rc, + NULL, AVC_STRICT); + if (rc2) + return rc2; + + return rc; +} + +static int xpm_validate_signature(struct vm_area_struct *vma, + struct exec_file_signature_info *info) +{ + if (IS_ERR_OR_NULL(info)) + return xpm_avc_has_perm(SECCLASS_XPM, XPM__EXEC_NO_SIGN); + + return 0; +} + +static int xpm_check_code_segment(bool is_exec, struct vm_area_struct *vma, + struct exec_file_signature_info *info) +{ + int i; + unsigned long vm_addr_start, vm_addr_end; + unsigned long seg_addr_start, seg_addr_end; + struct exec_segment_info *segments = info->code_segments; + + if (!is_exec) + return 0; + + if (!segments) { + xpm_log_error("code segments is NULL"); + return -EINVAL; + } + + vm_addr_start = vma->vm_pgoff << PAGE_SHIFT; + vm_addr_end = vm_addr_start + (vma->vm_end - vma->vm_start); + + for (i = 0; i < info->code_segment_count; i++) { + seg_addr_start = ALIGN_DOWN(segments[i].file_offset, PAGE_SIZE); + seg_addr_end = PAGE_ALIGN(segments[i].file_offset + + segments[i].size); + + if ((vm_addr_start >= seg_addr_start) && + (vm_addr_end <= seg_addr_end)) + return 0; + } + + return xpm_avc_has_perm(SECCLASS_XPM, XPM__EXEC_NO_SIGN); +} + +static int xpm_check_signature(struct vm_area_struct *vma, unsigned long prot) +{ + int ret; + struct exec_file_signature_info *info = NULL; + bool is_exec = !xpm_is_anonymous_vma(vma) && (prot & PROT_EXEC); + + /* vma is non-executable or mmap in xpm region just return */ + if (!((vma->vm_flags & VM_XPM) || is_exec)) + return 0; + + /* validate signature when vma is mmap in xpm region or executable */ + ret = 
get_exec_file_signature_info(vma->vm_file, is_exec, &info); + if (ret) { + xpm_log_error("xpm get executable file signature info failed"); + report_file_event(TYPE_FORMAT_UNDEF, vma->vm_file); + return ret; + } + + ret = xpm_validate_signature(vma, info); + if (ret) { + xpm_log_error("xpm validate signature info failed"); + report_mmap_event(TYPE_SIGN_INVALID, vma, is_exec, prot); + goto exit; + } + + ret = xpm_check_code_segment(is_exec, vma, info); + if (ret) { + xpm_log_error("xpm check executable vma mmap code segment failed"); + report_mmap_event(TYPE_DATA_MMAP_CODE, vma, is_exec, prot); + goto exit; + } +exit: + put_exec_file_signature_info(info); + return ret; +} + +static int xpm_check_prot(struct vm_area_struct *vma, unsigned long prot) +{ + int ret; + bool is_anon = xpm_is_anonymous_vma(vma); + + if ((vma->vm_flags & VM_XPM) && (is_anon || (prot & PROT_WRITE) || + (prot & PROT_EXEC))) { + xpm_log_error("xpm region mmap not allow anonymous/exec/write permission"); + return -EPERM; + } + + /* anonymous executable permission need controled by selinux */ + if (is_anon && (prot & PROT_EXEC)) { + ret = xpm_avc_has_perm(SECCLASS_XPM, XPM__EXEC_ANON_MEM); + if (ret) { + xpm_log_error("anonymous mmap not allow exec permission"); + report_mmap_event(TYPE_ANON_EXEC, vma, TYPE_ANON, prot); + return -EPERM; + } + } + + if (!is_anon && (prot & PROT_WRITE) && (prot & PROT_EXEC)) { + xpm_log_error("file mmap not allow write & exec permission"); + return -EPERM; + } + + return 0; +} + +static int xpm_common_check(struct vm_area_struct *vma, unsigned long prot) +{ + int ret; + + do { + ret = xpm_check_prot(vma, prot); + if (ret) + break; + + ret = xpm_check_signature(vma, prot); + } while (0); + + return xpm_value(ret); +} + +static int xpm_mmap_check(struct vm_area_struct *vma) +{ + return xpm_common_check(vma, vma->vm_flags); +} + +static int xpm_mprotect_check(struct vm_area_struct *vma, + unsigned long reqprot, unsigned long prot) +{ + (void)reqprot; + + return 
xpm_common_check(vma, prot); +} + +void xpm_delete_cache_node(struct inode *file_node) +{ + delete_exec_file_signature_info(file_node); +} + +static void xpm_region_outer(unsigned long addr_start, unsigned long addr_end, + unsigned long flags, bool *ret) +{ + struct mm_struct *mm = current->mm; + + if (!mm) + return; + + /* Already in xpm region, just return without judge */ + if (flags & VM_UNMAPPED_AREA_XPM) + return; + + *ret = ((addr_start >= mm->xpm_region.addr_end) || + (addr_end <= mm->xpm_region.addr_start)); +} + +void xpm_get_unmapped_area(unsigned long addr, unsigned long len, + unsigned long map_flags, unsigned long unmapped_flags, + unsigned long *ret) +{ + struct vm_unmapped_area_info info; + struct mm_struct *mm = current->mm; + + if (!mm) + return; + + if ((map_flags & MAP_FIXED) && !(addr >= mm->xpm_region.addr_end || + addr + len <= mm->xpm_region.addr_start)) { + xpm_log_error("xpm region not allow mmap with MAP_FIXED"); + *ret = -EFAULT; + return; + } + + if (map_flags & MAP_XPM) { + if (addr) { + xpm_log_error("xpm region not allow specify addr"); + *ret = -EPERM; + return; + } + + info.flags = VM_UNMAPPED_AREA_XPM | unmapped_flags; + info.length = len; + info.low_limit = mm->xpm_region.addr_start; + info.high_limit = mm->xpm_region.addr_end; + info.align_mask = 0; + info.align_offset = 0; + + *ret = vm_unmapped_area(&info); + } +} + +/* + * A xpm readonly region is an area where any page mapped + * will be marked with XPMReadonly. + * Return 1 if a region is readonly, otherwise, return 0. + */ +static bool is_xpm_readonly_region(struct vm_area_struct *vma) +{ + /* 1. xpm region */ + if (vma->vm_flags & VM_XPM) + return true; + + /* 2. 
!anonymous && executable */ + if (!xpm_is_anonymous_vma(vma) && (vma->vm_flags & VM_EXEC)) + return true; + + return false; +} + +void xpm_integrity_check(struct vm_area_struct *vma, unsigned int vflags, + unsigned long addr, struct page *page, vm_fault_t *ret) +{ + if (!page) + return; + + /* integrity violation: write a readonly page */ + if ((vflags & FAULT_FLAG_WRITE) && (vma->vm_flags & VM_WRITE) && + PageXPMReadonly(page)) { + report_integrity_event(TYPE_INTEGRITY_RO, vma, page); + *ret = xpm_value(VM_FAULT_SIGSEGV); + return; + } + + /* integrity violation: execute a writetained page */ + if (PageXPMWritetainted(page) && is_xpm_readonly_region(vma)) { + report_integrity_event(TYPE_INTEGRITY_WT, vma, page); + *ret = xpm_value(VM_FAULT_SIGSEGV); + return; + } +} + +void xpm_integrity_update(struct vm_area_struct *vma, unsigned int vflags, + struct page *page) +{ + /* set writetainted only if a real write occurred */ + if ((vflags & FAULT_FLAG_WRITE) && (vma->vm_flags & VM_WRITE) && + !PageXPMWritetainted(page)) { + SetPageXPMWritetainted(page); + return; + } + + /* set xpm readonly flag */ + if (is_xpm_readonly_region(vma) && !PageXPMReadonly(page)) + SetPageXPMReadonly(page); +} + +void xpm_integrity_validate(struct vm_area_struct *vma, unsigned int vflags, + unsigned long addr, struct page *page, vm_fault_t *ret) +{ + if (!page) + return; + + xpm_integrity_check(vma, vflags, addr, page, ret); + if (!*ret) + xpm_integrity_update(vma, vflags, page); +} + +/* + * check the integrity of these two pages, return true if equal, + * otherwise false + */ +void xpm_integrity_equal(struct page *page, struct page *kpage, bool *ret) +{ + if (!page || !kpage) + return; + + *ret = !((PageXPMWritetainted(page) != PageXPMWritetainted(kpage)) || + (PageXPMReadonly(page) != PageXPMReadonly(kpage))); +} + +static struct security_hook_list xpm_hooks[] __lsm_ro_after_init = { + LSM_HOOK_INIT(mmap_region, xpm_mmap_check), + LSM_HOOK_INIT(file_mprotect, xpm_mprotect_check), +}; + 
+void xpm_register_xpm_hooks(void) +{ + security_add_hooks(xpm_hooks, ARRAY_SIZE(xpm_hooks), "xpm"); +} + +void xpm_register_hck_hooks(void) +{ + REGISTER_HCK_LITE_HOOK(xpm_delete_cache_node_lhck, + xpm_delete_cache_node); + + REGISTER_HCK_LITE_HOOK(xpm_region_outer_lhck, xpm_region_outer); + REGISTER_HCK_LITE_HOOK(xpm_get_unmapped_area_lhck, + xpm_get_unmapped_area); + + /* xpm integrity*/ + REGISTER_HCK_LITE_HOOK(xpm_integrity_equal_lhck, xpm_integrity_equal); + REGISTER_HCK_LITE_HOOK(xpm_integrity_check_lhck, xpm_integrity_check); + REGISTER_HCK_LITE_HOOK(xpm_integrity_update_lhck, xpm_integrity_update); + REGISTER_HCK_LITE_HOOK(xpm_integrity_validate_lhck, + xpm_integrity_validate); +} diff --git a/security/xpm/core/xpm_misc.c b/security/xpm/core/xpm_misc.c new file mode 100755 index 0000000000000000000000000000000000000000..73bf05eb6e2a07669367e4ccfbe363fbd5b85c75 --- /dev/null +++ b/security/xpm/core/xpm_misc.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
+ */ + +#include "xpm_misc.h" + +#include +#include +#include +#include +#include +#include +#include +#include "xpm_log.h" +#include "xpm_report.h" + +#define XPM_SET_REGION _IOW('x', 0x01, struct xpm_region_info) + +static int xpm_set_region(unsigned long addr_base, unsigned long length) +{ + int ret = 0; + unsigned long addr; + struct mm_struct *mm = current->mm; + + if (!mm) + return -EINVAL; + + if (mmap_write_lock_killable(mm)) + return -EINTR; + + if ((mm->xpm_region.addr_start != 0) || + (mm->xpm_region.addr_end != 0)) { + xpm_log_info("xpm region has been set"); + goto exit; + } + + addr = get_unmapped_area(NULL, addr_base, length, 0, 0); + if (IS_ERR_VALUE(addr) || (ULONG_MAX - addr_base < length)) { + xpm_log_error("xpm get unmmaped area failed"); + ret = -EINVAL; + goto exit; + } + + mm->xpm_region.addr_start = addr; + mm->xpm_region.addr_end = addr + length; +exit: + mmap_write_unlock(mm); + return ret; +} + +static long xpm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret; + struct xpm_region_info info = {0}; + + if (unlikely(copy_from_user(&info, (void __user *)(uintptr_t)arg, + sizeof(struct xpm_region_info)))) + return -EFAULT; + + switch (cmd) { + case XPM_SET_REGION: + ret = xpm_set_region(info.addr_base, info.length); + break; + default: + xpm_log_error("xpm ioctl cmd error, cmd = %d", cmd); + ret = -EINVAL; + break; + } + + return ret; +} + +#ifdef CONFIG_COMPAT +static long xpm_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return xpm_ioctl(file, cmd, (uintptr_t)compat_ptr(arg)); +} +#endif + +static int xpm_open(struct inode *inode, struct file *file) +{ + return 0; +} + +static int xpm_release(struct inode *inode, struct file *file) +{ + return 0; +} + +static const struct file_operations xpm_fops = { + .owner = THIS_MODULE, + .open = xpm_open, + .release = xpm_release, + .unlocked_ioctl = xpm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = xpm_compat_ioctl, +#endif +}; + +static struct 
miscdevice xpm_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "xpm", + .fops = &xpm_fops, +}; + +int xpm_register_misc_device(void) +{ + return misc_register(&xpm_misc); +} + +void xpm_deregister_misc_device(void) +{ + misc_deregister(&xpm_misc); +} diff --git a/security/xpm/core/xpm_module.c b/security/xpm/core/xpm_module.c new file mode 100755 index 0000000000000000000000000000000000000000..498932c2dc29f2b73f749b62f5ab1094b4ffe32e --- /dev/null +++ b/security/xpm/core/xpm_module.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + */ + +#include +#include +#include +#include +#include "xpm_log.h" +#include "xpm_hck.h" +#include "xpm_misc.h" +#include "xpm_report.h" +#include "xpm_debugfs.h" + +static int __init xpm_module_init(void) +{ + int ret; + + ret = xpm_register_misc_device(); + if (ret) { + xpm_log_error("xpm register misc device failed, ret = %d", ret); + report_init_event(TYPE_DEVICEFS_UNINIT); + return ret; + } + + ret = xpm_debugfs_init(); + if (ret) { + xpm_log_error("xpm init debugfs failed, ret = %d", ret); + xpm_deregister_misc_device(); + report_init_event(TYPE_DEBUGFS_UNINIT); + return ret; + } + + xpm_register_xpm_hooks(); + xpm_register_hck_hooks(); + + xpm_log_info("xpm module init success"); + return 0; +} + +static void __exit xpm_module_exit(void) +{ + xpm_deregister_misc_device(); + xpm_debugfs_exit(); + xpm_log_info("xpm module exit success"); +} + +module_init(xpm_module_init); +module_exit(xpm_module_exit); +MODULE_LICENSE("GPL"); diff --git a/security/xpm/core/xpm_report.c b/security/xpm/core/xpm_report.c new file mode 100644 index 0000000000000000000000000000000000000000..c1a1432ad55f71ef3041f25a501ca239d59971b6 --- /dev/null +++ b/security/xpm/core/xpm_report.c @@ -0,0 +1,269 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_HW_KERNEL_SG +#include +#endif +#include "xpm_log.h" +#include "xpm_report.h" + +#ifndef CONFIG_KERNEL_SG +typedef struct { + unsigned long event_id; + unsigned int version; + unsigned int content_len; + char content[0]; +} event_info; + +unsigned int report_security_info(const event_info *event) +{ + xpm_log_info("%d: %s", event->event_id, event->content); + return 0; +} +#endif + +static char *xpm_get_filename(struct xpm_event_param *param, char *buf, int len) +{ + char *filename = NULL; + struct file *file = NULL; + + if (param->file) + file = param->file; + else if (param->vma && param->vma->vm_file) + file = param->vma->vm_file; + else + return NULL; + + filename = d_absolute_path(&file->f_path, buf, len); + if (IS_ERR(filename)) { + xpm_log_error("xpm get absolute path failed"); + return NULL; + } + + return filename; +} + +static int set_init_content(struct xpm_event_param *param, + uint8_t *content, uint32_t content_len) +{ + int len; + + len = snprintf(content, content_len, + "{ "JSTR_PAIR(event_type, %s)", "JVAL_PAIR(timestamp, %llu)" }", + param->event_type, param->timestamp); + + if (len < 0 || len > content_len) { + xpm_log_error("snprintf init content failed"); + return -EINVAL; + } + + return 0; +} + +#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE) +const static char *code_type[] = { + [TYPE_ABC] = "ABC", + [TYPE_ELF] = "ELF", + [TYPE_ANON] = "ANON" +}; +static int set_mmap_content(struct xpm_event_param *param, uint8_t *content, + uint32_t content_len) +{ + int len; + + if (!param->vma) { + xpm_log_error("input vma is NULL"); + return -EINVAL; + } + + len = snprintf(content, content_len, + "{ "JSTR_PAIR(event_type, %s)", "JVAL_PAIR(timestamp, %llu)", " + JVAL_PAIR(pid, %u)", "JSTR_PAIR(filename, %s)", " + JSTR_PAIR(code_type, %s)", "JVAL_PAIR(prot, %lu)"," + JVAL_PAIR(pgoff, %lu)", "JVAL_PAIR(size, %lu)" }", + param->event_type, param->timestamp, param->pid, + 
param->filename ? param->filename : "", + code_type[param->code], param->prot & PROT_MASK, + param->vma->vm_pgoff, + param->vma->vm_end - param->vma->vm_start); + + if (len < 0 || len > content_len) { + xpm_log_error("snprintf code mmap content failed"); + return -EINVAL; + } + + return 0; +} + +static int set_file_content(struct xpm_event_param *param, + uint8_t *content, uint32_t content_len) +{ + int len; + + len = snprintf(content, content_len, + "{ "JSTR_PAIR(event_type, %s)", "JVAL_PAIR(timestamp, %llu)", " + JVAL_PAIR(pid, %u)", "JSTR_PAIR(filename, %s)" }", + param->event_type, param->timestamp, param->pid, + param->filename ? param->filename : ""); + + if (len < 0 || len > content_len) { + xpm_log_error("snprintf file format content failed"); + return -EINVAL; + } + + return 0; +} + +static int set_integrity_content(struct xpm_event_param *param, + uint8_t *content, uint32_t content_len) +{ + int len; + char *page_type; + + if (!param->vma || !param->page) { + xpm_log_error("input vma or page is NULL"); + return -EINVAL; + } + + page_type = PageKsm(param->page) ? + "[ksm]" : PageAnon(param->page) ? "[anon]" : "[file]"; + + len = snprintf(content, content_len, + "{ " JSTR_PAIR(event_type, %s)", "JVAL_PAIR(timestamp, %llu)", " + JVAL_PAIR(pid, %u)","JSTR_PAIR(page_type, %s)", " + JSTR_PAIR(filename, %s)", "JVAL_PAIR(page_index, %lu)"," + JVAL_PAIR(page_prot, %lu)" }", + param->event_type, param->timestamp, param->pid, page_type, + param->filename ? 
param->filename : "", param->page->index, + param->vma->vm_page_prot.pgprot & PROT_MASK); + + if (len < 0 || len > content_len) { + xpm_log_error("snprintf init integrity failed"); + return -EINVAL; + } + + return 0; +} + +static const struct xpm_event_info xpm_event[] = { + [TYPE_DEVICEFS_UNINIT] = { "devicefs uninitialized", + EVENT_INIT, set_init_content }, + [TYPE_DEBUGFS_UNINIT] = { "debugfs uninitialized", + EVENT_INIT, set_init_content }, + [TYPE_FORMAT_UNDEF] = { "unkown file format", + EVENT_FILE, set_file_content }, + [TYPE_ANON_EXEC] = { "anon executed", + EVENT_MMAP, set_file_content }, + [TYPE_SIGN_INVALID] = { "invalid signature", + EVENT_MMAP, set_mmap_content }, + [TYPE_DATA_MMAP_CODE] = { "data mmap code", + EVENT_MMAP, set_mmap_content }, + [TYPE_INTEGRITY_RO] = { "code tampered", + EVENT_INTEGRITY, set_integrity_content }, + [TYPE_INTEGRITY_WT] = { "data executed", + EVENT_INTEGRITY, set_integrity_content }, +}; + +static int report_event_inner(xpm_event_type type, + struct xpm_event_param *param, event_info *event) +{ + int ret; + + ret = xpm_event[type].set_content(param, event->content, + MAX_CONTENT_LEN); + if (ret) { + xpm_log_error("type [%d] set content failed", type); + return ret; + } + event->content_len = strlen(event->content); + event->event_id = xpm_event[type].event_id; + event->version = XPM_EVENT_VERSION; + + ret = report_security_info(event); + if (ret) { + xpm_log_error("type [%d] report security info failed", type); + return ret; + } + + return 0; +} + +static int xpm_report_event(xpm_event_type type, struct xpm_event_param *param) +{ + int ret; + event_info *sg_event; + char *buf; + + if (!(xpm_event[type].set_content)) { + xpm_log_error("type [%d] set content func invalid", type); + return -EINVAL; + } + + sg_event = kzalloc(sizeof(event_info) + MAX_CONTENT_LEN, GFP_KERNEL); + if (!sg_event) { + xpm_log_error("alloc security guard event failed"); + return -ENOMEM; + } + + buf = __getname(); + if (!buf) { + 
xpm_log_error("alloc file name buf failed"); + kfree(sg_event); + return -ENOMEM; + } + + param->event_type = xpm_event[type].event_type; + param->filename = xpm_get_filename(param, buf, PATH_MAX); + param->timestamp = ktime_get_real_seconds(); + param->pid = current->pid; + + ret = report_event_inner(type, param, sg_event); + + __putname(buf); + kfree(sg_event); + return ret; +} + +void report_init_event(xpm_event_type type) +{ + struct xpm_event_param param = {0}; + + xpm_report_ratelimited(xpm_report_event, type, ¶m); +} + +void report_file_event(xpm_event_type type, struct file *file) +{ + struct xpm_event_param param = {0}; + + param.file = file; + xpm_report_ratelimited(xpm_report_event, type, ¶m); +} + +void report_mmap_event(xpm_event_type type, struct vm_area_struct *vma, + int code, int prot) +{ + struct xpm_event_param param = {0}; + + param.vma = vma; + param.code = code; + param.prot = prot; + xpm_report_ratelimited(xpm_report_event, type, ¶m); +} + +void report_integrity_event(xpm_event_type type, struct vm_area_struct *vma, + struct page *page) +{ + struct xpm_event_param param = {0}; + + param.vma = vma; + param.page = page; + xpm_report_ratelimited(xpm_report_event, type, ¶m); +} diff --git a/security/xpm/figures/abc_check.png b/security/xpm/figures/abc_check.png new file mode 100644 index 0000000000000000000000000000000000000000..01e2dff3871fb2a6dee309b049e9c0af1aa55fbf Binary files /dev/null and b/security/xpm/figures/abc_check.png differ diff --git a/security/xpm/figures/integrity_check.png b/security/xpm/figures/integrity_check.png new file mode 100644 index 0000000000000000000000000000000000000000..19af000e028f98e7e8b9d82f4e3387beed3dd196 Binary files /dev/null and b/security/xpm/figures/integrity_check.png differ diff --git a/security/xpm/figures/xpm_check.png b/security/xpm/figures/xpm_check.png new file mode 100644 index 0000000000000000000000000000000000000000..e515aced995bdb0d6bf0a706e995398d3ed5aa94 Binary files /dev/null and 
b/security/xpm/figures/xpm_check.png differ diff --git a/security/xpm/include/exec_signature_info.h b/security/xpm/include/exec_signature_info.h new file mode 100644 index 0000000000000000000000000000000000000000..d6545073a1091908c2e72dd3de0b9f77ec5ea5e1 --- /dev/null +++ b/security/xpm/include/exec_signature_info.h @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + */ +#ifndef _EXEC_SIGNATURE_INFO_H +#define _EXEC_SIGNATURE_INFO_H + +#include +#include +#include +#include + +struct exec_segment_info { + uintptr_t file_offset; + size_t size; +}; + +#define FILE_SIGNATURE_INVALID 0 +#define FILE_SIGNATURE_FS_VERITY 1 +#define FILE_SIGNATURE_DM_VERITY 2 +#define FILE_SIGNATURE_MASK 0x0000000F +#define FILE_SIGNATURE_DELETE 0x80000000 + +struct exec_file_signature_info { + struct rb_node rb_node; + atomic_t reference; + unsigned int type; + uintptr_t inode; + unsigned int code_segment_count; + struct exec_segment_info *code_segments; +}; + +static inline bool exec_file_signature_is_fs_verity(const struct exec_file_signature_info *signature_info) +{ + return (signature_info->type & FILE_SIGNATURE_MASK) == FILE_SIGNATURE_FS_VERITY; +} + +static inline bool exec_file_signature_is_dm_verity(const struct exec_file_signature_info *signature_info) +{ + return (signature_info->type & FILE_SIGNATURE_MASK) == FILE_SIGNATURE_DM_VERITY; +} + +static inline bool exec_file_signature_is_verity(const struct exec_file_signature_info *signature_info) +{ + return (signature_info->type & FILE_SIGNATURE_MASK) == FILE_SIGNATURE_DM_VERITY || + (signature_info->type & FILE_SIGNATURE_MASK) == FILE_SIGNATURE_FS_VERITY; +} + +static inline bool exec_file_signature_is_delete(const struct exec_file_signature_info *signature_info) +{ + return !!(signature_info->type & FILE_SIGNATURE_DELETE); +} + +int parse_elf_code_segment_info(struct file *file, struct exec_file_signature_info **code_segment_info); +int 
get_exec_file_signature_info(struct file *file, bool is_exec, struct exec_file_signature_info **info_ptr); +int put_exec_file_signature_info(struct exec_file_signature_info *exec_info); +void delete_exec_file_signature_info(struct inode *file_node); +#endif diff --git a/security/xpm/include/xpm_debugfs.h b/security/xpm/include/xpm_debugfs.h new file mode 100755 index 0000000000000000000000000000000000000000..7466f66ce23c3c654e21bbb9be255622b395c552 --- /dev/null +++ b/security/xpm/include/xpm_debugfs.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + */ + +#ifndef _XPM_DEBUGFS_H + +#ifdef CONFIG_SECURITY_XPM_DEBUG +int xpm_debugfs_init(void); +void xpm_debugfs_exit(void); + +#else +static inline int xpm_debugfs_init(void) +{ + return 0; +} + +static inline void xpm_debugfs_exit(void) +{ +} +#endif + +#endif /* _XPM_DEBUGFS_H */ diff --git a/security/xpm/include/xpm_hck.h b/security/xpm/include/xpm_hck.h new file mode 100644 index 0000000000000000000000000000000000000000..46bf62d4069e2c54062f0d018b1aa601258660aa --- /dev/null +++ b/security/xpm/include/xpm_hck.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + */ + +#ifndef _XPM_HCK_H +#define _XPM_HCK_H + +#define XPM_PERMISSIVE_MODE 0 +#define XPM_ENFORCE_MODE 1 + +void xpm_register_xpm_hooks(void); + +void xpm_register_hck_hooks(void); + +#endif /* _XPM_HCK_H */ diff --git a/security/xpm/include/xpm_log.h b/security/xpm/include/xpm_log.h new file mode 100644 index 0000000000000000000000000000000000000000..5638e750ff2e90fce65322bf6807bc9123291df5 --- /dev/null +++ b/security/xpm/include/xpm_log.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
+ */ + +#ifndef _XPM_LOG_H +#define _XPM_LOG_H + +#define XPM_CHECK_FAILED (-1024) + +#define XPM_TAG "xpm_kernel" +#define XPM_INFO_TAG "I" +#define XPM_ERROR_TAG "E" +#define XPM_DEBUG_TAG "D" + +#define xpm_log_info(fmt, args...) pr_info("[%s/%s]%s: " fmt "\n", \ + XPM_INFO_TAG, XPM_TAG, __func__, ##args) + +#define xpm_log_error(fmt, args...) pr_err("[%s/%s]%s: " fmt "\n", \ + XPM_ERROR_TAG, XPM_TAG, __func__, ##args) + +#ifdef CONFIG_SECURITY_XPM_DEBUG +#define xpm_log_debug(fmt, args...) pr_info("[%s/%s]%s: " fmt "\n", \ + XPM_DEBUG_TAG, XPM_TAG, __func__, ##args) +#else +#define xpm_log_debug(fmt, args...) no_printk(fmt, ##args) +#endif + +#endif /* _XPM_LOG_H */ diff --git a/security/xpm/include/xpm_misc.h b/security/xpm/include/xpm_misc.h new file mode 100755 index 0000000000000000000000000000000000000000..fbdf45e033bbb185414ed661009f9d04e9a5f282 --- /dev/null +++ b/security/xpm/include/xpm_misc.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + */ + +#ifndef _XPM_MISC_H +#define _XPM_MISC_H + +struct xpm_region_info { + unsigned long addr_base; + unsigned long length; +}; + +int xpm_register_misc_device(void); +void xpm_deregister_misc_device(void); + +#endif /* _XPM_MISC_H */ diff --git a/security/xpm/include/xpm_report.h b/security/xpm/include/xpm_report.h new file mode 100644 index 0000000000000000000000000000000000000000..1c1d996033569b9ba9bf8e27b1c2443f9103a7c1 --- /dev/null +++ b/security/xpm/include/xpm_report.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
+ */ + +#ifndef _XPM_REPORT_H +#define _XPM_REPORT_H + +#include +#include + + +#define CODE_TYPE_ABC "ABC" +#define CODE_TYPE_ELF "ELF" + +typedef enum { + EVENT_INIT = 1011009110, + EVENT_FILE = 1011009111, + EVENT_MMAP = 1011009112, + EVENT_INTEGRITY = 1011009113, +} xpm_event_id; + +typedef enum { + TYPE_DEVICEFS_UNINIT = 0, + TYPE_DEBUGFS_UNINIT, + TYPE_FORMAT_UNDEF, + TYPE_ANON_EXEC, + TYPE_SIGN_INVALID, + TYPE_DATA_MMAP_CODE, + TYPE_INTEGRITY_RO, + TYPE_INTEGRITY_WT, +} xpm_event_type; + +enum { + TYPE_ABC, + TYPE_ELF, + TYPE_ANON, +}; + +struct xpm_event_param { + char *event_type; + char *filename; + ktime_t timestamp; + pid_t pid; + + struct vm_area_struct *vma; + struct page *page; + struct file *file; + int code; + unsigned long prot; +}; + +struct xpm_event_info { + char *event_type; + xpm_event_id event_id; + int (*set_content)(struct xpm_event_param *param, uint8_t *content, + uint32_t content_len); +}; + +#define MAX_CONTENT_LEN 900 +#define XPM_EVENT_VERSION 0 + +#ifndef CONFIG_SECURITY_XPM_DEBUG + +#define xpm_report_ratelimited(func, fmt, ...) \ + do { \ + static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + if (__ratelimit(&_rs)) \ + func(fmt, ##__VA_ARGS__); \ + } while (0) +#else +#define xpm_report_ratelimited(func, fmt, ...) 
\ + func(fmt, ##__VA_ARGS__); + +#endif + +#define JSTR(val) "\""#val"\"" +#define JVAL_PAIR(val, format) JSTR(val) ": " #format +#define JSTR_PAIR(val, format) JSTR(val) ": " JSTR(format) + +void report_init_event(xpm_event_type type); +void report_file_event(xpm_event_type type, struct file *file); +void report_mmap_event(xpm_event_type type, struct vm_area_struct *vma, + int code, int prot); +void report_integrity_event(xpm_event_type type, struct vm_area_struct *vma, + struct page *page); + +#endif /* _XPM_REPORT_H */ diff --git a/security/xpm/validator/elf_code_segment_info.c b/security/xpm/validator/elf_code_segment_info.c new file mode 100644 index 0000000000000000000000000000000000000000..ef7bf9c63800d22c47c3cb76bbe68f14abf68726 --- /dev/null +++ b/security/xpm/validator/elf_code_segment_info.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + */ +#include +#include +#include +#include +#include "exec_signature_info.h" + +#if ELF_EXEC_PAGESIZE > PAGE_SIZE +#define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE +#else +#define ELF_MIN_ALIGN PAGE_SIZE +#endif + +struct elf_info { + struct elfhdr elf_ehdr; + uint16_t type; + uint16_t e_phnum; + size_t e_phsize; + uintptr_t e_phoff; +}; + +static int read_elf_info(struct file *file, void *buffer, size_t read_size, loff_t pos) +{ + size_t len; + + len = kernel_read(file, buffer, read_size, &pos); + if (unlikely(len != read_size)) + return -EIO; + + return 0; +} + +static uint64_t elf64_to_cpu(const struct elfhdr *ehdr, uint64_t value) +{ + if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB) + value = le64_to_cpu(value); + else if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) + value = be64_to_cpu(value); + + return value; +} + +static uint32_t elf32_to_cpu(const struct elfhdr *ehdr, uint32_t value) +{ + if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB) + value = le32_to_cpu(value); + else if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB) + value = be32_to_cpu(value); + + return value; +} + 
/* Convert a 16-bit field from the ELF file's byte order to CPU order. */
static uint16_t elf16_to_cpu(const struct elfhdr *ehdr, uint16_t value)
{
	if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
		value = le16_to_cpu(value);
	else if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
		value = be16_to_cpu(value);

	return value;
}

/* Count the executable (PF_X) segments in a 32-bit phdr table. */
static int get_elf32_code_segment_count(struct elf32_phdr *elf_phdr,
	struct elf_info *elf_info)
{
	int i;
	int count = 0;
	struct elf32_phdr *phdr_info;
	uint32_t p_flags;

	for (i = 0; i < elf_info->e_phnum; i++) {
		phdr_info = elf_phdr + i;
		p_flags = elf32_to_cpu(&elf_info->elf_ehdr, phdr_info->p_flags);
		if (!(p_flags & PF_X))
			continue;

		count++;
	}
	return count;
}

/*
 * Record the file offset/size of every executable segment of a 32-bit
 * ELF into @exec_file_info->code_segments.  The caller must have sized
 * the array using get_elf32_code_segment_count().
 * Returns 0, or -ENOEXEC when an offset/size pair wraps around.
 */
static int get_elf32_code_segment(struct elf32_phdr *elf_phdr, struct elf_info *elf_info,
	struct exec_file_signature_info *exec_file_info)
{
	int i;
	struct elf32_phdr *phdr_info;
	uint32_t p_flags;
	uint32_t p_offset;
	uint32_t p_filesz;
	uint32_t p_memsz;
	uint32_t p_addr;

	for (i = 0; i < elf_info->e_phnum; i++) {
		phdr_info = elf_phdr + i;
		p_flags = elf32_to_cpu(&elf_info->elf_ehdr, phdr_info->p_flags);
		if (!(p_flags & PF_X))
			continue;

		p_offset = elf32_to_cpu(&elf_info->elf_ehdr, phdr_info->p_offset);
		p_filesz = elf32_to_cpu(&elf_info->elf_ehdr, phdr_info->p_filesz);
		p_addr = elf32_to_cpu(&elf_info->elf_ehdr, phdr_info->p_paddr);
		p_memsz = elf32_to_cpu(&elf_info->elf_ehdr, phdr_info->p_memsz);
		/* reject wrapped 32-bit ranges */
		if (p_offset + p_filesz < p_offset || p_addr + p_memsz < p_addr)
			return -ENOEXEC;

		exec_file_info->code_segments[exec_file_info->code_segment_count].file_offset = p_offset;
		exec_file_info->code_segments[exec_file_info->code_segment_count].size = p_filesz;
		exec_file_info->code_segment_count++;
	}
	return 0;
}

/* Count the executable (PF_X) segments in a 64-bit phdr table. */
static int get_elf64_code_segment_count(struct elf64_phdr *elf_phdr, struct elf_info *elf_info)
{
	int i;
	int count = 0;
	struct elf64_phdr *phdr_info;
	uint32_t p_flags;

	for (i = 0; i < elf_info->e_phnum; i++) {
		phdr_info = elf_phdr + i;
		/* p_flags is a 32-bit field even in ELF64, hence elf32_to_cpu */
		p_flags =
elf32_to_cpu(&elf_info->elf_ehdr, phdr_info->p_flags);
		if (!(p_flags & PF_X))
			continue;

		count++;
	}
	return count;
}

/*
 * Record the file offset/size of every executable segment of a 64-bit
 * ELF.  Mirrors get_elf32_code_segment(); see that helper for the
 * contract.
 */
static int get_elf64_code_segment(struct elf64_phdr *elf_phdr, struct elf_info *elf_info,
	struct exec_file_signature_info *exec_file_info)
{
	int i;
	struct elf64_phdr *phdr_info;
	uint32_t p_flags;
	uint64_t p_offset;
	uint64_t p_filesz;
	uint64_t p_memsz;
	uint64_t p_addr;

	for (i = 0; i < elf_info->e_phnum; i++) {
		phdr_info = elf_phdr + i;
		/* p_flags is a 32-bit field even in ELF64 */
		p_flags = elf32_to_cpu(&elf_info->elf_ehdr, phdr_info->p_flags);
		if (!(p_flags & PF_X))
			continue;

		p_offset = elf64_to_cpu(&elf_info->elf_ehdr, phdr_info->p_offset);
		p_filesz = elf64_to_cpu(&elf_info->elf_ehdr, phdr_info->p_filesz);
		p_addr = elf64_to_cpu(&elf_info->elf_ehdr, phdr_info->p_paddr);
		p_memsz = elf64_to_cpu(&elf_info->elf_ehdr, phdr_info->p_memsz);
		/* reject wrapped 64-bit ranges */
		if (p_offset + p_filesz < p_offset || p_addr + p_memsz < p_addr)
			return -ENOEXEC;

		exec_file_info->code_segments[exec_file_info->code_segment_count].file_offset = p_offset;
		exec_file_info->code_segments[exec_file_info->code_segment_count].size = p_filesz;
		exec_file_info->code_segment_count++;
	}
	return 0;
}

/*
 * Validate the ELF identity/header of @file and fill @elf_info with the
 * class and the bounds-checked program-header table geometry.
 * Returns 0 on success, -ENOEXEC/-EIO on malformed input.
 */
static int elf_check_and_get_code_segment_offset(struct file *file, struct elf_info *elf_info)
{
	struct elf32_hdr *elf32_ehdr;
	struct elf64_hdr *elf64_ehdr;
	uint32_t e32_phoff;
	uint32_t e32_phsize;
	uint64_t e64_phoff;
	uint64_t e64_phsize;
	uint16_t type;
	uint16_t e_ehsize;
	struct elfhdr *elf_ehdr = &elf_info->elf_ehdr;
	int ret;

	ret = read_elf_info(file, (void *)elf_ehdr, sizeof(struct elfhdr), 0);
	if (ret < 0)
		return ret;

	/* magic must match, and the file must be an executable or a DSO */
	if (memcmp(elf_ehdr->e_ident, ELFMAG, SELFMAG) != 0)
		return -ENOEXEC;

	type = elf16_to_cpu(elf_ehdr, elf_ehdr->e_type);
	if (type != ET_EXEC && type != ET_DYN)
		return -ENOEXEC;

	if (elf_ehdr->e_ident[EI_CLASS] == ELFCLASS32) {
		elf_info->type = ELFCLASS32;
		elf32_ehdr = (struct elf32_hdr *)elf_ehdr;
		e_ehsize =
elf16_to_cpu(elf_ehdr, elf32_ehdr->e_ehsize);
		if (e_ehsize != sizeof(struct elf32_hdr))
			return -ENOEXEC;

		elf_info->e_phnum = elf16_to_cpu(elf_ehdr, elf32_ehdr->e_phnum);
		e32_phsize = sizeof(struct elf32_phdr) * elf_info->e_phnum;
		/* phdr table must be non-empty and fit in one aligned page */
		if (e32_phsize == 0 || e32_phsize > 65536 || e32_phsize > ELF_MIN_ALIGN)
			return -ENOEXEC;

		e32_phoff = elf32_to_cpu(elf_ehdr, elf32_ehdr->e_phoff);
		/* reject offset+size wrap-around */
		if (e32_phoff + e32_phsize < e32_phoff)
			return -ENOEXEC;

		elf_info->e_phsize = e32_phsize;
		elf_info->e_phoff = e32_phoff;
	} else if (elf_ehdr->e_ident[EI_CLASS] == ELFCLASS64) {
		elf_info->type = ELFCLASS64;
		elf64_ehdr = (struct elf64_hdr *)elf_ehdr;
		e_ehsize = elf16_to_cpu(elf_ehdr, elf64_ehdr->e_ehsize);
		if (e_ehsize != sizeof(struct elf64_hdr))
			return -ENOEXEC;

		elf_info->e_phnum = elf16_to_cpu(elf_ehdr, elf64_ehdr->e_phnum);
		e64_phsize = sizeof(struct elf64_phdr) * elf_info->e_phnum;
		if (e64_phsize == 0 || e64_phsize > 65536 || e64_phsize > ELF_MIN_ALIGN)
			return -ENOEXEC;

		e64_phoff = elf64_to_cpu(elf_ehdr, elf64_ehdr->e_phoff);
		if (e64_phoff + e64_phsize < e64_phoff)
			return -ENOEXEC;

		elf_info->e_phsize = e64_phsize;
		elf_info->e_phoff = e64_phoff;
	} else
		return -ENOEXEC;

	return 0;
}

/*
 * Count the PF_X segments in @phdr_info, allocate one
 * exec_file_signature_info with a trailing segment array, and fill it.
 * On success *@file_info owns the allocation (freed with kfree()).
 */
static int find_elf_code_segment_info(const char *phdr_info, struct elf_info *elf_info,
	struct exec_file_signature_info **file_info)
{
	int ret;
	size_t size;
	struct exec_file_signature_info *exec_file_info;
	int segment_count;

	if (elf_info->type == ELFCLASS32)
		segment_count = get_elf32_code_segment_count((struct elf32_phdr *)phdr_info, elf_info);
	else
		segment_count = get_elf64_code_segment_count((struct elf64_phdr *)phdr_info, elf_info);

	if (segment_count == 0)
		return -ENOEXEC;

	/* header plus trailing array of segment descriptors */
	size = sizeof(struct exec_file_signature_info) + segment_count * sizeof(struct exec_segment_info);
	exec_file_info = (struct exec_file_signature_info *)kzalloc(size, GFP_KERNEL);
	if (exec_file_info == NULL)
		return -ENOMEM;

	exec_file_info->code_segments = (struct exec_segment_info *)((char *)exec_file_info +
		sizeof(struct exec_file_signature_info));
	if (elf_info->type == ELFCLASS32)
		ret = get_elf32_code_segment((struct elf32_phdr *)phdr_info, elf_info, exec_file_info);
	else
		ret = get_elf64_code_segment((struct elf64_phdr *)phdr_info, elf_info, exec_file_info);

	if (ret < 0) {
		kfree(exec_file_info);
		return ret;
	}
	*file_info = exec_file_info;
	return 0;
}

/*
 * Public entry point: parse @file's ELF headers and return a freshly
 * allocated description of its executable segments in
 * *@code_segment_info.  Returns 0, or a negative errno
 * (-ENOEXEC/-EIO/-ENOMEM) on failure.
 */
int parse_elf_code_segment_info(struct file *file,
	struct exec_file_signature_info **code_segment_info)
{
	const char *phdr_info;
	struct elf_info elf_info = {0};
	int ret;

	ret = elf_check_and_get_code_segment_offset(file, &elf_info);
	if (ret < 0)
		return ret;

	/* read the whole program-header table in one go */
	phdr_info = kzalloc(elf_info.e_phsize, GFP_KERNEL);
	if (phdr_info == NULL)
		return -ENOMEM;

	ret = read_elf_info(file, (void *)phdr_info, elf_info.e_phsize, elf_info.e_phoff);
	if (ret < 0) {
		kfree(phdr_info);
		return ret;
	}

	ret = find_elf_code_segment_info(phdr_info, &elf_info, code_segment_info);
	kfree(phdr_info);
	return ret;
}
diff --git a/security/xpm/validator/exec_signature_info.c b/security/xpm/validator/exec_signature_info.c
new file mode 100644
index 0000000000000000000000000000000000000000..e616e62a029b40829e7ba06f371009a06d266e42
--- /dev/null
+++ b/security/xpm/validator/exec_signature_info.c
@@ -0,0 +1,417 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
+ */ +#include +#include +#include +#include +#include +#include +#include "exec_signature_info.h" + +#define VERITY_NODE_CACHE_LIMITS 10000 +#define VERITY_NODE_CACHE_RECYCLE_NUM 200 + +static DEFINE_RWLOCK(dm_verity_tree_lock); +static struct rb_root dm_verity_tree = RB_ROOT; +static int dm_verity_node_count; +static DEFINE_RWLOCK(fs_verity_tree_lock); +static struct rb_root fs_verity_tree = RB_ROOT; +static int fs_verity_node_count; + +#ifdef CONFIG_FS_VERITY +static bool is_fs_verity(struct file *file) +{ + struct inode *file_node; + + file_inode = file_inode(file); + if (file_node == NULL) + return false; + + if (file_node->i_verity_info == NULL) + return false; + + return true; +} +#endif + +static int check_exec_file_is_verity(struct file *file) +{ +#ifdef CONFIG_FS_VERITY + if (is_fs_verity(file)) + return FILE_SIGNATURE_FS_VERITY; +#endif + + return FILE_SIGNATURE_DM_VERITY; +} + +static struct exec_file_signature_info *rb_search_node(struct rb_root *root, uintptr_t file_inode) +{ + struct rb_node *node = root->rb_node; + struct exec_file_signature_info *file_node; + + while (node != NULL) { + file_node = rb_entry(node, struct exec_file_signature_info, rb_node); + if (file_inode < file_node->inode) { + node = file_node->rb_node.rb_left; + } else if (file_inode > file_node->inode) { + node = file_node->rb_node.rb_right; + } else { + atomic_inc(&file_node->reference); + return file_node; + } + } + return NULL; +} + +static struct exec_file_signature_info *rb_add_node(struct rb_root *root, int *node_count, + struct exec_file_signature_info *node) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct exec_file_signature_info *file; + + while (*p != NULL) { + parent = *p; + file = rb_entry(parent, struct exec_file_signature_info, rb_node); + if (node->inode < file->inode) { + p = &(*p)->rb_left; + } else if (node->inode > file->inode) { + p = &(*p)->rb_right; + } else { + atomic_inc(&file->reference); + return file; + } + } + + 
rb_link_node(&node->rb_node, parent, p); + rb_insert_color(&node->rb_node, root); + atomic_inc(&node->reference); + (*node_count)++; + return NULL; +} + +static void rb_erase_node(struct rb_root *root, int *node_count, + struct exec_file_signature_info *node) +{ + rb_erase(&node->rb_node, root); + (*node_count)--; +} + +static int find_idle_nodes(struct rb_root *root, uintptr_t *ilde_nodes, size_t count) +{ + int i = 0; + struct exec_file_signature_info *code_segment; + struct rb_node *node; + + for (node = rb_first(root); node != NULL && i < count; node = rb_next(node)) { + code_segment = rb_entry(node, struct exec_file_signature_info, rb_node); + if (atomic_read(&code_segment->reference) > 0) + continue; + + ilde_nodes[i++] = (uintptr_t)code_segment; + } + return i; +} + +static void clear_code_segment_info_cache(struct rb_root *root, int *node_count) +{ + struct exec_file_signature_info *code_segment_info; + uintptr_t *code_segments; + int i = 0; + int count = VERITY_NODE_CACHE_RECYCLE_NUM; + + code_segments = kzalloc(count * sizeof(uintptr_t), GFP_KERNEL); + if (code_segments == NULL) + return; + + count = find_idle_nodes(root, code_segments, count); + while (i < count) { + code_segment_info = (struct exec_file_signature_info *)code_segments[i]; + rb_erase_node(root, node_count, code_segment_info); + kfree(code_segment_info); + i++; + } + kfree(code_segments); +} + +static void rm_code_segment_info(void) +{ + if (dm_verity_node_count + fs_verity_node_count < VERITY_NODE_CACHE_LIMITS) + return; + + if (dm_verity_node_count > fs_verity_node_count) { + write_lock(&dm_verity_tree_lock); + clear_code_segment_info_cache(&dm_verity_tree, &dm_verity_node_count); + write_unlock(&dm_verity_tree_lock); + return; + } + + write_lock(&fs_verity_tree_lock); + clear_code_segment_info_cache(&fs_verity_tree, &fs_verity_node_count); + write_unlock(&fs_verity_tree_lock); +} + +static int get_elf_code_segment_info(struct file *file, bool is_exec, int type, + struct 
exec_file_signature_info **code_segment_info) +{ + int ret; + struct rb_root *root; + rwlock_t *verity_lock; + int *node_count; + struct inode *file_node; + struct exec_file_signature_info *new_info; + struct exec_file_signature_info *tmp_info; + + if (type == FILE_SIGNATURE_DM_VERITY) { + root = &dm_verity_tree; + verity_lock = &dm_verity_tree_lock; + node_count = &dm_verity_node_count; + } else if (type == FILE_SIGNATURE_FS_VERITY) { + verity_lock = &fs_verity_tree_lock; + root = &fs_verity_tree; + node_count = &fs_verity_node_count; + } else { + return -EINVAL; + } + + file_node = file_inode(file); + if (file_node == NULL) + return -EINVAL; + + read_lock(verity_lock); + tmp_info = rb_search_node(root, (uintptr_t)file_node); + read_unlock(verity_lock); + if (tmp_info != NULL) { + if (is_exec && tmp_info->code_segments == NULL) + goto need_parse; + + *code_segment_info = tmp_info; + return 0; + } + +need_parse: + rm_code_segment_info(); + + if (!is_exec) { + new_info = (struct exec_file_signature_info *)kzalloc(sizeof(struct exec_file_signature_info), GFP_KERNEL); + if (new_info == NULL) + return -ENOMEM; + } else { + ret = parse_elf_code_segment_info(file, &new_info); + if (ret < 0) + return ret; + } + + new_info->type = type; + new_info->inode = (uintptr_t)file_node; + RB_CLEAR_NODE(&new_info->rb_node); + if (tmp_info != NULL) { + write_lock(verity_lock); + rb_erase_node(root, node_count, tmp_info); + tmp_info->type |= FILE_SIGNATURE_DELETE; + write_unlock(verity_lock); + if (atomic_sub_return(1, &tmp_info->reference) <= 0) + kfree(tmp_info); + } + + write_lock(verity_lock); + tmp_info = rb_add_node(root, node_count, new_info); + write_unlock(verity_lock); + if (tmp_info != NULL) { + kfree(new_info); + new_info = tmp_info; + } + *code_segment_info = new_info; + return 0; +} + +int get_exec_file_signature_info(struct file *file, bool is_exec, + struct exec_file_signature_info **info_ptr) +{ + int type; + + if (file == NULL || info_ptr == NULL) + return -EINVAL; + 
	type = check_exec_file_is_verity(file);
	return get_elf_code_segment_info(file, is_exec, type, info_ptr);
}

/*
 * Drop a reference obtained from get_exec_file_signature_info().
 * Frees the node once it has been unlinked from its tree
 * (FILE_SIGNATURE_DELETE) and the last reference is gone.
 */
int put_exec_file_signature_info(struct exec_file_signature_info *exec_info)
{
	if ((exec_info == NULL) ||
		!exec_file_signature_is_verity(exec_info))
		return -EINVAL;

	if (atomic_sub_return(1, &exec_info->reference) <= 0 &&
		exec_file_signature_is_delete(exec_info))
		kfree(exec_info);
	return 0;
}

/*
 * Unlink the node keyed by @file_node from @root.  Frees it when no
 * other references remain, otherwise marks it FILE_SIGNATURE_DELETE so
 * the last put frees it.  Caller holds the tree write lock.  The return
 * value is only meaningful as a found/not-found flag -- the node may
 * already have been freed.
 */
static struct exec_file_signature_info *elf_code_segment_info_delete(struct rb_root *root,
	int *node_count, struct inode *file_node)
{
	struct exec_file_signature_info *signature_info;

	signature_info = rb_search_node(root, (uintptr_t)file_node);
	if (signature_info != NULL) {
		rb_erase_node(root, node_count, signature_info);
		/* drops the reference rb_search_node() just took */
		if (atomic_sub_return(1, &signature_info->reference) > 0)
			signature_info->type |= FILE_SIGNATURE_DELETE;
		else
			kfree(signature_info);
	}
	return signature_info;
}

/*
 * Invalidate any cached signature info for @file_node.  Tries the
 * fs-verity tree first, then dm-verity.
 */
void delete_exec_file_signature_info(struct inode *file_node)
{
	struct exec_file_signature_info *signature_info;

	if (file_node == NULL)
		return;

	write_lock(&fs_verity_tree_lock);
	signature_info = elf_code_segment_info_delete(&fs_verity_tree, &fs_verity_node_count, file_node);
	write_unlock(&fs_verity_tree_lock);
	if (signature_info != NULL)
		return;

	write_lock(&dm_verity_tree_lock);
	signature_info = elf_code_segment_info_delete(&dm_verity_tree, &dm_verity_node_count, file_node);
	write_unlock(&dm_verity_tree_lock);
}

#ifdef CONFIG_XPM_DEBUG
/* Test hook: force-remove @code_segment_info from its cache tree. */
int test_delete_elf_code_segment_info(struct exec_file_signature_info *code_segment_info)
{
	struct rb_root *root;
	rwlock_t *verity_lock;
	int *node_count;
	struct exec_file_signature_info *segment_info = NULL;

	if (code_segment_info == NULL)
		return -EINVAL;

	if (exec_file_signature_is_dm_verity(code_segment_info)) {
		root = &dm_verity_tree;
		verity_lock = &dm_verity_tree_lock;
		node_count = &dm_verity_node_count;
	} else if
(exec_file_signature_is_fs_verity(code_segment_info)) { + verity_lock = &fs_verity_tree_lock; + root = &fs_verity_tree; + node_count = &fs_verity_node_count; + } else { + return -EINVAL; + } + + write_lock(verity_lock); + segment_info = rb_search_node(root, code_segment_info->inode); + if (segment_info == NULL) { + write_unlock(verity_lock); + return -EINVAL; + } + rb_erase_node(root, node_count, code_segment_info); + write_unlock(verity_lock); + kfree(code_segment_info); + return 0; +} + +static int test_destroy_elf_code_segment_tree(struct rb_root *root, int *node_count) +{ + struct rb_node *node; + struct exec_file_signature_info *file_node; + + do { + node = rb_first(root); + if (node == NULL) + return 0; + file_node = rb_entry(node, struct exec_file_signature_info, rb_node); + if (atomic_read(&file_node->reference) > 0) + return -EPERM; + rb_erase_node(root, node_count, file_node); + } while (1); + return 0; +} + +int test_destroy_elf_code_segment_info_cache(void) +{ + int ret; + int count = 0; + + write_lock(&dm_verity_tree_lock); + count += dm_verity_node_count; + ret = test_destroy_elf_code_segment_tree(&dm_verity_tree, &dm_verity_node_count); + write_unlock(&dm_verity_tree_lock); + if (ret < 0) + return ret; + + write_lock(&fs_verity_tree_lock); + count += fs_verity_node_count; + ret = test_destroy_elf_code_segment_tree(&fs_verity_tree, &fs_verity_node_count); + write_unlock(&fs_verity_tree_lock); + if (ret < 0) + return ret; + return count; +} + +void test_rm_elf_code_segment_info_cache(void) +{ + if (dm_verity_node_count > fs_verity_node_count) { + write_lock(&dm_verity_tree_lock); + clear_code_segment_info_cache(&dm_verity_tree, &dm_verity_node_count); + write_unlock(&dm_verity_tree_lock); + return; + } + + write_lock(&fs_verity_tree_lock); + clear_code_segment_info_cache(&fs_verity_tree, &fs_verity_node_count); + write_unlock(&fs_verity_tree_lock); +} + +static size_t test_elf_code_segment_info_size(struct rb_root *root) +{ + size_t size = 0; + struct 
exec_file_signature_info *file_node; + struct rb_node *node; + + for (node = rb_first(root); node != NULL; node = rb_next(node)) { + file_node = rb_entry(node, struct exec_file_signature_info, rb_node); + size += sizeof(struct exec_file_signature_info) + + file_node->code_segment_count * sizeof(struct exec_segment_info); + } + return size; +} + +void test_get_elf_code_segment_info_cache_size(void) +{ + size_t cache_size = 0; + int count = 0; + + read_lock(&dm_verity_tree_lock); + cache_size += test_elf_code_segment_info_size(&dm_verity_tree); + count += dm_verity_node_count; + read_unlock(&dm_verity_tree_lock); + + read_lock(&fs_verity_tree_lock); + cache_size += test_elf_code_segment_info_size(&fs_verity_tree); + count += fs_verity_node_count; + read_unlock(&fs_verity_tree_lock); + + pr_info("[exec signature cache] count=%d, cache size=%d KB\n", count, cache_size / 1024); +} + +void test_print_elf_code_segment_info(const char *file_path, + const struct exec_file_signature_info *file_info) +{ + int i; + + for (i = 0; i < file_info->code_segment_count; i++) { + pr_info("%s -> offset: 0x%llx size: 0x%lx\n", + file_path, file_info->code_segments->file_offset, file_info->code_segments->size); + } +} +#endif diff --git a/security/xpm/validator/exec_signature_info.h b/security/xpm/validator/exec_signature_info.h new file mode 100644 index 0000000000000000000000000000000000000000..d6545073a1091908c2e72dd3de0b9f77ec5ea5e1 --- /dev/null +++ b/security/xpm/validator/exec_signature_info.h @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
 */
#ifndef _EXEC_SIGNATURE_INFO_H
#define _EXEC_SIGNATURE_INFO_H

/* NOTE(review): include targets were lost when the patch was captured;
 * this header needs at least linux/types.h, linux/fs.h, linux/rbtree.h
 * and linux/atomic.h -- restore before building. */
#include
#include
#include
#include

/* One executable code segment: file offset and size in bytes. */
struct exec_segment_info {
	uintptr_t file_offset;
	size_t size;
};

/* Low nibble of ->type: which verity scheme backs the file. */
#define FILE_SIGNATURE_INVALID 0
#define FILE_SIGNATURE_FS_VERITY 1
#define FILE_SIGNATURE_DM_VERITY 2
#define FILE_SIGNATURE_MASK 0x0000000F
/* High bit of ->type: node was unlinked from its cache tree; the last
 * put_exec_file_signature_info() frees it. */
#define FILE_SIGNATURE_DELETE 0x80000000

/* Cached, refcounted signature/code-segment info for one executable
 * file, keyed by inode pointer and kept in a per-scheme rbtree.  The
 * code_segments array is allocated contiguously after this struct. */
struct exec_file_signature_info {
	struct rb_node rb_node;
	atomic_t reference;
	unsigned int type;
	uintptr_t inode;
	unsigned int code_segment_count;
	struct exec_segment_info *code_segments;
};

/* Scheme/state predicates over the ->type field. */
static inline bool exec_file_signature_is_fs_verity(const struct exec_file_signature_info *signature_info)
{
	return (signature_info->type & FILE_SIGNATURE_MASK) == FILE_SIGNATURE_FS_VERITY;
}

static inline bool exec_file_signature_is_dm_verity(const struct exec_file_signature_info *signature_info)
{
	return (signature_info->type & FILE_SIGNATURE_MASK) == FILE_SIGNATURE_DM_VERITY;
}

static inline bool exec_file_signature_is_verity(const struct exec_file_signature_info *signature_info)
{
	return (signature_info->type & FILE_SIGNATURE_MASK) == FILE_SIGNATURE_DM_VERITY ||
		(signature_info->type & FILE_SIGNATURE_MASK) == FILE_SIGNATURE_FS_VERITY;
}

static inline bool exec_file_signature_is_delete(const struct exec_file_signature_info *signature_info)
{
	return !!(signature_info->type & FILE_SIGNATURE_DELETE);
}

/* See exec_signature_info.c / elf_code_segment_info.c for contracts. */
int parse_elf_code_segment_info(struct file *file, struct exec_file_signature_info **code_segment_info);
int get_exec_file_signature_info(struct file *file, bool is_exec, struct exec_file_signature_info **info_ptr);
int put_exec_file_signature_info(struct exec_file_signature_info *exec_info);
void delete_exec_file_signature_info(struct inode *file_node);
#endif