diff --git a/dynamic_jump/Makefile b/dynamic_jump/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..294ca5d20bd975b9a8170ef199e46e52adaa6685
--- /dev/null
+++ b/dynamic_jump/Makefile
@@ -0,0 +1,10 @@
+PWD := $(shell pwd)
+KERNELRELEASE := $(shell uname -r)
+MODNAME = dynamic_jump_example
+KERNEL_BUILD_PATH := /usr/src/kernels/$(KERNELRELEASE)/
+obj-m += $(MODNAME).o
+ccflags-y += -I$(PWD)/../include
+all:
+	$(MAKE) -C $(KERNEL_BUILD_PATH) M=$(PWD) modules
+clean:
+	$(MAKE) -C $(KERNEL_BUILD_PATH) M=$(PWD) clean
diff --git a/dynamic_jump/README.md b/dynamic_jump/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..50b3ebcbac8a202028faa8343ff8fa972bf609a6
--- /dev/null
+++ b/dynamic_jump/README.md
@@ -0,0 +1,72 @@
+# Dynamic jump
+## What is dynamic jump?
+In practice, we can use livepatch for Linux kernel debugging or bug fixing. However, the live-patching mechanism only works on ftraceable functions. If we want to debug or fix an un-ftraceable function, livepatch leaves us with nothing to work with.
+Therefore, dynamic jump provides a method to replace un-ftraceable functions that the live-patching mechanism cannot handle. With dynamic jump, we can not only debug an un-ftraceable function, but also replace it.
+
+## Principle
+Dynamic jump mainly depends on a newly-written version of the target function. We rewrite the target function with debug information or a bugfix in it. Then we build it with the same gcc version, to make sure it is optimized under the same rules. At last, we replace the old function with the new one.
+We force a jump from the start of the old function to the new one by overwriting the instructions at the old function's entry.
+
+## How to jump?
+To see how to jump, look at the examples in this folder: `dynamic_jump_no_exported_symbol.c` and `dynamic_jump_example.c`.
+
+### dynamic_jump_no_exported_symbol.c
+This example shows how to jump when the target function is not defined with `EXPORT_SYMBOL`. On upstream Linux kernels, `kallsyms_lookup_name` is not exported either, so it cannot be used from an OOT (Out-Of-Tree) module.
+
+Here is how to jump, following the code of `dynamic_jump_no_exported_symbol.c`.
+In this case, we are going to replace the function `apply_relocate_add`, which is not suitable for livepatching because it sits on the livepatching path itself.
+
+#### Step 1
+Decide which function you are going to jump. In this case, we will jump the function `apply_relocate_add`.
+Rewrite the new function the way you want, and name it `new_##function_name`. Here, we rewrite `apply_relocate_add` as `new_apply_relocate_add`.
+
+Prepare the module init and exit functions. In this case, they are named `apply_relocate_add_init` and `apply_relocate_add_exit`.
+
+##### apply_relocate_add_init
+This is the init function of the jump code module.
+
+1. We call lookup_syms() to look up the original functions' locations.
+The macro `SYM_SET_ORIG_FUNCTION_ADDRESS(function_name, location)` sets function_name's pointer to the given location.
+In this case, the function `apply_relocate_add` is not exported, and `kallsyms_lookup_name` is not exported either, so we cannot look up the target function's location with `kallsyms_lookup_name`.
+
+How can we find the target function's location?
There are two ways I can find: (1) grep the target function in /proc/kallsyms (e.g. `grep ' apply_relocate_add' /proc/kallsyms`); (2) use crash to debug /proc/kcore with vmlinux and find the location of the target function.
+
+There is one thing you should keep in mind: before you use `SYM_SET_ORIG_FUNCTION_ADDRESS(function_name, location)` to initialize the target function's location, you must declare the `orig_` function pointer first, or gcc will raise an error.
+
+#### Step 2
+Find the functions you need.
+In the example, you can see many functions written out in full, such as `do_reloc`, `reloc_insn_imm`, etc. This is because these functions are defined as `static` functions in the kernel; a static function cannot be referenced from outside its source file, and we cannot link against it. To use these functions, we simply copy their implementations from the kernel source file, without any changes.
+
+However, there are still other functions you need to call in the process, and some of them can be found in the running system, for example `aarch64_insn_patch_text` and `aarch64_insn_encode_immediate`. We just need to declare them with the `orig_##function_name` prefix; `dynamic_jump.h` handles the rest automatically.
+
+Use `orig_##function_name` to declare the original function, then call the `SYM_SET_ORIG_FUNCTION_ADDRESS` macro to initialize it, and you will be free to use it in your module.
+
+What's more, you need to use `DEFINE_ORIG_FUNC(rt, function_name, parameter_numbers, ...)` to define your target function.
+
+#### Step 3
+Jump to the target function.
+
+Now you are ready to jump to the target.
+Put `DEFINE_SYSFS_BASE();` before the original function definition, to get the sysfs interface ready for your patch module.
+Then use `JUMP_INIT(function_name)` to do the initialization before we jump the function `function_name`.
+
+##### try_enable_patch
+This function should handle any exceptions that occur while jumping to the new function. The code here is simplified and does not worry much about safety.
+`JUMP_INSTALL(function_name)` executes the jump to `new_function_name`. What we do here is rewrite the entry instructions manually, forcing a jump to the new function.
+
+After try_enable_patch, the jump is finished.
+
+##### JUMP_REMOVE
+This macro is called from `apply_relocate_add_exit` while the module is being unloaded.
+Here the macro `JUMP_REMOVE` removes the jump code and restores everything.
+
+This is the process of jumping manually, without the help of livepatching.
+
+### dynamic_jump_example.c
+This example shows how to jump an un-ftraceable function when `kallsyms_lookup_name` is exported and can be called by an OOT module.
+
+`I will provide it in the future...`
+
+## Effect
+The un-ftraceable function can be replaced, so the bug can be fixed or debug information can be printed.
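+
+## Minimal usage sketch
+Putting the pieces together, a minimal patch module looks roughly like the sketch below. This is only a sketch of the ARM64 path: `foo` is a hypothetical non-ftraceable target with signature `int foo(int)`, and both addresses are placeholders that you must look up on your own system (from /proc/kallsyms or crash). Error handling is omitted.
+
+```c
+#include "dynamic_jump.h"
+
+/* needed by JUMP_INSTALL/JUMP_REMOVE on ARM64; must be pointed at the
+ * kernel's aarch64_insn_patch_text before installing the jump */
+int (*orig_aarch64_insn_patch_text)(void **addr, u32 *insn, int cnt);
+
+DEFINE_SYSFS_BASE();
+DEFINE_ORIG_FUNC(int, foo, 1, int, x);
+
+/* The replacement. Note: it must not call orig_foo(), because the
+ * entry of the original function has been overwritten. */
+static int new_foo(int x)
+{
+	pr_info("foo(%d) intercepted\n", x);
+	return 0;
+}
+
+static int __init foo_patch_init(void)
+{
+	/* placeholder addresses -- replace with values from /proc/kallsyms */
+	SYM_SET_ORIG_FUNCTION_ADDRESS(foo, 0xffff800080000000UL);
+	SYM_SET_ORIG_FUNCTION_ADDRESS(aarch64_insn_patch_text, 0xffff800080000000UL);
+	JUMP_INIT(foo);
+	JUMP_INSTALL(foo);
+	return 0;
+}
+
+static void __exit foo_patch_exit(void)
+{
+	JUMP_REMOVE(foo);
+}
+
+module_init(foo_patch_init);
+module_exit(foo_patch_exit);
+MODULE_LICENSE("GPL v2");
+```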
diff --git a/dynamic_jump/dynamic_jump.h b/dynamic_jump/dynamic_jump.h
new file mode 100644
index 0000000000000000000000000000000000000000..078cbbd7664cdf0f43049afe4227f7ca924bb6c8
--- /dev/null
+++ b/dynamic_jump/dynamic_jump.h
@@ -0,0 +1,281 @@
+#ifndef DYNAMIC_JUMP_H
+#define DYNAMIC_JUMP_H
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kallsyms.h>
+#include <linux/kobject.h>
+#include <linux/syscalls.h>
+#include <linux/proc_fs.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+
+#include "symbol.h"
+
+#define MAX_FUNC_COUNT 16
+#define FUNC_NAME_LENGTH 255
+
+/*
+ * DEFINE_SYSFS_BASE
+ * define the base sysfs kobjects;
+ * each patched function gets a node
+ * recorded in sysfs
+ */
+#define DEFINE_SYSFS_BASE() \
+	static struct kobject *patch_root_kobj; \
+	static struct kobject *patch_funcs_kobj[MAX_FUNC_COUNT]; \
+	static int patch_funcs_kobj_cnt
+
+/*
+ * INIT_SYSFS_BASE
+ * before actually patching a function,
+ * create the root kobject node for the
+ * patch module
+ */
+#define INIT_SYSFS_BASE() do { \
+	char name[FUNC_NAME_LENGTH]; \
+	snprintf(name, FUNC_NAME_LENGTH, "manual_%s", THIS_MODULE->name); \
+	if (patch_root_kobj) { pr_info("patch root kobject already exists.\n"); break; } \
+	patch_root_kobj = kobject_create_and_add(name, kernel_kobj); \
+	if (!patch_root_kobj) return -ENOMEM; \
+	patch_funcs_kobj_cnt = 0; \
+	memset(patch_funcs_kobj, 0, sizeof(patch_funcs_kobj)); \
+} while (0)
+
+/*
+ * LOOKUP_SYM_CONV_TO_NAME
+ * look up a symbol via kallsyms_lookup_name
+ * and store it under the orig_{func_name} convention
+ */
+#define LOOKUP_SYM_CONV_TO_NAME(name, sym) do { \
+	orig_##name = (void *)kallsyms_lookup_name(sym); \
+	if (!orig_##name) { \
+		pr_err("kallsyms_lookup_name failed: %s\n", #name); \
+		return -EINVAL; \
+	} \
+} while (0)
+
+/*
+ * LOOKUP_SYMS
+ *
+ * look up a symbol by its own name
+ */
+#define LOOKUP_SYMS(name) LOOKUP_SYM_CONV_TO_NAME(name, #name)
+#define LOOKUP_SYMS_RETURN(name) LOOKUP_SYM_CONV_TO_NAME(name, #name)
+
+/*
+ * ORIG_FUNC
+ *
+ * declare the original function pointer
+ * before calling or using it in the file
+ */
+#define ORIG_FUNC(rt, name, x, ...) \
+	rt (*orig_##name)(__MAP(x, __SC_DECL, __VA_ARGS__));
+
+/*
+ * ORIG_NOINPUT_FUNC
+ *
+ * declare the original function pointer for a
+ * function with no input parameter, before
+ * calling or using it in the file
+ */
+#define ORIG_NOINPUT_FUNC(rt, name) \
+	rt (*orig_##name)(void);
+
+/*
+ * SYM_SET_ORIG_FUNCTION_ADDRESS
+ *
+ * handle the situation of patching an
+ * unexported function: force the jump by
+ * setting the function address manually.
+ */
+#define SYM_SET_ORIG_FUNCTION_ADDRESS(func_name, addr) \
+	orig_##func_name = (void *)addr
+
+#ifdef CONFIG_ARM64
+
+#define RELATIVEJUMP_SIZE 8
+
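+/*
+ * On ARM64 the patched entry becomes an eight-instruction trampoline.
+ * As a worked example (hypothetical address): if new_func lives at
+ * 0xffff800080123456, JUMP_INIT below encodes
+ *
+ *   stp  x29, x30, [sp, #-16]!
+ *   movz x29, #0x3456              // bits [15:0]  of the address
+ *   movk x29, #0x8012, lsl #16     // bits [31:16]
+ *   movk x29, #0x8000, lsl #32     // bits [47:32]
+ *   movk x29, #0xffff, lsl #48     // bits [63:48]
+ *   blr  x29
+ *   ldp  x29, x30, [sp], #16
+ *   ret
+ *
+ * i.e. the first eight instructions of the old function are replaced
+ * by an absolute call to the new one, and the new function's return
+ * value is passed straight back to the original caller.
+ */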
+#define JUMP_INIT(func) do { \
+	unsigned long long addr = (unsigned long long)&new_##func; \
+	unsigned long long orig_addr = (unsigned long long)orig_##func; \
+	int size = 0; \
+	INIT_SYSFS_BASE(); \
+	/* stp x29, x30, [sp,#-16]! */ \
+	e9_##func[0] = 0xa9bf7bfdu; \
+	/* mov x29, #0x0 */ \
+	e9_##func[1] = 0xd280001du | ((addr & 0xffff) << 5); \
+	/* movk x29, #0x0, lsl #16 */ \
+	e9_##func[2] = 0xf2a0001du | (((addr & 0xffff0000) >> 16) << 5); \
+	/* movk x29, #0x0, lsl #32 */ \
+	e9_##func[3] = 0xf2c0001du | (((addr & 0xffff00000000) >> 32) << 5); \
+	/* movk x29, #0x0, lsl #48 */ \
+	e9_##func[4] = 0xf2e0001du | (((addr & 0xffff000000000000) >> 48) << 5); \
+	/* blr x29 */ \
+	e9_##func[5] = 0xd63f03a0u; \
+	/* ldp x29, x30, [sp],#16 */ \
+	e9_##func[6] = 0xa8c17bfdu; \
+	/* ret */ \
+	e9_##func[7] = 0xd65f03c0u; \
+	/* record the address of every instruction slot to be patched */ \
+	for (; size < RELATIVEJUMP_SIZE; size++) { \
+		addr_##func[size] = (void *)(orig_addr + size * 4); \
+	} \
+} while (0)
+
+#define JUMP_INSTALL(func) do { \
+	memcpy(inst_##func, orig_##func, sizeof(inst_##func)); \
+	orig_aarch64_insn_patch_text((void **)addr_##func, (u32 *)e9_##func, RELATIVEJUMP_SIZE); \
+	patch_funcs_kobj_##func = kobject_create_and_add(#func, patch_root_kobj); \
+	if (!patch_funcs_kobj_##func) { \
+		int i = 0; \
+		for (i = 0; i < patch_funcs_kobj_cnt; i++) { \
+			kobject_put(patch_funcs_kobj[i]); \
+		} \
+		kobject_put(patch_root_kobj); \
+		printk(KERN_ERR "create patch_funcs_kobj_" #func " failed\n"); \
+		break; \
+	} \
+	patch_funcs_kobj[patch_funcs_kobj_cnt++] = patch_funcs_kobj_##func; \
+} while (0)
+
+#define JUMP_REMOVE_SYM(func) do { \
+	if (patch_funcs_kobj_cnt > 0) \
+		patch_funcs_kobj[--patch_funcs_kobj_cnt] = NULL; \
+	if (0 == patch_funcs_kobj_cnt) \
+		kobject_put(patch_root_kobj); \
+} while (0)
+
+#define JUMP_REMOVE(func) do { \
+	orig_aarch64_insn_patch_text((void **)addr_##func, (u32 *)inst_##func, RELATIVEJUMP_SIZE); \
+	kobject_put(patch_funcs_kobj_##func); \
+	patch_funcs_kobj[--patch_funcs_kobj_cnt] = NULL; \
+	if (0 == patch_funcs_kobj_cnt) \
+		kobject_put(patch_root_kobj); \
+} while (0)
+
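+/*
+ * What DEFINE_ORIG_FUNC generates, illustrated for the example module
+ * (a sketch of the preprocessor output, slightly reformatted):
+ *
+ *   DEFINE_ORIG_FUNC(int, apply_relocate_add, 5, Elf64_Shdr *, sechdrs, ...)
+ *
+ * expands to the patch bookkeeping for one function:
+ *
+ *   static unsigned int e9_apply_relocate_add[8];    // trampoline code
+ *   static void *addr_apply_relocate_add[8];         // patched slots
+ *   static unsigned int inst_apply_relocate_add[8];  // saved instructions
+ *   static int new_apply_relocate_add(Elf64_Shdr *sechdrs, ...);
+ *   static int (*orig_apply_relocate_add)(Elf64_Shdr *sechdrs, ...);
+ *   static struct kobject *patch_funcs_kobj_apply_relocate_add;
+ */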
+#define DEFINE_ORIG_FUNC(rt, name, x, ...) \
+	static unsigned int e9_##name[RELATIVEJUMP_SIZE]; \
+	static void *addr_##name[RELATIVEJUMP_SIZE]; \
+	static unsigned int inst_##name[RELATIVEJUMP_SIZE]; \
+	static rt new_##name(__MAP(x, __SC_DECL, __VA_ARGS__)); \
+	static rt (*orig_##name)(__MAP(x, __SC_DECL, __VA_ARGS__)); \
+	static struct kobject *patch_funcs_kobj_##name
+
+#else
+
+/* X86_64 or other arch */
+#define RELATIVEJUMP_SIZE 5
+
+#define JUMP_INIT(func) do { \
+	INIT_SYSFS_BASE(); \
+	/* jmp rel32 (opcode 0xe9) to the new function */ \
+	e9_##func[0] = 0xe9; \
+	(*(int *)(&e9_##func[1])) = (long)new_##func - (long)orig_##func - RELATIVEJUMP_SIZE; \
+} while (0)
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)
+extern void *(*orig_text_poke_smp)(void *, const void *, size_t);
+
+#define JUMP_INSTALL(func) do { \
+	memcpy(inst_##func, orig_##func, RELATIVEJUMP_SIZE); \
+	orig_text_poke_smp(orig_##func, e9_##func, RELATIVEJUMP_SIZE); \
+	patch_funcs_kobj_##func = kobject_create_and_add(#func, patch_root_kobj); \
+	if (!patch_funcs_kobj_##func) { \
+		int i = 0; \
+		for (i = 0; i < patch_funcs_kobj_cnt; i++) { \
+			kobject_put(patch_funcs_kobj[i]); \
+		} \
+		kobject_put(patch_root_kobj); \
+		printk(KERN_ERR "create patch_funcs_kobj_" #func " failed\n"); \
+		break; \
+	} \
+	patch_funcs_kobj[patch_funcs_kobj_cnt++] = patch_funcs_kobj_##func; \
+} while (0)
+
+#define JUMP_REMOVE(func) do { \
+	orig_text_poke_smp(orig_##func, inst_##func, RELATIVEJUMP_SIZE); \
+	kobject_put(patch_funcs_kobj_##func); \
+	patch_funcs_kobj[--patch_funcs_kobj_cnt] = NULL; \
+	if (0 == patch_funcs_kobj_cnt) \
+		kobject_put(patch_root_kobj); \
+} while (0)
+
+#else
+/* LINUX_VERSION_CODE >= 3.12 */
+extern void *(*orig_text_poke_bp)(void *addr, const void *opcode, size_t len, void *handler);
+
+#define JUMP_INSTALL(func) do { \
+	memcpy(inst_##func, orig_##func, RELATIVEJUMP_SIZE); \
+	orig_text_poke_bp(orig_##func, e9_##func, \
+			  RELATIVEJUMP_SIZE, new_##func); \
+	patch_funcs_kobj_##func = kobject_create_and_add(#func, patch_root_kobj); \
+	if (!patch_funcs_kobj_##func) { \
+		int i = 0; \
+		for (i = 0; i < patch_funcs_kobj_cnt; i++) { \
+			kobject_put(patch_funcs_kobj[i]); \
+		} \
+		kobject_put(patch_root_kobj); \
+		printk(KERN_ERR "create patch_funcs_kobj_" #func " failed\n"); \
+		break; \
+	} \
+	patch_funcs_kobj[patch_funcs_kobj_cnt++] = patch_funcs_kobj_##func; \
+} while (0)
+
+#define JUMP_REMOVE(func) do { \
+	orig_text_poke_bp(orig_##func, inst_##func, RELATIVEJUMP_SIZE, new_##func); \
+	kobject_put(patch_funcs_kobj_##func); \
+	patch_funcs_kobj[--patch_funcs_kobj_cnt] = NULL; \
+	if (0 == patch_funcs_kobj_cnt) \
+		kobject_put(patch_root_kobj); \
+} while (0)
+
+#endif
+
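+/*
+ * Worked example for this path (hypothetical addresses): with the
+ * original function at 0xffffffff81000100 and the new one at
+ * 0xffffffff81000200, JUMP_INIT above builds the 5-byte sequence
+ *
+ *   e9 fb 00 00 00        jmp +0xfb
+ *
+ * because rel32 = new - orig - 5 = 0x100 - 5 = 0xfb, counted from the
+ * end of the jmp instruction.
+ */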
+#define DEFINE_ORIG_FUNC(rt, name, x, ...) \
+	unsigned char e9_##name[RELATIVEJUMP_SIZE]; \
+	unsigned char inst_##name[RELATIVEJUMP_SIZE]; \
+	extern rt new_##name(__MAP(x, __SC_DECL, __VA_ARGS__)); \
+	rt (*orig_##name)(__MAP(x, __SC_DECL, __VA_ARGS__)); \
+	static struct kobject *patch_funcs_kobj_##name
+
+#endif /* CONFIG_ARM64 */
+
+#define DEFINE_ORIG_FUNC0(rt, name) \
+	unsigned char e9_##name[RELATIVEJUMP_SIZE]; \
+	unsigned char inst_##name[RELATIVEJUMP_SIZE]; \
+	extern rt new_##name(void); \
+	rt (*orig_##name)(void); \
+	static struct kobject *patch_funcs_kobj_##name
+
+#define DEFINE_ORIG_NOINPUT_FUNC(rt, name) \
+	unsigned char e9_##name[RELATIVEJUMP_SIZE]; \
+	unsigned char inst_##name[RELATIVEJUMP_SIZE]; \
+	extern rt new_##name(void); \
+	rt (*orig_##name)(void); \
+	static struct kobject *patch_funcs_kobj_##name
+
+#define BACKTRACE_DEPTH 50
+
+#define TRACE_DUMP_STACK() \
+	do { \
+		unsigned long trace_buf[BACKTRACE_DEPTH]; \
+		\
+		ali_print_stack_trace(current, trace_buf); \
+	} while (0)
+
+struct proc_dir_entry;
+/* create /proc/<name> only if it does not already exist */
+static inline struct proc_dir_entry *ali_proc_mkdir(const char *name,
+		struct proc_dir_entry *parent)
+{
+	struct proc_dir_entry *ret = NULL;
+	struct file *file;
+	char full_name[FUNC_NAME_LENGTH];
+
+	snprintf(full_name, FUNC_NAME_LENGTH, "/proc/%s", name);
+	file = filp_open(full_name, O_RDONLY, 0);
+	if (IS_ERR(file)) {
+		ret = proc_mkdir(name, parent);
+	} else {
+		fput(file);
+	}
+
+	return ret;
+}
+#endif /* DYNAMIC_JUMP_H */
diff --git a/dynamic_jump/dynamic_jump_example.c b/dynamic_jump/dynamic_jump_example.c
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/dynamic_jump/dynamic_jump_no_exported_symbol.c b/dynamic_jump/dynamic_jump_no_exported_symbol.c
new file mode 100644
index 0000000000000000000000000000000000000000..d29d82d6d519a107a0a8bb1bdd65eaad5e051c59
--- /dev/null
+++ b/dynamic_jump/dynamic_jump_no_exported_symbol.c
@@ -0,0 +1,536 @@
+/*
+ * Dynamic jump example: replacing the unexported apply_relocate_add
+ *
+ * Copyright (C) 2023 Alibaba Ltd.
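+ *
+ * The relocation helpers below (do_reloc, reloc_insn_imm, reloc_insn_adrp,
+ * reloc_insn_movw, reloc_data) are copied unchanged from the kernel's
+ * arch/arm64/kernel/module.c, because static kernel functions cannot be
+ * referenced from a module (see Step 2 of the README).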
+ * + * Author: yongdezhang + * + * License terms: GNU General Public License (GPL) version 2 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "hotfix_util.h" + +enum aarch64_reloc_op { + RELOC_OP_NONE, + RELOC_OP_ABS, + RELOC_OP_PREL, + RELOC_OP_PAGE, +}; + +enum aarch64_insn_movw_imm_type { + AARCH64_INSN_IMM_MOVNZ, + AARCH64_INSN_IMM_MOVKZ, +}; + +DEFINE_SYSFS_BASE(); +__maybe_unused static atomic_t ali_nr_running = ATOMIC_INIT(0); +int (*orig_aarch64_insn_patch_text)(void **addr, u32 *insn, int cnt); +u64 (*orig_module_emit_veneer_for_adrp)(struct module *mod, Elf64_Shdr *sechdrs, + void *loc, u64 val); +u32 (*orig_aarch64_insn_encode_immediate)(enum aarch64_insn_imm_type type, + u32 insn, u64 imm); +u64 (*orig_module_emit_plt_entry)(struct module *mod, Elf64_Shdr *sechdrs, + void *loc, const Elf64_Rela *rela, + Elf64_Sym *sym); +u32 (*orig_aarch64_insn_gen_branch_imm)(unsigned long pc, unsigned long addr, + enum aarch64_insn_branch_type type); +DEFINE_ORIG_FUNC(int, apply_relocate_add, 5, Elf64_Shdr *, sechdrs, const char *, strtab, + unsigned int, symindex, unsigned int, relsec, struct module *, me); + +#define MAX_STACK_ENTRIES 100 +#define STACK_ERR_BUF_SIZE 128 + +static unsigned long old_func_addr; +static unsigned long old_func_size; + +static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val) +{ + switch (reloc_op) { + case RELOC_OP_ABS: + return val; + case RELOC_OP_PREL: + return val - (u64)place; + case RELOC_OP_PAGE: + return (val & ~0xfff) - ((u64)place & ~0xfff); + case RELOC_OP_NONE: + return 0; + } + + pr_err("do_reloc: unknown relocation operation %d\n", reloc_op); + return 0; +} + +static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val, + int lsb, int len, enum aarch64_insn_imm_type imm_type) +{ + u64 imm, imm_mask; + s64 sval; + u32 insn = le32_to_cpu(*place); + + /* Calculate the relocation value. */ + sval = do_reloc(op, place, val); + sval >>= lsb; + + /* Extract the value bits and shift them to bit 0. */ + imm_mask = (BIT(lsb + len) - 1) >> lsb; + imm = sval & imm_mask; + + /* Update the instruction's immediate field. */ + insn = orig_aarch64_insn_encode_immediate(imm_type, insn, imm); + + *place = cpu_to_le32(insn); + + /* + * Extract the upper value bits (including the sign bit) and + * shift them to bit 0. + */ + sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1); + + /* + * Overflow has occurred if the upper bits are not all equal to + * the sign bit of the value. 
+ */ + if ((u64)(sval + 1) >= 2) + return -ERANGE; + + return 0; +} + +static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs, + __le32 *place, u64 val) +{ + u32 insn; + + if (!is_forbidden_offset_for_adrp(place)) + return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21, + AARCH64_INSN_IMM_ADR); + + /* patch ADRP to ADR if it is in range */ + if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21, + AARCH64_INSN_IMM_ADR)) { + insn = le32_to_cpu(*place); + insn &= ~BIT(31); + } else { + /* out of range for ADR -> emit a veneer */ + val = orig_module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff); + if (!val) + return -ENOEXEC; + insn = orig_aarch64_insn_gen_branch_imm((u64)place, val, + AARCH64_INSN_BRANCH_NOLINK); + } + + *place = cpu_to_le32(insn); + return 0; +} + +static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val, + int lsb, enum aarch64_insn_movw_imm_type imm_type) +{ + u64 imm; + s64 sval; + u32 insn = le32_to_cpu(*place); + + sval = do_reloc(op, place, val); + imm = sval >> lsb; + + if (imm_type == AARCH64_INSN_IMM_MOVNZ) { + /* + * For signed MOVW relocations, we have to manipulate the + * instruction encoding depending on whether or not the + * immediate is less than zero. + */ + insn &= ~(3 << 29); + if (sval >= 0) { + /* >=0: Set the instruction to MOVZ (opcode 10b). */ + insn |= 2 << 29; + } else { + /* + * <0: Set the instruction to MOVN (opcode 00b). + * Since we've masked the opcode already, we + * don't need to do anything other than + * inverting the new immediate field. + */ + imm = ~imm; + } + } + + /* Update the instruction with the new encoding. */ + insn = orig_aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm); + *place = cpu_to_le32(insn); + + if (imm > U16_MAX) + return -ERANGE; + + return 0; +} + +static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len) +{ + s64 sval = do_reloc(op, place, val); + + /* + * The ELF psABI for AArch64 documents the 16-bit and 32-bit place + * relative and absolute relocations as having a range of [-2^15, 2^16) + * or [-2^31, 2^32), respectively. However, in order to be able to + * detect overflows reliably, we have to choose whether we interpret + * such quantities as signed or as unsigned, and stick with it. + * The way we organize our address space requires a signed + * interpretation of 32-bit relative references, so let's use that + * for all R_AARCH64_PRELxx relocations. This means our upper + * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX. 
+ */ + + switch (len) { + case 16: + *(s16 *)place = sval; + switch (op) { + case RELOC_OP_ABS: + if (sval < 0 || sval > U16_MAX) + return -ERANGE; + break; + case RELOC_OP_PREL: + if (sval < S16_MIN || sval > S16_MAX) + return -ERANGE; + break; + default: + pr_err("Invalid 16-bit data relocation (%d)\n", op); + return 0; + } + break; + case 32: + *(s32 *)place = sval; + switch (op) { + case RELOC_OP_ABS: + if (sval < 0 || sval > U32_MAX) + return -ERANGE; + break; + case RELOC_OP_PREL: + if (sval < S32_MIN || sval > S32_MAX) + if (sval < S32_MIN || sval > S32_MAX) + return -ERANGE; + break; + default: + pr_err("Invalid 32-bit data relocation (%d)\n", op); + return 0; + } + break; + case 64: + *(s64 *)place = sval; + break; + default: + pr_err("Invalid length (%d) for data relocation\n", len); + return 0; + } + return 0; +} + + + +static int try_enable_patch(void *data){ + int ret = 0; + + JUMP_INSTALL(apply_relocate_add); +out: + return ret; +} + +int new_apply_relocate_add(Elf64_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *me) +{ + atomic_inc(&ali_nr_running); + + unsigned int i; + int ovf; + bool overflow_check; + Elf64_Sym *sym; + void *loc; + u64 val; + Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr; + printk("new_apply_relocate_add\n"); + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + /* loc corresponds to P in the AArch64 ELF document. */ + loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + + rel[i].r_offset; + + /* sym is the ELF symbol we're referring to. */ + sym = (Elf64_Sym *)sechdrs[symindex].sh_addr + + ELF64_R_SYM(rel[i].r_info); + + /* val corresponds to (S + A) in the AArch64 ELF document. */ + val = sym->st_value + rel[i].r_addend; + + /* Check for overflow by default. */ + overflow_check = true; + + /* Perform the static relocation. */ + switch (ELF64_R_TYPE(rel[i].r_info)) { + /* Null relocations. */ + case R_ARM_NONE: + case R_AARCH64_NONE: + ovf = 0; + break; + + /* Data relocations. */ + case R_AARCH64_ABS64: + overflow_check = false; + ovf = reloc_data(RELOC_OP_ABS, loc, val, 64); + break; + case R_AARCH64_ABS32: + ovf = reloc_data(RELOC_OP_ABS, loc, val, 32); + break; + case R_AARCH64_ABS16: + ovf = reloc_data(RELOC_OP_ABS, loc, val, 16); + break; + case R_AARCH64_PREL64: + overflow_check = false; + ovf = reloc_data(RELOC_OP_PREL, loc, val, 64); + break; + case R_AARCH64_PREL32: + ovf = reloc_data(RELOC_OP_PREL, loc, val, 32); + break; + case R_AARCH64_PREL16: + ovf = reloc_data(RELOC_OP_PREL, loc, val, 16); + break; + + /* MOVW instruction relocations. */ + case R_AARCH64_MOVW_UABS_G0_NC: + overflow_check = false; + fallthrough; + case R_AARCH64_MOVW_UABS_G0: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, + AARCH64_INSN_IMM_MOVKZ); + break; + case R_AARCH64_MOVW_UABS_G1_NC: + overflow_check = false; + fallthrough; + case R_AARCH64_MOVW_UABS_G1: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, + AARCH64_INSN_IMM_MOVKZ); + break; + case R_AARCH64_MOVW_UABS_G2_NC: + overflow_check = false; + fallthrough; + case R_AARCH64_MOVW_UABS_G2: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, + AARCH64_INSN_IMM_MOVKZ); + break; + case R_AARCH64_MOVW_UABS_G3: + /* We're using the top bits so we can't overflow. 
+			overflow_check = false;
+			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
+					      AARCH64_INSN_IMM_MOVKZ);
+			break;
+		case R_AARCH64_MOVW_SABS_G0:
+			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
+					      AARCH64_INSN_IMM_MOVNZ);
+			break;
+		case R_AARCH64_MOVW_SABS_G1:
+			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
+					      AARCH64_INSN_IMM_MOVNZ);
+			break;
+		case R_AARCH64_MOVW_SABS_G2:
+			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
+					      AARCH64_INSN_IMM_MOVNZ);
+			break;
+		case R_AARCH64_MOVW_PREL_G0_NC:
+			overflow_check = false;
+			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
+					      AARCH64_INSN_IMM_MOVKZ);
+			break;
+		case R_AARCH64_MOVW_PREL_G0:
+			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
+					      AARCH64_INSN_IMM_MOVNZ);
+			break;
+		case R_AARCH64_MOVW_PREL_G1_NC:
+			overflow_check = false;
+			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
+					      AARCH64_INSN_IMM_MOVKZ);
+			break;
+		case R_AARCH64_MOVW_PREL_G1:
+			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
+					      AARCH64_INSN_IMM_MOVNZ);
+			break;
+		case R_AARCH64_MOVW_PREL_G2_NC:
+			overflow_check = false;
+			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
+					      AARCH64_INSN_IMM_MOVKZ);
+			break;
+		case R_AARCH64_MOVW_PREL_G2:
+			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
+					      AARCH64_INSN_IMM_MOVNZ);
+			break;
+		case R_AARCH64_MOVW_PREL_G3:
+			/* We're using the top bits so we can't overflow. */
+			overflow_check = false;
+			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
+					      AARCH64_INSN_IMM_MOVNZ);
+			break;
+
+		/* Immediate instruction relocations. */
+		case R_AARCH64_LD_PREL_LO19:
+			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
+					     AARCH64_INSN_IMM_19);
+			break;
+		case R_AARCH64_ADR_PREL_LO21:
+			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
+					     AARCH64_INSN_IMM_ADR);
+			break;
+		case R_AARCH64_ADR_PREL_PG_HI21_NC:
+			overflow_check = false;
+			fallthrough;
+		case R_AARCH64_ADR_PREL_PG_HI21:
+			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
+			if (ovf && ovf != -ERANGE) {
+				/* drop the in-flight count on early return */
+				atomic_dec(&ali_nr_running);
+				return ovf;
+			}
+			break;
+		case R_AARCH64_ADD_ABS_LO12_NC:
+		case R_AARCH64_LDST8_ABS_LO12_NC:
+			overflow_check = false;
+			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
+					     AARCH64_INSN_IMM_12);
+			break;
+		case R_AARCH64_LDST16_ABS_LO12_NC:
+			overflow_check = false;
+			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
+					     AARCH64_INSN_IMM_12);
+			break;
+		case R_AARCH64_LDST32_ABS_LO12_NC:
+			overflow_check = false;
+			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
+					     AARCH64_INSN_IMM_12);
+			break;
+		case R_AARCH64_LDST64_ABS_LO12_NC:
+			overflow_check = false;
+			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
+					     AARCH64_INSN_IMM_12);
+			break;
+		case R_AARCH64_LDST128_ABS_LO12_NC:
+			overflow_check = false;
+			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
+					     AARCH64_INSN_IMM_12);
+			break;
+		case R_AARCH64_TSTBR14:
+			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
+					     AARCH64_INSN_IMM_14);
+			break;
+		case R_AARCH64_CONDBR19:
+			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
+					     AARCH64_INSN_IMM_19);
+			break;
+		case R_AARCH64_JUMP26:
+		case R_AARCH64_CALL26:
+			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
+					     AARCH64_INSN_IMM_26);
+			if (ovf == -ERANGE) {
+				val = orig_module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
+				if (!val) {
+					atomic_dec(&ali_nr_running);
+					return -ENOEXEC;
+				}
+				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
+						     26, AARCH64_INSN_IMM_26);
+			}
+			break;
+
+		default:
+			pr_err("module %s: unsupported RELA relocation: %llu\n",
+			       me->name, ELF64_R_TYPE(rel[i].r_info));
+			atomic_dec(&ali_nr_running);
+			return -ENOEXEC;
+		}
+
printk("overflow_check!!\n"); + if (ovf == -ERANGE) + printk("OVF!!\n"); + goto overflow; + } + + } + atomic_dec(&ali_nr_running); + return 0; +overflow: + atomic_dec(&ali_nr_running); + pr_err("module %s: overflow in relocation type %d val %Lx\n", + me->name, (int)ELF64_R_TYPE(rel[i].r_info), val); + return -ENOEXEC; +} + +static int lookup_syms(void) +{ + SYM_SET_ORIG_FUNCTION_ADDRESS(apply_relocate_add, 0xffff800080029278); + SYM_SET_ORIG_FUNCTION_ADDRESS(aarch64_insn_patch_text,0xffff800080d15908); + SYM_SET_ORIG_FUNCTION_ADDRESS(aarch64_insn_encode_immediate,0xffff800080d16f60); + SYM_SET_ORIG_FUNCTION_ADDRESS(module_emit_veneer_for_adrp, 0xffff80008002a0e8); + SYM_SET_ORIG_FUNCTION_ADDRESS(aarch64_insn_gen_branch_imm,0xffff800080d17030); + SYM_SET_ORIG_FUNCTION_ADDRESS(module_emit_plt_entry, 0xffff800080029f38); + return 0; +} + +static int apply_relocate_add_init(void) +{ + int ret = 0; + if (lookup_syms()) + return -EINVAL; + JUMP_INIT(apply_relocate_add); + ret = try_enable_patch(NULL); + if (ret){ + pr_err("manal_hotfix for ftrace_location_range execute failed ... \n"); + JUMP_REMOVE_SYM(apply_relocate_add); + return ret; + } + pr_warn("ftrace_location_range load.\n"); + + return 0; +} + +static void apply_relocate_add_exit(void) +{ + JUMP_REMOVE(apply_relocate_add); + do{ + msleep(10); + } while(atomic_read(&ali_nr_running) > 0); + pr_err("arm ftrace plt unload.\n"); +} + +module_init(apply_relocate_add_init); +module_exit(apply_relocate_add_exit); + +MODULE_DESCRIPTION("Example of dynamically jump to replace the unexported symbol"); +MODULE_AUTHOR("yongdezhang "); +MODULE_LICENSE("GPL v2"); + diff --git a/dynamic_jump/symbol.h b/dynamic_jump/symbol.h new file mode 100644 index 0000000000000000000000000000000000000000..a171f2221e98863c1c347f3c6199b3d8cd98906b --- /dev/null +++ b/dynamic_jump/symbol.h @@ -0,0 +1,131 @@ +/* + * Linux内核诊断工具--内核态符号表头文件 + * + * Copyright (C) 2020 Alibaba Ltd. 
+ *
+ * Author: Baoyou Xie
+ *
+ * License terms: GNU General Public License (GPL) version 3
+ *
+ * Link: https://github.com/alibaba/diagnose-tools/blob/master/SOURCE/module/symbol.h
+ */
+
+#ifndef __DIAG_SYMBOL_H
+#define __DIAG_SYMBOL_H
+
+extern unsigned long (*diag_kallsyms_lookup_name)(const char *name);
+extern int diag_get_symbol_count(char *symbol);
+extern int diag_init_symbol(void);
+
+struct mutex;
+struct stack_trace;
+struct pid_namespace;
+extern struct mutex *orig_text_mutex;
+extern rwlock_t *orig_tasklist_lock;
+
+#if defined(DIAG_ARM64)
+extern void (*orig___flush_dcache_area)(void *addr, size_t len);
+extern int (*orig_aarch64_insn_patch_text)(void *addrs[], u32 insns[], int cnt);
+#else
+extern void *(*orig_text_poke_smp)(void *, const void *, size_t);
+extern void *(*orig_text_poke_bp)(void *addr, const void *opcode, size_t len, void *handler);
+#endif
+
+extern void (*orig___show_regs)(struct pt_regs *regs, int all);
+extern struct list_head *orig_ptype_all;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0) || defined(CENTOS_4_18_193)
+extern unsigned int (*orig_stack_trace_save_tsk)(struct task_struct *task,
+				unsigned long *store, unsigned int size,
+				unsigned int skipnr);
+#ifdef CONFIG_USER_STACKTRACE_SUPPORT
+extern unsigned int (*orig_stack_trace_save_user)(unsigned long *store, unsigned int size);
+#endif
+#else
+extern void (*orig_save_stack_trace_tsk)(struct task_struct *tsk,
+				struct stack_trace *trace);
+#ifdef CONFIG_USER_STACKTRACE_SUPPORT
+extern void (*orig_save_stack_trace_user)(struct stack_trace *trace);
+#endif
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
+extern void (*orig___do_page_fault)(struct pt_regs *regs,
+		unsigned long address, unsigned long error_code);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)
+extern void __kprobes
+(*orig___do_page_fault)(struct pt_regs *regs, unsigned long error_code);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)
+extern void
+(*orig___do_page_fault)(struct pt_regs *regs, unsigned long error_code,
+		unsigned long address);
+#else
+extern void
+(*orig___do_page_fault)(struct pt_regs *regs, unsigned long error_code,
+		unsigned long address);
+#endif
+extern struct task_struct *(*orig_find_task_by_vpid)(pid_t nr);
+extern struct task_struct *(*orig_find_task_by_pid_ns)(pid_t nr, struct pid_namespace *ns);
+extern struct task_struct *(*orig_idle_task)(int cpu);
+struct class;
+struct device_type;
+extern struct class *orig_block_class;
+extern struct device_type *orig_disk_type;
+struct gendisk;
+extern char *(*orig_disk_name)(struct gendisk *hd, int partno, char *buf);
+extern int (*orig_access_remote_vm)(struct mm_struct *mm, unsigned long addr,
+		void *buf, int len, unsigned int gup_flags);
+struct rq;
+extern struct rq *orig_runqueues;
+struct sched_entity;
+extern int (*orig_get_task_type)(struct sched_entity *se);
+struct kernfs_node;
+extern int (*orig_kernfs_name)(struct kernfs_node *kn, char *buf, size_t buflen);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 33)
+extern struct page *(*orig_follow_page_mask)(struct vm_area_struct *vma,
+		unsigned long address, unsigned int foll_flags,
+		unsigned int *page_mask);
+#else
+extern struct page *(*orig_follow_page)(struct vm_area_struct *vma, unsigned long address,
+		unsigned int flags);
+#endif
+
+struct cpuacct;
+extern struct cpuacct *orig_root_cpuacct;
+struct cgroup_subsys_state;
+extern struct cgroup_subsys_state *
+(*orig_css_next_descendant_pre)(struct cgroup_subsys_state *pos,
+		struct cgroup_subsys_state *root);
+
+struct cgroup_subsys;
+extern struct cgroup_subsys *orig_cpuacct_subsys;
+extern struct cgroup_subsys_state *
+(*orig_css_get_next)(struct cgroup_subsys *ss, int id,
+		struct cgroup_subsys_state *root, int *foundid);
+
+struct files_struct;
+extern struct files_struct *(*orig_get_files_struct)(struct task_struct *task);
+extern void (*orig_put_files_struct)(struct files_struct *files);
+
+struct dentry;
+struct inode;
+extern struct dentry *(*orig_d_find_any_alias)(struct inode *inode);
+extern int (*orig_task_statm)(struct mm_struct *mm,
+		unsigned long *shared, unsigned long *text,
+		unsigned long *data, unsigned long *resident);
+
+extern int *orig_kptr_restrict;
+struct sched_class;
+extern struct sched_class *orig_idle_sched_class;
+extern struct x86_pmu *orig_x86_pmu;
+
+int alidiagnose_symbols_init(void);
+void alidiagnose_symbols_exit(void);
+
+#endif /* __DIAG_SYMBOL_H */