diff --git a/backport-libbpf-Do-not-resolve-size-on-duplicate-FUNCs.patch b/backport-libbpf-Do-not-resolve-size-on-duplicate-FUNCs.patch new file mode 100644 index 0000000000000000000000000000000000000000..7300737df224f8bd01fe0435fbee5716f6bf9671 --- /dev/null +++ b/backport-libbpf-Do-not-resolve-size-on-duplicate-FUNCs.patch @@ -0,0 +1,33 @@ +From ecf998ed8ff51efd3887ff7caca0a0cc56a88082 Mon Sep 17 00:00:00 2001 +From: Eric Long +Date: Wed, 2 Oct 2024 14:25:06 +0800 +Subject: [PATCH] libbpf: Do not resolve size on duplicate FUNCs + +FUNCs do not have sizes, thus currently btf__resolve_size will fail +with -EINVAL. Add conditions so that we only update size when the BTF +object is not function or function prototype. + +Signed-off-by: Eric Long +Signed-off-by: Andrii Nakryiko +Link: https://lore.kernel.org/bpf/20241002-libbpf-dup-extern-funcs-v4-1-560eb460ff90@hack3r.moe +--- + src/linker.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/src/linker.c b/src/linker.c +index 81dbbdd79..f83c1c299 100644 +--- a/src/linker.c ++++ b/src/linker.c +@@ -2451,6 +2451,10 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj) + if (glob_sym && glob_sym->var_idx >= 0) { + __s64 sz; + ++ /* FUNCs don't have size, nothing to update */ ++ if (btf_is_func(t)) ++ continue; ++ + dst_var = &dst_sec->sec_vars[glob_sym->var_idx]; + /* Because underlying BTF type might have + * changed, so might its size have changed, so + + diff --git a/backport-libbpf-Fix-bpf_object__open_skeleton-s-mishandling-o.patch b/backport-libbpf-Fix-bpf_object__open_skeleton-s-mishandling-o.patch new file mode 100644 index 0000000000000000000000000000000000000000..f437fb985202feb72a2024037a35276d34f0b8c0 --- /dev/null +++ b/backport-libbpf-Fix-bpf_object__open_skeleton-s-mishandling-o.patch @@ -0,0 +1,149 @@ +From f6f24022d3054d2855612e642f8fe9f1148b4275 Mon Sep 17 00:00:00 2001 +From: Andrii Nakryiko +Date: Tue, 27 Aug 2024 13:37:21 -0700 +Subject: [PATCH] libbpf: Fix bpf_object__open_skeleton()'s mishandling of + options +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +We do an ugly copying of options in bpf_object__open_skeleton() just to +be able to set object name from skeleton's recorded name (while still +allowing user to override it through opts->object_name). + +This is not just ugly, but it also is broken due to memcpy() that +doesn't take into account potential skel_opts' and user-provided opts' +sizes differences due to backward and forward compatibility. This leads +to copying over extra bytes and then failing to validate options +properly. It could, technically, lead also to SIGSEGV, if we are unlucky. + +So just get rid of that memory copy completely and instead pass +default object name into bpf_object_open() directly, simplifying all +this significantly. The rule now is that obj_name should be non-NULL for +bpf_object_open() when called with in-memory buffer, so validate that +explicitly as well. + +We adopt bpf_object__open_mem() to this as well and generate default +name (based on buffer memory address and size) outside of bpf_object_open(). 
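+
+For illustration only (not part of the upstream change): the user-facing path this
+affects is opening a skeleton with extra options. A minimal sketch, assuming a
+skeleton generated from a hypothetical "minimal" BPF object:
+
+    LIBBPF_OPTS(bpf_object_open_opts, opts,
+        .kconfig = "CONFIG_DEBUG_INFO_BTF=y",   /* any user-supplied option */
+    );
+    struct minimal_bpf *skel;
+
+    /* the object name still defaults to the skeleton's recorded name */
+    skel = minimal_bpf__open_opts(&opts);
+    if (!skel)
+        return -errno;  /* errno is set by libbpf on failure */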
+ +Fixes: d66562fba1ce ("libbpf: Add BPF object skeleton support") +Reported-by: Daniel Müller +Signed-off-by: Andrii Nakryiko +Signed-off-by: Daniel Borkmann +Reviewed-by: Daniel Müller +Acked-by: Eduard Zingerman +Link: https://lore.kernel.org/bpf/20240827203721.1145494-1-andrii@kernel.org +--- + src/libbpf.c | 52 +++++++++++++++++++--------------------------------- + 1 file changed, 19 insertions(+), 33 deletions(-) + +diff --git a/src/libbpf.c b/src/libbpf.c +index e55353887..d3a542649 100644 +--- a/src/libbpf.c ++++ b/src/libbpf.c +@@ -7905,16 +7905,19 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object + } + + static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz, ++ const char *obj_name, + const struct bpf_object_open_opts *opts) + { +- const char *obj_name, *kconfig, *btf_tmp_path; ++ const char *kconfig, *btf_tmp_path; + struct bpf_object *obj; +- char tmp_name[64]; + int err; + char *log_buf; + size_t log_size; + __u32 log_level; + ++ if (obj_buf && !obj_name) ++ return ERR_PTR(-EINVAL); ++ + if (elf_version(EV_CURRENT) == EV_NONE) { + pr_warn("failed to init libelf for %s\n", + path ? : "(mem buf)"); +@@ -7924,16 +7927,12 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, + if (!OPTS_VALID(opts, bpf_object_open_opts)) + return ERR_PTR(-EINVAL); + +- obj_name = OPTS_GET(opts, object_name, NULL); ++ obj_name = OPTS_GET(opts, object_name, NULL) ?: obj_name; + if (obj_buf) { +- if (!obj_name) { +- snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx", +- (unsigned long)obj_buf, +- (unsigned long)obj_buf_sz); +- obj_name = tmp_name; +- } + path = obj_name; + pr_debug("loading object '%s' from buffer\n", obj_name); ++ } else { ++ pr_debug("loading object from %s\n", path); + } + + log_buf = OPTS_GET(opts, kernel_log_buf, NULL); +@@ -8017,9 +8016,7 @@ bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts) + if (!path) + return libbpf_err_ptr(-EINVAL); + +- pr_debug("loading %s\n", path); +- +- return libbpf_ptr(bpf_object_open(path, NULL, 0, opts)); ++ return libbpf_ptr(bpf_object_open(path, NULL, 0, NULL, opts)); + } + + struct bpf_object *bpf_object__open(const char *path) +@@ -8031,10 +8028,15 @@ struct bpf_object * + bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, + const struct bpf_object_open_opts *opts) + { ++ char tmp_name[64]; ++ + if (!obj_buf || obj_buf_sz == 0) + return libbpf_err_ptr(-EINVAL); + +- return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts)); ++ /* create a (quite useless) default "name" for this memory buffer object */ ++ snprintf(tmp_name, sizeof(tmp_name), "%lx-%zx", (unsigned long)obj_buf, obj_buf_sz); ++ ++ return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, tmp_name, opts)); + } + + static int bpf_object_unload(struct bpf_object *obj) +@@ -13761,29 +13763,13 @@ static int populate_skeleton_progs(const struct bpf_object *obj, + int bpf_object__open_skeleton(struct bpf_object_skeleton *s, + const struct bpf_object_open_opts *opts) + { +- DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts, +- .object_name = s->name, +- ); + struct bpf_object *obj; + int err; + +- /* Attempt to preserve opts->object_name, unless overriden by user +- * explicitly. Overwriting object name for skeletons is discouraged, +- * as it breaks global data maps, because they contain object name +- * prefix as their own map name prefix. 
When skeleton is generated, +- * bpftool is making an assumption that this name will stay the same. +- */ +- if (opts) { +- memcpy(&skel_opts, opts, sizeof(*opts)); +- if (!opts->object_name) +- skel_opts.object_name = s->name; +- } +- +- obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts); +- err = libbpf_get_error(obj); +- if (err) { +- pr_warn("failed to initialize skeleton BPF object '%s': %d\n", +- s->name, err); ++ obj = bpf_object_open(NULL, s->data, s->data_sz, s->name, opts); ++ if (IS_ERR(obj)) { ++ err = PTR_ERR(obj); ++ pr_warn("failed to initialize skeleton BPF object '%s': %d\n", s->name, err); + return libbpf_err(err); + } + + + diff --git a/backport-libbpf-Fix-segfault-due-to-libelf-functions-not-sett.patch b/backport-libbpf-Fix-segfault-due-to-libelf-functions-not-sett.patch new file mode 100644 index 0000000000000000000000000000000000000000..63f2537674977358a6ab40439203c81964f114ab --- /dev/null +++ b/backport-libbpf-Fix-segfault-due-to-libelf-functions-not-sett.patch @@ -0,0 +1,138 @@ +From 984dcc97ae50c566924277aedc4967e1222e38c2 Mon Sep 17 00:00:00 2001 +From: Quentin Monnet +Date: Thu, 5 Dec 2024 13:59:42 +0000 +Subject: [PATCH] libbpf: Fix segfault due to libelf functions not setting + errno + +Libelf functions do not set errno on failure. Instead, it relies on its +internal _elf_errno value, that can be retrieved via elf_errno (or the +corresponding message via elf_errmsg()). From "man libelf": + + If a libelf function encounters an error it will set an internal + error code that can be retrieved with elf_errno. Each thread + maintains its own separate error code. The meaning of each error + code can be determined with elf_errmsg, which returns a string + describing the error. + +As a consequence, libbpf should not return -errno when a function from +libelf fails, because an empty value will not be interpreted as an error +and won't prevent the program to stop. This is visible in +bpf_linker__add_file(), for example, where we call a succession of +functions that rely on libelf: + + err = err ?: linker_load_obj_file(linker, filename, opts, &obj); + err = err ?: linker_append_sec_data(linker, &obj); + err = err ?: linker_append_elf_syms(linker, &obj); + err = err ?: linker_append_elf_relos(linker, &obj); + err = err ?: linker_append_btf(linker, &obj); + err = err ?: linker_append_btf_ext(linker, &obj); + +If the object file that we try to process is not, in fact, a correct +object file, linker_load_obj_file() may fail with errno not being set, +and return 0. In this case we attempt to run linker_append_elf_sysms() +and may segfault. + +This can happen (and was discovered) with bpftool: + + $ bpftool gen object output.o sample_ret0.bpf.c + libbpf: failed to get ELF header for sample_ret0.bpf.c: invalid `Elf' handle + zsh: segmentation fault (core dumped) bpftool gen object output.o sample_ret0.bpf.c + +Fix the issue by returning a non-null error code (-EINVAL) when libelf +functions fail. 
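+
+For illustration only (not part of the change itself), the error-handling pattern
+this implies for libelf callers: ask libelf for the failure reason via elf_errmsg()
+and return a real error code rather than -errno. A minimal sketch:
+
+    Elf *elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
+    if (!elf) {
+        /* errno is NOT set by libelf; elf_errmsg(-1) describes the last error */
+        fprintf(stderr, "failed to parse ELF: %s\n", elf_errmsg(-1));
+        return -EINVAL;
+    }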
+ +Conflict:Context adapt +Reference: https://github.com/libbpf/libbpf/commit/984dcc97ae50c566924277aedc4967e1222e38c2 + +Fixes: faf6ed321cf6 ("libbpf: Add BPF static linker APIs") +Signed-off-by: Quentin Monnet +Signed-off-by: Andrii Nakryiko +Link: https://lore.kernel.org/bpf/20241205135942.65262-1-qmo@kernel.org +--- + src/linker.c | 22 ++++++++-------------- + 1 file changed, 8 insertions(+), 14 deletions(-) + +diff --git a/src/linker.c b/src/linker.c +index cf71d149f..e56ba6e67 100644 +--- a/src/linker.c ++++ b/src/linker.c +@@ -566,17 +566,15 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, + } + obj->elf = elf_begin(obj->fd, ELF_C_READ_MMAP, NULL); + if (!obj->elf) { +- err = -errno; + pr_warn_elf("failed to parse ELF file '%s'", filename); +- return err; ++ return -EINVAL; + } + + /* Sanity check ELF file high-level properties */ + ehdr = elf64_getehdr(obj->elf); + if (!ehdr) { +- err = -errno; + pr_warn_elf("failed to get ELF header for %s", filename); +- return err; ++ return -EINVAL; + } + if (ehdr->e_ident[EI_DATA] != host_endianness) { + err = -EOPNOTSUPP; +@@ -606,9 +604,8 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, + } + + if (elf_getshdrstrndx(obj->elf, &obj->shstrs_sec_idx)) { +- err = -errno; + pr_warn_elf("failed to get SHSTRTAB section index for %s", filename); +- return err; ++ return -EINVAL; + } + + scn = NULL; +@@ -618,26 +615,23 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, + + shdr = elf64_getshdr(scn); + if (!shdr) { +- err = -errno; + pr_warn_elf("failed to get section #%zu header for %s", + sec_idx, filename); +- return err; ++ return -EINVAL; + } + + sec_name = elf_strptr(obj->elf, obj->shstrs_sec_idx, shdr->sh_name); + if (!sec_name) { +- err = -errno; + pr_warn_elf("failed to get section #%zu name for %s", + sec_idx, filename); +- return err; ++ return -EINVAL; + } + + data = elf_getdata(scn, 0); + if (!data) { +- err = -errno; + pr_warn_elf("failed to get section #%zu (%s) data from %s", + sec_idx, sec_name, filename); +- return err; ++ return -EINVAL; + } + + sec = add_src_sec(obj, sec_name); +@@ -2680,14 +2674,14 @@ int bpf_linker__finalize(struct bpf_linker *linker) + + /* Finalize ELF layout */ + if (elf_update(linker->elf, ELF_C_NULL) < 0) { +- err = -errno; ++ err = -EINVAL; + pr_warn_elf("failed to finalize ELF layout"); + return libbpf_err(err); + } + + /* Write out final ELF contents */ + if (elf_update(linker->elf, ELF_C_WRITE) < 0) { +- err = -errno; ++ err = -EINVAL; + pr_warn_elf("failed to write ELF contents"); + return libbpf_err(err); + } + + diff --git a/backport-libbpf-Fixed-getting-wrong-return-address-on-arm64-a.patch b/backport-libbpf-Fixed-getting-wrong-return-address-on-arm64-a.patch new file mode 100644 index 0000000000000000000000000000000000000000..acfb8f94910d2b6040b625fd7517a65be3ad926b --- /dev/null +++ b/backport-libbpf-Fixed-getting-wrong-return-address-on-arm64-a.patch @@ -0,0 +1,32 @@ +From 81ac790dc831a5b753b310138f2201f87b55169b Mon Sep 17 00:00:00 2001 +From: Shuyi Cheng +Date: Sun, 8 Sep 2024 17:23:53 +0800 +Subject: [PATCH] libbpf: Fixed getting wrong return address on arm64 + architecture + +ARM64 has a separate lr register to store the return address, so here +you only need to read the lr register to get the return address, no need +to dereference it again. 
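+
+For illustration only, a minimal sketch of how the macro is typically used from a
+kretprobe program (the probed function and program name are arbitrary; the usual
+bpf_helpers.h/bpf_tracing.h includes are assumed):
+
+    SEC("kretprobe/do_unlinkat")
+    int trace_unlink_ret(struct pt_regs *ctx)
+    {
+        __u64 ip;
+
+        /* with this change, on arm64 this reads PT_REGS_RET(ctx) (the lr
+         * register) directly instead of dereferencing it */
+        BPF_KRETPROBE_READ_RET_IP(ip, ctx);
+        bpf_printk("ret ip: %llx", ip);
+        return 0;
+    }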
+ +Signed-off-by: Shuyi Cheng +Signed-off-by: Andrii Nakryiko +Link: https://lore.kernel.org/bpf/1725787433-77262-1-git-send-email-chengshuyi@linux.alibaba.com +--- + src/bpf_tracing.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/bpf_tracing.h b/src/bpf_tracing.h +index 4eab132a9..aa3b04f55 100644 +--- a/src/bpf_tracing.h ++++ b/src/bpf_tracing.h +@@ -522,7 +522,7 @@ struct pt_regs; + #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; }) + #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP + +-#elif defined(bpf_target_sparc) ++#elif defined(bpf_target_sparc) || defined(bpf_target_arm64) + + #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); }) + #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP + + diff --git a/backport-libbpf-fix-sym_is_subprog-logic-for-weak-global-subp.patch b/backport-libbpf-fix-sym_is_subprog-logic-for-weak-global-subp.patch new file mode 100644 index 0000000000000000000000000000000000000000..4e2b958a2561c911e39b82c82879d998568d4095 --- /dev/null +++ b/backport-libbpf-fix-sym_is_subprog-logic-for-weak-global-subp.patch @@ -0,0 +1,43 @@ +From 0e3971339f06c23aa9402a33057ecb3aac7795aa Mon Sep 17 00:00:00 2001 +From: Andrii Nakryiko +Date: Tue, 8 Oct 2024 18:15:54 -0700 +Subject: [PATCH] libbpf: fix sym_is_subprog() logic for weak global subprogs + +sym_is_subprog() is incorrectly rejecting relocations against *weak* +global subprogs. Fix that by realizing that STB_WEAK is also a global +function. + +While it seems like verifier doesn't support taking an address of +non-static subprog right now, it's still best to fix support for it on +libbpf side, otherwise users will get a very confusing error during BPF +skeleton generation or static linking due to misinterpreted relocation: + + libbpf: prog 'handle_tp': bad map relo against 'foo' in section '.text' + Error: failed to open BPF object file: Relocation failed + +It's clearly not a map relocation, but is treated and reported as such +without this fix. 
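+
+For illustration only, the kind of BPF C code that produces such a relocation
+(names match the error message above; per the note above, the verifier may still
+reject taking the address of a non-static subprog, this only shows the libbpf
+side):
+
+    /* weak global subprog: STB_WEAK + STT_FUNC in the ELF symbol table */
+    __weak int foo(void *ctx)
+    {
+        return 0;
+    }
+
+    SEC("tp/syscalls/sys_enter_getpid")
+    int handle_tp(void *ctx)
+    {
+        /* referencing the subprog by address emits a relocation against 'foo'
+         * in '.text', which sym_is_subprog() must classify as a subprog relo */
+        int (*fn)(void *) = foo;
+
+        return fn ? 0 : 1;
+    }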
+ +Fixes: 53eddb5e04ac ("libbpf: Support subprog address relocation") +Signed-off-by: Andrii Nakryiko +Link: https://lore.kernel.org/r/20241009011554.880168-1-andrii@kernel.org +Signed-off-by: Alexei Starovoitov +--- + src/libbpf.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/libbpf.c b/src/libbpf.c +index 712b95e88..05ad264ff 100644 +--- a/src/libbpf.c ++++ b/src/libbpf.c +@@ -4013,7 +4013,7 @@ static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx) + return true; + + /* global function */ +- return bind == STB_GLOBAL && type == STT_FUNC; ++ return (bind == STB_GLOBAL || bind == STB_WEAK) && type == STT_FUNC; + } + + static int find_extern_btf_id(const struct btf *btf, const char *ext_name) + + diff --git a/backport-libbpf-move-global-data-mmap-ing-into-bpf_object__lo.patch b/backport-libbpf-move-global-data-mmap-ing-into-bpf_object__lo.patch new file mode 100644 index 0000000000000000000000000000000000000000..7b8e34986701d44af14e6a9e38ac2ce37e66719c --- /dev/null +++ b/backport-libbpf-move-global-data-mmap-ing-into-bpf_object__lo.patch @@ -0,0 +1,167 @@ +From 2dea4b86ee82a48912e54b49ac4c255eca592067 Mon Sep 17 00:00:00 2001 +From: Andrii Nakryiko +Date: Tue, 22 Oct 2024 21:39:07 -0700 +Subject: [PATCH] libbpf: move global data mmap()'ing into bpf_object__load() + + Since BPF skeleton inception libbpf has been doing mmap()'ing of global + data ARRAY maps in bpf_object__load_skeleton() API, which is used by + code generated .skel.h files (i.e., by BPF skeletons only). + + This is wrong because if BPF object is loaded through generic + bpf_object__load() API, global data maps won't be re-mmap()'ed after + load step, and memory pointers returned from bpf_map__initial_value() + would be wrong and won't reflect the actual memory shared between BPF + program and user space. + + bpf_map__initial_value() return result is rarely used after load, so + this went unnoticed for a really long time, until bpftrace project + attempted to load BPF object through generic bpf_object__load() API and + then used BPF subskeleton instantiated from such bpf_object. It turned + out that .data/.rodata/.bss data updates through such subskeleton was + "blackholed", all because libbpf wouldn't re-mmap() those maps during + bpf_object__load() phase. + + Long story short, this step should be done by libbpf regardless of BPF + skeleton usage, right after BPF map is created in the kernel. This patch + moves this functionality into bpf_object__populate_internal_map() to + achieve this. And bpf_object__load_skeleton() is now simple and almost + trivial, only propagating these mmap()'ed pointers into user-supplied + skeleton structs. + + We also do trivial adjustments to error reporting inside + bpf_object__populate_internal_map() for consistency with the rest of + libbpf's map-handling code. + + Reported-by: Alastair Robertson + Reported-by: Jonathan Wiepert + Fixes: d66562fba1ce ("libbpf: Add BPF object skeleton support") + Signed-off-by: Andrii Nakryiko + Link: https://lore.kernel.org/r/20241023043908.3834423-3-andrii@kernel.org + Signed-off-by: Alexei Starovoitov + + Conflict:In the original patch, the function code is moved from the bpf object __load_skeleton to the bpf object __populate_internal_map. The implementation details of the function code are different due to version changes. Therefore, the function code is moved again according to this method. 
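+
+ For illustration only (not part of the change): the generic, non-skeleton flow
+ that this fixes. A minimal sketch, with names taken from a hypothetical
+ "minimal" BPF object; error handling omitted:
+
+     struct bpf_object *obj = bpf_object__open_file("minimal.bpf.o", NULL);
+     struct bpf_map *bss;
+     size_t sz;
+     int *val;
+
+     bpf_object__load(obj);
+
+     /* after this fix the returned pointer is the memory actually shared
+      * with the kernel, even without going through a skeleton */
+     bss = bpf_object__find_map_by_name(obj, "minimal.bss");
+     val = bpf_map__initial_value(bss, &sz);
+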
+ Reference: https://github.com/libbpf/libbpf/commit/2dea4b86ee82a48912e54b49ac4c255eca592067 +--- + src/libbpf.c | 81 ++++++++++++++++++++++++++-------------------------- + 1 file changed, 41 insertions(+), 40 deletions(-) + +diff --git a/src/libbpf.c b/src/libbpf.c +index 8d63238..cd8203f 100644 +--- a/src/libbpf.c ++++ b/src/libbpf.c +@@ -4971,6 +4971,7 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) + enum libbpf_map_type map_type = map->libbpf_type; + char *cp, errmsg[STRERR_BUFSIZE]; + int err, zero = 0; ++ size_t mmap_sz; + + if (obj->gen_loader) { + bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps, +@@ -4983,8 +4984,8 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) + if (err) { + err = -errno; + cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); +- pr_warn("Error setting initial map(%s) contents: %s\n", +- map->name, cp); ++ pr_warn("map '%s': failed to set initial contents: %s\n", ++ bpf_map__name(map), cp); + return err; + } + +@@ -4994,11 +4995,45 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) + if (err) { + err = -errno; + cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); +- pr_warn("Error freezing map(%s) as read-only: %s\n", +- map->name, cp); ++ pr_warn("map '%s': failed to freeze as read-only: %s\n", ++ bpf_map__name(map), cp); + return err; + } + } ++ ++ /* Remap anonymous mmap()-ed "map initialization image" as ++ * a BPF map-backed mmap()-ed memory, but preserving the same ++ * memory address. This will cause kernel to change process' ++ * page table to point to a different piece of kernel memory, ++ * but from userspace point of view memory address (and its ++ * contents, being identical at this point) will stay the ++ * same. This mapping will be released by bpf_object__close() ++ * as per normal clean up procedure, so we don't need to worry ++ * about it from skeleton's clean up perspective. ++ */ ++ mmap_sz = bpf_map_mmap_sz(map); ++ if (map->def.map_flags & BPF_F_MMAPABLE) { ++ void *mmaped; ++ int prot; ++ ++ if (map->def.map_flags & BPF_F_RDONLY_PROG) ++ prot = PROT_READ; ++ else ++ prot = PROT_READ | PROT_WRITE; ++ mmaped = mmap(map->mmaped, mmap_sz, prot, ++ MAP_SHARED | MAP_FIXED, map->fd, 0); ++ if (mmaped == MAP_FAILED) { ++ err = -errno; ++ mmaped = NULL; ++ pr_warn("failed to re-mmap() map '%s': %d\n", ++ bpf_map__name(map), err); ++ return libbpf_err(err); ++ } ++ } else if (map->mmaped) { ++ munmap(map->mmaped, mmap_sz); ++ map->mmaped = NULL; ++ } ++ + return 0; + } + +@@ -13128,44 +13163,10 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s) + + for (i = 0; i < s->map_cnt; i++) { + struct bpf_map *map = *s->maps[i].map; +- size_t mmap_sz = bpf_map_mmap_sz(map); +- int prot, map_fd = bpf_map__fd(map); +- void **mmaped = s->maps[i].mmaped; +- +- if (!mmaped) ++ if (!s->maps[i].mmaped) + continue; +- +- if (!(map->def.map_flags & BPF_F_MMAPABLE)) { +- *mmaped = NULL; +- continue; +- } +- +- if (map->def.map_flags & BPF_F_RDONLY_PROG) +- prot = PROT_READ; +- else +- prot = PROT_READ | PROT_WRITE; +- +- /* Remap anonymous mmap()-ed "map initialization image" as +- * a BPF map-backed mmap()-ed memory, but preserving the same +- * memory address. This will cause kernel to change process' +- * page table to point to a different piece of kernel memory, +- * but from userspace point of view memory address (and its +- * contents, being identical at this point) will stay the +- * same. 
This mapping will be released by bpf_object__close() +- * as per normal clean up procedure, so we don't need to worry +- * about it from skeleton's clean up perspective. +- */ +- *mmaped = mmap(map->mmaped, mmap_sz, prot, +- MAP_SHARED | MAP_FIXED, map_fd, 0); +- if (*mmaped == MAP_FAILED) { +- err = -errno; +- *mmaped = NULL; +- pr_warn("failed to re-mmap() map '%s': %d\n", +- bpf_map__name(map), err); +- return libbpf_err(err); +- } ++ *s->maps[i].mmaped = map->mmaped; + } +- + return 0; + } + +-- +2.33.0 diff --git a/libbpf.spec b/libbpf.spec index caa48c43f43945b91784676223050a510f36bc99..8c1cbff9bfe542944990f1e5b5a1a15c69b01664 100644 --- a/libbpf.spec +++ b/libbpf.spec @@ -4,7 +4,7 @@ Name: %{githubname} Version: %{githubver} -Release: 6 +Release: 7 Summary: Libbpf library License: LGPLv2 or BSD @@ -22,6 +22,12 @@ Patch0005: backport-libbpf-Avoid-uninitialized-value-in-BPF_CORE_READ_BI.pa Patch0006: backport-libbpf-Add-NULL-checks-to-bpf_object__prev_map,next_.patch Patch0007: backport-libbpf-Apply-map_set_def_max_entries-for-inner_maps-.patch Patch0008: backport-libbpf-Dont-take-direct-pointers-into-BTF-data-from-.patch +Patch0009: backport-libbpf-Do-not-resolve-size-on-duplicate-FUNCs.patch +Patch0010: backport-libbpf-Fix-bpf_object__open_skeleton-s-mishandling-o.patch +Patch0011: backport-libbpf-Fix-segfault-due-to-libelf-functions-not-sett.patch +Patch0012: backport-libbpf-Fixed-getting-wrong-return-address-on-arm64-a.patch +Patch0013: backport-libbpf-fix-sym_is_subprog-logic-for-weak-global-subp.patch +Patch0014: backport-libbpf-move-global-data-mmap-ing-into-bpf_object__lo.patch # This package supersedes libbpf from kernel-tools, # which has default Epoch: 0. By having Epoch: 1 @@ -74,6 +80,15 @@ developing applications that use %{name} %{_libdir}/libbpf.a %changelog +* Fri Mar 14 2025 zhangmingyi 2:1.2.2-7 +- backport patch from upstream: + backport-libbpf-Do-not-resolve-size-on-duplicate-FUNCs.patch + backport-libbpf-Fix-bpf_object__open_skeleton-s-mishandling-o.patch + backport-libbpf-Fix-segfault-due-to-libelf-functions-not-sett.patch + backport-libbpf-Fixed-getting-wrong-return-address-on-arm64-a.patch + backport-libbpf-fix-sym_is_subprog-logic-for-weak-global-subp.patch + backport-libbpf-move-global-data-mmap-ing-into-bpf_object__lo.patch + * Mon Dec 23 2024 zhangmingyi 2:1.2.2-6 - backport patch from upstream: backport-libbpf-Dont-take-direct-pointers-into-BTF-data-from-.patch