diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 5074bd1d37b5f6b8cf128c8ddba52bdcfb148321..7c5156e7d31ea5ef8a97814e3325848f1eb40da8 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1952,7 +1952,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
 	emit(A64_STR64I(A64_R(20), A64_SP, regs_off + 8), ctx);
 
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
-		emit_addr_mov_i64(A64_R(0), (const u64)im, ctx);
+		emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
 		emit_call((const u64)__bpf_tramp_enter, ctx);
 	}
 
@@ -1996,7 +1996,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
 
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
 		im->ip_epilogue = ctx->image + ctx->idx;
-		emit_addr_mov_i64(A64_R(0), (const u64)im, ctx);
+		emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
 		emit_call((const u64)__bpf_tramp_exit, ctx);
 	}
 
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index dad909547179f8d283645681fd1bd995bd09fe1c..2476774af7382bae36096d2f621923b1467a2ecf 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -587,6 +587,7 @@ struct thermal_zone_device *thermal_zone_get_by_id(int id)
 	mutex_lock(&thermal_list_lock);
 	list_for_each_entry(tz, &thermal_tz_list, node) {
 		if (tz->id == id) {
+			get_device(&tz->device);
 			match = tz;
 			break;
 		}
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 024e82ebf5920f109a0a29dd07b9ab627259f52d..cf88b7d0c4901b0542fb38e8512697bd49f4b7a5 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -56,6 +56,9 @@ int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *),
 
 struct thermal_zone_device *thermal_zone_get_by_id(int id);
 
+DEFINE_CLASS(thermal_zone_get_by_id, struct thermal_zone_device *,
+	     if (_T) put_device(&_T->device), thermal_zone_get_by_id(id), int id)
+
 struct thermal_attr {
 	struct device_attribute attr;
 	char name[THERMAL_NAME_LENGTH];
diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c
index 08bc46c3ec7bc191e598597ae8b864423bf3f696..8d07531d95197682c3a93389bd927a0beab75f84 100644
--- a/drivers/thermal/thermal_netlink.c
+++ b/drivers/thermal/thermal_netlink.c
@@ -450,7 +450,6 @@ static int thermal_genl_cmd_tz_get_id(struct param *p)
 static int thermal_genl_cmd_tz_get_trip(struct param *p)
 {
 	struct sk_buff *msg = p->msg;
-	struct thermal_zone_device *tz;
 	struct nlattr *start_trip;
 	struct thermal_trip trip;
 	int ret, i, id;
@@ -460,7 +459,7 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p)
 
 	id = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_TZ_ID]);
 
-	tz = thermal_zone_get_by_id(id);
+	CLASS(thermal_zone_get_by_id, tz)(id);
 	if (!tz)
 		return -EINVAL;
 
@@ -498,7 +497,6 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p)
 static int thermal_genl_cmd_tz_get_temp(struct param *p)
 {
 	struct sk_buff *msg = p->msg;
-	struct thermal_zone_device *tz;
 	int temp, ret, id;
 
 	if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID])
@@ -506,7 +504,7 @@ static int thermal_genl_cmd_tz_get_temp(struct param *p)
 
 	id = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_TZ_ID]);
 
-	tz = thermal_zone_get_by_id(id);
+	CLASS(thermal_zone_get_by_id, tz)(id);
 	if (!tz)
 		return -EINVAL;
 
@@ -524,7 +522,6 @@ static int thermal_genl_cmd_tz_get_temp(struct param *p)
 static int thermal_genl_cmd_tz_get_gov(struct param *p)
 {
 	struct sk_buff *msg = p->msg;
-	struct thermal_zone_device *tz;
 	int id, ret = 0;
 
 	if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID])
@@ -532,7 +529,7 @@ static int thermal_genl_cmd_tz_get_gov(struct param *p)
 
 	id = nla_get_u32(p->attrs[THERMAL_GENL_ATTR_TZ_ID]);
 
-	tz = thermal_zone_get_by_id(id);
+	CLASS(thermal_zone_get_by_id, tz)(id);
 	if (!tz)
 		return -EINVAL;
 
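Editor's note on the pattern above: DEFINE_CLASS()/CLASS() from linux/cleanup.h turn thermal_zone_get_by_id() into a scope-bound getter, so the reference taken via get_device() is dropped on every exit path without an explicit put_device(). A minimal sketch of a caller (hypothetical function, not part of this patch):

static int zone_report_type(int id)
{
	/* Runs thermal_zone_get_by_id(id), which now takes a device reference. */
	CLASS(thermal_zone_get_by_id, tz)(id);

	if (!tz)
		return -EINVAL;	/* lookup failed; cleanup is a no-op since _T is NULL */

	pr_info("thermal zone %d: type %s\n", id, tz->type);
	return 0;
	/* put_device(&tz->device) runs automatically when tz goes out of scope. */
}
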
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index 3f0a9795c0d45d150f2371f77554f558e23b812b..e61bad79d0e9a5faff56f4a62eecaac0ab094850 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -121,6 +121,9 @@ static struct uvcg_format *find_format_by_pix(struct uvc_device *uvc,
 	list_for_each_entry(format, &uvc->header->formats, entry) {
 		const struct uvc_format_desc *fmtdesc = to_uvc_format(format->fmt);
 
+		if (IS_ERR(fmtdesc))
+			continue;
+
 		if (fmtdesc->fcc == pixelformat) {
 			uformat = format->fmt;
 			break;
@@ -240,6 +243,7 @@ uvc_v4l2_try_format(struct file *file, void *fh, struct v4l2_format *fmt)
 	struct uvc_video *video = &uvc->video;
 	struct uvcg_format *uformat;
 	struct uvcg_frame *uframe;
+	const struct uvc_format_desc *fmtdesc;
 	u8 *fcc;
 
 	if (fmt->type != video->queue.queue.type)
@@ -260,12 +264,29 @@ uvc_v4l2_try_format(struct file *file, void *fh, struct v4l2_format *fmt)
 	if (!uframe)
 		return -EINVAL;
 
-	fmt->fmt.pix.width = uframe->frame.w_width;
-	fmt->fmt.pix.height = uframe->frame.w_height;
+	if (uformat->type == UVCG_UNCOMPRESSED) {
+		struct uvcg_uncompressed *u =
+			to_uvcg_uncompressed(&uformat->group.cg_item);
+		if (!u)
+			return 0;
+
+		v4l2_fill_pixfmt(&fmt->fmt.pix, fmt->fmt.pix.pixelformat,
+				 uframe->frame.w_width, uframe->frame.w_height);
+
+		if (fmt->fmt.pix.sizeimage != (uvc_v4l2_get_bytesperline(uformat, uframe) *
+					       uframe->frame.w_height))
+			return -EINVAL;
+	} else {
+		fmt->fmt.pix.width = uframe->frame.w_width;
+		fmt->fmt.pix.height = uframe->frame.w_height;
+		fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(uformat, uframe);
+		fmt->fmt.pix.sizeimage = uvc_get_frame_size(uformat, uframe);
+		fmtdesc = to_uvc_format(uformat);
+		if (IS_ERR(fmtdesc))
+			return PTR_ERR(fmtdesc);
+		fmt->fmt.pix.pixelformat = fmtdesc->fcc;
+	}
 	fmt->fmt.pix.field = V4L2_FIELD_NONE;
-	fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(uformat, uframe);
-	fmt->fmt.pix.sizeimage = uvc_get_frame_size(uformat, uframe);
-	fmt->fmt.pix.pixelformat = to_uvc_format(uformat)->fcc;
 	fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
 	fmt->fmt.pix.priv = 0;
 
@@ -375,6 +396,9 @@ uvc_v4l2_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
 		return -EINVAL;
 
 	fmtdesc = to_uvc_format(uformat);
+	if (IS_ERR(fmtdesc))
+		return PTR_ERR(fmtdesc);
+
 	f->pixelformat = fmtdesc->fcc;
 
 	return 0;
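For context, the TRY_FMT path changed above is exercised from user space through the standard VIDIOC_TRY_FMT ioctl; with this patch, bytesperline/sizeimage for uncompressed formats come from v4l2_fill_pixfmt(). A hedged user-space sketch (the device node and resolution are assumptions, not part of this patch):

/* Sketch: probing a UVC gadget node with VIDIOC_TRY_FMT (standard V4L2 UAPI). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_format fmt;
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;	/* the gadget produces video */
	fmt.fmt.pix.width = 640;
	fmt.fmt.pix.height = 480;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;

	/* The driver rounds to the closest configured frame and fills in
	 * bytesperline and sizeimage. */
	if (ioctl(fd, VIDIOC_TRY_FMT, &fmt) == 0)
		printf("bytesperline=%u sizeimage=%u\n",
		       fmt.fmt.pix.bytesperline, fmt.fmt.pix.sizeimage);
	return 0;
}
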
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index f019ce64eba48e410317feedf4af0b9da633d93b..41d7b9fd6e3782baa8781856f64d0103ec25d3a8 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3626,6 +3626,13 @@ int ext4_feature_set_ok(struct super_block *sb, int readonly)
 		return 0;
 	}
 #endif
+	if (EXT4_SB(sb)->s_es->s_def_hash_version == DX_HASH_SIPHASH &&
+	    !ext4_has_feature_casefold(sb)) {
+		ext4_msg(sb, KERN_ERR,
+			 "Filesystem without casefold feature cannot be "
+			 "mounted with siphash");
+		return 0;
+	}
 
 	if (readonly)
 		return 1;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 035e627f94f62d6f10668e297b27d67a8b97c08d..706060aa44312ad1673168b401969f45a2fa8285 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1245,8 +1245,12 @@ int bpf_dynptr_check_size(u32 size);
 u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
 
 #ifdef CONFIG_BPF_JIT
-int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
-int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
+int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
+			     struct bpf_trampoline *tr,
+			     struct bpf_prog *tgt_prog);
+int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
+			       struct bpf_trampoline *tr,
+			       struct bpf_prog *tgt_prog);
 struct bpf_trampoline *bpf_trampoline_get(u64 key,
 					  struct bpf_attach_target_info *tgt_info);
 void bpf_trampoline_put(struct bpf_trampoline *tr);
@@ -1327,12 +1331,14 @@ void bpf_jit_uncharge_modmem(u32 size);
 bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
 #else
 static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
-					   struct bpf_trampoline *tr)
+					   struct bpf_trampoline *tr,
+					   struct bpf_prog *tgt_prog)
 {
 	return -ENOTSUPP;
 }
 static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
-					     struct bpf_trampoline *tr)
+					     struct bpf_trampoline *tr,
+					     struct bpf_prog *tgt_prog)
 {
 	return -ENOTSUPP;
 }
@@ -1430,6 +1436,9 @@ struct bpf_prog_aux {
 	bool sleepable;
 	bool tail_call_reachable;
 	bool xdp_has_frags;
+	bool is_extended; /* true if extended by an freplace program */
+	u64 prog_array_member_cnt; /* number of times this prog is held by prog_array maps */
+	struct mutex ext_mutex; /* protects is_extended and prog_array_member_cnt */
 	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
 	const struct btf_type *attach_func_proto;
 	/* function name for valid attach_btf_id */
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 1811efcfbd6e3b2f5983ab2160a685311e10bcda..7f1ef37c67be45890e2bc845878e32a81c78ced9 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -909,22 +909,44 @@ static void *prog_fd_array_get_ptr(struct bpf_map *map,
 				   struct file *map_file, int fd)
 {
 	struct bpf_prog *prog = bpf_prog_get(fd);
+	bool is_extended;
 
 	if (IS_ERR(prog))
 		return prog;
 
-	if (!bpf_prog_map_compatible(map, prog)) {
+	if (prog->type == BPF_PROG_TYPE_EXT ||
+	    !bpf_prog_map_compatible(map, prog)) {
 		bpf_prog_put(prog);
 		return ERR_PTR(-EINVAL);
 	}
 
+	mutex_lock(&prog->aux->ext_mutex);
+	is_extended = prog->aux->is_extended;
+	if (!is_extended)
+		prog->aux->prog_array_member_cnt++;
+	mutex_unlock(&prog->aux->ext_mutex);
+	if (is_extended) {
+		/* An extended prog cannot be a tail callee. This prevents a
+		 * potential infinite loop like:
+		 * tail callee prog entry -> tail callee prog subprog ->
+		 * freplace prog entry --tailcall-> tail callee prog entry.
+		 */
+		bpf_prog_put(prog);
+		return ERR_PTR(-EBUSY);
+	}
+
 	return prog;
 }
 
 static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
 {
+	struct bpf_prog *prog = ptr;
+
+	mutex_lock(&prog->aux->ext_mutex);
+	prog->aux->prog_array_member_cnt--;
+	mutex_unlock(&prog->aux->ext_mutex);
 	/* bpf_prog is freed after one RCU or tasks trace grace period */
-	bpf_prog_put(ptr);
+	bpf_prog_put(prog);
 }
 
 static u32 prog_fd_array_sys_lookup_elem(void *ptr)
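In user-space terms, the bookkeeping above makes both orderings of "extend the prog" and "make the prog a tail callee" fail. A condensed libbpf sketch of the scenario the comment describes (the fds and "subprog_tc" name are assumed to come from tailcall_freplace/tc_bpf2bpf-style programs, as in the selftests added at the end of this series):

/* Sketch: both orders now converge on -EBUSY. */
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static void demo(struct bpf_program *freplace_prog, int tgt_fd, int jmp_table_fd)
{
	int key = 0, tgt = tgt_fd;
	struct bpf_link *link;

	/* Order A: extend first, then try to make the target a tail callee. */
	link = bpf_program__attach_freplace(freplace_prog, tgt_fd, "subprog_tc");
	/* fails with -EBUSY: the target is extended (is_extended is set) */
	bpf_map_update_elem(jmp_table_fd, &key, &tgt, BPF_ANY);
	bpf_link__destroy(link);

	/* Order B: tail callee first, then try to extend the target. */
	bpf_map_update_elem(jmp_table_fd, &key, &tgt, BPF_ANY);
	/* fails: prog_array_member_cnt > 0 on the target */
	link = bpf_program__attach_freplace(freplace_prog, tgt_fd, "subprog_tc");
	bpf_map_delete_elem(jmp_table_fd, &key);
}
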
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 02f327f05fd6195938ac74801df29114d18d5ace..855523318f91e64f95edf996242027d0119f0917 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -122,6 +122,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
 
 	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
 	mutex_init(&fp->aux->used_maps_mutex);
+	mutex_init(&fp->aux->ext_mutex);
 	mutex_init(&fp->aux->dst_mutex);
 
 	return fp;
diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index 246559c3e93d0d104506b97b003ab7b6db9ded39..00688545742f141608c29d6825cb0c640429ec48 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -29,7 +29,7 @@ struct bpf_ringbuf {
 	u64 mask;
 	struct page **pages;
 	int nr_pages;
-	spinlock_t spinlock ____cacheline_aligned_in_smp;
+	raw_spinlock_t spinlock ____cacheline_aligned_in_smp;
 	/* For user-space producer ring buffers, an atomic_t busy bit is used
 	 * to synchronize access to the ring buffers in the kernel, rather than
 	 * the spinlock that is used for kernel-producer ring buffers. This is
@@ -173,7 +173,7 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
 	if (!rb)
 		return NULL;
 
-	spin_lock_init(&rb->spinlock);
+	raw_spin_lock_init(&rb->spinlock);
 	atomic_set(&rb->busy, 0);
 	init_waitqueue_head(&rb->waitq);
 	init_irq_work(&rb->work, bpf_ringbuf_notify);
@@ -421,10 +421,10 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
 	cons_pos = smp_load_acquire(&rb->consumer_pos);
 
 	if (in_nmi()) {
-		if (!spin_trylock_irqsave(&rb->spinlock, flags))
+		if (!raw_spin_trylock_irqsave(&rb->spinlock, flags))
 			return NULL;
 	} else {
-		spin_lock_irqsave(&rb->spinlock, flags);
+		raw_spin_lock_irqsave(&rb->spinlock, flags);
 	}
 
 	pend_pos = rb->pending_pos;
@@ -450,7 +450,7 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
 	 */
 	if (new_prod_pos - cons_pos > rb->mask ||
 	    new_prod_pos - pend_pos > rb->mask) {
-		spin_unlock_irqrestore(&rb->spinlock, flags);
+		raw_spin_unlock_irqrestore(&rb->spinlock, flags);
 		return NULL;
 	}
 
@@ -462,7 +462,7 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
 	/* pairs with consumer's smp_load_acquire() */
 	smp_store_release(&rb->producer_pos, new_prod_pos);
 
-	spin_unlock_irqrestore(&rb->spinlock, flags);
+	raw_spin_unlock_irqrestore(&rb->spinlock, flags);
 
 	return (void *)hdr + BPF_RINGBUF_HDR_SZ;
 }
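The raw spinlock conversion matters because __bpf_ringbuf_reserve() can run in any context a BPF program runs in, including NMI (hence the trylock path) and, on PREEMPT_RT, contexts where a sleeping spinlock_t must not be taken. For reference, the reserve/commit API this lock backs looks like the following from BPF C (map name and event layout are illustrative, not from this patch):

/* Sketch: reserving from a BPF ring buffer; bpf_ringbuf_reserve() ends up in
 * __bpf_ringbuf_reserve() above, possibly from NMI context for perf-event progs. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);	/* must be a page-multiple power of 2 */
} rb SEC(".maps");

struct event {
	__u32 pid;
};

SEC("perf_event")
int on_sample(void *ctx)
{
	struct event *e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);

	if (!e)		/* reservation can fail, e.g. a trylock miss in NMI */
		return 0;

	e->pid = bpf_get_current_pid_tgid() >> 32;
	bpf_ringbuf_submit(e, 0);
	return 0;
}

char __license[] SEC("license") = "GPL";
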
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index ba38c08a9a059a9e4630241802af0fdbbbe87bad..e7981a9e2af623bbd01d5b09729806d93dd2975e 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -3107,7 +3107,8 @@ static void bpf_tracing_link_release(struct bpf_link *link)
 		container_of(link, struct bpf_tracing_link, link.link);
 
 	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&tr_link->link,
-						tr_link->trampoline));
+						tr_link->trampoline,
+						tr_link->tgt_prog));
 
 	bpf_trampoline_put(tr_link->trampoline);
 
@@ -3242,7 +3243,7 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
 	 *   in prog->aux
 	 *
 	 * - if prog->aux->dst_trampoline is NULL, the program has already been
-	 *   attached to a target
+	 *   attached to a target and its initial target was cleared (below)
 	 *
 	 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
 	 *   target_btf_id using the link_create API.
@@ -3317,7 +3318,7 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
 	if (err)
 		goto out_unlock;
 
-	err = bpf_trampoline_link_prog(&link->link, tr);
+	err = bpf_trampoline_link_prog(&link->link, tr, tgt_prog);
 	if (err) {
 		bpf_link_cleanup(&link_primer);
 		link = NULL;
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index e97aeda3a86b55522a73e53279543cb5b4df6919..7c0abf19b52fc5fbf6dcc232f034017aca08d451 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -510,7 +510,27 @@ static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
 	}
 }
 
-static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
+static int bpf_freplace_check_tgt_prog(struct bpf_prog *tgt_prog)
+{
+	struct bpf_prog_aux *aux = tgt_prog->aux;
+
+	guard(mutex)(&aux->ext_mutex);
+	if (aux->prog_array_member_cnt)
+		/* A program extension cannot extend the target prog when the
+		 * target prog has been added to any prog_array map as a tail
+		 * callee. This prevents a potential infinite loop like:
+		 * tgt prog entry -> tgt prog subprog -> freplace prog entry
+		 * --tailcall-> tgt prog entry.
+		 */
+		return -EBUSY;
+
+	aux->is_extended = true;
+	return 0;
+}
+
+static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link,
+				      struct bpf_trampoline *tr,
+				      struct bpf_prog *tgt_prog)
 {
 	enum bpf_tramp_prog_type kind;
 	struct bpf_tramp_link *link_exiting;
@@ -531,6 +551,9 @@ static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_tr
 		/* Cannot attach extension if fentry/fexit are in use. */
 		if (cnt)
 			return -EBUSY;
+		err = bpf_freplace_check_tgt_prog(tgt_prog);
+		if (err)
+			return err;
 		tr->extension_prog = link->link.prog;
 		return bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
 					  link->link.prog->bpf_func);
@@ -557,17 +580,21 @@ static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_tr
 	return err;
 }
 
-int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
+int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
+			     struct bpf_trampoline *tr,
+			     struct bpf_prog *tgt_prog)
 {
 	int err;
 
 	mutex_lock(&tr->mutex);
-	err = __bpf_trampoline_link_prog(link, tr);
+	err = __bpf_trampoline_link_prog(link, tr, tgt_prog);
 	mutex_unlock(&tr->mutex);
 	return err;
 }
 
-static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
+static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
+					struct bpf_trampoline *tr,
+					struct bpf_prog *tgt_prog)
 {
 	enum bpf_tramp_prog_type kind;
 	int err;
@@ -578,6 +605,8 @@ static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_
 		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
 					 tr->extension_prog->bpf_func, NULL);
 		tr->extension_prog = NULL;
+		guard(mutex)(&tgt_prog->aux->ext_mutex);
+		tgt_prog->aux->is_extended = false;
 		return err;
 	}
 	hlist_del_init(&link->tramp_hlist);
@@ -586,12 +615,14 @@ static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_
 }
 
 /* bpf_trampoline_unlink_prog() should never fail. */
-int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr)
+int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
+			       struct bpf_trampoline *tr,
+			       struct bpf_prog *tgt_prog)
 {
 	int err;
 
 	mutex_lock(&tr->mutex);
-	err = __bpf_trampoline_unlink_prog(link, tr);
+	err = __bpf_trampoline_unlink_prog(link, tr, tgt_prog);
 	mutex_unlock(&tr->mutex);
 	return err;
 }
@@ -606,7 +637,7 @@ static void bpf_shim_tramp_link_release(struct bpf_link *link)
 	if (!shim_link->trampoline)
 		return;
 
-	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link, shim_link->trampoline));
+	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link, shim_link->trampoline, NULL));
 	bpf_trampoline_put(shim_link->trampoline);
 }
 
@@ -720,7 +751,7 @@ int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
 		goto err;
 	}
 
-	err = __bpf_trampoline_link_prog(&shim_link->link, tr);
+	err = __bpf_trampoline_link_prog(&shim_link->link, tr, NULL);
 	if (err)
 		goto err;
 
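A note on guard(mutex)(...) used above: it comes from linux/cleanup.h, takes the mutex immediately, and releases it when the enclosing scope ends, so every return path is covered without an unlock label. A minimal sketch (hypothetical counter, for illustration only):

/* Sketch: scope-based locking with guard(), per linux/cleanup.h. */
#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(counter_lock);
static u64 counter;	/* hypothetical state protected by counter_lock */

static int counter_bump(bool allowed)
{
	guard(mutex)(&counter_lock);	/* mutex_lock() happens here ... */

	if (!allowed)
		return -EBUSY;		/* ... mutex_unlock() runs here ... */

	counter++;
	return 0;			/* ... and here, automatically. */
}
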
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d6a4102312fadd63e9fd467c61cdb41fc40a27ef..1a872b05d0b8c3c13ff6f8698810aee4cdc888e4 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -18898,13 +18898,46 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 	int i, ret, cnt, delta = 0;
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
-		/* Make divide-by-zero exceptions impossible. */
+		/* Make sdiv/smod divide-by-minus-one exceptions impossible. */
+		if ((insn->code == (BPF_ALU64 | BPF_MOD | BPF_K) ||
+		     insn->code == (BPF_ALU64 | BPF_DIV | BPF_K) ||
+		     insn->code == (BPF_ALU | BPF_MOD | BPF_K) ||
+		     insn->code == (BPF_ALU | BPF_DIV | BPF_K)) &&
+		    insn->off == 1 && insn->imm == -1) {
+			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
+			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
+			struct bpf_insn *patchlet;
+			struct bpf_insn chk_and_sdiv[] = {
+				BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+					     BPF_NEG | BPF_K, insn->dst_reg,
+					     0, 0, 0),
+			};
+			struct bpf_insn chk_and_smod[] = {
+				BPF_MOV32_IMM(insn->dst_reg, 0),
+			};
+
+			patchlet = isdiv ? chk_and_sdiv : chk_and_smod;
+			cnt = isdiv ? ARRAY_SIZE(chk_and_sdiv) : ARRAY_SIZE(chk_and_smod);
+
+			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
+			if (!new_prog)
+				return -ENOMEM;
+
+			delta    += cnt - 1;
+			env->prog = prog = new_prog;
+			insn      = new_prog->insnsi + i + delta;
+			continue;
+		}
+
+		/* Make divide-by-zero and divide-by-minus-one exceptions impossible. */
 		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
 		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
 		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
 		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
 			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
 			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
+			bool is_sdiv = isdiv && insn->off == 1;
+			bool is_smod = !isdiv && insn->off == 1;
 			struct bpf_insn *patchlet;
 			struct bpf_insn chk_and_div[] = {
 				/* [R,W]x div 0 -> 0 */
@@ -18924,10 +18957,62 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
 				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
 			};
+			struct bpf_insn chk_and_sdiv[] = {
+				/* [R,W]x sdiv 0 -> 0
+				 * LLONG_MIN sdiv -1 -> LLONG_MIN
+				 * INT_MIN sdiv -1 -> INT_MIN
+				 */
+				BPF_MOV64_REG(BPF_REG_AX, insn->src_reg),
+				BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+					     BPF_ADD | BPF_K, BPF_REG_AX,
+					     0, 0, 1),
+				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+					     BPF_JGT | BPF_K, BPF_REG_AX,
+					     0, 4, 1),
+				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+					     BPF_JEQ | BPF_K, BPF_REG_AX,
+					     0, 1, 0),
+				BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+					     BPF_MOV | BPF_K, insn->dst_reg,
+					     0, 0, 0),
+				/* BPF_NEG(LLONG_MIN) == -LLONG_MIN == LLONG_MIN */
+				BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+					     BPF_NEG | BPF_K, insn->dst_reg,
+					     0, 0, 0),
+				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+				*insn,
+			};
+			struct bpf_insn chk_and_smod[] = {
+				/* [R,W]x mod 0 -> [R,W]x */
+				/* [R,W]x mod -1 -> 0 */
+				BPF_MOV64_REG(BPF_REG_AX, insn->src_reg),
+				BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) |
+					     BPF_ADD | BPF_K, BPF_REG_AX,
+					     0, 0, 1),
+				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+					     BPF_JGT | BPF_K, BPF_REG_AX,
+					     0, 3, 1),
+				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+					     BPF_JEQ | BPF_K, BPF_REG_AX,
+					     0, 3 + (is64 ? 0 : 1), 1),
+				BPF_MOV32_IMM(insn->dst_reg, 0),
+				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+				*insn,
+				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
+			};
 
-			patchlet = isdiv ? chk_and_div : chk_and_mod;
-			cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
-				      ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
+			if (is_sdiv) {
+				patchlet = chk_and_sdiv;
+				cnt = ARRAY_SIZE(chk_and_sdiv);
+			} else if (is_smod) {
+				patchlet = chk_and_smod;
+				cnt = ARRAY_SIZE(chk_and_smod) - (is64 ? 2 : 0);
+			} else {
+				patchlet = isdiv ? chk_and_div : chk_and_mod;
+				cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
+					      ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
+			}
 
 			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
 			if (!new_prog)
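Why the verifier must patch these instructions: on x86, a signed divide whose quotient does not fit the destination raises #DE, so INT_MIN / -1 traps exactly like a divide by zero. BPF instead defines the results, as the patchlet comments state. A small user-space C illustration of the semantics being enforced (a sketch assuming two's-complement wrap-around, not kernel code):

/* Sketch: compiled naively, LLONG_MIN / -1 raises SIGFPE on x86; the BPF
 * patchlets pin the results to the documented values instead. */
#include <limits.h>
#include <stdio.h>

static long long bpf_sdiv_semantics(long long dst, long long src)
{
	if (src == 0)
		return 0;		/* x sdiv 0 -> 0 */
	if (src == -1)			/* mirrors BPF_NEG: LLONG_MIN stays LLONG_MIN */
		return -(unsigned long long)dst;
	return dst / src;
}

static long long bpf_smod_semantics(long long dst, long long src)
{
	if (src == 0)
		return dst;		/* x smod 0 -> x */
	if (src == -1)
		return 0;		/* x smod -1 -> 0 */
	return dst % src;
}

int main(void)
{
	printf("%lld\n", bpf_sdiv_semantics(LLONG_MIN, -1));	/* LLONG_MIN */
	printf("%lld\n", bpf_smod_semantics(42, -1));		/* 0 */
	return 0;
}
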
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ee6d4c3db6a3c1c745ea8869ec9c68097852bf2e..542f2125467ff1a12057a185a5994c356f087388 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8563,7 +8563,7 @@ __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
 		bool empty = !cpumask_and(new_mask, new_mask,
 					  ctx->user_mask);
 
-		if (WARN_ON_ONCE(empty))
+		if (empty)
 			cpumask_copy(new_mask, cpus_allowed);
 	}
 	__set_cpus_allowed_ptr(p, ctx);
diff --git a/mm/slub.c b/mm/slub.c
index d2544c88a5c43c8d4f3d9589df5b8703515699cd..9c6d8f28552cd0c239fac6b5822f91a2e8293336 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -452,6 +452,26 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 	*(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
 }
 
+/*
+ * See comment in calculate_sizes().
+ */
+static inline bool freeptr_outside_object(struct kmem_cache *s)
+{
+	return s->offset >= s->inuse;
+}
+
+/*
+ * Return offset of the end of info block which is inuse + free pointer if
+ * not overlapping with object.
+ */
+static inline unsigned int get_info_end(struct kmem_cache *s)
+{
+	if (freeptr_outside_object(s))
+		return s->inuse + sizeof(void *);
+	else
+		return s->inuse;
+}
+
 /* Loop over all objects in a slab */
 #define for_each_object(__p, __s, __addr, __objects) \
 	for (__p = fixup_red_left(__s, __addr); \
@@ -627,6 +647,50 @@ static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
 	return false;
 }
 
+/*
+ * kmalloc caches have fixed sizes (mostly powers of 2), and the kmalloc() API
+ * family will round up the real request size to these fixed ones, so
+ * there can be extra space beyond what is requested. Save the original
+ * request size in the metadata area, for better debug and sanity checks.
+ */
+static inline void set_orig_size(struct kmem_cache *s,
+				 void *object, unsigned int orig_size)
+{
+	void *p = kasan_reset_tag(object);
+
+	if (!slub_debug_orig_size(s))
+		return;
+
+#ifdef CONFIG_KASAN_GENERIC
+	/*
+	 * KASAN can save its free meta data inside of the object at offset 0.
+	 * If this meta data size is larger than 'orig_size', it will overlap
+	 * the data redzone in [orig_size+1, object_size]. Thus, we adjust
+	 * 'orig_size' to be at least as big as KASAN's meta data.
+	 */
+	if (kasan_metadata_size(s, true) > orig_size)
+		orig_size = kasan_metadata_size(s, true);
+#endif
+
+	p += get_info_end(s);
+	p += sizeof(struct track) * 2;
+
+	*(unsigned int *)p = orig_size;
+}
+
+static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
+{
+	void *p = kasan_reset_tag(object);
+
+	if (!slub_debug_orig_size(s))
+		return s->object_size;
+
+	p += get_info_end(s);
+	p += sizeof(struct track) * 2;
+
+	return *(unsigned int *)p;
+}
+
 #ifdef CONFIG_SLUB_DEBUG
 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
 static DEFINE_SPINLOCK(object_map_lock);
@@ -740,26 +804,6 @@ static void print_section(char *level, char *text, u8 *addr,
 	metadata_access_disable();
 }
 
-/*
- * See comment in calculate_sizes().
- */
-static inline bool freeptr_outside_object(struct kmem_cache *s)
-{
-	return s->offset >= s->inuse;
-}
-
-/*
- * Return offset of the end of info block which is inuse + free pointer if
- * not overlapping with object.
- */
-static inline unsigned int get_info_end(struct kmem_cache *s)
-{
-	if (freeptr_outside_object(s))
-		return s->inuse + sizeof(void *);
-	else
-		return s->inuse;
-}
-
 static struct track *get_track(struct kmem_cache *s, void *object,
 			       enum track_item alloc)
 {
@@ -860,50 +904,6 @@ static void print_slab_info(const struct slab *slab)
 	       folio_flags(folio, 0));
 }
 
-/*
- * kmalloc caches has fixed sizes (mostly power of 2), and kmalloc() API
- * family will round up the real request size to these fixed ones, so
- * there could be an extra area than what is requested. Save the original
- * request size in the meta data area, for better debug and sanity check.
- */
-static inline void set_orig_size(struct kmem_cache *s,
-				 void *object, unsigned int orig_size)
-{
-	void *p = kasan_reset_tag(object);
-
-	if (!slub_debug_orig_size(s))
-		return;
-
-#ifdef CONFIG_KASAN_GENERIC
-	/*
-	 * KASAN could save its free meta data in object's data area at
-	 * offset 0, if the size is larger than 'orig_size', it will
-	 * overlap the data redzone in [orig_size+1, object_size], and
-	 * the check should be skipped.
-	 */
-	if (kasan_metadata_size(s, true) > orig_size)
-		orig_size = s->object_size;
-#endif
-
-	p += get_info_end(s);
-	p += sizeof(struct track) * 2;
-
-	*(unsigned int *)p = orig_size;
-}
-
-static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
-{
-	void *p = kasan_reset_tag(object);
-
-	if (!slub_debug_orig_size(s))
-		return s->object_size;
-
-	p += get_info_end(s);
-	p += sizeof(struct track) * 2;
-
-	return *(unsigned int *)p;
-}
-
 void skip_orig_size_check(struct kmem_cache *s, const void *object)
 {
 	set_orig_size(s, (void *)object, s->object_size);
@@ -1755,7 +1755,6 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
 					int objects) {}
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
 					int objects) {}
-
 #ifndef CONFIG_SLUB_TINY
 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
 			       void **freelist, void *nextfree)
@@ -1795,12 +1794,19 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s,
 	 */
 	if (init) {
 		int rsize;
+		unsigned int orig_size;
+
+		orig_size = get_orig_size(s, x);
 		if (!kasan_has_integrated_init())
-			memset(kasan_reset_tag(x), 0, s->object_size);
+			memset(kasan_reset_tag(x), 0, orig_size);
 		rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
 		memset((char *)kasan_reset_tag(x) + s->inuse, 0,
 		       s->size - s->inuse - rsize);
+		/*
+		 * Restore orig_size; otherwise the kmalloc redzone would be
+		 * reported as overwritten.
+		 */
+		set_orig_size(s, x, orig_size);
 	}
 	/* KASAN might put x into memory quarantine, delaying its reuse. */
 	return kasan_slab_free(s, x, init);
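The effect of the stored orig_size, in one example: with redzoning enabled (slub_debug=Z), a kmalloc(24) object served from kmalloc-32 records orig_size = 24 in the slot computed above, and bytes 24..31 become a kmalloc redzone, so an out-of-bounds store is caught even when it stays inside the 32-byte object. A hypothetical kernel-style demo, not part of this patch:

/* Sketch: what orig_size buys (assumes slub_debug=Z / SLAB_RED_ZONE). */
#include <linux/slab.h>

static void kmalloc_redzone_demo(void)
{
	char *p = kmalloc(24, GFP_KERNEL);	/* lands in kmalloc-32, orig_size = 24 */

	if (!p)
		return;

	p[30] = 0x42;	/* inside the 32-byte object but past orig_size:
			 * the debug checks report a kmalloc redzone
			 * overwrite on free, since [24, 32) must hold
			 * the redzone pattern. */
	kfree(p);
}
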
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 70da78ab952027523ef08d961c3ec66e444b71ae..65297b9ed74c64a0ccfff7771d9a2f613f107ed0 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -387,6 +387,7 @@ int l2tp_session_register(struct l2tp_session *session,
 		l2tp_tunnel_inc_refcount(tunnel);
 	}
 
+	WRITE_ONCE(session->tunnel, tunnel);
 	hlist_add_head_rcu(&session->hlist, head);
 	spin_unlock_bh(&tunnel->hlist_lock);
 
@@ -698,7 +699,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 			if (!session->lns_mode && !session->send_seq) {
 				trace_session_seqnum_lns_enable(session);
 				session->send_seq = 1;
-				l2tp_session_set_header_len(session, tunnel->version);
+				l2tp_session_set_header_len(session, tunnel->version,
+							    tunnel->encap);
 			}
 		} else {
 			/* No sequence numbers.
@@ -719,7 +721,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 			if (!session->lns_mode && session->send_seq) {
 				trace_session_seqnum_lns_disable(session);
 				session->send_seq = 0;
-				l2tp_session_set_header_len(session, tunnel->version);
+				l2tp_session_set_header_len(session, tunnel->version,
+							    tunnel->encap);
 			} else if (session->send_seq) {
 				pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
 						     session->name);
@@ -1574,7 +1577,8 @@ EXPORT_SYMBOL_GPL(l2tp_session_delete);
 /* We come here whenever a session's send_seq, cookie_len or
  * l2specific_type parameters are set.
  */
-void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+void l2tp_session_set_header_len(struct l2tp_session *session, int version,
+				 enum l2tp_encap_type encap)
 {
 	if (version == L2TP_HDR_VER_2) {
 		session->hdr_len = 6;
@@ -1583,7 +1587,7 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
 	} else {
 		session->hdr_len = 4 + session->cookie_len;
 		session->hdr_len += l2tp_get_l2specific_len(session);
-		if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
+		if (encap == L2TP_ENCAPTYPE_UDP)
 			session->hdr_len += 4;
 	}
 }
@@ -1597,7 +1601,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
 	session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL);
 	if (session) {
 		session->magic = L2TP_SESSION_MAGIC;
-		session->tunnel = tunnel;
 
 		session->session_id = session_id;
 		session->peer_session_id = peer_session_id;
@@ -1633,7 +1636,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
 			memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
 		}
 
-		l2tp_session_set_header_len(session, tunnel->version);
+		l2tp_session_set_header_len(session, tunnel->version, tunnel->encap);
 
 		refcount_set(&session->ref_count, 1);
 
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 91ebf0a3f49975921b9ac4476a400cb6dc68d53a..61f402c3e7f46f0aa8762544896f3bf8c50861b5 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -261,7 +261,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
 
 /* Transmit path helpers for sending packets over the tunnel socket.
  */
-void l2tp_session_set_header_len(struct l2tp_session *session, int version);
+void l2tp_session_set_header_len(struct l2tp_session *session, int version,
+				 enum l2tp_encap_type encap);
 int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb);
 
 /* Pseudowire management.
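The header-length rule encoded above: L2TPv2 uses a 6-byte header (plus 4 when sequence numbers are enabled), while L2TPv3 uses 4 bytes plus the cookie and L2-specific sublayer, plus 4 more under UDP encapsulation. A self-contained restatement of the computation (the l2specific_len parameter stands in for l2tp_get_l2specific_len(); a sketch, not the kernel function):

/* Sketch: L2TP header length, mirroring l2tp_session_set_header_len(). */
#include <stdio.h>

enum encap { ENCAP_UDP, ENCAP_IP };

static unsigned int l2tp_hdr_len(int version, int send_seq,
				 unsigned int cookie_len,
				 unsigned int l2specific_len, enum encap encap)
{
	if (version == 2)			/* L2TP_HDR_VER_2 */
		return 6 + (send_seq ? 4 : 0);
	/* L2TPv3: flags/version + session id, then cookie and L2-specific
	 * sublayer; UDP encapsulation adds another 4 bytes. */
	return 4 + cookie_len + l2specific_len + (encap == ENCAP_UDP ? 4 : 0);
}

int main(void)
{
	/* v3 over UDP, 8-byte cookie, default 4-byte L2-specific sublayer */
	printf("%u\n", l2tp_hdr_len(3, 0, 8, 4, ENCAP_UDP));	/* 20 */
	return 0;
}
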
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index e27e00cb16c6b1ab2fdb12da15fdd2134c459a72..05314419c7bbb8083121d4e147c91fefd6e73e7b 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -690,8 +690,10 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
 		session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);
 
 	if (info->attrs[L2TP_ATTR_SEND_SEQ]) {
+		struct l2tp_tunnel *tunnel = session->tunnel;
+
 		session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]);
-		l2tp_session_set_header_len(session, session->tunnel->version);
+		l2tp_session_set_header_len(session, tunnel->version, tunnel->encap);
 	}
 
 	if (info->attrs[L2TP_ATTR_LNS_MODE])
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 6146e4e67bbb5438ebe9eb0046b1e5dbfce6f7ca..eebec10a156d5f80d74c5b1027dbc66bd908fbab 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1203,7 +1203,8 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
 			po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
 						PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
 		}
-		l2tp_session_set_header_len(session, session->tunnel->version);
+		l2tp_session_set_header_len(session, session->tunnel->version,
+					    session->tunnel->encap);
 		break;
 
 	case PPPOL2TP_SO_LNSMODE:
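A note on the WRITE_ONCE(session->tunnel, tunnel) added in l2tp_session_register(): the field is now set only when the session becomes reachable, which is the publish half of the usual ONCE pairing; lockless readers want a matching READ_ONCE. A generic sketch of the pattern with illustrative stubs (not l2tp code):

/* Sketch: publish/consume pairing behind WRITE_ONCE(session->tunnel, ...). */
#include <linux/compiler.h>

struct tunnel_stub;

struct session_stub {
	struct tunnel_stub *tunnel;
};

/* Publisher: set the field before the RCU list insertion makes the
 * session visible to readers. */
static void session_publish(struct session_stub *s, struct tunnel_stub *t)
{
	WRITE_ONCE(s->tunnel, t);
	/* hlist_add_head_rcu(&s->hlist, head) would follow here */
}

/* Reader: tearing-free load; may legitimately observe NULL for a
 * session that has not been registered yet. */
static struct tunnel_stub *session_tunnel(struct session_stub *s)
{
	return READ_ONCE(s->tunnel);
}
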
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index 09c189761926c7268e66ccb310a88bcacb3daa78..3693a6857699c7b2f0d32f4dbbe83808dc831427 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <unistd.h>
 #include <test_progs.h>
+#include "tailcall_freplace.skel.h"
+#include "tc_bpf2bpf.skel.h"
 
 /* test_tailcall_1 checks basic functionality by patching multiple locations
  * in a single program for a single tail call slot with nop->jmp, jmp->nop
@@ -884,6 +886,156 @@ static void test_tailcall_bpf2bpf_6(void)
 	tailcall_bpf2bpf6__destroy(obj);
 }
 
+/* test_tailcall_freplace checks that an freplace prog cannot be updated into
+ * a prog_array map, whether or not the freplace prog is attached to its
+ * target.
+ */
+static void test_tailcall_freplace(void)
+{
+	struct tailcall_freplace *freplace_skel = NULL;
+	struct bpf_link *freplace_link = NULL;
+	struct bpf_program *freplace_prog;
+	struct tc_bpf2bpf *tc_skel = NULL;
+	int prog_fd, tc_prog_fd, map_fd;
+	char buff[128] = {};
+	int err, key;
+
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		    .data_in = buff,
+		    .data_size_in = sizeof(buff),
+		    .repeat = 1,
+	);
+
+	freplace_skel = tailcall_freplace__open();
+	if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open"))
+		return;
+
+	tc_skel = tc_bpf2bpf__open_and_load();
+	if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
+		goto out;
+
+	tc_prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
+	freplace_prog = freplace_skel->progs.entry_freplace;
+	err = bpf_program__set_attach_target(freplace_prog, tc_prog_fd,
+					     "subprog_tc");
+	if (!ASSERT_OK(err, "set_attach_target"))
+		goto out;
+
+	err = tailcall_freplace__load(freplace_skel);
+	if (!ASSERT_OK(err, "tailcall_freplace__load"))
+		goto out;
+
+	map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
+	prog_fd = bpf_program__fd(freplace_prog);
+	key = 0;
+	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+	ASSERT_ERR(err, "update jmp_table failure");
+
+	freplace_link = bpf_program__attach_freplace(freplace_prog, tc_prog_fd,
+						     "subprog_tc");
+	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
+		goto out;
+
+	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+	ASSERT_ERR(err, "update jmp_table failure");
+
+out:
+	bpf_link__destroy(freplace_link);
+	tailcall_freplace__destroy(freplace_skel);
+	tc_bpf2bpf__destroy(tc_skel);
+}
+
+/* test_tailcall_bpf2bpf_freplace checks that attaching an freplace prog to a
+ * prog that is already a tail callee fails, and that updating an extended
+ * prog into a prog_array map fails.
+ */
+static void test_tailcall_bpf2bpf_freplace(void)
+{
+	struct tailcall_freplace *freplace_skel = NULL;
+	struct bpf_link *freplace_link = NULL;
+	struct tc_bpf2bpf *tc_skel = NULL;
+	char buff[128] = {};
+	int prog_fd, map_fd;
+	int err, key;
+
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		    .data_in = buff,
+		    .data_size_in = sizeof(buff),
+		    .repeat = 1,
+	);
+
+	tc_skel = tc_bpf2bpf__open_and_load();
+	if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
+		goto out;
+
+	prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
+	freplace_skel = tailcall_freplace__open();
+	if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open"))
+		goto out;
+
+	err = bpf_program__set_attach_target(freplace_skel->progs.entry_freplace,
+					     prog_fd, "subprog_tc");
+	if (!ASSERT_OK(err, "set_attach_target"))
+		goto out;
+
+	err = tailcall_freplace__load(freplace_skel);
+	if (!ASSERT_OK(err, "tailcall_freplace__load"))
+		goto out;
+
+	/* OK to attach then detach freplace prog. */
+
+	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
+						     prog_fd, "subprog_tc");
+	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
+		goto out;
+
+	err = bpf_link__destroy(freplace_link);
+	if (!ASSERT_OK(err, "destroy link"))
+		goto out;
+
+	/* OK to update prog_array map then delete element from the map. */
+
+	key = 0;
+	map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
+	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+	if (!ASSERT_OK(err, "update jmp_table"))
+		goto out;
+
+	err = bpf_map_delete_elem(map_fd, &key);
+	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
+		goto out;
+
+	/* Fail to attach an freplace prog to a prog that is a tail callee. */
+
+	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+	if (!ASSERT_OK(err, "update jmp_table"))
+		goto out;
+
+	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
+						     prog_fd, "subprog_tc");
+	if (!ASSERT_ERR_PTR(freplace_link, "attach_freplace failure"))
+		goto out;
+
+	err = bpf_map_delete_elem(map_fd, &key);
+	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
+		goto out;
+
+	/* Fail to update an extended prog into the prog_array map. */
+
+	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
+						     prog_fd, "subprog_tc");
+	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
+		goto out;
+
+	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
+	if (!ASSERT_ERR(err, "update jmp_table failure"))
+		goto out;
+
+out:
+	bpf_link__destroy(freplace_link);
+	tailcall_freplace__destroy(freplace_skel);
+	tc_bpf2bpf__destroy(tc_skel);
+}
+
 void test_tailcalls(void)
 {
 	if (test__start_subtest("tailcall_1"))
@@ -910,4 +1062,8 @@ void test_tailcalls(void)
 		test_tailcall_bpf2bpf_4(true);
 	if (test__start_subtest("tailcall_bpf2bpf_6"))
 		test_tailcall_bpf2bpf_6();
+	if (test__start_subtest("tailcall_freplace"))
+		test_tailcall_freplace();
+	if (test__start_subtest("tailcall_bpf2bpf_freplace"))
+		test_tailcall_bpf2bpf_freplace();
 }
diff --git a/tools/testing/selftests/bpf/progs/tailcall_freplace.c b/tools/testing/selftests/bpf/progs/tailcall_freplace.c
new file mode 100644
index 0000000000000000000000000000000000000000..6713b809df442ceac53573de67b35ba47f8f560c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_freplace.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+	__uint(max_entries, 1);
+	__uint(key_size, sizeof(__u32));
+	__uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+int count = 0;
+
+SEC("freplace")
+int entry_freplace(struct __sk_buff *skb)
+{
+	count++;
+	bpf_tail_call_static(skb, &jmp_table, 0);
+	return count;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
new file mode 100644
index 0000000000000000000000000000000000000000..d1a57f7d09bd805837f6fb14ac8ac4fe449d7821
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+__noinline
+int subprog_tc(struct __sk_buff *skb)
+{
+	int ret = 1;
+
+	__sink(skb);
+	__sink(ret);
+	return ret;
+}
+
+SEC("tc")
+int entry_tc(struct __sk_buff *skb)
+{
+	return subprog_tc(skb);
+}
+
+char __license[] SEC("license") = "GPL";