diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a4a0950b2a7cedf7254770554f3460d77a5a3855..a38959e912aef716203e9c1b21f46dd49f80d6bf 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -877,6 +877,16 @@ binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
 {
 	WARN_ON(!list_empty(&thread->waiting_thread_node));
 	binder_enqueue_work_ilocked(work, &thread->todo);
+
+	/* (e)poll-based threads require an explicit wakeup signal when
+	 * queuing their own work; they rely on these events to consume
+	 * messages without I/O block. Without it, threads risk waiting
+	 * indefinitely without handling the work.
+	 */
+	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
+	    thread->pid == current->pid && !thread->process_todo)
+		wake_up_interruptible_sync(&thread->wait);
+
 	thread->process_todo = true;
 }
 
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index a77ed66425f27cee5a0875baa02d8125e7fc8f5f..3c93e6c05c4d88b74424921b5273aa4310d998df 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1002,7 +1002,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 		goto err_mmget;
 	if (!mmap_read_trylock(mm))
 		goto err_mmap_read_lock_failed;
-	vma = binder_alloc_get_vma(alloc);
+	vma = find_vma(mm, page_addr);
+	if (vma && vma != binder_alloc_get_vma(alloc))
+		goto err_invalid_vma;
 
 	list_lru_isolate(lru, item);
 	spin_unlock(lock);
@@ -1028,6 +1030,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	mutex_unlock(&alloc->mutex);
 	return LRU_REMOVED_RETRY;
 
+err_invalid_vma:
+	mmap_read_unlock(mm);
 err_mmap_read_lock_failed:
 	mmput_async(mm);
 err_mmget:
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 7ce22d173fc7997408d44bc4b8a7283e5820c305..153150e8beb9493807f838005163624d19c6385d 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -18,6 +18,7 @@
 #include "nvmet.h"
 
 #define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
+#define NVMET_TCP_MAXH2CDATA 0x400000 /* 16M arbitrary limit */
 
 /* Define the socket priority to use for connections were it is desirable
  * that the NIC consider performing optimized packet processing or filtering.
@@ -872,7 +873,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
 	icresp->hdr.pdo = 0;
 	icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
 	icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
-	icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */
+	icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
 	icresp->cpda = 0;
 	if (queue->hdr_digest)
 		icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
@@ -918,6 +919,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
 {
 	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
 	struct nvmet_tcp_cmd *cmd;
+	unsigned int plen;
 
 	if (likely(queue->nr_cmds)) {
 		if (unlikely(data->ttag >= queue->nr_cmds)) {
@@ -941,7 +943,16 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
 		return -EPROTO;
 	}
 
+	plen = le32_to_cpu(data->hdr.plen);
 	cmd->pdu_len = le32_to_cpu(data->data_length);
+	if (unlikely(cmd->pdu_len != (plen - sizeof(*data)) ||
+		     cmd->pdu_len == 0 ||
+		     cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
+		pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
+		/* FIXME: use proper transport errors */
+		nvmet_tcp_fatal_error(queue);
+		return -EPROTO;
+	}
 	cmd->pdu_recv = 0;
 	nvmet_tcp_map_pdu_iovec(cmd);
 	queue->cmd = cmd;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 1b9d54d13ea8ebde0e9022e2c2e031d73aee7fea..a58dded6ce6302cf998f918038a971ce0a92f13d 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -661,9 +661,6 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
 		ext4_commit_super(sb);
 	}
 
-	if (sb_rdonly(sb) || continue_fs)
-		return;
-
 	/*
 	 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
 	 * could panic during 'reboot -f' as the underlying device got already
@@ -673,6 +670,10 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
 		panic("EXT4-fs (device %s): panic forced after error\n",
 			sb->s_id);
 	}
+
+	if (sb_rdonly(sb) || continue_fs)
+		return;
+
 	ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
 	/*
 	 * Make sure updated value of ->s_mount_flags will be visible before
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 98263180c0ead6295c94e6dc0bc5b4f1e6a08056..b460347a69b973832ee828acf1a667882d41545d 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -1074,7 +1074,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	}
 
 	if (old_dir_entry) {
-		if (old_dir != new_dir && !whiteout)
+		if (old_dir != new_dir)
 			f2fs_set_link(old_inode, old_dir_entry,
 						old_dir_page, new_dir);
 		else
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index f44c60114379ed874fe3882634f513a99cea605b..dd50b747b671ee7ada4969e7305eec0ea0afd284 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -741,6 +741,12 @@ static int __f2fs_setxattr(struct inode *inode, int index,
 		memcpy(pval, value, size);
 		last->e_value_size = cpu_to_le16(size);
 		new_hsize += newsize;
+		/*
+		 * Explicitly add the null terminator. The unused xattr space
+		 * is supposed to always be zeroed, which would make this
+		 * unnecessary, but don't depend on that.
+		 */
+		*(u32 *)((u8 *)last + newsize) = 0;
 	}
 
 	error = write_all_xattrs(inode, new_hsize, base_addr, ipage);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2f60dffeeb36fc47ff140c12ac7dee444dc83205..f1deefc9bdcdeefa826f11a7ea8264def98112be 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6162,6 +6162,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	}
 
 	switch (base_type(ptr_reg->type)) {
+	case PTR_TO_FLOW_KEYS:
+		if (known)
+			break;
+		fallthrough;
 	case CONST_PTR_TO_MAP:
 		/* smin_val represents the known value */
 		if (known && smin_val == 0 && opcode == BPF_ADD)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e203172b9b9e7831a623e32917af93d14c6eaf00..d189e3c7a5f598c9c91a747ac409ee7e90771f25 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3873,8 +3873,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 		/* GSO partial only requires that we trim off any excess that
 		 * doesn't fit into an MSS sized block, so take care of that
 		 * now.
+		 * Cap len to not accidentally hit GSO_BY_FRAGS.
 		 */
-		partial_segs = len / mss;
+		partial_segs = min(len, GSO_BY_FRAGS - 1U) / mss;
 		if (partial_segs > 1)
 			mss *= partial_segs;
 		else
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 519656e6858227302c074061863558c56fbc00ca..3d886d409210504197b87ea5a18f5bc21981a7f3 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -696,6 +696,10 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
 
 	tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
 	if (tmpns) {
+		if (!tmpname) {
+			info = "empty profile name";
+			goto fail;
+		}
 		*ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
 		if (!*ns_name) {
 			info = "out of memory";