diff --git a/Documentation/kernel-hacking/locking.rst b/Documentation/kernel-hacking/locking.rst
index 6ed806e6061bba317d67475c67586b4eeab7387c..a6d89efede790faf30ed8f859cb20704c9efc9c5 100644
--- a/Documentation/kernel-hacking/locking.rst
+++ b/Documentation/kernel-hacking/locking.rst
@@ -1358,7 +1358,7 @@ Mutex API reference
 Futex API reference
 ===================
 
-.. kernel-doc:: kernel/futex.c
+.. kernel-doc:: kernel/futex/core.c
    :internal:
 
 Further reading
diff --git a/Documentation/powerpc/associativity.rst b/Documentation/powerpc/associativity.rst
index 07e7dd3d6c87e6ef9501e78340f21e601f94a852..4d01c73685619f70ec649d161eb52f0efefa0aaa 100644
--- a/Documentation/powerpc/associativity.rst
+++ b/Documentation/powerpc/associativity.rst
@@ -1,6 +1,6 @@
 ============================
 NUMA resource associativity
-=============================
+============================
 
 Associativity represents the groupings of the various platform resources into
 domains of substantially similar mean performance relative to resources outside
@@ -20,11 +20,11 @@ A value of 1 indicates the usage of Form 1 associativity. For Form 2 associativi
 bit 2 of byte 5 in the "ibm,architecture-vec-5" property is used.
 
 Form 0
------
+------
 Form 0 associativity supports only two NUMA distances (LOCAL and REMOTE).
 
 Form 1
------
+------
 With Form 1 a combination of ibm,associativity-reference-points, and ibm,associativity
 device tree properties are used to determine the NUMA distance between resource groups/domains.
@@ -78,17 +78,18 @@ numa-lookup-index-table.
 
 For ex:
 ibm,numa-lookup-index-table = <3 0 8 40>;
-ibm,numa-distace-table = <9>, /bits/ 8 < 10 20 80
-					 20 10 160
-					 80 160 10>;
-  | 0    8   40
---|------------
-  |
-0 | 10   20  80
-  |
-8 | 20   10  160
-  |
-40| 80   160  10
+ibm,numa-distace-table = <9>, /bits/ 8 < 10 20 80 20 10 160 80 160 10>;
+
+::
+
+	  | 0    8   40
+	--|------------
+	  |
+	0 | 10   20  80
+	  |
+	8 | 20   10  160
+	  |
+	40| 80   160  10
 
 A possible "ibm,associativity" property for resources in node 0, 8 and 40
diff --git a/Documentation/powerpc/index.rst b/Documentation/powerpc/index.rst
index 6ec64b0d5257447d651f7272f08d35ec9f846b5b..4663b72caab8b76b8ba443532f60d03f8d6a7f02 100644
--- a/Documentation/powerpc/index.rst
+++ b/Documentation/powerpc/index.rst
@@ -7,6 +7,7 @@ powerpc
 .. toctree::
    :maxdepth: 1
 
+   associativity
    booting
    bootwrapper
    cpu_families
diff --git a/Documentation/translations/it_IT/kernel-hacking/locking.rst b/Documentation/translations/it_IT/kernel-hacking/locking.rst
index bf1acd6204efab7fdde5e265803094180ec29b1a..192ab8e281252909c15cebd83d932306a13dd7b7 100644
--- a/Documentation/translations/it_IT/kernel-hacking/locking.rst
+++ b/Documentation/translations/it_IT/kernel-hacking/locking.rst
@@ -1400,7 +1400,7 @@ Riferimento per l'API dei Mutex
 Riferimento per l'API dei Futex
 ===============================
 
-.. kernel-doc:: kernel/futex.c
+.. kernel-doc:: kernel/futex/core.c
    :internal:
 
 Approfondimenti
diff --git a/Makefile b/Makefile
index 1ccd4af54a450f46ed04d9d9955ebbd1f9182c85..1a4592e1d1f4c8f640b57020f870b681b160b171 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 178
+SUBLEVEL = 179
 EXTRAVERSION =
 NAME = Dare mighty things
diff --git a/README.OpenSource b/README.OpenSource
index 5fb9dd500cc3d569300a7a6f127789a0efddd3fc..71a2928bd5f5322d4c7dc71dff02c6df6c93565d 100644
--- a/README.OpenSource
+++ b/README.OpenSource
@@ -3,7 +3,7 @@
     "Name": "linux-5.10",
     "License": "GPL-2.0+",
     "License File": "COPYING",
-    "Version Number": "5.10.178",
+    "Version Number": "5.10.179",
     "Owner": "liuyu82@huawei.com",
     "Upstream URL": "https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/log/?h=linux-5.10.y",
     "Description": "linux kernel 5.10"
diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
index 20ba5136ac3ddab57ff5858a2825efa41925de54..32bb26be8a9b1fbce778c9700eca0d08ef75d2e7 100644
--- a/arch/arm64/kvm/psci.c
+++ b/arch/arm64/kvm/psci.c
@@ -499,6 +499,8 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	u64 val;
 	int wa_level;
 
+	if (KVM_REG_SIZE(reg->id) != sizeof(val))
+		return -ENOENT;
 	if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
 		return -EFAULT;
diff --git a/arch/mips/fw/lib/cmdline.c b/arch/mips/fw/lib/cmdline.c
index f24cbb4a39b506ad015c15cbd3773b2502b4dc1e..892765b742bbca9ef704f0686e679fb349c5c274 100644
--- a/arch/mips/fw/lib/cmdline.c
+++ b/arch/mips/fw/lib/cmdline.c
@@ -53,7 +53,7 @@ char *fw_getenv(char *envname)
 {
 	char *result = NULL;
 
-	if (_fw_envp != NULL) {
+	if (_fw_envp != NULL && fw_envp(0) != NULL) {
 		/*
 		 * Return a pointer to the given environment variable.
 		 * YAMON uses "name", "value" pairs, while U-Boot uses
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 09fa4705ce8eb34ca53ecc5ce6f2785e62bf5e37..64afe075df0894eff2b2eeea93c099400fab261f 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -15,6 +15,8 @@
 #define EMITS_PT_NOTE
 #endif
 
+#define RUNTIME_DISCARD_EXIT
+
 #include <asm-generic/vmlinux.lds.h>
 
 #undef mips
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index a76dd27fb2e819330d384021d2eab8ba6a5a6836..3009bb52725242a795471710abd32b6a5a80f8ef 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -500,9 +500,7 @@ long arch_ptrace(struct task_struct *child, long request,
 		}
 		return 0;
 	case PTRACE_GET_LAST_BREAK:
-		put_user(child->thread.last_break,
-			 (unsigned long __user *) data);
-		return 0;
+		return put_user(child->thread.last_break, (unsigned long __user *)data);
 	case PTRACE_ENABLE_TE:
 		if (!MACHINE_HAS_TE)
 			return -EIO;
@@ -854,9 +852,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 		}
 		return 0;
 	case PTRACE_GET_LAST_BREAK:
-		put_user(child->thread.last_break,
-			 (unsigned int __user *) data);
-		return 0;
+		return put_user(child->thread.last_break, (unsigned int __user *)data);
 	}
 	return compat_ptrace_request(child, request, addr, data);
 }
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 571220ac8beaa7020cb1a98a41ffb0e79c759484..835b948095cde770c82ac9a910dd60db26571159 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -25,17 +25,7 @@
  */
 union fpregs_state init_fpstate __read_mostly;
 
-/*
- * Track whether the kernel is using the FPU state
- * currently.
- *
- * This flag is used:
- *
- *   - by IRQ context code to potentially use the FPU
- *     if it's unused.
- *
- *   - to debug kernel_fpu_begin()/end() correctness
- */
+/* Track in-kernel FPU usage */
 static DEFINE_PER_CPU(bool, in_kernel_fpu);
 
 /*
@@ -43,42 +33,37 @@ static DEFINE_PER_CPU(bool, in_kernel_fpu);
  */
 DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
 
-static bool kernel_fpu_disabled(void)
-{
-	return this_cpu_read(in_kernel_fpu);
-}
-
-static bool interrupted_kernel_fpu_idle(void)
-{
-	return !kernel_fpu_disabled();
-}
-
-/*
- * Were we in user mode (or vm86 mode) when we were
- * interrupted?
- *
- * Doing kernel_fpu_begin/end() is ok if we are running
- * in an interrupt context from user mode - we'll just
- * save the FPU state as required.
- */
-static bool interrupted_user_mode(void)
-{
-	struct pt_regs *regs = get_irq_regs();
-	return regs && user_mode(regs);
-}
-
 /*
  * Can we use the FPU in kernel mode with the
  * whole "kernel_fpu_begin/end()" sequence?
- *
- * It's always ok in process context (ie "not interrupt")
- * but it is sometimes ok even from an irq.
  */
 bool irq_fpu_usable(void)
 {
-	return !in_interrupt() ||
-		interrupted_user_mode() ||
-		interrupted_kernel_fpu_idle();
+	if (WARN_ON_ONCE(in_nmi()))
+		return false;
+
+	/* In kernel FPU usage already active? */
+	if (this_cpu_read(in_kernel_fpu))
+		return false;
+
+	/*
+	 * When not in NMI or hard interrupt context, FPU can be used in:
+	 *
+	 * - Task context except from within fpregs_lock()'ed critical
+	 *   regions.
+	 *
+	 * - Soft interrupt processing context which cannot happen
+	 *   while in a fpregs_lock()'ed critical region.
+	 */
+	if (!in_irq())
+		return true;
+
+	/*
+	 * In hard interrupt context it's safe when soft interrupts
+	 * are enabled, which means the interrupt did not hit in
+	 * a fpregs_lock()'ed critical region.
+	 */
+	return !softirq_count();
 }
 EXPORT_SYMBOL(irq_fpu_usable);
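[Editorial sketch, not part of the patch: the caller pattern irq_fpu_usable() gates. kernel_fpu_begin()/kernel_fpu_end() and irq_fpu_usable() are the real x86 <asm/fpu/api.h> interfaces; the my_checksum_update_* helpers are hypothetical stand-ins.]

#include <asm/fpu/api.h>

void my_checksum_update_scalar(void *buf, unsigned int len);	/* hypothetical fallback */
void my_checksum_update_simd(void *buf, unsigned int len);	/* hypothetical SIMD path */

static void my_checksum_update(void *buf, unsigned int len)
{
	if (!irq_fpu_usable()) {
		/* e.g. a hard interrupt landed inside a fpregs_lock()'ed region */
		my_checksum_update_scalar(buf, len);
		return;
	}

	kernel_fpu_begin();	/* disables preemption, saves FPU state as needed */
	my_checksum_update_simd(buf, len);
	kernel_fpu_end();
}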
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 16943e923902313d96b5b366a00cf321869d6a4a..9aedc7b06da7ab3dc9561df31be849ac435c9bf1 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7537,6 +7537,21 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
 		/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
 		break;
 
+	case x86_intercept_pause:
+		/*
+		 * PAUSE is a single-byte NOP with a REPE prefix, i.e. collides
+		 * with vanilla NOPs in the emulator.  Apply the interception
+		 * check only to actual PAUSE instructions.  Don't check
+		 * PAUSE-loop-exiting, software can't expect a given PAUSE to
+		 * exit, i.e. KVM is within its rights to allow L2 to execute
+		 * the PAUSE.
+		 */
+		if ((info->rep_prefix != REPE_PREFIX) ||
+		    !nested_cpu_has2(vmcs12, CPU_BASED_PAUSE_EXITING))
+			return X86EMUL_CONTINUE;
+
+		break;
+
 	/* TODO: check more intercepts... */
 	default:
 		break;
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 95ea17a9d20cb723ea06b65104b2a9110dace921..ebaf329a2368856fb6de73e56ea1bcef92913932 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -64,8 +64,7 @@ CFLAGS_sha256.o			+= $(PURGATORY_CFLAGS)
 CFLAGS_REMOVE_string.o		+= $(PURGATORY_CFLAGS_REMOVE)
 CFLAGS_string.o			+= $(PURGATORY_CFLAGS)
 
-AFLAGS_REMOVE_setup-x86_$(BITS).o	+= -Wa,-gdwarf-2
-AFLAGS_REMOVE_entry64.o			+= -Wa,-gdwarf-2
+asflags-remove-y		+= -g -Wa,-gdwarf-2
 
 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
 		$(call if_changed,ld)
diff --git a/block/blk-core.c b/block/blk-core.c
index 593ba10475cb3404baa3128b0778a6d370d20339..3c819bda92a5bcdd81f1ded42e622326517059a6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1435,6 +1435,13 @@ bool blk_update_request(struct request *req, blk_status_t error,
 		req->q->integrity.profile->complete_fn(req, nr_bytes);
 #endif
 
+	/*
+	 * Upper layers may call blk_crypto_evict_key() anytime after the last
+	 * bio_endio().  Therefore, the keyslot must be released before that.
+	 */
+	if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
+		__blk_crypto_rq_put_keyslot(req);
+
 	if (unlikely(error && !blk_rq_is_passthrough(req) &&
 		     !(req->rq_flags & RQF_QUIET)))
 		print_req_error(req, error, __func__);
diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h
index 0d36aae538d7b84e4b877c1cfd1b1e54b5953f7b..8e0834557620387b82b18549e9e10dc7c9279fda 100644
--- a/block/blk-crypto-internal.h
+++ b/block/blk-crypto-internal.h
@@ -60,6 +60,11 @@ static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
 	return rq->crypt_ctx;
 }
 
+static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
+{
+	return rq->crypt_keyslot;
+}
+
 #else /* CONFIG_BLK_INLINE_ENCRYPTION */
 
 static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
@@ -93,6 +98,11 @@ static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
 	return false;
 }
 
+static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
+{
+	return false;
+}
+
 #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
 
 void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
@@ -127,14 +137,21 @@ static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
 	return true;
 }
 
-blk_status_t __blk_crypto_init_request(struct request *rq);
-static inline blk_status_t blk_crypto_init_request(struct request *rq)
+blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq);
+static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq)
 {
 	if (blk_crypto_rq_is_encrypted(rq))
-		return __blk_crypto_init_request(rq);
+		return __blk_crypto_rq_get_keyslot(rq);
 	return BLK_STS_OK;
 }
 
+void __blk_crypto_rq_put_keyslot(struct request *rq);
+static inline void blk_crypto_rq_put_keyslot(struct request *rq)
+{
+	if (blk_crypto_rq_has_keyslot(rq))
+		__blk_crypto_rq_put_keyslot(rq);
+}
+
 void __blk_crypto_free_request(struct request *rq);
 static inline void blk_crypto_free_request(struct request *rq)
 {
@@ -173,7 +190,7 @@ static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
 {
 	if (blk_crypto_rq_is_encrypted(rq))
-		return blk_crypto_init_request(rq);
+		return blk_crypto_rq_get_keyslot(rq);
 	return BLK_STS_OK;
 }
diff --git a/block/blk-crypto.c b/block/blk-crypto.c
index 5ffa9aab49de09003d40e5adafdb7063eb74c55e..87ec55d4354f595c23aea00fc3ba72f5bef29037 100644
--- a/block/blk-crypto.c
+++ b/block/blk-crypto.c
@@ -13,6 +13,7 @@
 #include <linux/blkdev.h>
 #include <linux/keyslot-manager.h>
 #include <linux/module.h>
+#include <linux/ratelimit.h>
 #include <linux/slab.h>
 
 #include "blk-crypto-internal.h"
@@ -216,26 +217,26 @@ static bool bio_crypt_check_alignment(struct bio *bio)
 	return true;
 }
 
-blk_status_t __blk_crypto_init_request(struct request *rq)
+blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
 {
 	return blk_ksm_get_slot_for_key(rq->q->ksm, rq->crypt_ctx->bc_key,
 					&rq->crypt_keyslot);
 }
 
-/**
- * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
- *
- * @rq: The request whose crypto fields to uninitialize.
- *
- * Completely uninitializes the crypto fields of a request. If a keyslot has
- * been programmed into some inline encryption hardware, that keyslot is
- * released. The rq->crypt_ctx is also freed.
- */
-void __blk_crypto_free_request(struct request *rq)
+void __blk_crypto_rq_put_keyslot(struct request *rq)
 {
 	blk_ksm_put_slot(rq->crypt_keyslot);
+	rq->crypt_keyslot = NULL;
+}
+
+void __blk_crypto_free_request(struct request *rq)
+{
+	/* The keyslot, if one was needed, should have been released earlier. */
+	if (WARN_ON_ONCE(rq->crypt_keyslot))
+		__blk_crypto_rq_put_keyslot(rq);
+
 	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
-	blk_crypto_rq_set_defaults(rq);
+	rq->crypt_ctx = NULL;
 }
 
 /**
@@ -384,28 +385,38 @@ int blk_crypto_start_using_key(const struct blk_crypto_key *key,
 }
 
 /**
- * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
- *			    it may have been programmed into
- * @q: The request queue who's associated inline encryption hardware this key
- *     might have been programmed into
- * @key: The key to evict
+ * blk_crypto_evict_key() - Evict a blk_crypto_key from a request_queue
+ * @q: a request_queue on which I/O using the key may have been done
+ * @key: the key to evict
  *
- * Upper layers (filesystems) must call this function to ensure that a key is
- * evicted from any hardware that it might have been programmed into.  The key
- * must not be in use by any in-flight IO when this function is called.
+ * For a given request_queue, this function removes the given blk_crypto_key
+ * from the keyslot management structures and evicts it from any underlying
+ * hardware keyslot(s) or blk-crypto-fallback keyslot it may have been
+ * programmed into.
  *
- * Return: 0 on success or if key is not present in the q's ksm, -err on error.
+ * Upper layers must call this before freeing the blk_crypto_key.  It must be
+ * called for every request_queue the key may have been used on.  The key must
+ * no longer be in use by any I/O when this function is called.
+ *
+ * Context: May sleep.
  */
-int blk_crypto_evict_key(struct request_queue *q,
-			 const struct blk_crypto_key *key)
+void blk_crypto_evict_key(struct request_queue *q,
+			  const struct blk_crypto_key *key)
 {
-	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
-		return blk_ksm_evict_key(q->ksm, key);
+	int err;
 
+	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
+		err = blk_ksm_evict_key(q->ksm, key);
+	else
+		err = blk_crypto_fallback_evict_key(key);
 	/*
-	 * If the request queue's associated inline encryption hardware didn't
-	 * have support for the key, then the key might have been programmed
-	 * into the fallback keyslot manager, so try to evict from there.
+	 * An error can only occur here if the key failed to be evicted from a
+	 * keyslot (due to a hardware or driver issue) or is allegedly still in
+	 * use by I/O (due to a kernel bug).  Even in these cases, the key is
+	 * still unlinked from the keyslot management structures, and the caller
+	 * is allowed and expected to free it right away.  There's nothing
+	 * callers can do to handle errors, so just log them and return void.
 	 */
-	return blk_crypto_fallback_evict_key(key);
+	if (err)
+		pr_warn_ratelimited("error %d evicting key\n", err);
 }
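[Editorial sketch, not part of the patch: the calling convention the new kernel-doc describes, loosely modeled on what an fscrypt-like upper layer does. The my_key_slot container and my_destroy_key() are hypothetical; blk_crypto_evict_key() is the real <linux/blk-crypto.h> interface as modified here, so there is no error for the caller to handle.]

#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

struct my_key_slot {				/* hypothetical container */
	struct blk_crypto_key blk_key;
};

static void my_destroy_key(struct request_queue *q, struct my_key_slot *slot)
{
	/*
	 * All I/O using the key must already be drained.  Even if eviction
	 * fails internally, the key is unlinked from the keyslot manager,
	 * so freeing it immediately afterwards is safe.
	 */
	blk_crypto_evict_key(q, &slot->blk_key);
	kfree_sensitive(slot);			/* zeroes the raw key material */
}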
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 0e0d36621d754e5deba7ea90f351715d1a5de172..ff3ce1a3ee2ac8ae12c32126a5034bc771f1526c 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -801,6 +801,8 @@ static struct request *attempt_merge(struct request_queue *q,
 	if (!blk_discard_mergable(req))
 		elv_merge_requests(q, req, next);
 
+	blk_crypto_rq_put_keyslot(next);
+
 	/*
 	 * 'next' is going away, so update stats accordingly
 	 */
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 02ca17c575c1a276d40e482cbc535620ca09ecd3..ad47eb93b2661cbc129859b3f07e7151bb60d895 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2209,7 +2209,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 
 	blk_mq_bio_to_request(rq, bio, nr_segs);
 
-	ret = blk_crypto_init_request(rq);
+	ret = blk_crypto_rq_get_keyslot(rq);
 	if (ret != BLK_STS_OK) {
 		bio->bi_status = ret;
 		bio_endio(bio);
diff --git a/block/keyslot-manager.c b/block/keyslot-manager.c
index 86f8195d8039e5da6e5465277a9e22a692524ac8..17a1f1ba44efc0c8be9d11ebaa08c155c665c8ab 100644
--- a/block/keyslot-manager.c
+++ b/block/keyslot-manager.c
@@ -305,44 +305,43 @@ bool blk_ksm_crypto_cfg_supported(struct blk_keyslot_manager *ksm,
 	return true;
 }
 
-/**
- * blk_ksm_evict_key() - Evict a key from the lower layer device.
- * @ksm: The keyslot manager to evict from
- * @key: The key to evict
- *
- * Find the keyslot that the specified key was programmed into, and evict that
- * slot from the lower layer device. The slot must not be in use by any
- * in-flight IO when this function is called.
- *
- * Context: Process context. Takes and releases ksm->lock.
- * Return: 0 on success or if there's no keyslot with the specified key, -EBUSY
- *	   if the keyslot is still in use, or another -errno value on other
- *	   error.
+/*
+ * This is an internal function that evicts a key from an inline encryption
+ * device that can be either a real device or the blk-crypto-fallback "device".
+ * It is used only by blk_crypto_evict_key(); see that function for details.
  */
 int blk_ksm_evict_key(struct blk_keyslot_manager *ksm,
 		      const struct blk_crypto_key *key)
 {
 	struct blk_ksm_keyslot *slot;
-	int err = 0;
+	int err;
 
 	blk_ksm_hw_enter(ksm);
 	slot = blk_ksm_find_keyslot(ksm, key);
-	if (!slot)
-		goto out_unlock;
+	if (!slot) {
+		/*
+		 * Not an error, since a key not in use by I/O is not guaranteed
+		 * to be in a keyslot.  There can be more keys than keyslots.
+		 */
+		err = 0;
+		goto out;
+	}
 
 	if (WARN_ON_ONCE(atomic_read(&slot->slot_refs) != 0)) {
+		/* BUG: key is still in use by I/O */
 		err = -EBUSY;
-		goto out_unlock;
+		goto out_remove;
 	}
 
 	err = ksm->ksm_ll_ops.keyslot_evict(ksm, key,
 					    blk_ksm_get_slot_idx(slot));
-	if (err)
-		goto out_unlock;
-
+out_remove:
+	/*
+	 * Callers free the key even on error, so unlink the key from the hash
+	 * table and clear slot->key even on error.
+	 */
 	hlist_del(&slot->hash_node);
 	slot->key = NULL;
-	err = 0;
-out_unlock:
+out:
 	blk_ksm_hw_exit(ksm);
 	return err;
 }
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 9de27daa98b47b7c8503c5615dd5f4c2fea9fb60..42dca17dc2d97cd6be9c2cc198ff9583035bd629 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -456,7 +456,9 @@ void crypto_unregister_alg(struct crypto_alg *alg)
 	if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name))
 		return;
 
-	BUG_ON(refcount_read(&alg->cra_refcnt) != 1);
+	if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1))
+		return;
+
 	if (alg->cra_destroy)
 		alg->cra_destroy(alg);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index cf922b88bc90a7372927e17b35300922e5a29a19..298daad1f86d06bd6c71c4593e17d7246fc9734f 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -527,7 +527,8 @@ static const struct attribute_group *cpu_root_attr_groups[] = {
 bool cpu_is_hotpluggable(unsigned cpu)
 {
 	struct device *dev = get_cpu_device(cpu);
-	return dev && container_of(dev, struct cpu, dev)->hotpluggable;
+	return dev && container_of(dev, struct cpu, dev)->hotpluggable
+		&& tick_nohz_cpu_hotpluggable(cpu);
 }
 EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 497e3d4255c41acd904966d34da76fc658631c19..503c01d4015dcb5af74d78bb8677ecb622fe0099 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -677,7 +677,12 @@ static int really_probe_debug(struct device *dev, struct device_driver *drv)
 	calltime = ktime_get();
 	ret = really_probe(dev, drv);
 	rettime = ktime_get();
-	pr_debug("probe of %s returned %d after %lld usecs\n",
+	/*
+	 * Don't change this to pr_debug() because that requires
+	 * CONFIG_DYNAMIC_DEBUG and we want a simple 'initcall_debug' on the
+	 * kernel commandline to print this all the time at the debug level.
+	 */
+	printk(KERN_DEBUG "probe of %s returned %d after %lld usecs\n",
 		 dev_name(dev), ret, ktime_us_delta(rettime, calltime));
 	return ret;
 }
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 20dc2452815c70ff45c7fa0d6ac6e75b20768fad..a3745fa643f3b206a60c2d8f4a446faee61c7125 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -564,8 +564,10 @@ static void retry_timeout(struct timer_list *t)
 	if (waiting)
 		start_get(ssif_info);
-	if (resend)
+	if (resend) {
 		start_resend(ssif_info);
+		ssif_inc_stat(ssif_info, send_retries);
+	}
 }
 
 static void watch_timeout(struct timer_list *t)
@@ -792,9 +794,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 	} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
 		   || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
 		/*
-		 * Don't abort here, maybe it was a queued
-		 * response to a previous command.
+		 * Recv error response, give up.
 		 */
+		ssif_info->ssif_state = SSIF_IDLE;
 		ipmi_ssif_unlock_cond(ssif_info, flags);
 		dev_warn(&ssif_info->client->dev,
 			 "Invalid response getting flags: %x %x\n",
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index 21bb2bb767a1e2df61844f29e7012253eef2c595..89c9cb850a34cb1c699a83d76475b1830fd52722 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -62,10 +62,6 @@ struct quad8_iio {
 #define QUAD8_REG_CHAN_OP 0x11
 #define QUAD8_REG_INDEX_INPUT_LEVELS 0x16
 #define QUAD8_DIFF_ENCODER_CABLE_STATUS 0x17
-/* Borrow Toggle flip-flop */
-#define QUAD8_FLAG_BT BIT(0)
-/* Carry Toggle flip-flop */
-#define QUAD8_FLAG_CT BIT(1)
 /* Error flag */
 #define QUAD8_FLAG_E BIT(4)
 /* Up/Down flag */
@@ -104,9 +100,6 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
 {
 	struct quad8_iio *const priv = iio_priv(indio_dev);
 	const int base_offset = priv->base + 2 * chan->channel;
-	unsigned int flags;
-	unsigned int borrow;
-	unsigned int carry;
 	int i;
 
 	switch (mask) {
@@ -117,12 +110,7 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
 			return IIO_VAL_INT;
 		}
 
-		flags = inb(base_offset + 1);
-		borrow = flags & QUAD8_FLAG_BT;
-		carry = !!(flags & QUAD8_FLAG_CT);
-
-		/* Borrow XOR Carry effectively doubles count range */
-		*val = (borrow ^ carry) << 24;
+		*val = 0;
 
 		mutex_lock(&priv->lock);
@@ -643,17 +631,9 @@ static int quad8_count_read(struct counter_device *counter,
 {
 	struct quad8_iio *const priv = counter->priv;
 	const int base_offset = priv->base + 2 * count->id;
-	unsigned int flags;
-	unsigned int borrow;
-	unsigned int carry;
 	int i;
 
-	flags = inb(base_offset + 1);
-	borrow = flags & QUAD8_FLAG_BT;
-	carry = !!(flags & QUAD8_FLAG_CT);
-
-	/* Borrow XOR Carry effectively doubles count range */
-	*val = (unsigned long)(borrow ^ carry) << 24;
+	*val = 0;
 
 	mutex_lock(&priv->lock);
@@ -1198,8 +1178,8 @@ static ssize_t quad8_count_ceiling_read(struct counter_device *counter,
 
 	mutex_unlock(&priv->lock);
 
-	/* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
-	return sprintf(buf, "33554431\n");
+	/* By default 0xFFFFFF (24 bits unsigned) is maximum count */
+	return sprintf(buf, "16777215\n");
 }
 
 static ssize_t quad8_count_ceiling_write(struct counter_device *counter,
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index fbcf52e46d1794c9922c792daca14c2e51684611..7de9b9d20de02f662a49d8538a5ce99bcf76a1b0 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1634,19 +1634,23 @@ static int safexcel_probe_generic(void *pdev,
 						     &priv->ring[i].rdr);
 		if (ret) {
 			dev_err(dev, "Failed to initialize rings\n");
-			return ret;
+			goto err_cleanup_rings;
 		}
 
 		priv->ring[i].rdr_req = devm_kcalloc(dev,
 			EIP197_DEFAULT_RING_SIZE,
 			sizeof(*priv->ring[i].rdr_req),
 			GFP_KERNEL);
-		if (!priv->ring[i].rdr_req)
-			return -ENOMEM;
+		if (!priv->ring[i].rdr_req) {
+			ret = -ENOMEM;
+			goto err_cleanup_rings;
+		}
 
 		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
-		if (!ring_irq)
-			return -ENOMEM;
+		if (!ring_irq) {
+			ret = -ENOMEM;
+			goto err_cleanup_rings;
+		}
 
 		ring_irq->priv = priv;
 		ring_irq->ring = i;
@@ -1660,7 +1664,8 @@ static int safexcel_probe_generic(void *pdev,
 				     ring_irq);
 		if (irq < 0) {
 			dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
-			return irq;
+			ret = irq;
+			goto err_cleanup_rings;
 		}
 
 		priv->ring[i].irq = irq;
@@ -1672,8 +1677,10 @@ static int safexcel_probe_generic(void *pdev,
 		snprintf(wq_name, 9, "wq_ring%d", i);
 		priv->ring[i].workqueue =
 			create_singlethread_workqueue(wq_name);
-		if (!priv->ring[i].workqueue)
-			return -ENOMEM;
+		if (!priv->ring[i].workqueue) {
+			ret = -ENOMEM;
+			goto err_cleanup_rings;
+		}
 
 		priv->ring[i].requests = 0;
 		priv->ring[i].busy = false;
@@ -1690,16 +1697,26 @@ static int safexcel_probe_generic(void *pdev,
 	ret = safexcel_hw_init(priv);
 	if (ret) {
 		dev_err(dev, "HW init failed (%d)\n", ret);
-		return ret;
+		goto err_cleanup_rings;
 	}
 
 	ret = safexcel_register_algorithms(priv);
 	if (ret) {
 		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
-		return ret;
+		goto err_cleanup_rings;
 	}
 
 	return 0;
+
+err_cleanup_rings:
+	for (i = 0; i < priv->config.rings; i++) {
+		if (priv->ring[i].irq)
+			irq_set_affinity_hint(priv->ring[i].irq, NULL);
+		if (priv->ring[i].workqueue)
+			destroy_workqueue(priv->ring[i].workqueue);
+	}
+
+	return ret;
 }
 
 static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index ac5d61e65124ea345c0e034e832db456a1d0a788..04f2ec2254e9f0074a0945a1a2a03d4839dc66fe 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1299,6 +1299,9 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
 		return -EINVAL;
 	}
 
+	var->xres_virtual = fb->width;
+	var->yres_virtual = fb->height;
+
 	/*
 	 * Workaround for SDL 1.2, which is known to be setting all pixel format
 	 * fields values to zero in some cases. We treat this situation as a
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 6b84822e7d93b0c96084b8eaf19a18d952ab6677..22e314725def0b9499d90f935b4cb88b3397a545 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -1515,9 +1515,9 @@ static int adt7475_set_pwm_polarity(struct i2c_client *client)
 	int ret, i;
 	u8 val;
 
-	ret = of_property_read_u32_array(client->dev.of_node,
-					 "adi,pwm-active-state", states,
-					 ARRAY_SIZE(states));
+	ret = device_property_read_u32_array(&client->dev,
+					     "adi,pwm-active-state", states,
+					     ARRAY_SIZE(states));
 	if (ret)
 		return ret;
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 3bc2551577a30f5b432b8d63f66cb607f288f2dc..74a67089ad0737d40bef4f858bae8ae92247f28b 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -74,6 +74,7 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
 
 #define ZEN_CUR_TEMP_SHIFT			21
 #define ZEN_CUR_TEMP_RANGE_SEL_MASK		BIT(19)
+#define ZEN_CUR_TEMP_TJ_SEL_MASK		GENMASK(17, 16)
 
 #define ZEN_SVI_BASE				0x0005A000
@@ -173,7 +174,8 @@ static long get_raw_temp(struct k10temp_data *data)
 
 	data->read_tempreg(data->pdev, &regval);
 	temp = (regval >> ZEN_CUR_TEMP_SHIFT) * 125;
-	if (regval & data->temp_adjust_mask)
+	if ((regval & data->temp_adjust_mask) ||
+	    (regval & ZEN_CUR_TEMP_TJ_SEL_MASK) == ZEN_CUR_TEMP_TJ_SEL_MASK)
 		temp -= 49000;
 	return temp;
 }
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 250b78ee162519d7a13c53046e1400c451e4e31a..b806c1ab9b618ca623fd1d02ae888f3ec6b164b1 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -1002,7 +1002,7 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio,
 	trig = devm_iio_trigger_alloc(&indio->dev, "%s-dev%d-%s", indio->name,
 				      indio->id, trigger_name);
 	if (!trig)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	trig->dev.parent = indio->dev.parent;
 	iio_trigger_set_drvdata(trig, indio);
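[Editorial sketch, not part of the patch: the ERR_PTR convention the at91 fix relies on. Callers test the result with IS_ERR(), so returning NULL on allocation failure was silently treated as success. struct foo and the function names here are hypothetical; ERR_PTR/IS_ERR/PTR_ERR are the real <linux/err.h> helpers.]

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int dummy; };			/* hypothetical object */

static struct foo *foo_create(void)		/* hypothetical allocator */
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);	/* encode the errno, never NULL */
	return f;
}

static int foo_probe(void)
{
	struct foo *f = foo_create();

	if (IS_ERR(f))
		return PTR_ERR(f);		/* propagates -ENOMEM */
	/* ... use f ... */
	return 0;
}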
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index f4756671cddb669ea1c94be421bd3cb77d0391a6..6ed0d151ad21a7f138d3ffd9a712367089a30416 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -628,7 +628,7 @@ static int palmas_gpadc_probe(struct platform_device *pdev)
 
 static int palmas_gpadc_remove(struct platform_device *pdev)
 {
-	struct iio_dev *indio_dev = dev_to_iio_dev(&pdev->dev);
+	struct iio_dev *indio_dev = dev_get_drvdata(&pdev->dev);
 	struct palmas_gpadc *adc = iio_priv(indio_dev);
 
 	if (adc->wakeup1_enable || adc->wakeup2_enable)
diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c
index d79205361dfac52cd4405e29e1d77bcea631972d..ff33ad3714206ccd8a1805030579fa0a2cd802b1 100644
--- a/drivers/iio/light/tsl2772.c
+++ b/drivers/iio/light/tsl2772.c
@@ -606,6 +606,7 @@ static int tsl2772_read_prox_diodes(struct tsl2772_chip *chip)
 			return -EINVAL;
 		}
 	}
+	chip->settings.prox_diode = prox_diode_mask;
 
 	return 0;
 }
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 65c0081838e3ddc326fd3ee6beb74f8393a2381c..9dcdf21c50bdcd09dc0f610b4414e984f6d8350b 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -601,6 +601,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
 		},
 		.driver_data = (void *)(SERIO_QUIRK_NOMUX)
 	},
+	{
+		/* Fujitsu Lifebook A574/H */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "FMVA0501PZ"),
+		},
+		.driver_data = (void *)(SERIO_QUIRK_NOMUX)
+	},
 	{
 		/* Gigabyte M912 */
 		.matches = {
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 12bc3f5a6cbbd594c56013526b973659d0defe9a..1c7a9dcfed658975a4fd0e4c86ab8c20d26adc39 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -412,6 +412,7 @@ static struct memstick_dev *memstick_alloc_card(struct memstick_host *host)
 	return card;
 err_out:
 	host->card = old_card;
+	kfree_const(card->dev.kobj.name);
 	kfree(card);
 	return NULL;
 }
@@ -470,8 +471,10 @@ static void memstick_check(struct work_struct *work)
 			put_device(&card->dev);
 			host->card = NULL;
 		}
-	} else
+	} else {
+		kfree_const(card->dev.kobj.name);
 		kfree(card);
+	}
 
 out_power_off:
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index bf2592774165bf37c91d52b974b1c0602cfe8e6d..8e52905458f9cd649a03e500af2e8d0e3f8e7b28 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -351,8 +351,6 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
 		 */
 		case MMC_TIMING_SD_HS:
 		case MMC_TIMING_MMC_HS:
-		case MMC_TIMING_UHS_SDR12:
-		case MMC_TIMING_UHS_SDR25:
 			val &= ~SDHCI_CTRL_HISPD;
 		}
 	}
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index c628d0980c0b1ffda5cb1f1dfb94a0797f533ef6..1d52cb3e46d523791a9f8ff7b119bb932d26d3af 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -215,6 +215,18 @@ static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg,
 	return 0;
 }
 
+static int b53_mmap_phy_read16(struct b53_device *dev, int addr, int reg,
+			       u16 *value)
+{
+	return -EIO;
+}
+
+static int b53_mmap_phy_write16(struct b53_device *dev, int addr, int reg,
+				u16 value)
+{
+	return -EIO;
+}
+
 static const struct b53_io_ops b53_mmap_ops = {
 	.read8 = b53_mmap_read8,
 	.read16 = b53_mmap_read16,
@@ -226,6 +238,8 @@ static const struct b53_io_ops b53_mmap_ops = {
 	.write32 = b53_mmap_write32,
 	.write48 = b53_mmap_write48,
 	.write64 = b53_mmap_write64,
+	.phy_read16 = b53_mmap_phy_read16,
+	.phy_write16 = b53_mmap_phy_write16,
 };
 
 static int b53_mmap_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index ae0c9aaab48db9846dc27e3c81f0244734377775..b700663a634d24f0f778fccc03b24cda392326f2 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5294,31 +5294,6 @@ static void e1000_watchdog_task(struct work_struct *work)
 				ew32(TARC(0), tarc0);
 			}
 
-			/* disable TSO for pcie and 10/100 speeds, to avoid
-			 * some hardware issues
-			 */
-			if (!(adapter->flags & FLAG_TSO_FORCE)) {
-				switch (adapter->link_speed) {
-				case SPEED_10:
-				case SPEED_100:
-					e_info("10/100 speed: disabling TSO\n");
-					netdev->features &= ~NETIF_F_TSO;
-					netdev->features &= ~NETIF_F_TSO6;
-					break;
-				case SPEED_1000:
-					netdev->features |= NETIF_F_TSO;
-					netdev->features |= NETIF_F_TSO6;
-					break;
-				default:
-					/* oops */
-					break;
-				}
-				if (hw->mac.type == e1000_pch_spt) {
-					netdev->features &= ~NETIF_F_TSO;
-					netdev->features &= ~NETIF_F_TSO6;
-				}
-			}
-
 			/* enable transmits in the hardware, need to do this
 			 * after setting TARC(0)
 			 */
@@ -7477,6 +7452,32 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			    NETIF_F_RXCSUM |
 			    NETIF_F_HW_CSUM);
 
+	/* disable TSO for pcie and 10/100 speeds to avoid
+	 * some hardware issues and for i219 to fix transfer
+	 * speed being capped at 60%
+	 */
+	if (!(adapter->flags & FLAG_TSO_FORCE)) {
+		switch (adapter->link_speed) {
+		case SPEED_10:
+		case SPEED_100:
+			e_info("10/100 speed: disabling TSO\n");
+			netdev->features &= ~NETIF_F_TSO;
+			netdev->features &= ~NETIF_F_TSO6;
+			break;
+		case SPEED_1000:
+			netdev->features |= NETIF_F_TSO;
+			netdev->features |= NETIF_F_TSO6;
+			break;
+		default:
+			/* oops */
+			break;
+		}
+		if (hw->mac.type == e1000_pch_spt) {
+			netdev->features &= ~NETIF_F_TSO;
+			netdev->features &= ~NETIF_F_TSO6;
+		}
+	}
+
 	/* Set user-changeable features (subset of all device features) */
 	netdev->hw_features = netdev->features;
 	netdev->hw_features |= NETIF_F_RXFCS;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 76481ff7074ba545c237128ee02f7409e4bc2271..d23a467d0d2092a5a411e7029e98e2c66fde282c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -10448,8 +10448,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
 			 pf->hw.aq.asq_last_status));
 	}
 	/* reinit the misc interrupt */
-	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
 		ret = i40e_setup_misc_vector(pf);
+		if (ret)
+			goto end_unlock;
+	}
 
 	/* Add a filter to drop all Flow control frames from any VSI from being
 	 * transmitted.  By doing so we stop a malicious VF from sending out
@@ -13458,15 +13461,15 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
 		vsi->id = ctxt.vsi_number;
 	}
 
-	vsi->active_filters = 0;
-	clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
 	spin_lock_bh(&vsi->mac_filter_hash_lock);
+	vsi->active_filters = 0;
 	/* If macvlan filters already exist, force them to get loaded */
 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
 		f->state = I40E_FILTER_NEW;
 		f_count++;
 	}
 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+	clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
 
 	if (f_count) {
 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
index 017d68f1e1232c394aa4b2ac209300256fbbab51..972c571b41587a14518b71d8bacce2172f82a081 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
@@ -31,6 +31,8 @@ mlxfw_mfa2_tlv_next(const struct mlxfw_mfa2_file *mfa2_file,
 
 	if (tlv->type == MLXFW_MFA2_TLV_MULTI_PART) {
 		multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, tlv);
+		if (!multi)
+			return NULL;
 		tlv_len = NLA_ALIGN(tlv_len + be16_to_cpu(multi->total_len));
 	}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index a2c1fbd3e0d13741958411a11e58390dde7a28e1..0225c8f1e5ea208e196ca681e0e14d93a4b2b4fe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -26,7 +26,7 @@
 #define MLXSW_PCI_CIR_TIMEOUT_MSECS		1000
 
 #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	900000
-#define MLXSW_PCI_SW_RESET_WAIT_MSECS		200
+#define MLXSW_PCI_SW_RESET_WAIT_MSECS		400
 #define MLXSW_PCI_FW_READY			0xA1844
 #define MLXSW_PCI_FW_READY_MASK			0xFFFF
 #define MLXSW_PCI_FW_READY_MAGIC		0x5E
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index 63a44ee763be7994e5a477cdcfd29fc9928c2b1c..b9429e8faba1e1eaf75b5e0d93fd838fc82c27d6 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -96,6 +96,8 @@ static int ef100_net_stop(struct net_device *net_dev)
 	efx_mcdi_free_vis(efx);
 	efx_remove_interrupts(efx);
 
+	efx->state = STATE_NET_DOWN;
+
 	return 0;
 }
 
@@ -172,6 +174,8 @@ static int ef100_net_open(struct net_device *net_dev)
 	efx_link_status_changed(efx);
 	mutex_unlock(&efx->mac_lock);
 
+	efx->state = STATE_NET_UP;
+
 	return 0;
 
 fail:
@@ -272,7 +276,7 @@ int ef100_register_netdev(struct efx_nic *efx)
 	/* Always start with carrier off; PHY events will detect the link */
 	netif_carrier_off(net_dev);
 
-	efx->state = STATE_READY;
+	efx->state = STATE_NET_DOWN;
 	rtnl_unlock();
 	efx_init_mcdi_logging(efx);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index c069659c9e2d0a0e75dfed11a5548529f981fb0b..7cf52fcdb3078c2418da9326cc96c42defb6e168 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -105,14 +105,6 @@ static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
 static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
 			u32 flags);
 
-#define EFX_ASSERT_RESET_SERIALISED(efx)		\
-	do {						\
-		if ((efx->state == STATE_READY) ||	\
-		    (efx->state == STATE_RECOVERY) ||	\
-		    (efx->state == STATE_DISABLED))	\
-			ASSERT_RTNL();			\
-	} while (0)
-
 /**************************************************************************
  *
  * Port handling
@@ -377,6 +369,8 @@ static int efx_probe_all(struct efx_nic *efx)
 	if (rc)
 		goto fail5;
 
+	efx->state = STATE_NET_DOWN;
+
 	return 0;
 
 fail5:
@@ -543,7 +537,9 @@ int efx_net_open(struct net_device *net_dev)
 	efx_start_all(efx);
 	if (efx->state == STATE_DISABLED || efx->reset_pending)
 		netif_device_detach(efx->net_dev);
-	efx_selftest_async_start(efx);
+	else
+		efx->state = STATE_NET_UP;
+
 	return 0;
 }
 
@@ -721,8 +717,6 @@ static int efx_register_netdev(struct efx_nic *efx)
 	 * already requested.  If so, the NIC is probably hosed so we
 	 * abort.
 	 */
-	efx->state = STATE_READY;
-	smp_mb(); /* ensure we change state before checking reset_pending */
 	if (efx->reset_pending) {
 		netif_err(efx, probe, efx->net_dev,
 			  "aborting probe due to scheduled reset\n");
@@ -750,6 +744,8 @@ static int efx_register_netdev(struct efx_nic *efx)
 
 	efx_associate(efx);
 
+	efx->state = STATE_NET_DOWN;
+
 	rtnl_unlock();
 
 	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -851,7 +847,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
 	/* Flush reset_work. It can no longer be scheduled since we
 	 * are not READY.
 	 */
-	BUG_ON(efx->state == STATE_READY);
+	WARN_ON(efx_net_active(efx->state));
 	efx_flush_reset_workqueue(efx);
 
 	efx_disable_interrupts(efx);
@@ -1196,13 +1192,13 @@ static int efx_pm_freeze(struct device *dev)
 
 	rtnl_lock();
 
-	if (efx->state != STATE_DISABLED) {
-		efx->state = STATE_UNINIT;
-
+	if (efx_net_active(efx->state)) {
 		efx_device_detach_sync(efx);
 
 		efx_stop_all(efx);
 		efx_disable_interrupts(efx);
+
+		efx->state = efx_freeze(efx->state);
 	}
 
 	rtnl_unlock();
@@ -1217,7 +1213,7 @@ static int efx_pm_thaw(struct device *dev)
 
 	rtnl_lock();
 
-	if (efx->state != STATE_DISABLED) {
+	if (efx_frozen(efx->state)) {
 		rc = efx_enable_interrupts(efx);
 		if (rc)
 			goto fail;
@@ -1230,7 +1226,7 @@ static int efx_pm_thaw(struct device *dev)
 
 		efx_device_attach_if_not_resetting(efx);
 
-		efx->state = STATE_READY;
+		efx->state = efx_thaw(efx->state);
 
 		efx->type->resume_wol(efx);
 	}
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index de797e1ac5a98cae9ac45f79713aafe01a8153f3..476ef1c9763758a8a92d24c7708103d6620b1855 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -542,6 +542,8 @@ void efx_start_all(struct efx_nic *efx)
 	/* Start the hardware monitor if there is one */
 	efx_start_monitor(efx);
 
+	efx_selftest_async_start(efx);
+
 	/* Link state detection is normally event-driven; we have
 	 * to poll now because we could have missed a change
 	 */
@@ -897,7 +899,7 @@ static void efx_reset_work(struct work_struct *data)
 	 * have changed by now.  Now that we have the RTNL lock,
 	 * it cannot change again.
 	 */
-	if (efx->state == STATE_READY)
+	if (efx_net_active(efx->state))
 		(void)efx_reset(efx, method);
 
 	rtnl_unlock();
@@ -907,7 +909,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
 {
 	enum reset_type method;
 
-	if (efx->state == STATE_RECOVERY) {
+	if (efx_recovering(efx->state)) {
 		netif_dbg(efx, drv, efx->net_dev,
 			  "recovering: skip scheduling %s reset\n",
 			  RESET_TYPE(type));
@@ -942,7 +944,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
 	/* If we're not READY then just leave the flags set as the cue
 	 * to abort probing or reschedule the reset later.
 	 */
-	if (READ_ONCE(efx->state) != STATE_READY)
+	if (!efx_net_active(READ_ONCE(efx->state)))
 		return;
 
 	/* efx_process_channel() will no longer read events once a
@@ -1214,7 +1216,7 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
 	rtnl_lock();
 
 	if (efx->state != STATE_DISABLED) {
-		efx->state = STATE_RECOVERY;
+		efx->state = efx_recover(efx->state);
 		efx->reset_pending = 0;
 
 		efx_device_detach_sync(efx);
@@ -1268,7 +1270,7 @@ static void efx_io_resume(struct pci_dev *pdev)
 		netif_err(efx, hw, efx->net_dev,
 			  "efx_reset failed after PCI error (%d)\n", rc);
 	} else {
-		efx->state = STATE_READY;
+		efx->state = efx_recovered(efx->state);
 		netif_dbg(efx, hw, efx->net_dev,
 			  "Done resetting and resuming IO after PCI error.\n");
 	}
diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h
index 65513fd0cf6c469fe25aca8260924298e320956e..c72e819da8fd358d5c3cd495e2edf62e18d600a6 100644
--- a/drivers/net/ethernet/sfc/efx_common.h
+++ b/drivers/net/ethernet/sfc/efx_common.h
@@ -45,9 +45,7 @@ int efx_reconfigure_port(struct efx_nic *efx);
 
 #define EFX_ASSERT_RESET_SERIALISED(efx)		\
 	do {						\
-		if ((efx->state == STATE_READY) ||	\
-		    (efx->state == STATE_RECOVERY) ||	\
-		    (efx->state == STATE_DISABLED))	\
+		if (efx->state != STATE_UNINIT)		\
 			ASSERT_RTNL();			\
 	} while (0)
 
@@ -64,7 +62,7 @@ void efx_port_dummy_op_void(struct efx_nic *efx);
 
 static inline int efx_check_disabled(struct efx_nic *efx)
 {
-	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
+	if (efx->state == STATE_DISABLED || efx_recovering(efx->state)) {
 		netif_err(efx, drv, efx->net_dev,
 			  "device is disabled due to earlier errors\n");
 		return -EIO;
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index bd552c7dffcb1b2c30a92f7c15d1e985129472e1..3846b76b89720d07b706bbf2626866a141bb822d 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -137,7 +137,7 @@ void efx_ethtool_self_test(struct net_device *net_dev,
 	if (!efx_tests)
 		goto fail;
 
-	if (efx->state != STATE_READY) {
+	if (!efx_net_active(efx->state)) {
 		rc = -EBUSY;
 		goto out;
 	}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 8aecb4bd2c0d595e944e8143e1e3d2bd11220646..39f97929b3ffe0e74713f9a499d096f55404fd8f 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -627,12 +627,54 @@ enum efx_int_mode {
 #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
 
 enum nic_state {
-	STATE_UNINIT = 0,	/* device being probed/removed or is frozen */
-	STATE_READY = 1,	/* hardware ready and netdev registered */
-	STATE_DISABLED = 2,	/* device disabled due to hardware errors */
-	STATE_RECOVERY = 3,	/* device recovering from PCI error */
+	STATE_UNINIT = 0,	/* device being probed/removed */
+	STATE_NET_DOWN,		/* hardware probed and netdev registered */
+	STATE_NET_UP,		/* ready for traffic */
+	STATE_DISABLED,		/* device disabled due to hardware errors */
+
+	STATE_RECOVERY = 0x100,/* recovering from PCI error */
+	STATE_FROZEN = 0x200,	/* frozen by power management */
 };
 
+static inline bool efx_net_active(enum nic_state state)
+{
+	return state == STATE_NET_DOWN || state == STATE_NET_UP;
+}
+
+static inline bool efx_frozen(enum nic_state state)
+{
+	return state & STATE_FROZEN;
+}
+
+static inline bool efx_recovering(enum nic_state state)
+{
+	return state & STATE_RECOVERY;
+}
+
+static inline enum nic_state efx_freeze(enum nic_state state)
+{
+	WARN_ON(!efx_net_active(state));
+	return state | STATE_FROZEN;
+}
+
+static inline enum nic_state efx_thaw(enum nic_state state)
+{
+	WARN_ON(!efx_frozen(state));
+	return state & ~STATE_FROZEN;
+}
+
+static inline enum nic_state efx_recover(enum nic_state state)
+{
+	WARN_ON(!efx_net_active(state));
+	return state | STATE_RECOVERY;
+}
+
+static inline enum nic_state efx_recovered(enum nic_state state)
+{
+	WARN_ON(!efx_recovering(state));
+	return state & ~STATE_RECOVERY;
+}
+
 /* Forward declaration */
 struct efx_nic;
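[Editorial sketch, not part of the patch: how the new sfc state encoding composes. STATE_RECOVERY and STATE_FROZEN are high-order flag bits, so they stack on top of the base STATE_NET_DOWN/STATE_NET_UP values, and the freeze/thaw (and recover/recovered) helpers defined above are exact inverses. This assumes the net_driver.h definitions from this patch are in scope.]

static void efx_state_example(void)
{
	enum nic_state s = STATE_NET_UP;

	s = efx_freeze(s);		/* STATE_NET_UP | STATE_FROZEN */
	WARN_ON(efx_net_active(s));	/* not "active" while frozen */
	WARN_ON(!efx_frozen(s));

	s = efx_thaw(s);		/* back to exactly STATE_NET_UP */
	WARN_ON(s != STATE_NET_UP);
}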
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d533211161366a043af8902f2d853e53dcd36643..47c9118cc92a39d5a40dc3a11c509be34c29b052 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -646,8 +646,13 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 				       int page_off,
 				       unsigned int *len)
 {
-	struct page *page = alloc_page(GFP_ATOMIC);
+	int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	struct page *page;
 
+	if (page_off + *len + tailroom > PAGE_SIZE)
+		return NULL;
+
+	page = alloc_page(GFP_ATOMIC);
 	if (!page)
 		return NULL;
 
@@ -655,7 +660,6 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 	page_off += *len;
 
 	while (--*num_buf) {
-		int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 		unsigned int buflen;
 		void *buf;
 		int off;
diff --git a/drivers/net/wireguard/timers.c b/drivers/net/wireguard/timers.c
index d54d32ac9bc41531e1b5a71c55bb989fb6d15c45..91f5d6d2d4e2d43a1cc16cc5ccf4612b4ceb24b1 100644
--- a/drivers/net/wireguard/timers.c
+++ b/drivers/net/wireguard/timers.c
@@ -46,7 +46,7 @@ static void wg_expired_retransmit_handshake(struct timer_list *timer)
 	if (peer->timer_handshake_attempts > MAX_TIMER_HANDSHAKES) {
 		pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d attempts, giving up\n",
 			 peer->device->dev->name, peer->internal_id,
-			 &peer->endpoint.addr, MAX_TIMER_HANDSHAKES + 2);
+			 &peer->endpoint.addr, (int)MAX_TIMER_HANDSHAKES + 2);
 
 		del_timer(&peer->timer_send_keepalive);
 		/* We drop all packets without a keypair and don't try again,
@@ -64,7 +64,7 @@ static void wg_expired_retransmit_handshake(struct timer_list *timer)
 		++peer->timer_handshake_attempts;
 		pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d seconds, retrying (try %d)\n",
 			 peer->device->dev->name, peer->internal_id,
-			 &peer->endpoint.addr, REKEY_TIMEOUT,
+			 &peer->endpoint.addr, (int)REKEY_TIMEOUT,
 			 peer->timer_handshake_attempts + 1);
 
 		/* We clear the endpoint address src address, in case this is
@@ -94,7 +94,7 @@ static void wg_expired_new_handshake(struct timer_list *timer)
 
 	pr_debug("%s: Retrying handshake with peer %llu (%pISpfsc) because we stopped hearing back after %d seconds\n",
 		 peer->device->dev->name, peer->internal_id,
-		 &peer->endpoint.addr, KEEPALIVE_TIMEOUT + REKEY_TIMEOUT);
+		 &peer->endpoint.addr, (int)(KEEPALIVE_TIMEOUT + REKEY_TIMEOUT));
 
 	/* We clear the endpoint address src address, in case this is the cause
 	 * of trouble.
 	 */
@@ -126,7 +126,7 @@ static void wg_queued_expired_zero_key_material(struct work_struct *work)
 
 	pr_debug("%s: Zeroing out all keys for peer %llu (%pISpfsc), since we haven't received a new one in %d seconds\n",
 		 peer->device->dev->name, peer->internal_id,
-		 &peer->endpoint.addr, REJECT_AFTER_TIME * 3);
+		 &peer->endpoint.addr, (int)REJECT_AFTER_TIME * 3);
 	wg_noise_handshake_clear(&peer->handshake);
 	wg_noise_keypairs_clear(&peer->keypairs);
 	wg_peer_put(peer);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 98e8b461bf30a67f2c9f7dbf18e470fca91b2081..1c366ddf62bc50b2532074ba67c54483e69dfb45 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1001,10 +1001,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
 		/* No crossing a page as the payload mustn't fragment. */
 		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
-			netdev_err(queue->vif->dev,
-				   "txreq.offset: %u, size: %u, end: %lu\n",
-				   txreq.offset, txreq.size,
-				   (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
+			netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
+				   txreq.offset, txreq.size);
 			xenvif_fatal_tx_err(queue->vif);
 			break;
 		}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 22c5116b41be46e8bc78608150b80d5460c877f0..167bd7ddd0efe1ce660ff27d432e0c36bc08914f 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1535,22 +1535,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
 	if (ret)
 		goto err_init_connect;
 
-	queue->rd_enabled = true;
 	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
-	nvme_tcp_init_recv_ctx(queue);
-
-	write_lock_bh(&queue->sock->sk->sk_callback_lock);
-	queue->sock->sk->sk_user_data = queue;
-	queue->state_change = queue->sock->sk->sk_state_change;
-	queue->data_ready = queue->sock->sk->sk_data_ready;
-	queue->write_space = queue->sock->sk->sk_write_space;
-	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
-	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
-	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-	queue->sock->sk->sk_ll_usec = 1;
-#endif
-	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
 
 	return 0;
 
@@ -1569,7 +1554,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
 	return ret;
 }
 
-static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
+static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
 {
 	struct socket *sock = queue->sock;
 
@@ -1584,7 +1569,7 @@ static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
 {
 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
-	nvme_tcp_restore_sock_calls(queue);
+	nvme_tcp_restore_sock_ops(queue);
 	cancel_work_sync(&queue->io_work);
 }
 
@@ -1599,21 +1584,42 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
 	mutex_unlock(&queue->queue_lock);
 }
 
+static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
+{
+	write_lock_bh(&queue->sock->sk->sk_callback_lock);
+	queue->sock->sk->sk_user_data = queue;
+	queue->state_change = queue->sock->sk->sk_state_change;
+	queue->data_ready = queue->sock->sk->sk_data_ready;
+	queue->write_space = queue->sock->sk->sk_write_space;
+	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
+	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
+	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	queue->sock->sk->sk_ll_usec = 1;
+#endif
+	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
+}
+
 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
 {
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+	struct nvme_tcp_queue *queue = &ctrl->queues[idx];
 	int ret;
 
+	queue->rd_enabled = true;
+	nvme_tcp_init_recv_ctx(queue);
+	nvme_tcp_setup_sock_ops(queue);
+
 	if (idx)
 		ret = nvmf_connect_io_queue(nctrl, idx, false);
 	else
 		ret = nvmf_connect_admin_queue(nctrl);
 
 	if (!ret) {
-		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
+		set_bit(NVME_TCP_Q_LIVE, &queue->flags);
 	} else {
-		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
-			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
+		if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+			__nvme_tcp_stop_queue(queue);
 		dev_err(nctrl->device,
 			"failed to connect queue: %d ret=%d\n", idx, ret);
 	}
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 5fbd80908a99a5d1efa9632dbc8792e129bfb223..c68e14271c02c95bfdc561e87a1c90cec4bec6e7 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1210,11 +1210,9 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
 	val |= BIT(4);
 	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
 
-	if (IS_ENABLED(CONFIG_PCI_MSI)) {
-		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
-		val |= BIT(31);
-		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
-	}
+	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+	val |= BIT(31);
+	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
 
 	return 0;
 err_disable_clocks:
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index d17f3bf36f70913f7f12713389aa4faab324105f..ad12515a4a121fe08a1d84f8c03d67676a207c2a 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -63,7 +63,14 @@ int pciehp_configure_device(struct controller *ctrl)
 
 	pci_assign_unassigned_bridge_resources(bridge);
 	pcie_bus_configure_settings(parent);
+
+	/*
+	 * Release reset_lock during driver binding
+	 * to avoid AB-BA deadlock with device_lock.
+	 */
+	up_read(&ctrl->reset_lock);
 	pci_bus_add_devices(parent);
+	down_read_nested(&ctrl->reset_lock, ctrl->depth);
 
  out:
 	pci_unlock_rescan_remove();
@@ -104,7 +111,15 @@ void pciehp_unconfigure_device(struct controller *ctrl, bool presence)
 	list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
 					 bus_list) {
 		pci_dev_get(dev);
+
+		/*
+		 * Release reset_lock during driver unbinding
+		 * to avoid AB-BA deadlock with device_lock.
+		 */
+		up_read(&ctrl->reset_lock);
 		pci_stop_and_remove_bus_device(dev);
+		down_read_nested(&ctrl->reset_lock, ctrl->depth);
+
 		/*
 		 * Ensure that no new Requests will be generated from
 		 * the device.
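[Editorial sketch, not part of the patch: the AB-BA pattern the pciehp comments refer to, reduced to two generic locks. The diff itself only says the lock is dropped around binding/unbinding; which path takes device_lock first is an inference, and the thread names are hypothetical. DECLARE_RWSEM/DEFINE_MUTEX are the real <linux/rwsem.h>/<linux/mutex.h> primitives.]

#include <linux/rwsem.h>
#include <linux/mutex.h>

static DECLARE_RWSEM(reset_lock);	/* stands in for ctrl->reset_lock */
static DEFINE_MUTEX(dev_lock);		/* stands in for device_lock() */

static void hotplug_thread(void)	/* pre-fix ordering: A then B */
{
	down_read(&reset_lock);
	mutex_lock(&dev_lock);		/* bind/unbind drivers */
	mutex_unlock(&dev_lock);
	up_read(&reset_lock);
}

static void reset_thread(void)		/* opposite ordering: B then A */
{
	mutex_lock(&dev_lock);
	down_write(&reset_lock);	/* can deadlock against hotplug_thread() */
	up_write(&reset_lock);
	mutex_unlock(&dev_lock);
}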
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index ad205fdad3722d46193c3a6a5099cd2878cf127a..286e9b119ee5b4379d8e9c59d7ce5d3ecebb3b5d 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -146,6 +146,7 @@ static void hibvt_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 
 	value = readl(base + PWM_CTRL_ADDR(pwm->hwpwm));
 	state->enabled = (PWM_ENABLE_MASK & value);
+	state->polarity = (PWM_POLARITY_MASK & value) ? PWM_POLARITY_INVERSED : PWM_POLARITY_NORMAL;
 }
 
 static int hibvt_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c
index 3e967a12458c6c8404d8e0b073dbd96704a69eb2..a2aef006cb71e790d6c66295cf77c7be03497235 100644
--- a/drivers/pwm/pwm-iqs620a.c
+++ b/drivers/pwm/pwm-iqs620a.c
@@ -132,6 +132,7 @@ static void iqs620_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 	mutex_unlock(&iqs620_pwm->lock);
 
 	state->period = IQS620_PWM_PERIOD_NS;
+	state->polarity = PWM_POLARITY_NORMAL;
 }
 
 static int iqs620_pwm_notifier(struct notifier_block *notifier,
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index bd0d7336b8983beefc9c3cebe0e601dd6955ed31..0283163ddbe8ebe3d98d3ea94f0f137562d51fd1 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -168,6 +168,12 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
 	duty = state->duty_cycle;
 	period = state->period;
 
+	/*
+	 * Note this is wrong. The result is an output wave that isn't really
+	 * inverted and so is wrongly identified by .get_state as normal.
+	 * Fixing this needs some care however as some machines might rely on
+	 * this.
+	 */
 	if (state->polarity == PWM_POLARITY_INVERSED)
 		duty = period - duty;
 
@@ -366,6 +372,7 @@ static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 		state->period = 0;
 		state->duty_cycle = 0;
 	}
+	state->polarity = PWM_POLARITY_NORMAL;
 }
 
 static const struct pwm_ops meson_pwm_ops = {
@@ -417,7 +424,7 @@ static const struct meson_pwm_data pwm_axg_ee_data = {
 };
 
 static const char * const pwm_axg_ao_parent_names[] = {
-	"aoclk81", "xtal", "fclk_div4", "fclk_div5"
+	"xtal", "axg_ao_clk81", "fclk_div4", "fclk_div5"
 };
 
 static const struct meson_pwm_data pwm_axg_ao_data = {
@@ -426,7 +433,7 @@ static const struct meson_pwm_data pwm_axg_ao_data = {
 };
 
 static const char * const pwm_g12a_ao_ab_parent_names[] = {
-	"xtal", "aoclk81", "fclk_div4", "fclk_div5"
+	"xtal", "g12a_ao_clk81", "fclk_div4", "fclk_div5"
 };
 
 static const struct meson_pwm_data pwm_g12a_ao_ab_data = {
@@ -435,7 +442,7 @@ static const struct meson_pwm_data pwm_g12a_ao_ab_data = {
 };
 
 static const char * const pwm_g12a_ao_cd_parent_names[] = {
-	"xtal", "aoclk81",
+	"xtal", "g12a_ao_clk81",
 };
 
 static const struct meson_pwm_data pwm_g12a_ao_cd_data = {
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 718160ca66b3542545449182e4283706579af73d..45fbf12be40bf67c63fe65c20a6c0162c8038c2c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3266,7 +3266,7 @@ fw_crash_buffer_show(struct device *cdev,
 
 	spin_lock_irqsave(&instance->crashdump_lock, flags);
 	buff_offset = instance->fw_crash_buffer_offset;
-	if (!instance->crash_dump_buf &&
+	if (!instance->crash_dump_buf ||
 	    !((instance->fw_crash_state == AVAILABLE) ||
 	      (instance->fw_crash_state == COPYING))) {
 		dev_err(&instance->pdev->dev,
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 6ad834d61d4c7a4b8070e4b83ad0a099a9e46b03..d6c25a88cebc927d68724edc57123577b5765d81 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -317,11 +317,18 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
 	if (result)
 		return -EIO;
 
-	/* Sanity check that we got the page back that we asked for */
+	/*
+	 * Sanity check that we got the page back that we asked for and that
+	 * the page size is not 0.
+ */ if (buffer[1] != page) return -EIO; - return get_unaligned_be16(&buffer[2]) + 4; + result = get_unaligned_be16(&buffer[2]); + if (!result) + return -EIO; + + return result + 4; } /** diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c index 74adb82f37c30f30e692f2858e93e938ece95f47..a19cfb2998c93c81fbc37918aabca2b224de4ad5 100644 --- a/drivers/staging/iio/resolver/ad2s1210.c +++ b/drivers/staging/iio/resolver/ad2s1210.c @@ -101,7 +101,7 @@ struct ad2s1210_state { static const int ad2s1210_mode_vals[4][2] = { [MOD_POS] = { 0, 0 }, [MOD_VEL] = { 0, 1 }, - [MOD_CONFIG] = { 1, 0 }, + [MOD_CONFIG] = { 1, 1 }, }; static inline void ad2s1210_set_mode(enum ad2s1210_mode mode, diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index d73f624ed42a362579ec822e2ad65212cab149a9..5709b959b1d939b3c792f27312f00a32b74541a8 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1567,13 +1567,11 @@ static int dwc3_probe(struct platform_device *pdev) spin_lock_init(&dwc->lock); mutex_init(&dwc->mutex); + pm_runtime_get_noresume(dev); pm_runtime_set_active(dev); pm_runtime_use_autosuspend(dev); pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY); pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); - if (ret < 0) - goto err1; pm_runtime_forbid(dev); @@ -1633,12 +1631,10 @@ static int dwc3_probe(struct platform_device *pdev) dwc3_free_event_buffers(dwc); err2: - pm_runtime_allow(&pdev->dev); - -err1: - pm_runtime_put_sync(&pdev->dev); - pm_runtime_disable(&pdev->dev); - + pm_runtime_allow(dev); + pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); + pm_runtime_put_noidle(dev); disable_clks: clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks); assert_reset: @@ -1659,6 +1655,7 @@ static int dwc3_remove(struct platform_device *pdev) dwc3_core_exit(dwc); dwc3_ulpi_exit(dwc); + pm_runtime_allow(&pdev->dev); pm_runtime_disable(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index dc832ddf7033f57a61196f5eba7f96581d01c5f9..bd40caeeb21c619eaabe3fce1d0ea0dff81b2b8d 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -133,6 +133,7 @@ static void xhci_debugfs_regset(struct xhci_hcd *xhci, u32 base, regset->regs = regs; regset->nregs = nregs; regset->base = hcd->regs + base; + regset->dev = hcd->self.controller; debugfs_create_regset32((const char *)rgs->name, 0444, parent, regset); } diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index da8b7bd39703e981c71b5eab2c74cb8b0dc5b245..5b474efeab6abd8fb273d556c771c0b0caadc8d4 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -595,6 +595,11 @@ static void option_instat_callback(struct urb *urb); #define SIERRA_VENDOR_ID 0x1199 #define SIERRA_PRODUCT_EM9191 0x90d3 +/* UNISOC (Spreadtrum) products */ +#define UNISOC_VENDOR_ID 0x1782 +/* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */ +#define TOZED_PRODUCT_LT70C 0x4055 + /* Device flags */ /* Highest interface number which can be used with NCTRL() and RSVD() */ @@ -2225,6 +2230,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) }, + { 
USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c index 68aaed48315ff242335c9ba53c459f7fa7e8239e..76f634d185f1084935bf37f01beaf7724f851e5f 100644 --- a/fs/ext4/acl.c +++ b/fs/ext4/acl.c @@ -242,7 +242,6 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type) handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits); if (IS_ERR(handle)) return PTR_ERR(handle); - ext4_fc_start_update(inode); if ((type == ACL_TYPE_ACCESS) && acl) { error = posix_acl_update_mode(inode, &mode, &acl); @@ -260,7 +259,6 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type) } out_stop: ext4_journal_stop(handle); - ext4_fc_stop_update(inode); if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry; return error; diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index a6302e6b95287f4635358a79da3f83e6db5ef99f..7cf33851eb69d2c153b26ca1985bfc4dd949122c 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -4706,7 +4706,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) FALLOC_FL_INSERT_RANGE)) return -EOPNOTSUPP; - ext4_fc_start_update(inode); inode_lock(inode); ret = ext4_convert_inline_data(inode); inode_unlock(inode); @@ -4776,7 +4775,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) inode_unlock(inode); trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); exit: - ext4_fc_stop_update(inode); return ret; } diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 904d0bd91160cda92b2078b7a917b9f1f400ae4e..fcf3887d18805e2a26ebfb647948f2d52fed8ad1 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -260,7 +260,6 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb, if (iocb->ki_flags & IOCB_NOWAIT) return -EOPNOTSUPP; - ext4_fc_start_update(inode); inode_lock(inode); ret = ext4_write_checks(iocb, from); if (ret <= 0) @@ -272,7 +271,6 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb, out: inode_unlock(inode); - ext4_fc_stop_update(inode); if (likely(ret > 0)) { iocb->ki_pos += ret; ret = generic_write_sync(iocb, ret); @@ -559,9 +557,7 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from) goto out; } - ext4_fc_start_update(inode); ret = ext4_orphan_add(handle, inode); - ext4_fc_stop_update(inode); if (ret) { ext4_journal_stop(handle); goto out; diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index baf28781b2264f403684c512b7a8a28b1d2ff7c0..4adf36c17e34f715f8e749bbc0bb01c9dcf20c81 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -206,7 +206,7 @@ static int ext4_read_inline_data(struct inode *inode, void *buffer, /* * write the buffer to the inline inode. * If 'create' is set, we don't need to do the extra copy in the xattr - * value since it is already handled by ext4_xattr_ibody_inline_set. + * value since it is already handled by ext4_xattr_ibody_set. * That saves us one memcpy. 
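+ *
+ * (This series folds ext4_xattr_ibody_inline_set() into
+ * ext4_xattr_ibody_set() and switches the call sites below over to the
+ * common helper.)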
*/ static void ext4_write_inline_data(struct inode *inode, struct ext4_iloc *iloc, @@ -288,7 +288,7 @@ static int ext4_create_inline_data(handle_t *handle, BUG_ON(!is.s.not_found); - error = ext4_xattr_ibody_inline_set(handle, inode, &i, &is); + error = ext4_xattr_ibody_set(handle, inode, &i, &is); if (error) { if (error == -ENOSPC) ext4_clear_inode_state(inode, @@ -360,7 +360,7 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode, i.value = value; i.value_len = len; - error = ext4_xattr_ibody_inline_set(handle, inode, &i, &is); + error = ext4_xattr_ibody_set(handle, inode, &i, &is); if (error) goto out; @@ -433,7 +433,7 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle, if (error) goto out; - error = ext4_xattr_ibody_inline_set(handle, inode, &i, &is); + error = ext4_xattr_ibody_set(handle, inode, &i, &is); if (error) goto out; @@ -1936,8 +1936,7 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline) i.value = value; i.value_len = i_size > EXT4_MIN_INLINE_DATA_SIZE ? i_size - EXT4_MIN_INLINE_DATA_SIZE : 0; - err = ext4_xattr_ibody_inline_set(handle, inode, - &i, &is); + err = ext4_xattr_ibody_set(handle, inode, &i, &is); if (err) goto out_error; } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index be439ef94ebf23e33bc5bed2a2352fbd635762db..8af9ee25def8937b741043b42c1d0df582ba1f51 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -5413,7 +5413,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) if (error) return error; } - ext4_fc_start_update(inode); + if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { handle_t *handle; @@ -5437,7 +5437,6 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) if (error) { ext4_journal_stop(handle); - ext4_fc_stop_update(inode); return error; } /* Update corresponding info in inode so that everything is in @@ -5449,7 +5448,6 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) error = ext4_mark_inode_dirty(handle, inode); ext4_journal_stop(handle); if (unlikely(error)) { - ext4_fc_stop_update(inode); return error; } } @@ -5464,12 +5462,10 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); if (attr->ia_size > sbi->s_bitmap_maxbytes) { - ext4_fc_stop_update(inode); return -EFBIG; } } if (!S_ISREG(inode->i_mode)) { - ext4_fc_stop_update(inode); return -EINVAL; } @@ -5595,7 +5591,6 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) ext4_std_error(inode->i_sb, error); if (!error) error = rc; - ext4_fc_stop_update(inode); return error; } diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 53bdc67a815f6cb555ec9834fe2a05c5ac116b7a..1171618f6549ab65565ac52f81c021390ab1ee38 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -1322,13 +1322,7 @@ static long __ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { - long ret; - - ext4_fc_start_update(file_inode(filp)); - ret = __ext4_ioctl(filp, cmd, arg); - ext4_fc_stop_update(file_inode(filp)); - - return ret; + return __ext4_ioctl(filp, cmd, arg); } #ifdef CONFIG_COMPAT diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 8dc3495fa2a9c9536a490ae6decf4bb0a79d4206..8af72ac28cee249c38ea5079b921f73c151ddf3d 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -2218,32 +2218,7 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i, return 0; } -int 
ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode, - struct ext4_xattr_info *i, - struct ext4_xattr_ibody_find *is) -{ - struct ext4_xattr_ibody_header *header; - struct ext4_xattr_search *s = &is->s; - int error; - - if (!EXT4_INODE_HAS_XATTR_SPACE(inode)) - return -ENOSPC; - - error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */); - if (error) - return error; - header = IHDR(inode, ext4_raw_inode(&is->iloc)); - if (!IS_LAST_ENTRY(s->first)) { - header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC); - ext4_set_inode_state(inode, EXT4_STATE_XATTR); - } else { - header->h_magic = cpu_to_le32(0); - ext4_clear_inode_state(inode, EXT4_STATE_XATTR); - } - return 0; -} - -static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode, +int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode, struct ext4_xattr_info *i, struct ext4_xattr_ibody_find *is) { diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h index b357872ab83b4d4e9b3ab5c2fab29908b6a16280..e5e36bd11f0556697e170c55a4690786e7e848a2 100644 --- a/fs/ext4/xattr.h +++ b/fs/ext4/xattr.h @@ -200,9 +200,9 @@ extern int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i, extern int ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name, void *buffer, size_t buffer_size); -extern int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode, - struct ext4_xattr_info *i, - struct ext4_xattr_ibody_find *is); +extern int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode, + struct ext4_xattr_info *i, + struct ext4_xattr_ibody_find *is); extern struct mb_cache *ext4_xattr_create_cache(void); extern void ext4_xattr_destroy_cache(struct mb_cache *); diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 2011199476ea6b5a9c23309aad61f644e0e163d3..20e29e35884a8658c3f948a2a971a15c4312fb35 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -884,6 +884,16 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi, continue; } + /* + * If wb_tryget fails, the wb has been shutdown, skip it. + * + * Pin @wb so that it stays on @bdi->wb_list. This allows + * continuing iteration from @wb after dropping and + * regrabbing rcu read lock. + */ + if (!wb_tryget(wb)) + continue; + /* alloc failed, execute synchronously using on-stack fallback */ work = &fallback_work; *work = *base_work; @@ -892,13 +902,6 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi, work->done = &fallback_work_done; wb_queue_work(wb, work); - - /* - * Pin @wb so that it stays on @bdi->wb_list. This allows - * continuing iteration from @wb after dropping and - * regrabbing rcu read lock. 
- */ - wb_get(wb); last_wb = wb; rcu_read_unlock(); diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 80a9e50392a09c71c5d96e8e948299e02542ed57..e3b9b7d188e675d58eb1ca13d05f74ccce75a937 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -205,7 +205,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) if (inode && fuse_is_bad(inode)) goto invalid; else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) || - (flags & (LOOKUP_EXCL | LOOKUP_REVAL))) { + (flags & (LOOKUP_EXCL | LOOKUP_REVAL | LOOKUP_RENAME_TARGET))) { struct fuse_entry_out outarg; FUSE_ARGS(args); struct fuse_forget_link *forget; @@ -537,6 +537,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, struct fuse_entry_out outentry; struct fuse_inode *fi; struct fuse_file *ff; + bool trunc = flags & O_TRUNC; /* Userspace expects S_IFREG in create mode */ BUG_ON((mode & S_IFMT) != S_IFREG); @@ -604,6 +605,10 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, } else { file->private_data = ff; fuse_finish_open(inode, file); + if (fm->fc->atomic_o_trunc && trunc) + truncate_pagecache(inode, 0); + else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) + invalidate_inode_pages2(inode->i_mapping); } return err; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 504389568dac58ae0fde6e83115e85ff152404de..13d97547eaf6c502f4fac99572cbe6223e120e1d 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -206,14 +206,10 @@ void fuse_finish_open(struct inode *inode, struct file *file) fi->attr_version = atomic64_inc_return(&fc->attr_version); i_size_write(inode, 0); spin_unlock(&fi->lock); - truncate_pagecache(inode, 0); fuse_invalidate_attr(inode); if (fc->writeback_cache) file_update_time(file); - } else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) { - invalidate_inode_pages2(inode->i_mapping); } - if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache) fuse_link_write_file(file); } @@ -236,30 +232,39 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) if (err) return err; - if (is_wb_truncate || dax_truncate) { + if (is_wb_truncate || dax_truncate) inode_lock(inode); - fuse_set_nowrite(inode); - } if (dax_truncate) { down_write(&get_fuse_inode(inode)->i_mmap_sem); err = fuse_dax_break_layouts(inode, 0, 0); if (err) - goto out; + goto out_inode_unlock; } + if (is_wb_truncate || dax_truncate) + fuse_set_nowrite(inode); + err = fuse_do_open(fm, get_node_id(inode), file, isdir); if (!err) fuse_finish_open(inode, file); -out: + if (is_wb_truncate || dax_truncate) + fuse_release_nowrite(inode); + if (!err) { + struct fuse_file *ff = file->private_data; + + if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) + truncate_pagecache(inode, 0); + else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) + invalidate_inode_pages2(inode->i_mapping); + } if (dax_truncate) up_write(&get_fuse_inode(inode)->i_mmap_sem); - if (is_wb_truncate | dax_truncate) { - fuse_release_nowrite(inode); +out_inode_unlock: + if (is_wb_truncate || dax_truncate) inode_unlock(inode); - } return err; } @@ -782,7 +787,7 @@ static void fuse_read_update_size(struct inode *inode, loff_t size, struct fuse_inode *fi = get_fuse_inode(inode); spin_lock(&fi->lock); - if (attr_ver == fi->attr_version && size < inode->i_size && + if (attr_ver >= fi->attr_version && size < inode->i_size && !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) { fi->attr_version = atomic64_inc_return(&fc->attr_version); i_size_write(inode, size); diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 
b10cddd723559fb17c1e3e2bfcc6e1763b4789ae..ceaa6868386e6b3a170006617d704cb2f82dd5ba 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -556,6 +556,9 @@ struct fuse_conn { /** Maxmum number of pages that can be used in a single request */ unsigned int max_pages; + /** Constrain ->max_pages to this value during feature negotiation */ + unsigned int max_pages_limit; + /** Input queue */ struct fuse_iqueue iq; diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 2ede05df7d069d7d84b0080d469f90245c433a68..9ea175ff9c8e689413a5f806397f2762f7051308 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -710,6 +710,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm, fc->pid_ns = get_pid_ns(task_active_pid_ns(current)); fc->user_ns = get_user_ns(user_ns); fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ; + fc->max_pages_limit = FUSE_MAX_MAX_PAGES; INIT_LIST_HEAD(&fc->mounts); list_add(&fm->fc_entry, &fc->mounts); @@ -1056,7 +1057,7 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args, fc->abort_err = 1; if (arg->flags & FUSE_MAX_PAGES) { fc->max_pages = - min_t(unsigned int, FUSE_MAX_MAX_PAGES, + min_t(unsigned int, fc->max_pages_limit, max_t(unsigned int, arg->max_pages, 1)); } if (IS_ENABLED(CONFIG_FUSE_DAX) && @@ -1595,7 +1596,7 @@ static void fuse_kill_sb_blk(struct super_block *sb) struct fuse_mount *fm = get_fuse_mount_super(sb); bool last; - if (fm) { + if (sb->s_root) { last = fuse_mount_remove(fm); if (last) fuse_conn_destroy(fm); diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index b9cfb1165ff422c36e22e9c1c2e5e9595c65c02f..faadc80485e7febaddcdcb8447f99a90c33ec0f1 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -18,6 +18,12 @@ #include #include "fuse_i.h" +/* Used to help calculate the FUSE connection's max_pages limit for a request's + * size. Parts of the struct fuse_req are sliced into scattergather lists in + * addition to the pages used, so this can help account for that overhead. + */ +#define FUSE_HEADER_OVERHEAD 4 + /* List of virtio-fs device instances and a lock for the list. Also provides * mutual exclusion in device removal and mounting path */ @@ -1393,7 +1399,7 @@ static void virtio_kill_sb(struct super_block *sb) bool last; /* If mount failed, we can still be called without any fc */ - if (fm) { + if (sb->s_root) { last = fuse_mount_remove(fm); if (last) virtio_fs_conn_destroy(fm); @@ -1426,9 +1432,10 @@ static int virtio_fs_get_tree(struct fs_context *fsc) { struct virtio_fs *fs; struct super_block *sb; - struct fuse_conn *fc; + struct fuse_conn *fc = NULL; struct fuse_mount *fm; - int err; + unsigned int virtqueue_size; + int err = -EIO; /* This gets a reference on virtio_fs object. This ptr gets installed * in fc->iq->priv. 
Once fuse_conn is going away, it calls ->put() @@ -1440,28 +1447,28 @@ static int virtio_fs_get_tree(struct fs_context *fsc) return -EINVAL; } + virtqueue_size = virtqueue_get_vring_size(fs->vqs[VQ_REQUEST].vq); + if (WARN_ON(virtqueue_size <= FUSE_HEADER_OVERHEAD)) + goto out_err; + + err = -ENOMEM; fc = kzalloc(sizeof(struct fuse_conn), GFP_KERNEL); - if (!fc) { - mutex_lock(&virtio_fs_mutex); - virtio_fs_put(fs); - mutex_unlock(&virtio_fs_mutex); - return -ENOMEM; - } + if (!fc) + goto out_err; fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL); - if (!fm) { - mutex_lock(&virtio_fs_mutex); - virtio_fs_put(fs); - mutex_unlock(&virtio_fs_mutex); - kfree(fc); - return -ENOMEM; - } + if (!fm) + goto out_err; fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs); fc->release = fuse_free_conn; fc->delete_stale = true; fc->auto_submounts = true; + /* Tell FUSE to split requests that exceed the virtqueue's size */ + fc->max_pages_limit = min_t(unsigned int, fc->max_pages_limit, + virtqueue_size - FUSE_HEADER_OVERHEAD); + fsc->s_fs_info = fm; sb = sget_fc(fsc, virtio_fs_test_super, virtio_fs_set_super); fuse_mount_put(fm); @@ -1483,6 +1490,13 @@ static int virtio_fs_get_tree(struct fs_context *fsc) WARN_ON(fsc->root); fsc->root = dget(sb->s_root); return 0; + +out_err: + kfree(fc); + mutex_lock(&virtio_fs_mutex); + virtio_fs_put(fs); + mutex_unlock(&virtio_fs_mutex); + return err; } static const struct fs_context_operations virtio_fs_context_ops = { diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 4a9f6efcd0718bff6f4a90761b384a3d67788bb4..0139c8d31bb2d6ef08372fc3d1a79ab42825a273 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -757,6 +757,7 @@ int jbd2_fc_begin_commit(journal_t *journal, tid_t tid) } journal->j_flags |= JBD2_FAST_COMMIT_ONGOING; write_unlock(&journal->j_state_lock); + jbd2_journal_lock_updates(journal); return 0; } @@ -768,6 +769,7 @@ EXPORT_SYMBOL(jbd2_fc_begin_commit); */ static int __jbd2_fc_end_commit(journal_t *journal, tid_t tid, bool fallback) { + jbd2_journal_unlock_updates(journal); if (journal->j_fc_cleanup_callback) journal->j_fc_cleanup_callback(journal, 0); write_lock(&journal->j_state_lock); diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 5e835bbf1ffb8d3286c201d653690fa8eea1e557..fff2cdc69e5ee6236841a53245e29fb4b09a0962 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -435,6 +435,23 @@ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci) return 0; } +/** + * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area + * @sci: segment constructor object + * + * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of + * the current segment summary block. + */ +static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci) +{ + struct nilfs_segsum_pointer *ssp; + + ssp = sci->sc_blk_cnt > 0 ? 
&sci->sc_binfo_ptr : &sci->sc_finfo_ptr; + if (ssp->offset < ssp->bh->b_size) + memset(ssp->bh->b_data + ssp->offset, 0, + ssp->bh->b_size - ssp->offset); +} + static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci) { sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks; @@ -443,6 +460,7 @@ static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci) * The current segment is filled up * (internal code) */ + nilfs_segctor_zeropad_segsum(sci); sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg); return nilfs_segctor_reset_segment_buffer(sci); } @@ -547,6 +565,7 @@ static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci, goto retry; } if (unlikely(required)) { + nilfs_segctor_zeropad_segsum(sci); err = nilfs_segbuf_extend_segsum(segbuf); if (unlikely(err)) goto failed; @@ -1536,6 +1555,7 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci, nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA); sci->sc_stage = prev_stage; } + nilfs_segctor_zeropad_segsum(sci); nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile); return 0; diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c index 59d87f9f72fb4172bd31bcd1baa65e3b0ff1df3b..159af6c26f4bd1d717573f7b8d80bddd43f6d0ec 100644 --- a/fs/reiserfs/xattr_security.c +++ b/fs/reiserfs/xattr_security.c @@ -81,11 +81,15 @@ int reiserfs_security_write(struct reiserfs_transaction_handle *th, struct inode *inode, struct reiserfs_security_handle *sec) { + char xattr_name[XATTR_NAME_MAX + 1] = XATTR_SECURITY_PREFIX; int error; - if (strlen(sec->name) < sizeof(XATTR_SECURITY_PREFIX)) + + if (XATTR_SECURITY_PREFIX_LEN + strlen(sec->name) > XATTR_NAME_MAX) return -EINVAL; - error = reiserfs_xattr_set_handle(th, inode, sec->name, sec->value, + strlcat(xattr_name, sec->name, sizeof(xattr_name)); + + error = reiserfs_xattr_set_handle(th, inode, xattr_name, sec->value, sec->length, XATTR_CREATE); if (error == -ENODATA || error == -EOPNOTSUPP) error = 0; diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 953de843d9c380b12a9c1527071a056b38593a60..e341d6531e687ce94c863bb82214546c6bdeaea0 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -39,33 +39,6 @@ static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend) XFS_I(ioend->io_inode)->i_d.di_size; } -STATIC int -xfs_setfilesize_trans_alloc( - struct iomap_ioend *ioend) -{ - struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount; - struct xfs_trans *tp; - int error; - - error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp); - if (error) - return error; - - ioend->io_private = tp; - - /* - * We may pass freeze protection with a transaction. So tell lockdep - * we released it. - */ - __sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS); - /* - * We hand off the transaction to the completion thread now, so - * clear the flag here. - */ - xfs_trans_clear_context(tp); - return 0; -} - /* * Update on-disk file size now that data has been written to disk. 
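+ *
+ * With xfs_setfilesize_trans_alloc() removed above, the transaction
+ * backing this size update is allocated at I/O completion time instead
+ * of being reserved up front in xfs_prepare_ioend().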
*/ @@ -191,12 +164,10 @@ xfs_end_ioend( error = xfs_reflink_end_cow(ip, offset, size); else if (ioend->io_type == IOMAP_UNWRITTEN) error = xfs_iomap_write_unwritten(ip, offset, size, false); - else - ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_private); + if (!error && xfs_ioend_is_append(ioend)) + error = xfs_setfilesize(ip, ioend->io_offset, ioend->io_size); done: - if (ioend->io_private) - error = xfs_setfilesize_ioend(ioend, error); iomap_finish_ioends(ioend, error); memalloc_nofs_restore(nofs_flag); } @@ -246,7 +217,7 @@ xfs_end_io( static inline bool xfs_ioend_needs_workqueue(struct iomap_ioend *ioend) { - return ioend->io_private || + return xfs_ioend_is_append(ioend) || ioend->io_type == IOMAP_UNWRITTEN || (ioend->io_flags & IOMAP_F_SHARED); } @@ -259,8 +230,6 @@ xfs_end_bio( struct xfs_inode *ip = XFS_I(ioend->io_inode); unsigned long flags; - ASSERT(xfs_ioend_needs_workqueue(ioend)); - spin_lock_irqsave(&ip->i_ioend_lock, flags); if (list_empty(&ip->i_ioend_list)) WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue, @@ -510,14 +479,6 @@ xfs_prepare_ioend( ioend->io_offset, ioend->io_size); } - /* Reserve log space if we might write beyond the on-disk inode size. */ - if (!status && - ((ioend->io_flags & IOMAP_F_SHARED) || - ioend->io_type != IOMAP_UNWRITTEN) && - xfs_ioend_is_append(ioend) && - !ioend->io_private) - status = xfs_setfilesize_trans_alloc(ioend); - memalloc_nofs_restore(nofs_flag); if (xfs_ioend_needs_workqueue(ioend)) diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 9ea83d80eb6f93f62fa98ba9875d03ed58871a69..dcbd41048b4e7dfae577332c71e667ccdd5e6aad 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -190,7 +190,7 @@ static inline u64 readq(const volatile void __iomem *addr) u64 val; __io_br(); - val = __le64_to_cpu(__raw_readq(addr)); + val = __le64_to_cpu((__le64 __force)__raw_readq(addr)); __io_ar(val); return val; } @@ -233,7 +233,7 @@ static inline void writel(u32 value, volatile void __iomem *addr) static inline void writeq(u64 value, volatile void __iomem *addr) { __io_bw(); - __raw_writeq(__cpu_to_le64(value), addr); + __raw_writeq((u64 __force)__cpu_to_le64(value), addr); __io_aw(); } #endif diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h index 69b24fe92cbf1c49e09808ab2ebe70e67cb9d23b..5e96bad54804724b11b52a41267ab62ad1c7000c 100644 --- a/include/linux/blk-crypto.h +++ b/include/linux/blk-crypto.h @@ -97,8 +97,8 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key, int blk_crypto_start_using_key(const struct blk_crypto_key *key, struct request_queue *q); -int blk_crypto_evict_key(struct request_queue *q, - const struct blk_crypto_key *key); +void blk_crypto_evict_key(struct request_queue *q, + const struct blk_crypto_key *key); bool blk_crypto_config_supported(struct request_queue *q, const struct blk_crypto_config *cfg); diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 913aa60228b16c68e56edf25a89b7adb6fe49974..8e284161b65e5e14785124e431e07a107e1adfdf 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -4,6 +4,7 @@ #include #include +#include #include #include #include @@ -63,16 +64,18 @@ static inline int clockid_to_fd(const clockid_t clk) * cpu_timer - Posix CPU timer representation for k_itimer * @node: timerqueue node to queue in the task/sig * @head: timerqueue head on which this timer is queued - * @task: Pointer to target task + * @pid: Pointer to target task PID * @elist: List head for the expiry 
list * @firing: Timer is currently firing + * @handling: Pointer to the task which handles expiry */ struct cpu_timer { - struct timerqueue_node node; - struct timerqueue_head *head; - struct pid *pid; - struct list_head elist; - int firing; + struct timerqueue_node node; + struct timerqueue_head *head; + struct pid *pid; + struct list_head elist; + int firing; + struct task_struct __rcu *handling; }; static inline bool cpu_timer_enqueue(struct timerqueue_head *head, @@ -129,10 +132,12 @@ struct posix_cputimers { /** * posix_cputimers_work - Container for task work based posix CPU timer expiry * @work: The task work to be scheduled + * @mutex: Mutex held around expiry in context of this task work * @scheduled: @work has been scheduled already, no further processing */ struct posix_cputimers_work { struct callback_head work; + struct mutex mutex; unsigned int scheduled; }; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 287999eedef4545f8d25c736bfdbb9e987a34415..a210f19958621775452d2be0281a0693249d42c9 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -4277,7 +4277,7 @@ static inline void nf_reset_ct(struct sk_buff *skb) static inline void nf_reset_trace(struct sk_buff *skb) { -#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) +#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES) skb->nf_trace = 0; #endif } @@ -4297,7 +4297,7 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, dst->_nfct = src->_nfct; nf_conntrack_get(skb_nfct(src)); #endif -#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) +#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES) if (copy) dst->nf_trace = src->nf_trace; #endif diff --git a/include/linux/tick.h b/include/linux/tick.h index 7340613c7eff7ad791634c76ae2ecd547f711c45..a90a8f7759a2614c13b4ab10eadc6383edbcb9f4 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -211,6 +211,7 @@ extern void tick_nohz_dep_set_signal(struct signal_struct *signal, enum tick_dep_bits bit); extern void tick_nohz_dep_clear_signal(struct signal_struct *signal, enum tick_dep_bits bit); +extern bool tick_nohz_cpu_hotpluggable(unsigned int cpu); /* * The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases @@ -275,6 +276,7 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { } static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { } static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { } +static inline bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { return true; } static inline void tick_dep_set(enum tick_dep_bits bit) { } static inline void tick_dep_clear(enum tick_dep_bits bit) { } diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 6538b11fadd538d496bde84026d4ac658a69801c..4c8f97a6da5a7ea18416f96efe256de93003a5f4 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -1105,6 +1105,8 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info); void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu); +void inet6_cleanup_sock(struct sock *sk); +void inet6_sock_destruct(struct sock *sk); int inet6_release(struct socket *sock); int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len); int inet6_getname(struct socket *sock, struct sockaddr *uaddr, diff --git a/include/net/udp.h 
b/include/net/udp.h index 388e68c7bca050b3782201a80a424575a5f793bc..e2550a4547a7054405516c8a263ac0fdded09ef9 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -268,7 +268,7 @@ static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if, } /* net/ipv4/udp.c */ -void udp_destruct_sock(struct sock *sk); +void udp_destruct_common(struct sock *sk); void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len); int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb); void udp_skb_destructor(struct sock *sk, struct sk_buff *skb); diff --git a/include/net/udplite.h b/include/net/udplite.h index 9185e45b997ff67ce2999c6b0665c8d693b44338..c59ba86668af0e5734195263a8080f3e1f44a0a6 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h @@ -24,14 +24,6 @@ static __inline__ int udplite_getfrag(void *from, char *to, int offset, return copy_from_iter_full(to, len, &msg->msg_iter) ? 0 : -EFAULT; } -/* Designate sk as UDP-Lite socket */ -static inline int udplite_sk_init(struct sock *sk) -{ - udp_init_sock(sk); - udp_sk(sk)->pcflag = UDPLITE_BIT; - return 0; -} - /* * Checksumming routines */ diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 2f863aba6acdf609bc1abe3bcfd9533b1adf5f33..0b042847b9f844db0e4d6d8c53aedc3e31ca9f69 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -513,7 +513,7 @@ TRACE_EVENT(f2fs_truncate_partial_nodes, TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) - __field(nid_t, nid[3]) + __array(nid_t, nid, 3) __field(int, depth) __field(int, err) ), diff --git a/kernel/fork.c b/kernel/fork.c index dd0375e4644bd5edc8f0f914afafee1d2aa94f03..82a9b0473cd4aacd4f8426b2ee56524ca740143a 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -449,6 +449,9 @@ void put_task_stack(struct task_struct *tsk) void free_task(struct task_struct *tsk) { +#ifdef CONFIG_SECCOMP + WARN_ON_ONCE(tsk->seccomp.filter); +#endif scs_release(tsk); #ifndef CONFIG_THREAD_INFO_IN_TASK @@ -2271,12 +2274,6 @@ static __latent_entropy struct task_struct *copy_process( spin_lock(¤t->sighand->siglock); - /* - * Copy seccomp details explicitly here, in case they were changed - * before holding sighand lock. - */ - copy_seccomp(p); - rseq_fork(p, clone_flags); /* Don't start children in a dying pid namespace */ @@ -2291,6 +2288,14 @@ static __latent_entropy struct task_struct *copy_process( goto bad_fork_cancel_cgroup; } + /* No more failure paths after this point. */ + + /* + * Copy seccomp details explicitly here, in case they were changed + * before holding sighand lock. 
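+	 *
+	 * Running this only after the last failure path also guarantees
+	 * that a child torn down on an error path never carries a filter,
+	 * which the new WARN_ON_ONCE() in free_task() asserts.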
+ */ + copy_seccomp(p); + init_task_pid_links(p); if (likely(p->pid)) { ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); diff --git a/kernel/kheaders.c b/kernel/kheaders.c index 8f69772af77b457b7fa5fa19723e8721445b5c6e..42163c9e94e557c5043e2ac7987b567f23087ee2 100644 --- a/kernel/kheaders.c +++ b/kernel/kheaders.c @@ -26,15 +26,15 @@ asm ( " .popsection \n" ); -extern char kernel_headers_data; -extern char kernel_headers_data_end; +extern char kernel_headers_data[]; +extern char kernel_headers_data_end[]; static ssize_t ikheaders_read(struct file *file, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len) { - memcpy(buf, &kernel_headers_data + off, len); + memcpy(buf, &kernel_headers_data[off], len); return len; } @@ -48,8 +48,8 @@ static struct bin_attribute kheaders_attr __ro_after_init = { static int __init ikheaders_init(void) { - kheaders_attr.size = (&kernel_headers_data_end - - &kernel_headers_data); + kheaders_attr.size = (kernel_headers_data_end - + kernel_headers_data); return sysfs_create_bin_file(kernel_kobj, &kheaders_attr); } diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 9cce4e13af4141a834207361274f40c5ce539953..30e1d7fedb5fddd795bd0a9a20bf646bda569535 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -964,6 +964,7 @@ void __rcu_irq_enter_check_tick(void) } raw_spin_unlock_rcu_node(rdp->mynode); } +NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick); #endif /* CONFIG_NO_HZ_FULL */ /** diff --git a/kernel/sched/core.c b/kernel/sched/core.c index ad03e08d82bd9403bccb4a4a1db60083b8500a67..9afa1dd0f7e261e839e6377e93be5dd8c00ba9f7 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1031,7 +1031,7 @@ static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id, if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE)) return; - WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value); + uclamp_rq_set(rq, clamp_id, clamp_value); } static inline @@ -1209,8 +1209,8 @@ static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p, if (bucket->tasks == 1 || uc_se->value > bucket->value) bucket->value = uc_se->value; - if (uc_se->value > READ_ONCE(uc_rq->value)) - WRITE_ONCE(uc_rq->value, uc_se->value); + if (uc_se->value > uclamp_rq_get(rq, clamp_id)) + uclamp_rq_set(rq, clamp_id, uc_se->value); } /* @@ -1276,7 +1276,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, if (likely(bucket->tasks)) return; - rq_clamp = READ_ONCE(uc_rq->value); + rq_clamp = uclamp_rq_get(rq, clamp_id); /* * Defensive programming: this should never happen. If it happens, * e.g. due to future modification, warn and fixup the expected value. 
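The two uclamp_rq_dec_id() hunks here route the read and the fixup write through the new uclamp_rq_get()/uclamp_rq_set() accessors. As a rough model of the bucket bookkeeping these hunks maintain — a toy user-space sketch with illustrative values, not kernel code:

```c
#include <stdio.h>

/* Toy model: the rq-wide clamp tracks the max over occupied buckets;
 * removing the last task of the top bucket lets the clamp decay. */
struct bucket { unsigned int tasks; unsigned int value; };

static unsigned int rq_max_value(const struct bucket *b, int n)
{
	unsigned int max = 0;

	for (int i = 0; i < n; i++)
		if (b[i].tasks && b[i].value > max)
			max = b[i].value;
	return max;
}

int main(void)
{
	struct bucket rq[3] = { { 2, 256 }, { 1, 640 }, { 0, 0 } };
	unsigned int rq_clamp = rq_max_value(rq, 3);	/* 640 */

	rq[1].tasks--;				/* last task leaves the top bucket */
	if (!rq[1].tasks && rq[1].value >= rq_clamp)
		rq_clamp = rq_max_value(rq, 3);	/* decays to 256 */

	printf("rq clamp: %u\n", rq_clamp);
	return 0;
}
```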
@@ -1284,7 +1284,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, SCHED_WARN_ON(bucket->value > rq_clamp); if (bucket->value >= rq_clamp) { bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value); - WRITE_ONCE(uc_rq->value, bkt_clamp); + uclamp_rq_set(rq, clamp_id, bkt_clamp); } } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 01038fc4c41e149145574a04d6a1584360c00300..e5e72262e2efca0a15372e2bf22b8933c1885f0d 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3964,20 +3964,26 @@ static inline unsigned long task_util_est(struct task_struct *p) #ifdef CONFIG_UCLAMP_TASK #ifdef CONFIG_SCHED_RT_CAS -unsigned long uclamp_task_util(struct task_struct *p) +unsigned long uclamp_task_util(struct task_struct *p, + unsigned long uclamp_min, + unsigned long uclamp_max) #else -static inline unsigned long uclamp_task_util(struct task_struct *p) +static inline unsigned long uclamp_task_util(struct task_struct *p, + unsigned long uclamp_min, + unsigned long uclamp_max) #endif { - return clamp(task_util_est(p), - uclamp_eff_value(p, UCLAMP_MIN), - uclamp_eff_value(p, UCLAMP_MAX)); + return clamp(task_util_est(p), uclamp_min, uclamp_max); } #else #ifdef CONFIG_SCHED_RT_CAS -unsigned long uclamp_task_util(struct task_struct *p) +unsigned long uclamp_task_util(struct task_struct *p, + unsigned long uclamp_min, + unsigned long uclamp_max) #else -static inline unsigned long uclamp_task_util(struct task_struct *p) +static inline unsigned long uclamp_task_util(struct task_struct *p, + unsigned long uclamp_min, + unsigned long uclamp_max) #endif { return task_util_est(p); @@ -4154,12 +4160,16 @@ static inline int util_fits_cpu(unsigned long util, * For uclamp_max, we can tolerate a drop in performance level as the * goal is to cap the task. So it's okay if it's getting less. * - * In case of capacity inversion, which is not handled yet, we should - * honour the inverted capacity for both uclamp_min and uclamp_max all - * the time. + * In case of capacity inversion we should honour the inverted capacity + * for both uclamp_min and uclamp_max all the time. */ - capacity_orig = capacity_orig_of(cpu); - capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu); + capacity_orig = cpu_in_capacity_inversion(cpu); + if (capacity_orig) { + capacity_orig_thermal = capacity_orig; + } else { + capacity_orig = capacity_orig_of(cpu); + capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu); + } /* * We want to force a task to fit a cpu as implied by uclamp_max. 
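The util_fits_cpu() hunk above substitutes the inverted capacity for both capacity_orig and capacity_orig_thermal whenever cpu_in_capacity_inversion() reports one. A worked sketch of that branch with made-up numbers (no real platform implied):

```c
#include <stdio.h>

/* Illustrative values only: arch capacity 768, thermal pressure 168,
 * and an inverted capacity of 600 reported by the detection code. */
int main(void)
{
	unsigned long capacity_orig, capacity_orig_thermal;
	unsigned long arch_cap = 768, thermal_pressure = 168;
	unsigned long inverted = 600;	/* 0 when not inverted */

	if (inverted) {
		capacity_orig = inverted;			/* 600 */
		capacity_orig_thermal = inverted;		/* 600 */
	} else {
		capacity_orig = arch_cap;			/* 768 */
		capacity_orig_thermal = arch_cap - thermal_pressure;
	}

	printf("orig=%lu thermal=%lu\n", capacity_orig, capacity_orig_thermal);
	return 0;
}
```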
@@ -4240,10 +4250,12 @@ static inline int util_fits_cpu(unsigned long util, return fits; } -static inline int task_fits_capacity(struct task_struct *p, - unsigned long capacity) +static inline int task_fits_cpu(struct task_struct *p, int cpu) { - return fits_capacity(uclamp_task_util(p), capacity); + unsigned long uclamp_min = uclamp_eff_value(p, UCLAMP_MIN); + unsigned long uclamp_max = uclamp_eff_value(p, UCLAMP_MAX); + unsigned long util = task_util_est(p); + return util_fits_cpu(util, uclamp_min, uclamp_max, cpu); } #ifdef CONFIG_SCHED_RTG @@ -4255,7 +4267,7 @@ bool task_fits_max(struct task_struct *p, int cpu) if (capacity == max_capacity) return true; - return task_fits_capacity(p, capacity); + return task_fits_cpu(p, cpu); } #endif @@ -4281,9 +4293,9 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) task_fits = capacity_orig_of(cpu) >= capacity_orig_of(cpumask_first(rtg_target)); else - task_fits = task_fits_capacity(p, capacity_of(cpu_of(rq))); + task_fits = task_fits_cpu(p, cpu_of(rq)); #else - task_fits = task_fits_capacity(p, capacity_of(cpu_of(rq))); + task_fits = task_fits_cpu(p, cpu_of(rq)); #endif if (task_fits) { rq->misfit_task_load = 0; @@ -5732,7 +5744,10 @@ static inline void hrtick_update(struct rq *rq) #ifdef CONFIG_SMP static inline bool cpu_overutilized(int cpu) { - return !fits_capacity(cpu_util(cpu), capacity_of(cpu)); + unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN); + unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX); + + return !util_fits_cpu(cpu_util(cpu), rq_util_min, rq_util_max, cpu); } static inline void update_overutilized_status(struct rq *rq) @@ -6513,14 +6528,16 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t static int select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target) { - unsigned long task_util, best_cap = 0; + unsigned long task_util, util_min, util_max, best_cap = 0; int cpu, best_cpu = -1; struct cpumask *cpus; cpus = this_cpu_cpumask_var_ptr(select_idle_mask); cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); - task_util = uclamp_task_util(p); + task_util = task_util_est(p); + util_min = uclamp_eff_value(p, UCLAMP_MIN); + util_max = uclamp_eff_value(p, UCLAMP_MAX); for_each_cpu_wrap(cpu, cpus, target) { unsigned long cpu_cap = capacity_of(cpu); @@ -6530,7 +6547,7 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target) if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu)) continue; - if (fits_capacity(task_util, cpu_cap)) + if (util_fits_cpu(task_util, util_min, util_max, cpu)) return cpu; if (cpu_cap > best_cap) { @@ -6542,10 +6559,13 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target) return best_cpu; } -static inline bool asym_fits_capacity(unsigned long task_util, int cpu) +static inline bool asym_fits_cpu(unsigned long util, + unsigned long util_min, + unsigned long util_max, + int cpu) { if (static_branch_unlikely(&sched_asym_cpucapacity)) - return fits_capacity(task_util, capacity_of(cpu)); + return util_fits_cpu(util, util_min, util_max, cpu); return true; } @@ -6556,7 +6576,7 @@ static inline bool asym_fits_capacity(unsigned long task_util, int cpu) static int select_idle_sibling(struct task_struct *p, int prev, int target) { struct sched_domain *sd; - unsigned long task_util; + unsigned long task_util, util_min, util_max; int i, recent_used_cpu; /* @@ -6565,11 +6585,13 @@ static int select_idle_sibling(struct task_struct *p, int prev, int 
target) */ if (static_branch_unlikely(&sched_asym_cpucapacity)) { sync_entity_load_avg(&p->se); - task_util = uclamp_task_util(p); + task_util = task_util_est(p); + util_min = uclamp_eff_value(p, UCLAMP_MIN); + util_max = uclamp_eff_value(p, UCLAMP_MAX); } if ((available_idle_cpu(target) || sched_idle_cpu(target)) && - !cpu_isolated(target) && asym_fits_capacity(task_util, target)) + !cpu_isolated(target) && asym_fits_cpu(task_util, util_min, util_max, target)) return target; /* @@ -6577,7 +6599,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) */ if (prev != target && cpus_share_cache(prev, target) && ((available_idle_cpu(prev) || sched_idle_cpu(prev)) && - !cpu_isolated(target) && asym_fits_capacity(task_util, prev))) + !cpu_isolated(target) && asym_fits_cpu(task_util, util_min, util_max, prev))) return prev; /* @@ -6592,7 +6614,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) in_task() && prev == smp_processor_id() && this_rq()->nr_running <= 1 && - asym_fits_capacity(task_util, prev)) { + asym_fits_cpu(task_util, util_min, util_max, prev)) { return prev; } @@ -6603,7 +6625,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) cpus_share_cache(recent_used_cpu, target) && (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) && cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) && - asym_fits_capacity(task_util, recent_used_cpu)) { + asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) { /* * Replace recent_used_cpu with prev as it is a potential * candidate for the next wake: @@ -6971,6 +6993,8 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd) static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) { unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX; + unsigned long p_util_min = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MIN) : 0; + unsigned long p_util_max = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MAX) : 1024; struct root_domain *rd = cpu_rq(smp_processor_id())->rd; unsigned long cpu_cap, util, base_energy = 0; int cpu, best_energy_cpu = prev_cpu; @@ -6993,11 +7017,13 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) goto fail; sync_entity_load_avg(&p->se); - if (!task_util_est(p)) + if (!uclamp_task_util(p, p_util_min, p_util_max)) goto unlock; for (; pd; pd = pd->next) { + unsigned long util_min = p_util_min, util_max = p_util_max; unsigned long cur_delta, spare_cap, max_spare_cap = 0; + unsigned long rq_util_min, rq_util_max; unsigned long base_energy_pd; int max_spare_cap_cpu = -1; @@ -7006,6 +7032,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) base_energy += base_energy_pd; for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) { + struct rq *rq = cpu_rq(cpu); + if (!cpumask_test_cpu(cpu, p->cpus_ptr)) continue; @@ -7021,8 +7049,21 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) * much capacity we can get out of the CPU; this is * aligned with schedutil_cpu_util(). */ - util = uclamp_rq_util_with(cpu_rq(cpu), util, p); - if (!fits_capacity(util, cpu_cap)) + if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) { + /* + * Open code uclamp_rq_util_with() except for + * the clamp() part. Ie: apply max aggregation + * only. util_fits_cpu() logic requires to + * operate on non clamped util but must use the + * max-aggregated uclamp_{min, max}. 
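+				 *
+				 * E.g. p_util_min = 512 on a rq whose
+				 * rq_util_min is already 640 yields
+				 * util_min = max(640, 512) = 640: the
+				 * task's request is honoured without
+				 * weakening the floor the runqueue
+				 * already demands.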
+ */ + rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN); + rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX); + + util_min = max(rq_util_min, p_util_min); + util_max = max(rq_util_max, p_util_max); + } + if (!util_fits_cpu(util, util_min, util_max, cpu)) continue; /* Always use prev_cpu as a candidate. */ @@ -8182,7 +8223,7 @@ static int detach_tasks(struct lb_env *env) case migrate_misfit: /* This is not a misfit task */ - if (task_fits_capacity(p, capacity_of(env->src_cpu))) + if (task_fits_cpu(p, env->src_cpu)) goto next; env->imbalance = 0; @@ -8589,16 +8630,82 @@ static unsigned long scale_rt_capacity(int cpu) static void update_cpu_capacity(struct sched_domain *sd, int cpu) { + unsigned long capacity_orig = arch_scale_cpu_capacity(cpu); unsigned long capacity = scale_rt_capacity(cpu); struct sched_group *sdg = sd->groups; + struct rq *rq = cpu_rq(cpu); - cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu); + rq->cpu_capacity_orig = capacity_orig; if (!capacity) capacity = 1; - cpu_rq(cpu)->cpu_capacity = capacity; - trace_sched_cpu_capacity_tp(cpu_rq(cpu)); + rq->cpu_capacity = capacity; + + /* + * Detect if the performance domain is in capacity inversion state. + * + * Capacity inversion happens when another perf domain with equal or + * lower capacity_orig_of() ends up having higher capacity than this + * domain after subtracting thermal pressure. + * + * We only take into account thermal pressure in this detection as it's + * the only metric that actually results in *real* reduction of + * capacity due to performance points (OPPs) being dropped/become + * unreachable due to thermal throttling. + * + * We assume: + * * That all cpus in a perf domain have the same capacity_orig + * (same uArch). + * * Thermal pressure will impact all cpus in this perf domain + * equally. + */ + if (sched_energy_enabled()) { + unsigned long inv_cap = capacity_orig - thermal_load_avg(rq); + struct perf_domain *pd; + + rcu_read_lock(); + + pd = rcu_dereference(rq->rd->pd); + rq->cpu_capacity_inverted = 0; + + for (; pd; pd = pd->next) { + struct cpumask *pd_span = perf_domain_span(pd); + unsigned long pd_cap_orig, pd_cap; + + /* We can't be inverted against our own pd */ + if (cpumask_test_cpu(cpu_of(rq), pd_span)) + continue; + + cpu = cpumask_any(pd_span); + pd_cap_orig = arch_scale_cpu_capacity(cpu); + + if (capacity_orig < pd_cap_orig) + continue; + + /* + * handle the case of multiple perf domains have the + * same capacity_orig but one of them is under higher + * thermal pressure. We record it as capacity + * inversion. 
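+			 *
+			 * E.g. two domains with capacity_orig = 768: if
+			 * thermal pressure leaves this CPU at inv_cap = 600
+			 * while the sibling domain still delivers
+			 * pd_cap = 700, then 600 is recorded in
+			 * rq->cpu_capacity_inverted.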
+ */ + if (capacity_orig == pd_cap_orig) { + pd_cap = pd_cap_orig - thermal_load_avg(cpu_rq(cpu)); + + if (pd_cap > inv_cap) { + rq->cpu_capacity_inverted = inv_cap; + break; + } + } else if (pd_cap_orig > inv_cap) { + rq->cpu_capacity_inverted = inv_cap; + break; + } + } + + rcu_read_unlock(); + } + + trace_sched_cpu_capacity_tp(rq); sdg->sgc->capacity = capacity; sdg->sgc->min_capacity = capacity; @@ -9154,6 +9261,10 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd, memset(sgs, 0, sizeof(*sgs)); + /* Assume that task can't fit any CPU of the group */ + if (sd->flags & SD_ASYM_CPUCAPACITY) + sgs->group_misfit_task_load = 1; + for_each_cpu(i, sched_group_span(group)) { struct rq *rq = cpu_rq(i); unsigned int local; @@ -9173,12 +9284,12 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd, if (!nr_running && idle_cpu_without(i, p)) sgs->idle_cpus++; - } + /* Check if task fits in the CPU */ + if (sd->flags & SD_ASYM_CPUCAPACITY && + sgs->group_misfit_task_load && + task_fits_cpu(p, i)) + sgs->group_misfit_task_load = 0; - /* Check if task fits in the group */ - if (sd->flags & SD_ASYM_CPUCAPACITY && - !task_fits_capacity(p, group->sgc->max_capacity)) { - sgs->group_misfit_task_load = 1; } sgs->group_capacity = group->sgc->capacity; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 3457a8ac743a3c8cd88b5babde0231331d72c7f6..e17b13878749aed99625b2880d06c946fb2d878c 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -88,7 +88,9 @@ struct rq; struct cpuidle_state; #ifdef CONFIG_SCHED_RT_CAS -extern unsigned long uclamp_task_util(struct task_struct *p); +extern unsigned long uclamp_task_util(struct task_struct *p, + unsigned long uclamp_min, + unsigned long uclamp_max) #endif #ifdef CONFIG_SCHED_WALT @@ -1073,6 +1075,7 @@ struct rq { unsigned long cpu_capacity; unsigned long cpu_capacity_orig; + unsigned long cpu_capacity_inverted; struct callback_head *balance_callback; @@ -2569,6 +2572,23 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} #ifdef CONFIG_UCLAMP_TASK unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); +static inline unsigned long uclamp_rq_get(struct rq *rq, + enum uclamp_id clamp_id) +{ + return READ_ONCE(rq->uclamp[clamp_id].value); +} + +static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, + unsigned int value) +{ + WRITE_ONCE(rq->uclamp[clamp_id].value, value); +} + +static inline bool uclamp_rq_is_idle(struct rq *rq) +{ + return rq->uclamp_flags & UCLAMP_FLAG_IDLE; +} + /** * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values. * @rq: The rq to clamp against. Must not be NULL. @@ -2604,12 +2624,12 @@ unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, * Ignore last runnable task's max clamp, as this task will * reset it. Similarly, no need to read the rq's min clamp. 
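+		 *
+		 * The open-coded UCLAMP_FLAG_IDLE test below is likewise
+		 * replaced by uclamp_rq_is_idle(), the same helper the
+		 * find_energy_efficient_cpu() hunk above relies on.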
*/ - if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) + if (uclamp_rq_is_idle(rq)) goto out; } - min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value)); - max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value)); + min_util = max_t(unsigned long, min_util, uclamp_rq_get(rq, UCLAMP_MIN)); + max_util = max_t(unsigned long, max_util, uclamp_rq_get(rq, UCLAMP_MAX)); out: /* * Since CPU's {min,max}_util clamps are MAX aggregated considering @@ -2640,6 +2660,15 @@ static inline bool uclamp_is_used(void) return static_branch_likely(&sched_uclamp_used); } #else /* CONFIG_UCLAMP_TASK */ +static inline unsigned long uclamp_eff_value(struct task_struct *p, + enum uclamp_id clamp_id) +{ + if (clamp_id == UCLAMP_MIN) + return 0; + + return SCHED_CAPACITY_SCALE; +} + static inline unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, struct task_struct *p) @@ -2656,6 +2685,25 @@ static inline bool uclamp_is_used(void) { return false; } + +static inline unsigned long uclamp_rq_get(struct rq *rq, + enum uclamp_id clamp_id) +{ + if (clamp_id == UCLAMP_MIN) + return 0; + + return SCHED_CAPACITY_SCALE; +} + +static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, + unsigned int value) +{ +} + +static inline bool uclamp_rq_is_idle(struct rq *rq) +{ + return false; +} #endif /* CONFIG_UCLAMP_TASK */ #ifdef arch_scale_freq_capacity @@ -2676,6 +2724,24 @@ static inline unsigned long capacity_orig_of(int cpu) { return cpu_rq(cpu)->cpu_capacity_orig; } + +/* + * Returns inverted capacity if the CPU is in capacity inversion state. + * 0 otherwise. + * + * Capacity inversion detection only considers thermal impact where actual + * performance points (OPPs) gets dropped. + * + * Capacity inversion state happens when another performance domain that has + * equal or lower capacity_orig_of() becomes effectively larger than the perf + * domain this CPU belongs to due to thermal pressure throttling it hard. + * + * See comment in update_cpu_capacity(). 
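+ *
+ * util_fits_cpu() substitutes this value for capacity_orig_of() when it
+ * is non-zero, so both uclamp_min and uclamp_max are judged against the
+ * capacity the CPU can actually deliver.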
+ */
+static inline unsigned long cpu_in_capacity_inversion(int cpu)
+{
+	return cpu_rq(cpu)->cpu_capacity_inverted;
+}
 #endif

 /**
diff --git a/kernel/sys.c b/kernel/sys.c
index ee71621340dcb02a19dff09d0381d6b3b4674951..ad2d6b91ecdefff7b3e8a7511391406c2e18bc55 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -635,6 +635,7 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
 	struct cred *new;
 	int retval;
 	kuid_t kruid, keuid, ksuid;
+	bool ruid_new, euid_new, suid_new;

 	kruid = make_kuid(ns, ruid);
 	keuid = make_kuid(ns, euid);
@@ -649,25 +650,29 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
 	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
 		return -EINVAL;

+	old = current_cred();
+
+	/* check for no-op */
+	if ((ruid == (uid_t) -1 || uid_eq(kruid, old->uid)) &&
+	    (euid == (uid_t) -1 || (uid_eq(keuid, old->euid) &&
+				    uid_eq(keuid, old->fsuid))) &&
+	    (suid == (uid_t) -1 || uid_eq(ksuid, old->suid)))
+		return 0;
+
+	ruid_new = ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
+		   !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid);
+	euid_new = euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
+		   !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid);
+	suid_new = suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
+		   !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid);
+	if ((ruid_new || euid_new || suid_new) &&
+	    !ns_capable_setid(old->user_ns, CAP_SETUID))
+		return -EPERM;
+
 	new = prepare_creds();
 	if (!new)
 		return -ENOMEM;
-	old = current_cred();
-
-	retval = -EPERM;
-	if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
-		if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
-		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
-			goto error;
-		if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
-		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
-			goto error;
-		if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
-		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
-			goto error;
-	}
-
 	if (ruid != (uid_t) -1) {
 		new->uid = kruid;
 		if (!uid_eq(kruid, old->uid)) {
@@ -727,6 +732,7 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
 	struct cred *new;
 	int retval;
 	kgid_t krgid, kegid, ksgid;
+	bool rgid_new, egid_new, sgid_new;

 	krgid = make_kgid(ns, rgid);
 	kegid = make_kgid(ns, egid);
@@ -739,23 +745,28 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
 	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
 		return -EINVAL;

+	old = current_cred();
+
+	/* check for no-op */
+	if ((rgid == (gid_t) -1 || gid_eq(krgid, old->gid)) &&
+	    (egid == (gid_t) -1 || (gid_eq(kegid, old->egid) &&
+				    gid_eq(kegid, old->fsgid))) &&
+	    (sgid == (gid_t) -1 || gid_eq(ksgid, old->sgid)))
+		return 0;
+
+	rgid_new = rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
+		   !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid);
+	egid_new = egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
+		   !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid);
+	sgid_new = sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
+		   !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid);
+	if ((rgid_new || egid_new || sgid_new) &&
+	    !ns_capable_setid(old->user_ns, CAP_SETGID))
+		return -EPERM;
+
 	new = prepare_creds();
 	if (!new)
 		return -ENOMEM;
-	old = current_cred();
-
-	retval = -EPERM;
-	if (!ns_capable_setid(old->user_ns, CAP_SETGID)) {
-		if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
-		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
-			goto error;
-		if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
-		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
-			goto error;
-		if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
-		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
-			goto error;
-	}

 	if (rgid != (gid_t) -1)
 		new->gid = krgid;
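The __sys_setresuid()/__sys_setresgid() rework above does two things: a call that changes nothing now returns 0 before any credential allocation, and the CAP_SETUID/CAP_SETGID test moves ahead of prepare_creds(), so the capability is demanded only when at least one ID would actually change. A minimal userspace sketch of the semantics this preserves (not part of the patch; setresuid()/getresuid() are the standard glibc wrappers):

    /* Sketch: -1 means "leave unchanged", so these calls are no-ops and
     * must succeed even for a task without CAP_SETUID. */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uid_t r, e, s;

            if (getresuid(&r, &e, &s))
                    return 1;

            /* -1 for every field: a pure no-op, expected to return 0 */
            if (setresuid(-1, -1, -1))
                    perror("setresuid(-1, -1, -1)");

            /* Re-requesting the current IDs is likewise a no-op */
            if (setresuid(r, e, s))
                    perror("setresuid(current ids)");

            printf("ruid=%d euid=%d suid=%d\n", (int)r, (int)e, (int)s);
            return 0;
    }

With the old ordering, even such no-op calls went through prepare_creds() and the full permission block first; now they short-circuit.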
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 5d76edd0ad9c26d477e6a2035d668c882d177f61..bede1e608d959cee591dd9f11ec8582199dfb1a9 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -782,6 +782,8 @@ static u64 collect_timerqueue(struct timerqueue_head *head,
 			return expires;

 		ctmr->firing = 1;
+		/* See posix_cpu_timer_wait_running() */
+		rcu_assign_pointer(ctmr->handling, current);
 		cpu_timer_dequeue(ctmr);
 		list_add_tail(&ctmr->elist, firing);
 	}
@@ -1097,7 +1099,49 @@ static void handle_posix_cpu_timers(struct task_struct *tsk);
 #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
 static void posix_cpu_timers_work(struct callback_head *work)
 {
+	struct posix_cputimers_work *cw = container_of(work, typeof(*cw), work);
+
+	mutex_lock(&cw->mutex);
 	handle_posix_cpu_timers(current);
+	mutex_unlock(&cw->mutex);
+}
+
+/*
+ * Invoked from the posix-timer core when a cancel operation failed because
+ * the timer is marked firing. The caller holds rcu_read_lock(), which
+ * protects the timer and the task which is expiring it from being freed.
+ */
+static void posix_cpu_timer_wait_running(struct k_itimer *timr)
+{
+	struct task_struct *tsk = rcu_dereference(timr->it.cpu.handling);
+
+	/* Has the handling task completed expiry already? */
+	if (!tsk)
+		return;
+
+	/* Ensure that the task cannot go away */
+	get_task_struct(tsk);
+	/* Now drop the RCU protection so the mutex can be locked */
+	rcu_read_unlock();
+	/* Wait on the expiry mutex */
+	mutex_lock(&tsk->posix_cputimers_work.mutex);
+	/* Release it immediately again. */
+	mutex_unlock(&tsk->posix_cputimers_work.mutex);
+	/* Drop the task reference. */
+	put_task_struct(tsk);
+	/* Relock RCU so the callsite is balanced */
+	rcu_read_lock();
+}
+
+static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
+{
+	/* Ensure that timr->it.cpu.handling task cannot go away */
+	rcu_read_lock();
+	spin_unlock_irq(&timr->it_lock);
+	posix_cpu_timer_wait_running(timr);
+	rcu_read_unlock();
+	/* @timr is on stack and is valid */
+	spin_lock_irq(&timr->it_lock);
 }

 /*
@@ -1113,6 +1157,7 @@ void clear_posix_cputimers_work(struct task_struct *p)
 	       sizeof(p->posix_cputimers_work.work));
 	init_task_work(&p->posix_cputimers_work.work,
 		       posix_cpu_timers_work);
+	mutex_init(&p->posix_cputimers_work.mutex);
 	p->posix_cputimers_work.scheduled = false;
 }

@@ -1191,6 +1236,18 @@ static inline void __run_posix_cpu_timers(struct task_struct *tsk)
 	lockdep_posixtimer_exit();
 }

+static void posix_cpu_timer_wait_running(struct k_itimer *timr)
+{
+	cpu_relax();
+}
+
+static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
+{
+	spin_unlock_irq(&timr->it_lock);
+	cpu_relax();
+	spin_lock_irq(&timr->it_lock);
+}
+
 static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
 {
 	return false;
@@ -1299,6 +1356,8 @@ static void handle_posix_cpu_timers(struct task_struct *tsk)
 		 */
 		if (likely(cpu_firing >= 0))
 			cpu_timer_fire(timer);
+		/* See posix_cpu_timer_wait_running() */
+		rcu_assign_pointer(timer->it.cpu.handling, NULL);
 		spin_unlock(&timer->it_lock);
 	}
 }
@@ -1434,23 +1493,16 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 		expires = cpu_timer_getexpires(&timer.it.cpu);
 		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
 		if (!error) {
-			/*
-			 * Timer is now unarmed, deletion can not fail.
-			 */
+			/* Timer is now unarmed, deletion can not fail. */
 			posix_cpu_timer_del(&timer);
+		} else {
+			while (error == TIMER_RETRY) {
+				posix_cpu_timer_wait_running_nsleep(&timer);
+				error = posix_cpu_timer_del(&timer);
+			}
 		}
-		spin_unlock_irq(&timer.it_lock);

-		while (error == TIMER_RETRY) {
-			/*
-			 * We need to handle case when timer was or is in the
-			 * middle of firing. In other cases we already freed
-			 * resources.
-			 */
-			spin_lock_irq(&timer.it_lock);
-			error = posix_cpu_timer_del(&timer);
-			spin_unlock_irq(&timer.it_lock);
-		}
+		spin_unlock_irq(&timer.it_lock);

 		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
 			/*
@@ -1560,6 +1612,7 @@ const struct k_clock clock_posix_cpu = {
 	.timer_del		= posix_cpu_timer_del,
 	.timer_get		= posix_cpu_timer_get,
 	.timer_rearm		= posix_cpu_timer_rearm,
+	.timer_wait_running	= posix_cpu_timer_wait_running,
 };

 const struct k_clock clock_process = {
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 5bbfb9b298cbcfd3d81c22045145b03bd2ff1ab7..7a18690e511012cb09c9b837c0cfb90293d19790 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -846,6 +846,10 @@ static struct k_itimer *timer_wait_running(struct k_itimer *timer,
 	rcu_read_lock();
 	unlock_timer(timer, *flags);

+	/*
+	 * kc->timer_wait_running() might drop RCU lock. So @timer
+	 * cannot be touched anymore after the function returns!
+	 */
 	if (!WARN_ON_ONCE(!kc->timer_wait_running))
 		kc->timer_wait_running(timer);

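The new timer_wait_running machinery above replaces the old busy-retry loop in do_cpu_nanosleep(): instead of repeatedly calling posix_cpu_timer_del() while the timer is firing, the deleting task now blocks on the per-task expiry mutex that posix_cpu_timers_work() holds for the whole callback. A userspace analogue of that handshake (an assumption-laden sketch, not kernel code; it presumes one mutex guards the entire callback and the names are invented):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t handling = PTHREAD_MUTEX_INITIALIZER;

    static void *handler(void *arg)
    {
            pthread_mutex_lock(&handling);
            puts("handler: expiry callback running");
            sleep(1);                       /* simulate the timer callback */
            pthread_mutex_unlock(&handling);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, handler, NULL);
            usleep(100 * 1000);             /* crude: let the handler start */

            /* "wait_running": take and drop the mutex, then it is safe */
            pthread_mutex_lock(&handling);
            pthread_mutex_unlock(&handling);
            puts("canceller: handler has finished");

            pthread_join(t, NULL);
            return 0;
    }

Acquiring and immediately releasing the mutex is enough: once the lock is obtainable, the callback is guaranteed to have completed, and no CPU time is burnt spinning.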
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 92fb738813f39e3762c19fc79db39f588c01a3c2..e4e0d032126bc3daab47e6a7cde9d71181cab925 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -426,7 +426,7 @@ void __init tick_nohz_full_setup(cpumask_var_t cpumask)
 	tick_nohz_full_running = true;
 }

-static int tick_nohz_cpu_down(unsigned int cpu)
+bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
 {
 	/*
 	 * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
@@ -434,8 +434,13 @@ static int tick_nohz_cpu_down(unsigned int cpu)
 	 * CPUs. It must remain online when nohz full is enabled.
 	 */
 	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
-		return -EBUSY;
-	return 0;
+		return false;
+	return true;
+}
+
+static int tick_nohz_cpu_down(unsigned int cpu)
+{
+	return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
 }

 void __init tick_nohz_init(void)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 21b07c7c6ee5eeceecace956adf1b4afd8766512..1fe6b29366f102290d56d167869fc72457422b67 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1644,6 +1644,8 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 	struct list_head *head = cpu_buffer->pages;
 	struct buffer_page *bpage, *tmp;

+	irq_work_sync(&cpu_buffer->irq_work.work);
+
 	free_buffer_page(cpu_buffer->reader_page);

 	if (head) {
@@ -1750,6 +1752,8 @@ ring_buffer_free(struct trace_buffer *buffer)

 	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);

+	irq_work_sync(&buffer->irq_work.work);
+
 	for_each_buffer_cpu(buffer, cpu)
 		rb_free_cpu_buffer(buffer->buffers[cpu]);

diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index ca770a783a9f91de7479d9ffdc1a414fcbf86b7d..b28f629c3527161d96d309b8009ec7dc120b0bd4 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -378,6 +378,15 @@ static void wb_exit(struct bdi_writeback *wb)
 static DEFINE_SPINLOCK(cgwb_lock);
 static struct workqueue_struct *cgwb_release_wq;

+static void cgwb_free_rcu(struct rcu_head *rcu_head)
+{
+	struct bdi_writeback *wb = container_of(rcu_head,
+			struct bdi_writeback, rcu);
+
+	percpu_ref_exit(&wb->refcnt);
+	kfree(wb);
+}
+
 static void cgwb_release_workfn(struct work_struct *work)
 {
 	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
@@ -397,7 +406,7 @@ static void cgwb_release_workfn(struct work_struct *work)
 	fprop_local_destroy_percpu(&wb->memcg_completions);
 	percpu_ref_exit(&wb->refcnt);
 	wb_exit(wb);
-	kfree_rcu(wb, rcu);
+	call_rcu(&wb->rcu, cgwb_free_rcu);
 }

 static void cgwb_release(struct percpu_ref *refcnt)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index b77186ec70e932980a5507ebb67ce9c0ca4bdff1..28e18777ec513c9cc575ad7d1e5bc039c8f9b43d 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -622,6 +622,10 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 			result = SCAN_PTE_NON_PRESENT;
 			goto out;
 		}
+		if (pte_uffd_wp(pteval)) {
+			result = SCAN_PTE_UFFD_WP;
+			goto out;
+		}
 		page = vm_normal_page(vma, address, pteval);
 		if (unlikely(!page)) {
 			result = SCAN_PAGE_NULL;
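Both ring-buffer hunks apply the same rule: the buffer embeds an irq_work that may still be pending or executing when the buffer is torn down, so the work must be flushed before the memory is freed. A kernel-style sketch of the pattern (hypothetical driver code, not from the patch):

    /* Sketch only: "my_buffer" is invented; the point is the ordering. */
    #include <linux/irq_work.h>
    #include <linux/slab.h>

    struct my_buffer {
            struct irq_work work;
            /* ... payload ... */
    };

    static void my_buffer_free(struct my_buffer *buf)
    {
            /*
             * Wait for a possibly in-flight irq_work callback; without
             * this, the callback could run after kfree() and touch
             * freed memory.
             */
            irq_work_sync(&buf->work);
            kfree(buf);
    }

The backing-dev hunk is the mirror-image lifetime fix: kfree_rcu() only frees the memory, so the percpu_ref teardown is moved into an explicit call_rcu() callback that runs percpu_ref_exit() before kfree().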
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 5183e627468d8901f4f80ed8d74a655aa2a6557f..0218eb169891cb8b075861df5ea5816ee3e2e2a0 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -283,6 +283,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			 const struct dccp_hdr *dh, const unsigned int len);

+void dccp_destruct_common(struct sock *sk);
 int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
 void dccp_destroy_sock(struct sock *sk);

diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index c563f9b325d05e79a9da66c38cfdf0f7ca7abe95..64e91783860df3b6c20019cab84752cbfa635d91 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -992,6 +992,12 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
 };

+static void dccp_v6_sk_destruct(struct sock *sk)
+{
+	dccp_destruct_common(sk);
+	inet6_sock_destruct(sk);
+}
+
 /* NOTE: A lot of things set to zero explicitly by call to
  *       sk_alloc() so need not be done here.
  */
@@ -1004,17 +1010,12 @@ static int dccp_v6_init_sock(struct sock *sk)
 		if (unlikely(!dccp_v6_ctl_sock_initialized))
 			dccp_v6_ctl_sock_initialized = 1;
 		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
+		sk->sk_destruct = dccp_v6_sk_destruct;
 	}

 	return err;
 }

-static void dccp_v6_destroy_sock(struct sock *sk)
-{
-	dccp_destroy_sock(sk);
-	inet6_destroy_sock(sk);
-}
-
 static struct timewait_sock_ops dccp6_timewait_sock_ops = {
 	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
 };
@@ -1037,7 +1038,7 @@ static struct proto dccp_v6_prot = {
 	.accept		   = inet_csk_accept,
 	.get_port	   = inet_csk_get_port,
 	.shutdown	   = dccp_shutdown,
-	.destroy	   = dccp_v6_destroy_sock,
+	.destroy	   = dccp_destroy_sock,
 	.orphan_count	   = &dccp_orphan_count,
 	.max_header	   = MAX_DCCP_HEADER,
 	.obj_size	   = sizeof(struct dccp6_sock),
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 65e81e0199b0463a96d30e6456fc02f1eb4fee03..e946211758c05f08c7be9d4d0441c47c2c13df9d 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -171,12 +171,18 @@ const char *dccp_packet_name(const int type)

 EXPORT_SYMBOL_GPL(dccp_packet_name);

-static void dccp_sk_destruct(struct sock *sk)
+void dccp_destruct_common(struct sock *sk)
 {
 	struct dccp_sock *dp = dccp_sk(sk);

 	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
 	dp->dccps_hc_tx_ccid = NULL;
+}
+EXPORT_SYMBOL_GPL(dccp_destruct_common);
+
+static void dccp_sk_destruct(struct sock *sk)
+{
+	dccp_destruct_common(sk);
 	inet_sock_destruct(sk);
 }

diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index b093daaa3deb93dbc28bbd7f2aadf46a9aa0eae6..f0db66e415bd672145ac1ccc90da3db1e2431343 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1582,7 +1582,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);

-void udp_destruct_sock(struct sock *sk)
+void udp_destruct_common(struct sock *sk)
 {
 	/* reclaim completely the forward allocated memory */
 	struct udp_sock *up = udp_sk(sk);
@@ -1595,10 +1595,14 @@ void udp_destruct_sock(struct sock *sk)
 		kfree_skb(skb);
 	}
 	udp_rmem_release(sk, total, 0, true);
+}
+EXPORT_SYMBOL_GPL(udp_destruct_common);

+static void udp_destruct_sock(struct sock *sk)
+{
+	udp_destruct_common(sk);
 	inet_sock_destruct(sk);
 }
-EXPORT_SYMBOL_GPL(udp_destruct_sock);

 int udp_init_sock(struct sock *sk)
 {
@@ -1606,7 +1610,6 @@ int udp_init_sock(struct sock *sk)
 	sk->sk_destruct = udp_destruct_sock;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(udp_init_sock);

 void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
 {
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index bd8773b49e72ed826c761f8190db32effdf15c8b..cfb36655a5fdaa6b4ba5a74d3b3b8b51e4cc6c74 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -17,6 +17,14 @@
 struct udp_table 	udplite_table __read_mostly;
 EXPORT_SYMBOL(udplite_table);

+/* Designate sk as UDP-Lite socket */
+static int udplite_sk_init(struct sock *sk)
+{
+	udp_init_sock(sk);
+	udp_sk(sk)->pcflag = UDPLITE_BIT;
+	return 0;
+}
+
 static int udplite_rcv(struct sk_buff *skb)
 {
 	return __udp4_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE);
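The DCCP and UDP changes above are instances of one refactoring pattern that the rest of this release also applies to TCP, SCTP, and MPTCP: protocol-private cleanup is split into a *_destruct_common() helper, and the IPv6 flavour of each socket installs an sk_destruct that chains the helper to inet6_sock_destruct(). IPv6 state is then torn down when the last socket reference drops, instead of depending on a separate .destroy callback that is skipped on some paths. A sketch of the shape (the "myproto" names are invented):

    /* Illustrative only; mirrors the structure introduced above. */
    static void myproto_destruct_common(struct sock *sk)
    {
            /* free protocol-private state here */
    }

    static void myproto_v4_sk_destruct(struct sock *sk)
    {
            myproto_destruct_common(sk);
            inet_sock_destruct(sk);
    }

    static void myproto_v6_sk_destruct(struct sock *sk)
    {
            myproto_destruct_common(sk);
            inet6_sock_destruct(sk);    /* runs inet6_cleanup_sock() */
    }

The .destroy slot of each v6 proto can then fall back to the plain protocol destroy (or be dropped entirely, as for MPTCP below), since inet6_destroy_sock() is no longer called from there.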
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index f18e6499a853dede0f1b7505e6a94e6130281d01..0979a4b15f2620f4c15e35e3c3d2581cb8246328 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -107,6 +107,13 @@ static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
 	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
 }

+void inet6_sock_destruct(struct sock *sk)
+{
+	inet6_cleanup_sock(sk);
+	inet_sock_destruct(sk);
+}
+EXPORT_SYMBOL_GPL(inet6_sock_destruct);
+
 static int inet6_create(struct net *net, struct socket *sock, int protocol,
 			int kern)
 {
@@ -199,7 +206,7 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
 			inet->hdrincl = 1;
 	}

-	sk->sk_destruct		= inet_sock_destruct;
+	sk->sk_destruct		= inet6_sock_destruct;
 	sk->sk_family		= PF_INET6;
 	sk->sk_protocol		= protocol;

@@ -505,6 +512,12 @@ void inet6_destroy_sock(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(inet6_destroy_sock);

+void inet6_cleanup_sock(struct sock *sk)
+{
+	inet6_destroy_sock(sk);
+}
+EXPORT_SYMBOL_GPL(inet6_cleanup_sock);
+
 /*
  *	This does both peername and sockname.
  */
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index ce4e0da4ab9ba52041461e3dfc5103895ac49f79..72391b5321af1896bcb7b2e2590393e853fef8b2 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -429,9 +429,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 		if (optlen < sizeof(int))
 			goto e_inval;
 		if (val == PF_INET) {
-			struct ipv6_txoptions *opt;
-			struct sk_buff *pktopt;
-
 			if (sk->sk_type == SOCK_RAW)
 				break;

@@ -462,7 +459,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 				break;
 			}

-			fl6_free_socklist(sk);
 			__ipv6_sock_mc_close(sk);
 			__ipv6_sock_ac_close(sk);

@@ -500,14 +496,14 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 				sk->sk_socket->ops = &inet_dgram_ops;
 				sk->sk_family = PF_INET;
 			}
-			opt = xchg((__force struct ipv6_txoptions **)&np->opt,
-				   NULL);
-			if (opt) {
-				atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
-				txopt_put(opt);
-			}
-			pktopt = xchg(&np->pktoptions, NULL);
-			kfree_skb(pktopt);
+
+			/* Disable all options not to allocate memory anymore,
+			 * but there is still a race. See the lockless path
+			 * in udpv6_sendmsg() and ipv6_local_rxpmtu().
+			 */
+			np->rxopt.all = 0;
+
+			inet6_cleanup_sock(sk);

 			/*
 			 * ... and add it to the refcnt debug socks count
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 135e3a060caa881f9461ce49d834ebdd9057c81b..6ac88fe24a8e076e59c706413e2241fcf0288260 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -22,11 +22,6 @@
 #include
 #include

-static void ping_v6_destroy(struct sock *sk)
-{
-	inet6_destroy_sock(sk);
-}
-
 /* Compatibility glue so we can support IPv6 when it's compiled as a module */
 static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
 				 int *addr_len)
@@ -171,7 +166,6 @@ struct proto pingv6_prot = {
 	.owner		= THIS_MODULE,
 	.init		= ping_init_sock,
 	.close		= ping_close,
-	.destroy	= ping_v6_destroy,
 	.connect	= ip6_datagram_connect_v6_only,
 	.disconnect	= __udp_disconnect,
 	.setsockopt	= ipv6_setsockopt,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 110254f44a468c106f5960863b4d7398ef6ff98d..69f0f9c05d028a80027db393b76051d014136851 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1211,8 +1211,6 @@ static void raw6_destroy(struct sock *sk)
 	lock_sock(sk);
 	ip6_flush_pending_frames(sk);
 	release_sock(sk);
-
-	inet6_destroy_sock(sk);
 }

 static int rawv6_init_sk(struct sock *sk)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 3f721eda2dacbcaa43eb7fdc8d24a487ff9f2e2c..48e55e93deedf4e3e3c2bd5ae8beeac2dd7a68b7 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1938,12 +1938,6 @@ static int tcp_v6_init_sock(struct sock *sk)
 	return 0;
 }

-static void tcp_v6_destroy_sock(struct sock *sk)
-{
-	tcp_v4_destroy_sock(sk);
-	inet6_destroy_sock(sk);
-}
-
 #ifdef CONFIG_PROC_FS
 /* Proc filesystem TCPv6 sock list dumping. */
 static void get_openreq6(struct seq_file *seq,
@@ -2136,7 +2130,7 @@ struct proto tcpv6_prot = {
 	.accept			= inet_csk_accept,
 	.ioctl			= tcp_ioctl,
 	.init			= tcp_v6_init_sock,
-	.destroy		= tcp_v6_destroy_sock,
+	.destroy		= tcp_v4_destroy_sock,
 	.shutdown		= tcp_shutdown,
 	.setsockopt		= tcp_setsockopt,
 	.getsockopt		= tcp_getsockopt,
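For context, the IPV6_ADDRFORM path patched in ipv6_sockglue.c above is reachable from userspace as follows (a trimmed sketch with minimal error handling; 192.0.2.1 is a documentation address):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = socket(AF_INET6, SOCK_DGRAM, 0);
            struct sockaddr_in6 dst = {
                    .sin6_family = AF_INET6,
                    .sin6_port   = htons(53),
            };
            int pf = PF_INET;

            /* ADDRFORM only applies to v4-mapped, connected sockets */
            inet_pton(AF_INET6, "::ffff:192.0.2.1", &dst.sin6_addr);

            if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)))
                    perror("connect");
            else if (setsockopt(fd, IPPROTO_IPV6, IPV6_ADDRFORM,
                                &pf, sizeof(pf)))
                    perror("IPV6_ADDRFORM");

            close(fd);
            return 0;
    }

Rather than open-coding the freeing of txoptions and pktoptions (and racing against lockless readers), the converted socket now clears rxopt and delegates to the same inet6_cleanup_sock() that the new destructor uses.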
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 20cc08210c7007d5d2d96c1637d37f34b472fdd9..19c0721399d9e1a3c1df5ab5af6c70770e56afca 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -54,6 +54,19 @@
 #include
 #include "udp_impl.h"

+static void udpv6_destruct_sock(struct sock *sk)
+{
+	udp_destruct_common(sk);
+	inet6_sock_destruct(sk);
+}
+
+int udpv6_init_sock(struct sock *sk)
+{
+	skb_queue_head_init(&udp_sk(sk)->reader_queue);
+	sk->sk_destruct = udpv6_destruct_sock;
+	return 0;
+}
+
 static u32 udp6_ehashfn(const struct net *net,
 			const struct in6_addr *laddr,
 			const u16 lport,
@@ -1617,8 +1630,6 @@ void udpv6_destroy_sock(struct sock *sk)
 			udp_encap_disable();
 		}
 	}
-
-	inet6_destroy_sock(sk);
 }

 /*
@@ -1702,7 +1713,7 @@ struct proto udpv6_prot = {
 	.connect		= ip6_datagram_connect,
 	.disconnect		= udp_disconnect,
 	.ioctl			= udp_ioctl,
-	.init			= udp_init_sock,
+	.init			= udpv6_init_sock,
 	.destroy		= udpv6_destroy_sock,
 	.setsockopt		= udpv6_setsockopt,
 	.getsockopt		= udpv6_getsockopt,
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index b2fcc46c1630e0b602d2e8c3bab976d2bd9abca2..e4977681944146a333d4fa9647bf29a447107444 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -12,6 +12,7 @@ int __udp6_lib_rcv(struct sk_buff *, struct udp_table *, int);
 int __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int,
 		   __be32, struct udp_table *);

+int udpv6_init_sock(struct sock *sk);
 int udp_v6_get_port(struct sock *sk, unsigned short snum);
 void udp_v6_rehash(struct sock *sk);

diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index fbb700d3f437ee73824b369486010e152a659abb..b6482e04dad0e936c940cdd2ff329329ccc7a916 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -12,6 +12,13 @@
 #include
 #include "udp_impl.h"

+static int udplitev6_sk_init(struct sock *sk)
+{
+	udpv6_init_sock(sk);
+	udp_sk(sk)->pcflag = UDPLITE_BIT;
+	return 0;
+}
+
 static int udplitev6_rcv(struct sk_buff *skb)
 {
 	return __udp6_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE);
@@ -38,7 +45,7 @@ struct proto udplitev6_prot = {
 	.connect	   = ip6_datagram_connect,
 	.disconnect	   = udp_disconnect,
 	.ioctl		   = udp_ioctl,
-	.init		   = udplite_sk_init,
+	.init		   = udplitev6_sk_init,
 	.destroy	   = udpv6_destroy_sock,
 	.setsockopt	   = udpv6_setsockopt,
 	.getsockopt	   = udpv6_getsockopt,
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index d54dbd01d86f1e949c9a564221849caeeca0bfd5..382124d6f764784e0f86ddc9d9b03a639a247e25 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -255,8 +255,6 @@ static void l2tp_ip6_destroy_sock(struct sock *sk)

 	if (tunnel)
 		l2tp_tunnel_delete(tunnel);
-
-	inet6_destroy_sock(sk);
 }

 static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index e61c85873ea2fdaade30b16e788fedae32ff2892..72d944e6a641fad3deee331abeebc4c9663feb45 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2863,12 +2863,6 @@ static const struct proto_ops mptcp_v6_stream_ops = {

 static struct proto mptcp_v6_prot;

-static void mptcp_v6_destroy(struct sock *sk)
-{
-	mptcp_destroy(sk);
-	inet6_destroy_sock(sk);
-}
-
 static struct inet_protosw mptcp_v6_protosw = {
 	.type		= SOCK_STREAM,
 	.protocol	= IPPROTO_MPTCP,
@@ -2884,7 +2878,6 @@ int __init mptcp_proto_v6_init(void)
 	mptcp_v6_prot = mptcp_prot;
 	strcpy(mptcp_v6_prot.name, "MPTCPv6");
 	mptcp_v6_prot.slab = NULL;
-	mptcp_v6_prot.destroy = mptcp_v6_destroy;
 	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);

 	err = proto_register(&mptcp_v6_prot, 1);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 3a68d65f7d153cd2f034e4d9f785681f06319cd8..35d3eee26ea564dd7f10ad9de06c361742eb1f84 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4995,13 +4995,17 @@ static void sctp_destroy_sock(struct sock *sk)
 }

 /* Triggered when there are no references on the socket anymore */
-static void sctp_destruct_sock(struct sock *sk)
+static void sctp_destruct_common(struct sock *sk)
 {
 	struct sctp_sock *sp = sctp_sk(sk);

 	/* Free up the HMAC transform. */
 	crypto_free_shash(sp->hmac);
+}

+static void sctp_destruct_sock(struct sock *sk)
+{
+	sctp_destruct_common(sk);
 	inet_sock_destruct(sk);
 }

@@ -9195,7 +9199,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
 	sctp_sk(newsk)->reuse = sp->reuse;

 	newsk->sk_shutdown = sk->sk_shutdown;
-	newsk->sk_destruct = sctp_destruct_sock;
+	newsk->sk_destruct = sk->sk_destruct;
 	newsk->sk_family = sk->sk_family;
 	newsk->sk_protocol = IPPROTO_SCTP;
 	newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
@@ -9427,11 +9431,20 @@ struct proto sctp_prot = {

 #if IS_ENABLED(CONFIG_IPV6)

-#include
-static void sctp_v6_destroy_sock(struct sock *sk)
+static void sctp_v6_destruct_sock(struct sock *sk)
+{
+	sctp_destruct_common(sk);
+	inet6_sock_destruct(sk);
+}
+
+static int sctp_v6_init_sock(struct sock *sk)
 {
-	sctp_destroy_sock(sk);
-	inet6_destroy_sock(sk);
+	int ret = sctp_init_sock(sk);
+
+	if (!ret)
+		sk->sk_destruct = sctp_v6_destruct_sock;
+
+	return ret;
 }

 struct proto sctpv6_prot = {
@@ -9441,8 +9454,8 @@ struct proto sctpv6_prot = {
 	.disconnect	= sctp_disconnect,
 	.accept		= sctp_accept,
 	.ioctl		= sctp_ioctl,
-	.init		= sctp_init_sock,
-	.destroy	= sctp_v6_destroy_sock,
+	.init		= sctp_v6_init_sock,
+	.destroy	= sctp_destroy_sock,
 	.shutdown	= sctp_shutdown,
 	.setsockopt	= sctp_setsockopt,
 	.getsockopt	= sctp_getsockopt,
diff --git a/scripts/asn1_compiler.c b/scripts/asn1_compiler.c
index adabd41452640991ed86d1557ac32012042ce4e0..985fb81cae79b21837e56111289506be638e6082 100644
--- a/scripts/asn1_compiler.c
+++ b/scripts/asn1_compiler.c
@@ -625,7 +625,7 @@ int main(int argc, char **argv)
 	p = strrchr(argv[1], '/');
 	p = p ? p + 1 : argv[1];
 	grammar_name = strdup(p);
-	if (!p) {
+	if (!grammar_name) {
 		perror(NULL);
 		exit(1);
 	}
diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c
index 29f91cdecbc33142ca1a0b7f8b1c132601c9a166..9b2a986ce41522ed59a418fd2f28745b39b29287 100644
--- a/sound/soc/fsl/fsl_asrc_dma.c
+++ b/sound/soc/fsl/fsl_asrc_dma.c
@@ -207,14 +207,19 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
 		be_chan = soc_component_to_pcm(component_be)->chan[substream->stream];
 		tmp_chan = be_chan;
 	}
-	if (!tmp_chan)
-		tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx");
+	if (!tmp_chan) {
+		tmp_chan = dma_request_chan(dev_be, tx ? "tx" : "rx");
+		if (IS_ERR(tmp_chan)) {
+			dev_err(dev, "failed to request DMA channel for Back-End\n");
+			return -EINVAL;
+		}
+	}

 	/*
 	 * An EDMA DEV_TO_DEV channel is fixed and bound with DMA event of each
 	 * peripheral, unlike SDMA channel that is allocated dynamically. So no
 	 * need to configure dma_request and dma_request2, but get dma_chan of
-	 * Back-End device directly via dma_request_slave_channel.
+	 * Back-End device directly via dma_request_chan.
 	 */
 	if (!asrc->use_edma) {
 		/* Get DMA request of Back-End */
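The asn1_compiler.c hunk fixes a check that tested the wrong pointer: strdup() can return NULL even when its argument is valid, and the old code checked the source string instead of the copy. Reduced to its essentials (a standalone sketch, not the compiler's actual surroundings):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(int argc, char **argv)
    {
            const char *p = argc > 1 ? argv[1] : "grammar.asn1";
            char *grammar_name = strdup(p);

            if (!grammar_name) {    /* check the copy, not the source */
                    perror(NULL);
                    exit(1);
            }

            printf("%s\n", grammar_name);
            free(grammar_name);
            return 0;
    }

The fsl_asrc_dma change is a related robustness fix at the API level: dma_request_chan() reports failure as an ERR_PTR() rather than NULL, so the caller must test with IS_ERR() instead of assuming a valid channel.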
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 8a99cb6dfcd69d90735515bfd82aa31fd388573a..9a5ab96f917d3083655d25c6c6c188b6b9dca5d2 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -393,6 +393,18 @@ static int byt_rt5640_aif1_hw_params(struct snd_pcm_substream *substream,

 /* Please keep this list alphabetically sorted */
 static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+	{	/* Acer Iconia One 7 B1-750 */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
+		},
+		.driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+					BYT_RT5640_JD_SRC_JD1_IN4P |
+					BYT_RT5640_OVCD_TH_1500UA |
+					BYT_RT5640_OVCD_SF_0P75 |
+					BYT_RT5640_SSP0_AIF1 |
+					BYT_RT5640_MCLK_EN),
+	},
 	{	/* Acer Iconia Tab 8 W1-810 */
 		.matches = {
 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index d3b5f5faf8c1417eb22aa58aa6a969794083fcdf..02e5774cabb6edc9c6c7e22a5271ab18736cd3e7 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -670,7 +670,7 @@ static void create_tasks(struct perf_sched *sched)
 	err = pthread_attr_init(&attr);
 	BUG_ON(err);
 	err = pthread_attr_setstacksize(&attr,
-			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
+			(size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
 	BUG_ON(err);
 	err = pthread_mutex_lock(&sched->start_work_mutex);
 	BUG_ON(err);
diff --git a/tools/testing/selftests/sigaltstack/current_stack_pointer.h b/tools/testing/selftests/sigaltstack/current_stack_pointer.h
new file mode 100644
index 0000000000000000000000000000000000000000..ea9bdf3a90b16449a0ffcdfcdc54247125f1c806
--- /dev/null
+++ b/tools/testing/selftests/sigaltstack/current_stack_pointer.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#if __alpha__
+register unsigned long sp asm("$30");
+#elif __arm__ || __aarch64__ || __csky__ || __m68k__ || __mips__ || __riscv
+register unsigned long sp asm("sp");
+#elif __i386__
+register unsigned long sp asm("esp");
+#elif __loongarch64
+register unsigned long sp asm("$sp");
+#elif __ppc__
+register unsigned long sp asm("r1");
+#elif __s390x__
+register unsigned long sp asm("%15");
+#elif __sh__
+register unsigned long sp asm("r15");
+#elif __x86_64__
+register unsigned long sp asm("rsp");
+#elif __XTENSA__
+register unsigned long sp asm("a1");
+#else
+#error "implement current_stack_pointer equivalent"
+#endif
diff --git a/tools/testing/selftests/sigaltstack/sas.c b/tools/testing/selftests/sigaltstack/sas.c
index 8934a3766d2076bee27a62cd41c6bfa0b3ef4af6..41646c22384a2f117a41f054455ff1175b14e119 100644
--- a/tools/testing/selftests/sigaltstack/sas.c
+++ b/tools/testing/selftests/sigaltstack/sas.c
@@ -19,6 +19,7 @@
 #include

 #include "../kselftest.h"
+#include "current_stack_pointer.h"

 #ifndef SS_AUTODISARM
 #define SS_AUTODISARM	(1U << 31)
@@ -40,12 +41,6 @@ void my_usr1(int sig, siginfo_t *si, void *u)
 	stack_t stk;
 	struct stk_data *p;

-#if __s390x__
-	register unsigned long sp asm("%15");
-#else
-	register unsigned long sp asm("sp");
-#endif
-
 	if (sp < (unsigned long)sstack ||
 	    sp >= (unsigned long)sstack + SIGSTKSZ) {
 		ksft_exit_fail_msg("SP is not on sigaltstack\n");
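The sigaltstack selftest change replaces an ad-hoc, two-architecture register variable with the shared current_stack_pointer.h, whose global register variable sp mirrors the stack pointer on every supported architecture. A standalone sketch of how a signal handler can use it (assumes a supported architecture such as x86_64 and that the header is on the include path; the real test reports through the ksft_* helpers rather than printf):

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>

    #include "current_stack_pointer.h"  /* register unsigned long sp */

    static void *sstack;

    static void on_usr1(int sig)
    {
            if (sp < (unsigned long)sstack ||
                sp >= (unsigned long)sstack + SIGSTKSZ)
                    printf("SP is not on sigaltstack\n");
            else
                    printf("SP is on sigaltstack\n");
    }

    int main(void)
    {
            stack_t ss = { 0 };
            struct sigaction sa = {
                    .sa_handler = on_usr1,
                    .sa_flags   = SA_ONSTACK,
            };

            sstack = malloc(SIGSTKSZ);
            ss.ss_sp = sstack;
            ss.ss_size = SIGSTKSZ;
            sigaltstack(&ss, NULL);

            sigaction(SIGUSR1, &sa, NULL);
            raise(SIGUSR1);

            free(sstack);
            return 0;
    }

Because sp is a bare register variable rather than a function call, reading it inside the handler does not itself push a frame that could skew the comparison.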