diff --git a/Makefile b/Makefile
index 1a4592e1d1f4c8f640b57020f870b681b160b171..267c6073a41f62f3c85874760d5ed6070e26b0b5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 179
+SUBLEVEL = 180
 EXTRAVERSION =
 NAME = Dare mighty things
diff --git a/README.OpenSource b/README.OpenSource
index 71a2928bd5f5322d4c7dc71dff02c6df6c93565d..996851d9a4022a6c9a2ec84d8f49237b07beaf9c 100644
--- a/README.OpenSource
+++ b/README.OpenSource
@@ -3,7 +3,7 @@
     "Name": "linux-5.10",
     "License": "GPL-2.0+",
     "License File": "COPYING",
-    "Version Number": "5.10.179",
+    "Version Number": "5.10.180",
     "Owner": "liuyu82@huawei.com",
     "Upstream URL": "https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/log/?h=linux-5.10.y",
     "Description": "linux kernel 5.10"
diff --git a/arch/arm/boot/dts/exynos4412-itop-elite.dts b/arch/arm/boot/dts/exynos4412-itop-elite.dts
index f6d0a5f5d339edf76da9373fdd7ceb81fb54317e..9a2a49420d4db522f74fed83eefb119ebcb1c26b 100644
--- a/arch/arm/boot/dts/exynos4412-itop-elite.dts
+++ b/arch/arm/boot/dts/exynos4412-itop-elite.dts
@@ -179,7 +179,7 @@ codec: wm8960@1a {
 		compatible = "wlf,wm8960";
 		reg = <0x1a>;
 		clocks = <&pmu_system_controller 0>;
-		clock-names = "MCLK1";
+		clock-names = "mclk";
 		wlf,shared-lrclk;
 		#sound-dai-cells = <0>;
 	};
diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
index eb7e3660ada793200ef8843d8182307b94cdb1dd..81ab9fe9897fc52dd0f0f5168ba14f1a9b216f24 100644
--- a/arch/arm/boot/dts/s5pv210.dtsi
+++ b/arch/arm/boot/dts/s5pv210.dtsi
@@ -583,7 +583,7 @@ csis0: csis@fa600000 {
 			interrupts = <29>;
 			clocks = <&clocks CLK_CSIS>,
 				<&clocks SCLK_CSIS>;
-			clock-names = "clk_csis",
+			clock-names = "csis",
 				"sclk_csis";
 			bus-width = <4>;
 			status = "disabled";
diff --git a/arch/arm/mach-sa1100/jornada720_ssp.c b/arch/arm/mach-sa1100/jornada720_ssp.c
index 1dbe98948ce30c3da627c66d4ee803662446eab7..9627c4cf3e41ddf702a9ce63ba5ca889ae16e3fe 100644
--- a/arch/arm/mach-sa1100/jornada720_ssp.c
+++ b/arch/arm/mach-sa1100/jornada720_ssp.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * arch/arm/mac-sa1100/jornada720_ssp.c
  *
  * Copyright (C) 2006/2007 Kristoffer Ericson
@@ -26,6 +26,7 @@ static unsigned long jornada_ssp_flags;
 
 /**
  * jornada_ssp_reverse - reverses input byte
+ * @byte: input byte to reverse
  *
  * we need to reverse all data we receive from the mcu due to its physical location
  * returns : 01110111 -> 11101110
@@ -46,6 +47,7 @@ EXPORT_SYMBOL(jornada_ssp_reverse);
 
 /**
  * jornada_ssp_byte - waits for ready ssp bus and sends byte
+ * @byte: input byte to transmit
 *
 * waits for fifo buffer to clear and then transmits, if it doesn't then we will
 * timeout after <timeout> rounds. Needs mcu running before its called.
@@ -77,6 +79,7 @@ EXPORT_SYMBOL(jornada_ssp_byte);
 
 /**
  * jornada_ssp_inout - decide if input is command or trading byte
+ * @byte: input byte to send (may be %TXDUMMY)
  *
  * returns : (jornada_ssp_byte(byte)) on success
  *         : %-ETIMEDOUT on timeout failure
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 02b5f6f1d331eb549466a4d6ba9a714c3a11ac3f..159cdd03e7c01572ac588c4669fe937841a460dc 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -1771,8 +1771,11 @@ dwc3@6a00000 {
 				interrupts = <0 131 IRQ_TYPE_LEVEL_HIGH>;
 				phys = <&hsusb_phy1>, <&ssusb_phy_0>;
 				phy-names = "usb2-phy", "usb3-phy";
+				snps,hird-threshold = /bits/ 8 <0>;
 				snps,dis_u2_susphy_quirk;
 				snps,dis_enblslpm_quirk;
+				snps,is-utmi-l1-suspend;
+				tx-fifo-resize;
 			};
 		};
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index af4b4d3c6ff6462ccf9faa3fdd8d11b745c154c3..3e9f1c820edbf6e3495d2f1434c5c6dedbcad965 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -664,7 +664,7 @@ struct kvm_vcpu_arch {
 		u8 preempted;
 		u64 msr_val;
 		u64 last_steal;
-		struct gfn_to_pfn_cache cache;
+		struct gfn_to_hva_cache cache;
 	} st;
 
 	u64 l1_tsc_offset;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 09ec1cda2d687c517a3bfe5de1f94899f1b5f8e7..e03e320847cdd9ec8437654d4a3b0783a64aca76 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1562,16 +1562,19 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
 
 	cpumask_clear(&hv_vcpu->tlb_flush);
 
-	vcpu_mask = all_cpus ? NULL :
-		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
-					vp_bitmap, vcpu_bitmap);
-
 	/*
 	 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
 	 * analyze it here, flush TLB regardless of the specified address space.
 	 */
-	kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
-				    NULL, vcpu_mask, &hv_vcpu->tlb_flush);
+	if (all_cpus) {
+		kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH_GUEST);
+	} else {
+		vcpu_mask = sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
+						    vp_bitmap, vcpu_bitmap);
+
+		kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
+					    NULL, vcpu_mask, &hv_vcpu->tlb_flush);
+	}
 
 ret_success:
 	/* We always do full TLB flush, set rep_done = rep_cnt. */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c1351335d22f0c7a9d935531037e0eae70fc9126..5fbae8cc06977c84ea625068f64b0c2c01236427 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3022,51 +3022,95 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
 
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
-	struct kvm_host_map map;
-	struct kvm_steal_time *st;
+	struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
+	struct kvm_steal_time __user *st;
+	struct kvm_memslots *slots;
+	gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
+	u64 steal;
+	u32 version;
 
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
-	/* -EAGAIN is returned in atomic context so we can just return. */
-	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
-			&map, &vcpu->arch.st.cache, false))
+	if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm))
 		return;
 
-	st = map.hva +
-		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+	slots = kvm_memslots(vcpu->kvm);
+
+	if (unlikely(slots->generation != ghc->generation ||
+		     gpa != ghc->gpa ||
+		     kvm_is_error_hva(ghc->hva) || !ghc->memslot)) {
+		/* We rely on the fact that it fits in a single page. */
+		BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS);
+
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) ||
+		    kvm_is_error_hva(ghc->hva) || !ghc->memslot)
+			return;
+	}
+
+	st = (struct kvm_steal_time __user *)ghc->hva;
 	/*
 	 * Doing a TLB flush here, on the guest's behalf, can avoid
 	 * expensive IPIs.
 	 */
 	if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
+		u8 st_preempted = 0;
+		int err = -EFAULT;
+
+		if (!user_access_begin(st, sizeof(*st)))
+			return;
+
+		asm volatile("1: xchgb %0, %2\n"
+			     "xor %1, %1\n"
+			     "2:\n"
+			     _ASM_EXTABLE_UA(1b, 2b)
+			     : "+q" (st_preempted),
+			       "+&r" (err),
+			       "+m" (st->preempted));
+		if (err)
+			goto out;
+
+		user_access_end();
+
+		vcpu->arch.st.preempted = 0;
+
 		trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
-				       st->preempted & KVM_VCPU_FLUSH_TLB);
-		if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
+				       st_preempted & KVM_VCPU_FLUSH_TLB);
+		if (st_preempted & KVM_VCPU_FLUSH_TLB)
 			kvm_vcpu_flush_tlb_guest(vcpu);
+
+		if (!user_access_begin(st, sizeof(*st)))
+			goto dirty;
 	} else {
-		st->preempted = 0;
-	}
+		if (!user_access_begin(st, sizeof(*st)))
+			return;
 
-	vcpu->arch.st.preempted = 0;
+		unsafe_put_user(0, &st->preempted, out);
+		vcpu->arch.st.preempted = 0;
+	}
 
-	if (st->version & 1)
-		st->version += 1;  /* first time write, random junk */
+	unsafe_get_user(version, &st->version, out);
+	if (version & 1)
+		version += 1;  /* first time write, random junk */
 
-	st->version += 1;
+	version += 1;
+	unsafe_put_user(version, &st->version, out);
 
 	smp_wmb();
 
-	st->steal += current->sched_info.run_delay -
+	unsafe_get_user(steal, &st->steal, out);
+	steal += current->sched_info.run_delay -
 		vcpu->arch.st.last_steal;
 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
+	unsafe_put_user(steal, &st->steal, out);
 
-	smp_wmb();
-
-	st->version += 1;
+	version += 1;
+	unsafe_put_user(version, &st->version, out);
 
-	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
+ out:
+	user_access_end();
+ dirty:
+	mark_page_dirty_in_slot(ghc->memslot, gpa_to_gfn(ghc->gpa));
 }
 
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -4051,8 +4095,11 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 {
-	struct kvm_host_map map;
-	struct kvm_steal_time *st;
+	struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
+	struct kvm_steal_time __user *st;
+	struct kvm_memslots *slots;
+	static const u8 preempted = KVM_VCPU_PREEMPTED;
+	gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
 
 	/*
 	 * The vCPU can be marked preempted if and only if the VM-Exit was on
@@ -4073,42 +4120,42 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.st.preempted)
 		return;
 
-	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
-			&vcpu->arch.st.cache, true))
+	/* This happens on process exit */
+	if (unlikely(current->mm != vcpu->kvm->mm))
 		return;
 
-	st = map.hva +
-		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+	slots = kvm_memslots(vcpu->kvm);
 
-	st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
+	if (unlikely(slots->generation != ghc->generation ||
+		     gpa != ghc->gpa ||
+		     kvm_is_error_hva(ghc->hva) || !ghc->memslot))
+		return;
 
-	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
+	st = (struct kvm_steal_time __user *)ghc->hva;
+	BUILD_BUG_ON(sizeof(st->preempted) != sizeof(preempted));
+
+	if (!copy_to_user_nofault(&st->preempted, &preempted, sizeof(preempted)))
+		vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
+
+	mark_page_dirty_in_slot(ghc->memslot, gpa_to_gfn(ghc->gpa));
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	int idx;
 
-	if (vcpu->preempted)
+	if (vcpu->preempted) {
 		vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu);
 
-	/*
-	 * Disable page faults because we're in atomic context here.
-	 * kvm_write_guest_offset_cached() would call might_fault()
-	 * that relies on pagefault_disable() to tell if there's a
-	 * bug. NOTE: the write to guest memory may not go through if
-	 * during postcopy live migration or if there's heavy guest
-	 * paging.
-	 */
-	pagefault_disable();
-	/*
-	 * kvm_memslots() will be called by
-	 * kvm_write_guest_offset_cached() so take the srcu lock.
-	 */
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-	kvm_steal_time_set_preempted(vcpu);
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
-	pagefault_enable();
+		/*
+		 * Take the srcu lock as memslots will be accessed to check the gfn
+		 * cache generation against the memslots generation.
+		 */
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
+		kvm_steal_time_set_preempted(vcpu);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
+	}
+
 	kvm_x86_ops.vcpu_put(vcpu);
 	vcpu->arch.last_host_tsc = rdtsc();
 	/*
@@ -10264,11 +10311,8 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-	struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
 	int idx;
 
-	kvm_release_pfn(cache->pfn, cache->dirty, cache);
-
 	kvmclock_reset(vcpu);
 
 	kvm_x86_ops.vcpu_free(vcpu);
diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
index 3615e1a6efd8ae084b4f0961b4083e7ea754bd54..b91155ea9c343cd82871b3dc0638a2143a0f11bf 100644
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -652,6 +652,9 @@ acpi_status acpi_db_display_objects(char *obj_type_arg, char *display_count_arg)
 	object_info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_object_info));
 
+	if (!object_info)
+		return (AE_NO_MEMORY);
+
 	/* Walk the namespace from the root */
 
 	(void)acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 809a0c0536b5936d99087708e3b05393039716e0..f9ba7695be1478ffee7d432cdc48ab488c20dc57 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -576,9 +576,14 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
 	ACPI_FUNCTION_TRACE(ds_init_aml_walk);
 
 	walk_state->parser_state.aml =
-	    walk_state->parser_state.aml_start = aml_start;
-	walk_state->parser_state.aml_end =
-	    walk_state->parser_state.pkg_end = aml_start + aml_length;
+	    walk_state->parser_state.aml_start =
+	    walk_state->parser_state.aml_end =
+	    walk_state->parser_state.pkg_end = aml_start;
+	/* Avoid undefined behavior: applying zero offset to null pointer */
+	if (aml_length != 0) {
+		walk_state->parser_state.aml_end += aml_length;
+		walk_state->parser_state.pkg_end += aml_length;
+	}
 
 	/* The next_op of the next_walk will be the beginning of the method */
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 4707d1808ca54ce6b804c4a68053cf0380c3f5ac..487884420fb0d0a44264a4d7d3ebdd45dd100fb1 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1114,6 +1114,7 @@ static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
 
 void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
 {
 	acpi_ec_remove_query_handlers(ec, false, query_bit);
+	flush_workqueue(ec_query_wq);
 }
 EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
 
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 9a874a58d690cc1b37822013ecff514ce4ec196a..cb859febd03cf0d9924ece74c4c14d49a2c3e6c3 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -4352,6 +4352,13 @@ void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
 }
 EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
 
+void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
+{
+	dev->fwnode = fwnode;
+	dev->of_node = to_of_node(fwnode);
+}
+EXPORT_SYMBOL_GPL(device_set_node);
+
 int device_match_name(struct device *dev, const void *name)
 {
 	return sysfs_streq(dev_name(dev), name);
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 7f4b3b62492ca3849fb08b0bbe888d8b548c12a7..7fdd702e564ae504d9c7043318041b769aca9768 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -343,6 +343,9 @@ int regcache_sync(struct regmap *map)
 	const char *name;
 	bool bypass;
 
+	if (WARN_ON(map->cache_type == REGCACHE_NONE))
+		return -EINVAL;
+
 	BUG_ON(!map->cache_ops);
 
 	map->lock(map->lock_arg);
@@ -412,6 +415,9 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
 	const char *name;
 	bool bypass;
 
+	if (WARN_ON(map->cache_type == REGCACHE_NONE))
+		return -EINVAL;
+
 	BUG_ON(!map->cache_ops);
 
 	map->lock(map->lock_arg);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index dc333dbe5232817da94096fea0f360b4520902f1..405e09575f08a2da59368c21385ecc2f48e23de7 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1299,7 +1299,7 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
 	bio_set_dev(bio, device->ldev->backing_bdev);
 	bio->bi_private = octx;
 	bio->bi_end_io = one_flush_endio;
-	bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 	device->flush_jif = jiffies;
 	set_bit(FLUSH_PENDING, &device->flags);
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 25db095e943b7d9d20f7d43029733f6bca35e47d..35b390a785dd4f36d63898351b37958cf792c8a4 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1738,6 +1738,11 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
 
 static int null_validate_conf(struct nullb_device *dev)
 {
+	if (dev->queue_mode == NULL_Q_RQ) {
+		pr_err("legacy IO path is no longer available\n");
+		return -EINVAL;
+	}
+
 	dev->blocksize = round_down(dev->blocksize, 512);
 	dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
 
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index 840754dcc6ca4429ac17b7e398cc5a8ca70fd6e4..5a877d76078f7271997281050bab8d39a9e5d4de 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -44,6 +44,8 @@ static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
 /* entry point from firmware to arch asm code */
 static unsigned long sdei_entry_point;
 
+static int sdei_hp_state;
+
 struct sdei_event {
 	/* These three are protected by the sdei_list_lock */
 	struct list_head list;
@@ -302,8 +304,6 @@ int sdei_mask_local_cpu(void)
 {
 	int err;
 
-	WARN_ON_ONCE(preemptible());
-
 	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
 	if (err && err != -EIO) {
 		pr_warn_once("failed to mask CPU[%u]: %d\n",
@@ -316,6 +316,7 @@ int sdei_mask_local_cpu(void)
 
 static void _ipi_mask_cpu(void *ignored)
 {
+	WARN_ON_ONCE(preemptible());
 	sdei_mask_local_cpu();
 }
 
@@ -323,8 +324,6 @@ int sdei_unmask_local_cpu(void)
 {
 	int err;
 
-	WARN_ON_ONCE(preemptible());
-
 	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
 	if (err && err != -EIO) {
 		pr_warn_once("failed to unmask CPU[%u]: %d\n",
@@ -337,6 +336,7 @@ int sdei_unmask_local_cpu(void)
 
 static void _ipi_unmask_cpu(void *ignored)
 {
+	WARN_ON_ONCE(preemptible());
 	sdei_unmask_local_cpu();
 }
 
@@ -344,6 +344,8 @@ static void _ipi_private_reset(void *ignored)
 {
 	int err;
 
+	WARN_ON_ONCE(preemptible());
+
 	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
 			     NULL);
 	if (err && err != -EIO)
@@ -390,8 +392,6 @@ static void _local_event_enable(void *data)
 	int err;
 	struct sdei_crosscall_args *arg = data;
 
-	WARN_ON_ONCE(preemptible());
-
 	err = sdei_api_event_enable(arg->event->event_num);
 
 	sdei_cross_call_return(arg, err);
@@ -480,8 +480,6 @@ static void _local_event_unregister(void *data)
 	int err;
 	struct sdei_crosscall_args *arg = data;
 
-	WARN_ON_ONCE(preemptible());
-
 	err = sdei_api_event_unregister(arg->event->event_num);
 
 	sdei_cross_call_return(arg, err);
@@ -562,8 +560,6 @@ static void _local_event_register(void *data)
 	struct sdei_registered_event *reg;
 	struct sdei_crosscall_args *arg = data;
 
-	WARN_ON(preemptible());
-
 	reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
 	err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
 				      reg, 0, 0);
@@ -718,6 +714,8 @@ static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
 {
 	int rv;
 
+	WARN_ON_ONCE(preemptible());
+
 	switch (action) {
 	case CPU_PM_ENTER:
 		rv = sdei_mask_local_cpu();
@@ -766,7 +764,7 @@ static int sdei_device_freeze(struct device *dev)
 	int err;
 
 	/* unregister private events */
-	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
+	cpuhp_remove_state(sdei_entry_point);
 
 	err = sdei_unregister_shared();
 	if (err)
@@ -787,12 +785,15 @@ static int sdei_device_thaw(struct device *dev)
 		return err;
 	}
 
-	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
+	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
 				&sdei_cpuhp_up, &sdei_cpuhp_down);
-	if (err)
+	if (err < 0) {
 		pr_warn("Failed to re-register CPU hotplug notifier...\n");
+		return err;
+	}
 
-	return err;
+	sdei_hp_state = err;
+	return 0;
 }
 
 static int sdei_device_restore(struct device *dev)
@@ -824,7 +825,7 @@ static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
 	 * We are going to reset the interface, after this there is no point
 	 * doing work when we take CPUs offline.
 	 */
-	cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
+	cpuhp_remove_state(sdei_hp_state);
 
 	sdei_platform_reset();
 
@@ -1004,13 +1005,15 @@ static int sdei_probe(struct platform_device *pdev)
 		goto remove_cpupm;
 	}
 
-	err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
+	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
 				&sdei_cpuhp_up, &sdei_cpuhp_down);
-	if (err) {
+	if (err < 0) {
 		pr_warn("Failed to register CPU hotplug notifier...\n");
 		goto remove_reboot;
 	}
 
+	sdei_hp_state = err;
+
 	return 0;
 
 remove_reboot:
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 38f4c7474487be58415566f3a8834224a6e90c9c..629671f66b319c2cad6438c2c01e5dd973b005a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3943,7 +3943,8 @@ static int gfx_v9_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
+	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+		amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 1673bf3bae55a06be816027010d54fac792d2ad0..945cbdbc2f9986cb5820b488d27933f28136276a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1686,7 +1686,6 @@ static int gmc_v9_0_hw_fini(void *handle)
 		return 0;
 	}
 
-	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 1f2e2460e121ee134a72d423baeea06184474750..dbcaef3f35da99981d57ad05ae9d11b1923becff 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1979,9 +1979,11 @@ static int sdma_v4_0_hw_fini(void *handle)
 	if (amdgpu_sriov_vf(adev))
 		return 0;
 
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
-			       AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
+				       AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+		}
 	}
 
 	sdma_v4_0_ctx_switch_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index dbdf0e210522c3e3ab330852a45b302550d93157..3ca1ee396e4c67d6e029441b71151aaed4071f5f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -7248,6 +7248,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			continue;
 
 		dc_plane = dm_new_plane_state->dc_state;
+		if (!dc_plane)
+			continue;
 
 		bundle->surface_updates[planes_count].surface = dc_plane;
 		if (new_pcrtc_state->color_mgmt_changed) {
@@ -8562,8 +8564,9 @@ static int dm_update_plane_state(struct dc *dc,
 			return -EINVAL;
 		}
 
+		if (dm_old_plane_state->dc_state)
+			dc_plane_state_release(dm_old_plane_state->dc_state);
 
-		dc_plane_state_release(dm_old_plane_state->dc_state);
 		dm_new_plane_state->dc_state = NULL;
 
 		*lock_and_validation_needed = true;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 930d2b7d34489bc5a5603e8c93a3fc3f2c20049d..9dd41eaf32cb56e951986050ded9d0c9804cc760 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -406,11 +406,8 @@ static enum bp_result get_gpio_i2c_info(
 	info->i2c_slave_address = record->i2c_slave_addr;
 
 	/* TODO: check how to get register offset for en, Y, etc. */
-	info->gpio_info.clk_a_register_index =
-		le16_to_cpu(
-			header->gpio_pin[table_index].data_a_reg_index);
-	info->gpio_info.clk_a_shift =
-		header->gpio_pin[table_index].gpio_bitshift;
+	info->gpio_info.clk_a_register_index = le16_to_cpu(pin->data_a_reg_index);
+	info->gpio_info.clk_a_shift = pin->gpio_bitshift;
 
 	return BP_RESULT_OK;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 1e47afc4ccc1deea8efb4edc265f8f626588bde7..f1eda1a6496d4bb493b3b2bb71b17ff3b2230b6b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1502,6 +1502,9 @@ bool dc_remove_plane_from_context(
 	struct dc_stream_status *stream_status = NULL;
 	struct resource_pool *pool = dc->res_pool;
 
+	if (!plane_state)
+		return true;
+
 	for (i = 0; i < context->stream_count; i++)
 		if (context->streams[i] == stream) {
 			stream_status = &context->stream_status[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index e2e79025825f844db1718390a2b40732fddac5dd..a54a30987924694237c79f214ab4633d12812580 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -1011,7 +1011,7 @@ static void dce_transform_set_pixel_storage_depth(
 		color_depth = COLOR_DEPTH_101010;
 		pixel_depth = 0;
 		expan_mode  = 1;
-		BREAK_TO_DEBUGGER();
+		DC_LOG_DC("The pixel depth %d is not valid, set COLOR_DEPTH_101010 instead.", depth);
 		break;
 	}
 
@@ -1025,8 +1025,7 @@ static void dce_transform_set_pixel_storage_depth(
 		if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
 			/*we should use unsupported capabilities
 			 *  unless it is required by w/a*/
-			DC_LOG_WARNING("%s: Capability not supported",
-				       __func__);
+			DC_LOG_DC("%s: Capability not supported", __func__);
 		}
 	}
 
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 19fb1d93a4f07835ed39c8ec87b6fee00769edcb..0c806e99e86909fb16496ac55c0a054c556220e0 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -221,7 +221,7 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host,
 		return dsi;
 	}
 
-	dsi->dev.of_node = info->node;
+	device_set_node(&dsi->dev, of_fwnode_handle(info->node));
 	dsi->channel = info->channel;
 	strlcpy(dsi->name, info->type, sizeof(dsi->name));
 
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 1f79bc2a881e1f3889e5161ef14fbba33d1764be..c277d2fc50c66dcf961f486ae7f4ab0f9d38db60 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -775,8 +775,8 @@ static int decon_conf_irq(struct decon_context *ctx, const char *name,
 			return irq;
 		}
 	}
-	irq_set_status_flags(irq, IRQ_NOAUTOEN);
-	ret = devm_request_irq(ctx->dev, irq, handler, flags, "drm_decon", ctx);
+	ret = devm_request_irq(ctx->dev, irq, handler,
+			       flags | IRQF_NO_AUTOEN, "drm_decon", ctx);
 	if (ret < 0) {
 		dev_err(ctx->dev, "IRQ %s request failed\n", name);
 		return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 5b9666fc7af1a9c985e5a897c0adba224983f2bf..afb03de2880f0e6437bebf6c16d72dba0569a03a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1353,10 +1353,9 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi,
 	}
 
 	te_gpio_irq = gpio_to_irq(dsi->te_gpio);
-	irq_set_status_flags(te_gpio_irq, IRQ_NOAUTOEN);
 
 	ret = request_threaded_irq(te_gpio_irq, exynos_dsi_te_irq_handler, NULL,
-				   IRQF_TRIGGER_RISING, "TE", dsi);
+				   IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN, "TE", dsi);
 	if (ret) {
 		dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
 		gpio_free(dsi->te_gpio);
@@ -1802,9 +1801,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 	if (dsi->irq < 0)
 		return dsi->irq;
 
-	irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
 	ret = devm_request_threaded_irq(dev, dsi->irq, NULL,
-					exynos_dsi_irq, IRQF_ONESHOT,
+					exynos_dsi_irq,
+					IRQF_ONESHOT | IRQF_NO_AUTOEN,
 					dev_name(dev), dsi);
 	if (ret) {
 		dev_err(dev, "failed to request dsi irq\n");
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 1c1931f5c958b138b80f146a4d2a4a56923a410e..7f633f8b3239a7f583ab756987d1855b8dfc9f65 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -2281,6 +2281,11 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
 		pipe_config->dsc.slice_count =
 			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
 							true);
+		if (!pipe_config->dsc.slice_count) {
+			drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n",
+				    pipe_config->dsc.slice_count);
+			return -EINVAL;
+		}
 	} else {
 		u16 dsc_max_output_bpp;
 		u8 dsc_dp_slice_count;
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index 6ac1accade803f0dc5bf917376c455c1bc0a4341..b19597b836e3a2087fb3c48d24185e00b155dee6 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -458,7 +458,7 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
 		       DRM_MODE_CONNECTOR_DSI);
 
 	ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
-						     dsi->host->dev, ctx,
+						     dev, ctx,
 						     &otm8009a_backlight_ops,
 						     NULL);
 	if (IS_ERR(ctx->bl_dev)) {
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 32c83f2e386ca2c1334e245031c32ed5143b9863..9d60d1c4cfcea838f18a27e605bed345b49f6499 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -1153,7 +1153,7 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
 				    struct drm_dp_link *link)
 {
 	const u64 f = 100000, link_rate = link->rate * 1000;
-	const u64 pclk = mode->clock * 1000;
+	const u64 pclk = (u64)mode->clock * 1000;
 	u64 input, output, watermark, num;
 	struct tegra_sor_params params;
 	u32 num_syms_per_line;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index afb94b89fc4d415c8d2093903c606076eb675a7c..6c64165fae13ed7c21c82430423e5852272532bf 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1265,6 +1265,9 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
 
 	struct input_dev *pen_input = wacom->pen_input;
 	unsigned char *data = wacom->data;
+	int number_of_valid_frames = 0;
+	int time_interval = 15000000;
+	ktime_t time_packet_received = ktime_get();
 	int i;
 
 	if (wacom->features.type == INTUOSP2_BT ||
@@ -1285,12 +1288,30 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
 		wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF;
 	}
 
+	/* number of valid frames */
 	for (i = 0; i < pen_frames; i++) {
 		unsigned char *frame = &data[i*pen_frame_len + 1];
 		bool valid = frame[0] & 0x80;
+
+		if (valid)
+			number_of_valid_frames++;
+	}
+
+	if (number_of_valid_frames) {
+		if (wacom->hid_data.time_delayed)
+			time_interval = ktime_get() - wacom->hid_data.time_delayed;
+		time_interval /= number_of_valid_frames;
+		wacom->hid_data.time_delayed = time_packet_received;
+	}
+
+	for (i = 0; i < number_of_valid_frames; i++) {
+		unsigned char *frame = &data[i*pen_frame_len + 1];
+		bool valid = frame[0] & 0x80;
 		bool prox = frame[0] & 0x40;
 		bool range = frame[0] & 0x20;
 		bool invert = frame[0] & 0x10;
+		int frames_number_reversed = number_of_valid_frames - i - 1;
+		int event_timestamp = time_packet_received - frames_number_reversed * time_interval;
 
 		if (!valid)
 			continue;
@@ -1303,6 +1324,7 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
 			wacom->tool[0] = 0;
 			wacom->id[0] = 0;
 			wacom->serial[0] = 0;
+			wacom->hid_data.time_delayed = 0;
 			return;
 		}
 
@@ -1339,6 +1361,7 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
 					 get_unaligned_le16(&frame[11]));
 			}
 		}
+
 		if (wacom->tool[0]) {
 			input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
 			if (wacom->features.type == INTUOSP2_BT ||
@@ -1362,6 +1385,9 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
 
 		wacom->shared->stylus_in_proximity = prox;
 
+		/* add timestamp to unpack the frames */
+		input_set_timestamp(pen_input, event_timestamp);
+
 		input_sync(pen_input);
 	}
 }
@@ -1853,6 +1879,7 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
 	int fmax = field->logical_maximum;
 	unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
 	int resolution_code = code;
+	int resolution = hidinput_calc_abs_res(field, resolution_code);
 
 	if (equivalent_usage == HID_DG_TWIST) {
 		resolution_code = ABS_RZ;
@@ -1875,8 +1902,15 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
 	switch (type) {
 	case EV_ABS:
 		input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
-		input_abs_set_res(input, code,
-				  hidinput_calc_abs_res(field, resolution_code));
+
+		/* older tablet may miss physical usage */
+		if ((code == ABS_X || code == ABS_Y) && !resolution) {
+			resolution = WACOM_INTUOS_RES;
+			hid_warn(input,
+				 "Wacom usage (%d) missing resolution \n",
+				 code);
+		}
+		input_abs_set_res(input, code, resolution);
 		break;
 	case EV_KEY:
 		input_set_capability(input, EV_KEY, code);
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index ca172efcf072fa344a0f36cdce053abbc32ba2af..88badfbae999cd52f16ba3c2f483e1874a7e16d6 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -320,6 +320,7 @@ struct hid_data {
 	int bat_connected;
 	int ps_connected;
 	bool pad_input_event_flag;
+	int time_delayed;
 };
 
 struct wacom_remote_data {
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 7d3784aa20e588af58a01270f8a58f937160792f..90cc3cd49a5eea7a9ec2028462b04fe36854446b 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -1430,7 +1430,9 @@ mptlan_remove(struct pci_dev *pdev)
 {
 	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
 	struct net_device	*dev = ioc->netdev;
+	struct mpt_lan_priv *priv = netdev_priv(dev);
 
+	cancel_delayed_work_sync(&priv->post_buckets_task);
 	if(dev != NULL) {
 		unregister_netdev(dev);
 		free_netdev(dev);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 040a15a828b41bb54c71af22cd047ccc27bbbb54..c1d7bd168f1d186f7a6608a525bef7227134925b 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1423,7 +1423,7 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
 	write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);
 }
 
-static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct pasemi_mac * const mac = netdev_priv(dev);
 	struct pasemi_mac_txring * const txring = tx_ring(mac);
diff --git a/drivers/net/mdio/mdio-mvusb.c b/drivers/net/mdio/mdio-mvusb.c
index d5eabddfdf51b34412fe5dd2fa7fa828b67d6d36..11e048136ac23d9328c7d857672bcf2133f7822e 100644
--- a/drivers/net/mdio/mdio-mvusb.c
+++ b/drivers/net/mdio/mdio-mvusb.c
@@ -73,6 +73,7 @@ static int mvusb_mdio_probe(struct usb_interface *interface,
 	struct device *dev = &interface->dev;
 	struct mvusb_mdio *mvusb;
 	struct mii_bus *mdio;
+	int ret;
 
 	mdio = devm_mdiobus_alloc_size(dev, sizeof(*mvusb));
 	if (!mdio)
@@ -93,7 +94,15 @@ static int mvusb_mdio_probe(struct usb_interface *interface,
 	mdio->write = mvusb_mdio_write;
 
 	usb_set_intfdata(interface, mvusb);
-	return of_mdiobus_register(mdio, dev->of_node);
+	ret = of_mdiobus_register(mdio, dev->of_node);
+	if (ret)
+		goto put_dev;
+
+	return 0;
+
+put_dev:
+	usb_put_dev(mvusb->udev);
+	return ret;
 }
 
 static void mvusb_mdio_disconnect(struct usb_interface *interface)
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 2c9ae02ada3e0565d6fb535fc41929b52acaf3f1..41ee56015a45efc9538b373044ec772449888415 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -713,9 +713,8 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
 	skb_probe_transport_header(skb);
 
 	/* Move network header to the right position for VLAN tagged packets */
-	if ((skb->protocol == htons(ETH_P_8021Q) ||
-	     skb->protocol == htons(ETH_P_8021AD)) &&
-	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
+	if (eth_type_vlan(skb->protocol) &&
+	    vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
 		skb_set_network_header(skb, depth);
 
 	rcu_read_lock();
@@ -1165,9 +1164,8 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
 	}
 
 	/* Move network header to the right position for VLAN tagged packets */
-	if ((skb->protocol == htons(ETH_P_8021Q) ||
-	     skb->protocol == htons(ETH_P_8021AD)) &&
-	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
+	if (eth_type_vlan(skb->protocol) &&
+	    vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
 		skb_set_network_header(skb, depth);
 
 	rcu_read_lock();
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index f083fb9038c36384629631d0292a05152a6b17ce..f02a308a9ffc5efdd34949edc92b418a6b379f54 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -96,11 +96,13 @@ struct ath_keyval {
 	u8 kv_type;
 	u8 kv_pad;
 	u16 kv_len;
-	u8 kv_val[16]; /* TK */
-	u8 kv_mic[8]; /* Michael MIC key */
-	u8 kv_txmic[8]; /* Michael MIC TX key (used only if the hardware
-			 * supports both MIC keys in the same key cache entry;
-			 * in that case, kv_mic is the RX key) */
+	struct_group(kv_values,
+		u8 kv_val[16]; /* TK */
+		u8 kv_mic[8]; /* Michael MIC key */
+		u8 kv_txmic[8]; /* Michael MIC TX key (used only if the hardware
+				 * supports both MIC keys in the same key cache entry;
+				 * in that case, kv_mic is the RX key) */
+	);
 };
 
 enum ath_cipher {
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 578fdc446bc03f8ae7e4126c6f015bc069faed55..583bcf148403b13c8472f6af7a59775d21cf8bbd 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -324,10 +324,10 @@ int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
 			goto fail_free_skb;
 
 		spin_lock_bh(&rx_ring->idr_lock);
-		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
-				   rx_ring->bufs_max * 3, GFP_ATOMIC);
+		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1,
+				   (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC);
 		spin_unlock_bh(&rx_ring->idr_lock);
-		if (buf_id < 0)
+		if (buf_id <= 0)
 			goto fail_dma_unmap;
 
 		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
@@ -2564,6 +2564,9 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
 				   cookie);
 		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
 
+		if (unlikely(buf_id == 0))
+			continue;
+
 		ar = ab->pdevs[mac_id].ar;
 		rx_ring = &ar->dp.rx_refill_buf_ring;
 		spin_lock_bh(&rx_ring->idr_lock);
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 61b59a804e308d8358903d3f81950a85c97afdf0..b7b61d4f02bae279d40b947bbd890e9ba4b1c900 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -503,7 +503,7 @@ int ath_key_config(struct ath_common *common,
 
 	hk.kv_len = key->keylen;
 	if (key->keylen)
-		memcpy(hk.kv_val, key->key, key->keylen);
+		memcpy(&hk.kv_values, key->key, key->keylen);
 
 	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
 		switch (vif->type) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index b8c571d80b80485acaa6949b6b2ff059da36d1cd..fd4e598e3dfc134e29cbdba0d1c4330ef8a0fa0a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -1350,13 +1350,14 @@ static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
 {
 	struct brcmf_pub *drvr = ifp->drvr;
 	struct brcmf_wsec_pmk_le pmk;
-	int i, err;
+	int err;
+
+	memset(&pmk, 0, sizeof(pmk));
 
-	/* convert to firmware key format */
-	pmk.key_len = cpu_to_le16(pmk_len << 1);
-	pmk.flags = cpu_to_le16(BRCMF_WSEC_PASSPHRASE);
-	for (i = 0; i < pmk_len; i++)
-		snprintf(&pmk.key[2 * i], 3, "%02x", pmk_data[i]);
+	/* pass pmk directly */
+	pmk.key_len = cpu_to_le16(pmk_len);
+	pmk.flags = cpu_to_le16(0);
+	memcpy(pmk.key, pmk_data, pmk_len);
 
 	/* store psk in firmware */
 	err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_WSEC_PMK,
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
index e622948661fa87c5f4e6af0cee42b981edcfe6de..b307f0e5277799625abe7a5f70ad5f59c3058b3b 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
@@ -1086,6 +1086,7 @@ static int iwlagn_send_sta_key(struct iwl_priv *priv,
 {
 	__le16 key_flags;
 	struct iwl_addsta_cmd sta_cmd;
+	size_t to_copy;
 	int i;
 
 	spin_lock_bh(&priv->sta_lock);
@@ -1105,7 +1106,9 @@ static int iwlagn_send_sta_key(struct iwl_priv *priv,
 		sta_cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
 		for (i = 0; i < 5; i++)
 			sta_cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
-		memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
+		/* keyconf may contain MIC rx/tx keys which iwl does not use */
+		to_copy = min_t(size_t, sizeof(sta_cmd.key.key), keyconf->keylen);
+		memcpy(sta_cmd.key.key, keyconf->key, to_copy);
 		break;
 	case WLAN_CIPHER_SUITE_WEP104:
 		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 4e43efd5d1ea15d13edfbbafe562af5269e9edba..dc0a507213ca6cee946c11e5366481294adc8f56 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1214,6 +1214,9 @@ static void iwl_pci_remove(struct pci_dev *pdev)
 {
 	struct iwl_trans *trans = pci_get_drvdata(pdev);
 
+	if (!trans)
+		return;
+
 	iwl_drv_stop(trans->drv);
 
 	iwl_trans_pcie_free(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 7f8b7f7697cfdc35bed92b0a673b8260b4c75a6d..fac7cc75bc31e5187cbad84124aa8608a78fb81c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2835,7 +2835,7 @@ static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
 				  void *buf, ssize_t *size,
 				  ssize_t *bytes_copied)
 {
-	int buf_size_left = count - *bytes_copied;
+	ssize_t buf_size_left = count - *bytes_copied;
 
 	buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
 	if (*size > buf_size_left)
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index 24760d8ea6374e003b6c92cddf604a83a64a0b54..df784fec124f65cf7f8b1d26d651717e8392362f 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -301,8 +301,16 @@ static void stm32_rproc_mb_vq_work(struct work_struct *work)
 	struct stm32_mbox *mb = container_of(work, struct stm32_mbox, vq_work);
 	struct rproc *rproc = dev_get_drvdata(mb->client.dev);
 
+	mutex_lock(&rproc->lock);
+
+	if (rproc->state != RPROC_RUNNING)
+		goto unlock_mutex;
+
 	if (rproc_vq_interrupt(rproc, mb->vq_id) == IRQ_NONE)
 		dev_dbg(&rproc->dev, "no message found in vq%d\n", mb->vq_id);
+
+unlock_mutex:
+	mutex_unlock(&rproc->lock);
 }
 
 static void stm32_rproc_mb_callback(struct mbox_client *cl, void *data)
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index fbc76d69ea0b472008389102450f3d6556e27a29..2b77cbbcdccb69261c47662fe62dc0fa87ef0f07 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2159,10 +2159,13 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
 	char mybuf[64];
 	char *pbuf;
 	int i;
+	size_t bsize;
 
 	memset(mybuf, 0, sizeof(mybuf));
 
-	if (copy_from_user(mybuf, buf, nbytes))
+	bsize = min(nbytes, (sizeof(mybuf) - 1));
+
+	if (copy_from_user(mybuf, buf, bsize))
 		return -EFAULT;
 
 	pbuf = &mybuf[0];
@@ -2183,7 +2186,7 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
 			qp->lock_conflict.wq_access = 0;
 		}
 	}
-	return nbytes;
+	return bsize;
 }
 #endif
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 6bb84035807293a78d7c68213ebf6e9407fb35ee..075e2a6fb474f6e96a53c483dd3026dd3bb58bfd 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4385,6 +4385,9 @@ int iscsit_close_session(struct iscsi_session *sess)
 	iscsit_stop_time2retain_timer(sess);
 	spin_unlock_bh(&se_tpg->session_lock);
 
+	if (sess->sess_ops->ErrorRecoveryLevel == 2)
+		iscsit_free_connection_recovery_entries(sess);
+
 	/*
 	 * transport_deregister_session_configfs() will clear the
 	 * struct se_node_acl->nacl_sess pointer now as a iscsi_np process context
@@ -4412,9 +4415,6 @@ int iscsit_close_session(struct iscsi_session *sess)
 
 	transport_deregister_session(sess->se_sess);
 
-	if (sess->sess_ops->ErrorRecoveryLevel == 2)
-		iscsit_free_connection_recovery_entries(sess);
-
iscsit_free_all_ooo_cmdsns(sess); spin_lock_bh(&se_tpg->session_lock); diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h index b6dc9003b8c4a2d106d45eb95e636b9eff64a08f..0771cd2265813fad4d2929b2426fc9d310b6dcda 100644 --- a/drivers/tty/serial/8250/8250.h +++ b/drivers/tty/serial/8250/8250.h @@ -330,6 +330,13 @@ extern int serial8250_rx_dma(struct uart_8250_port *); extern void serial8250_rx_dma_flush(struct uart_8250_port *); extern int serial8250_request_dma(struct uart_8250_port *); extern void serial8250_release_dma(struct uart_8250_port *); + +static inline bool serial8250_tx_dma_running(struct uart_8250_port *p) +{ + struct uart_8250_dma *dma = p->dma; + + return dma && dma->tx_running; +} #else static inline int serial8250_tx_dma(struct uart_8250_port *p) { @@ -345,6 +352,11 @@ static inline int serial8250_request_dma(struct uart_8250_port *p) return -1; } static inline void serial8250_release_dma(struct uart_8250_port *p) { } + +static inline bool serial8250_tx_dma_running(struct uart_8250_port *p) +{ + return false; +} #endif static inline int ns16550a_goto_highspeed(struct uart_8250_port *up) diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 1f9e4b87387b50c9607bab3fb5e247531e3ec207..b19908779e3b8475bfb318fca90e2f021f736e68 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1971,19 +1971,25 @@ static int serial8250_tx_threshold_handle_irq(struct uart_port *port) static unsigned int serial8250_tx_empty(struct uart_port *port) { struct uart_8250_port *up = up_to_u8250p(port); + unsigned int result = 0; unsigned long flags; unsigned int lsr; serial8250_rpm_get(up); spin_lock_irqsave(&port->lock, flags); - lsr = serial_port_in(port, UART_LSR); - up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; + if (!serial8250_tx_dma_running(up)) { + lsr = serial_port_in(port, UART_LSR); + up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; + + if ((lsr & BOTH_EMPTY) == BOTH_EMPTY) + result = TIOCSER_TEMT; + } spin_unlock_irqrestore(&port->lock, flags); serial8250_rpm_put(up); - return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? 
TIOCSER_TEMT : 0; + return result; } unsigned int serial8250_do_get_mctrl(struct uart_port *port) diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c index 1447324ed0b643e26da608176f57dc95a1a5e369..08da436265d92a11b004f36acadeff45208171b5 100644 --- a/drivers/video/fbdev/arcfb.c +++ b/drivers/video/fbdev/arcfb.c @@ -523,7 +523,7 @@ static int arcfb_probe(struct platform_device *dev) info = framebuffer_alloc(sizeof(struct arcfb_par), &dev->dev); if (!info) - goto err; + goto err_fb_alloc; info->screen_base = (char __iomem *)videomemory; info->fbops = &arcfb_ops; @@ -535,7 +535,7 @@ static int arcfb_probe(struct platform_device *dev) if (!dio_addr || !cio_addr || !c2io_addr) { printk(KERN_WARNING "no IO addresses supplied\n"); - goto err1; + goto err_addr; } par->dio_addr = dio_addr; par->cio_addr = cio_addr; @@ -551,12 +551,12 @@ static int arcfb_probe(struct platform_device *dev) printk(KERN_INFO "arcfb: Failed req IRQ %d\n", par->irq); retval = -EBUSY; - goto err1; + goto err_addr; } } retval = register_framebuffer(info); if (retval < 0) - goto err1; + goto err_register_fb; platform_set_drvdata(dev, info); fb_info(info, "Arc frame buffer device, using %dK of video memory\n", videomemorysize >> 10); @@ -580,9 +580,12 @@ static int arcfb_probe(struct platform_device *dev) } return 0; -err1: + +err_register_fb: + free_irq(par->irq, info); +err_addr: framebuffer_release(info); -err: +err_fb_alloc: vfree(videomemory); return retval; } diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index 5136b7289e8daf7ac50ff26ce22df9d26e65ccc2..f06367cfd76417509a55314511fbfabd285c9622 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -177,6 +177,7 @@ static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb) #define EXT2_MIN_BLOCK_SIZE 1024 #define EXT2_MAX_BLOCK_SIZE 4096 #define EXT2_MIN_BLOCK_LOG_SIZE 10 +#define EXT2_MAX_BLOCK_LOG_SIZE 16 #define EXT2_BLOCK_SIZE(s) ((s)->s_blocksize) #define EXT2_ADDR_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof (__u32)) #define EXT2_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits) diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 9a6475b2ab28bd8bf9afbc8de4d26d961405553f..ab01ec7ac48c5ae9fbaaa39188e764b4fe3de7a0 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -950,6 +950,13 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) goto failed_mount; } + if (le32_to_cpu(es->s_log_block_size) > + (EXT2_MAX_BLOCK_LOG_SIZE - BLOCK_SIZE_BITS)) { + ext2_msg(sb, KERN_ERR, + "Invalid log block size: %u", + le32_to_cpu(es->s_log_block_size)); + goto failed_mount; + } blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); if (test_opt(sb, DAX)) { diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 1afd60fcd7723e04fea15354bcf6b38eb15e38b7..a43167042b6b1d6b4bed979329f9fd65cd4ee7e2 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -303,6 +303,38 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb, return desc; } +static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb, + ext4_group_t block_group, + struct buffer_head *bh) +{ + ext4_grpblk_t next_zero_bit; + unsigned long bitmap_size = sb->s_blocksize * 8; + unsigned int offset = num_clusters_in_group(sb, block_group); + + if (bitmap_size <= offset) + return 0; + + next_zero_bit = ext4_find_next_zero_bit(bh->b_data, bitmap_size, offset); + + return (next_zero_bit < bitmap_size ? 
next_zero_bit : 0); +} + +struct ext4_group_info *ext4_get_group_info(struct super_block *sb, + ext4_group_t group) +{ + struct ext4_group_info **grp_info; + long indexv, indexh; + + if (unlikely(group >= EXT4_SB(sb)->s_groups_count)) { + ext4_error(sb, "invalid group %u", group); + return NULL; + } + indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb)); + indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1); + grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv); + return grp_info[indexh]; +} + /* * Return the block number which was discovered to be invalid, or 0 if * the block bitmap is valid. @@ -377,7 +409,7 @@ static int ext4_validate_block_bitmap(struct super_block *sb, if (buffer_verified(bh)) return 0; - if (EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) + if (!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) return -EFSCORRUPTED; ext4_lock_group(sb, block_group); @@ -401,6 +433,15 @@ static int ext4_validate_block_bitmap(struct super_block *sb, EXT4_GROUP_INFO_BBITMAP_CORRUPT); return -EFSCORRUPTED; } + blk = ext4_valid_block_bitmap_padding(sb, block_group, bh); + if (unlikely(blk != 0)) { + ext4_unlock_group(sb, block_group); + ext4_error(sb, "bg %u: block %llu: padding at end of block bitmap is not set", + block_group, blk); + ext4_mark_group_bitmap_corrupted(sb, block_group, + EXT4_GROUP_INFO_BBITMAP_CORRUPT); + return -EFSCORRUPTED; + } set_buffer_verified(bh); verified: ext4_unlock_group(sb, block_group); diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 9ebf7abad5569d5ef0a3e9cf56c6dda20d37cd95..a3fae88ae85a3af23be239a6f51ad271d16722d2 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1535,12 +1535,15 @@ struct ext4_sb_info { atomic_t s_bal_success; /* we found long enough chunks */ atomic_t s_bal_allocated; /* in blocks */ atomic_t s_bal_ex_scanned; /* total extents scanned */ + atomic_t s_bal_groups_scanned; /* number of groups scanned */ atomic_t s_bal_goals; /* goal hits */ atomic_t s_bal_breaks; /* too long searches */ atomic_t s_bal_2orders; /* 2^order hits */ - spinlock_t s_bal_lock; - unsigned long s_mb_buddies_generated; - unsigned long long s_mb_generation_time; + atomic64_t s_bal_cX_groups_considered[4]; + atomic64_t s_bal_cX_hits[4]; + atomic64_t s_bal_cX_failed[4]; /* cX loop didn't find blocks */ + atomic_t s_mb_buddies_generated; /* number of buddies generated */ + atomic64_t s_mb_generation_time; atomic_t s_mb_lost_chunks; atomic_t s_mb_preallocated; atomic_t s_mb_discarded; @@ -2569,6 +2572,8 @@ extern void ext4_check_blocks_bitmap(struct super_block *); extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb, ext4_group_t block_group, struct buffer_head ** bh); +extern struct ext4_group_info *ext4_get_group_info(struct super_block *sb, + ext4_group_t group); extern int ext4_should_retry_alloc(struct super_block *sb, int *retries); extern struct buffer_head *ext4_read_block_bitmap_nowait(struct super_block *sb, @@ -2806,6 +2811,7 @@ int ext4_fc_record_regions(struct super_block *sb, int ino, extern const struct seq_operations ext4_mb_seq_groups_ops; extern long ext4_mb_stats; extern long ext4_mb_max_to_scan; +extern int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset); extern int ext4_mb_init(struct super_block *); extern int ext4_mb_release(struct super_block *); extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *, @@ -3210,19 +3216,6 @@ static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size) raw_inode->i_size_high = cpu_to_le32(i_size >> 32); } -static inline -struct ext4_group_info *ext4_get_group_info(struct 
super_block *sb, - ext4_group_t group) -{ - struct ext4_group_info **grp_info; - long indexv, indexh; - BUG_ON(group >= EXT4_SB(sb)->s_groups_count); - indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb)); - indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1); - grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv); - return grp_info[indexh]; -} - /* * Reading s_groups_count requires using smp_rmb() afterwards. See * the locking protocol documented in the comments of ext4_group_add() diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index aa99a3659edfc77503feb1d8027855f453041a97..fee54ab42bbaa979ab6fbbab424d982cd3d9f348 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -269,14 +269,12 @@ static void __es_find_extent_range(struct inode *inode, /* see if the extent has been cached */ es->es_lblk = es->es_len = es->es_pblk = 0; - if (tree->cache_es) { - es1 = tree->cache_es; - if (in_range(lblk, es1->es_lblk, es1->es_len)) { - es_debug("%u cached by [%u/%u) %llu %x\n", - lblk, es1->es_lblk, es1->es_len, - ext4_es_pblock(es1), ext4_es_status(es1)); - goto out; - } + es1 = READ_ONCE(tree->cache_es); + if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) { + es_debug("%u cached by [%u/%u) %llu %x\n", + lblk, es1->es_lblk, es1->es_len, + ext4_es_pblock(es1), ext4_es_status(es1)); + goto out; } es1 = __es_tree_search(&tree->root, lblk); @@ -295,7 +293,7 @@ static void __es_find_extent_range(struct inode *inode, } if (es1 && matching_fn(es1)) { - tree->cache_es = es1; + WRITE_ONCE(tree->cache_es, es1); es->es_lblk = es1->es_lblk; es->es_len = es1->es_len; es->es_pblk = es1->es_pblk; @@ -934,14 +932,12 @@ int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, /* find extent in cache firstly */ es->es_lblk = es->es_len = es->es_pblk = 0; - if (tree->cache_es) { - es1 = tree->cache_es; - if (in_range(lblk, es1->es_lblk, es1->es_len)) { - es_debug("%u cached by [%u/%u)\n", - lblk, es1->es_lblk, es1->es_len); - found = 1; - goto out; - } + es1 = READ_ONCE(tree->cache_es); + if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) { + es_debug("%u cached by [%u/%u)\n", + lblk, es1->es_lblk, es1->es_len); + found = 1; + goto out; } node = tree->root.rb_node; diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index c53c9b1322049663fce56249774df28cff8d96c1..d178543ca13f1bd25f81670c33af7f927cc88769 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -91,7 +91,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb, if (buffer_verified(bh)) return 0; - if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) + if (!grp || EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) return -EFSCORRUPTED; ext4_lock_group(sb, block_group); @@ -293,7 +293,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode) } if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) { grp = ext4_get_group_info(sb, block_group); - if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) { + if (!grp || unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) { fatal = -EFSCORRUPTED; goto error_return; } @@ -1045,7 +1045,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir, * Skip groups with already-known suspicious inode * tables */ - if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) + if (!grp || EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) goto next_group; } @@ -1180,6 +1180,10 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir, if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) { grp = ext4_get_group_info(sb, group); + if (!grp) { + err = -EFSCORRUPTED; + goto out; + } down_read(&grp->alloc_sem); /* * protect vs itable * 
lazyinit @@ -1523,7 +1527,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group, } gdp = ext4_get_group_desc(sb, group, &group_desc_bh); - if (!gdp) + if (!gdp || !grp) goto out; /* diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 4adf36c17e34f715f8e749bbc0bb01c9dcf20c81..436e09113c9f87c2ca6ab59b6b5d9f31bd21f0bd 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -32,6 +32,7 @@ static int get_max_inline_xattr_value_size(struct inode *inode, struct ext4_xattr_ibody_header *header; struct ext4_xattr_entry *entry; struct ext4_inode *raw_inode; + void *end; int free, min_offs; if (!EXT4_INODE_HAS_XATTR_SPACE(inode)) @@ -55,14 +56,23 @@ static int get_max_inline_xattr_value_size(struct inode *inode, raw_inode = ext4_raw_inode(iloc); header = IHDR(inode, raw_inode); entry = IFIRST(header); + end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; /* Compute min_offs. */ - for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) { + while (!IS_LAST_ENTRY(entry)) { + void *next = EXT4_XATTR_NEXT(entry); + + if (next >= end) { + EXT4_ERROR_INODE(inode, + "corrupt xattr in inline inode"); + return 0; + } if (!entry->e_value_inum && entry->e_value_size) { size_t offs = le16_to_cpu(entry->e_value_offs); if (offs < min_offs) min_offs = offs; } + entry = next; } free = min_offs - ((void *)entry - (void *)IFIRST(header)) - sizeof(__u32); @@ -348,7 +358,7 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode, error = ext4_xattr_ibody_get(inode, i.name_index, i.name, value, len); - if (error == -ENODATA) + if (error < 0) goto out; BUFFER_TRACE(is.iloc.bh, "get_write_access"); @@ -1178,6 +1188,7 @@ static int ext4_finish_convert_inline_dir(handle_t *handle, ext4_initialize_dirent_tail(dir_block, inode->i_sb->s_blocksize); set_buffer_uptodate(dir_block); + unlock_buffer(dir_block); err = ext4_handle_dirty_dirblock(handle, inode, dir_block); if (err) return err; @@ -1251,6 +1262,7 @@ static int ext4_convert_inline_data_nolock(handle_t *handle, if (!S_ISDIR(inode->i_mode)) { memcpy(data_bh->b_data, buf, inline_size); set_buffer_uptodate(data_bh); + unlock_buffer(data_bh); error = ext4_handle_dirty_metadata(handle, inode, data_bh); } else { @@ -1258,7 +1270,6 @@ static int ext4_convert_inline_data_nolock(handle_t *handle, buf, inline_size); } - unlock_buffer(data_bh); out_restore: if (error) ext4_restore_inline_data(handle, inode, iloc, buf, inline_size); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 8af9ee25def8937b741043b42c1d0df582ba1f51..da5432e5fb064bb8cd9fe2833c0dd83df4af9d3c 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3512,7 +3512,7 @@ static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset, */ flags &= ~IOMAP_WRITE; ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap); - WARN_ON_ONCE(iomap->type != IOMAP_MAPPED); + WARN_ON_ONCE(!ret && iomap->type != IOMAP_MAPPED); return ret; } diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 843840c2aced9b4d6634846a8d772d3099e8f908..8a51448b76700a4a612fd64721d3a05e4b90e48d 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -684,6 +684,8 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file, MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments); grp = ext4_get_group_info(sb, e4b->bd_group); + if (!grp) + return NULL; list_for_each(cur, &grp->bb_prealloc_list) { ext4_group_t groupnr; struct ext4_prealloc_space *pa; @@ -767,9 +769,9 @@ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) static 
noinline_for_stack void ext4_mb_generate_buddy(struct super_block *sb, - void *buddy, void *bitmap, ext4_group_t group) + void *buddy, void *bitmap, ext4_group_t group, + struct ext4_group_info *grp) { - struct ext4_group_info *grp = ext4_get_group_info(sb, group); struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); ext4_grpblk_t i = 0; @@ -816,28 +818,8 @@ void ext4_mb_generate_buddy(struct super_block *sb, clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); period = get_cycles() - period; - spin_lock(&sbi->s_bal_lock); - sbi->s_mb_buddies_generated++; - sbi->s_mb_generation_time += period; - spin_unlock(&sbi->s_bal_lock); -} - -static void mb_regenerate_buddy(struct ext4_buddy *e4b) -{ - int count; - int order = 1; - void *buddy; - - while ((buddy = mb_find_buddy(e4b, order++, &count))) { - ext4_set_bits(buddy, 0, count); - } - e4b->bd_info->bb_fragments = 0; - memset(e4b->bd_info->bb_counters, 0, - sizeof(*e4b->bd_info->bb_counters) * - (e4b->bd_sb->s_blocksize_bits + 2)); - - ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy, - e4b->bd_bitmap, e4b->bd_group); + atomic_inc(&sbi->s_mb_buddies_generated); + atomic64_add(period, &sbi->s_mb_generation_time); } /* The buddy information is attached the buddy cache inode @@ -909,6 +891,8 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) break; grinfo = ext4_get_group_info(sb, group); + if (!grinfo) + continue; /* * If page is uptodate then we came here after online resize * which added some new uninitialized group info structs, so @@ -974,6 +958,10 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) group, page->index, i * blocksize); trace_ext4_mb_buddy_bitmap_load(sb, group); grinfo = ext4_get_group_info(sb, group); + if (!grinfo) { + err = -EFSCORRUPTED; + goto out; + } grinfo->bb_fragments = 0; memset(grinfo->bb_counters, 0, sizeof(*grinfo->bb_counters) * @@ -984,7 +972,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) ext4_lock_group(sb, group); /* init the buddy */ memset(data, 0xff, blocksize); - ext4_mb_generate_buddy(sb, data, incore, group); + ext4_mb_generate_buddy(sb, data, incore, group, grinfo); ext4_unlock_group(sb, group); incore = NULL; } else { @@ -1098,6 +1086,9 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp) might_sleep(); mb_debug(sb, "init group %u\n", group); this_grp = ext4_get_group_info(sb, group); + if (!this_grp) + return -EFSCORRUPTED; + /* * This ensures that we don't reinit the buddy cache * page which map to the group from which we are already @@ -1172,6 +1163,8 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, blocks_per_page = PAGE_SIZE / sb->s_blocksize; grp = ext4_get_group_info(sb, group); + if (!grp) + return -EFSCORRUPTED; e4b->bd_blkbits = sb->s_blocksize_bits; e4b->bd_info = grp; @@ -1512,7 +1505,6 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, sb, e4b->bd_group, EXT4_GROUP_INFO_BBITMAP_CORRUPT); } - mb_regenerate_buddy(e4b); goto done; } @@ -1885,7 +1877,9 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); struct ext4_free_extent ex; - if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL)) + if (!grp) + return -EFSCORRUPTED; + if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY))) return 0; if (grp->bb_free == 0) return 0; @@ -2109,7 +2103,7 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac, 
BUG_ON(cr < 0 || cr >= 4); - if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) + if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) return false; free = grp->bb_free; @@ -2172,6 +2166,10 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, ext4_grpblk_t free; int ret = 0; + if (!grp) + return -EFSCORRUPTED; + if (sbi->s_mb_stats) + atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]); if (should_lock) ext4_lock_group(sb, group); free = grp->bb_free; @@ -2242,7 +2240,7 @@ ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group, * prefetch once, so we avoid getblk() call, which can * be expensive. */ - if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) && + if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) && EXT4_MB_GRP_NEED_INIT(grp) && ext4_free_group_clusters(sb, gdp) > 0 && !(ext4_has_group_desc_csum(sb) && @@ -2286,7 +2284,7 @@ void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group, group--; grp = ext4_get_group_info(sb, group); - if (EXT4_MB_GRP_NEED_INIT(grp) && + if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) && ext4_free_group_clusters(sb, gdp) > 0 && !(ext4_has_group_desc_csum(sb) && (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) { @@ -2446,6 +2444,9 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac) if (ac->ac_status != AC_STATUS_CONTINUE) break; } + /* Processed all groups and haven't found blocks */ + if (sbi->s_mb_stats && i == ngroups) + atomic64_inc(&sbi->s_bal_cX_failed[cr]); } if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && @@ -2475,6 +2476,9 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac) goto repeat; } } + + if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) + atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]); out: if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) err = first_err; @@ -2538,6 +2542,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) sizeof(struct ext4_group_info); grinfo = ext4_get_group_info(sb, group); + if (!grinfo) + return 0; /* Load the group info in memory only if not already loaded.
*/ if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { err = ext4_mb_load_buddy(sb, group, &e4b); @@ -2548,7 +2554,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) buddy_loaded = 1; } - memcpy(&sg, ext4_get_group_info(sb, group), i); + memcpy(&sg, grinfo, i); if (buddy_loaded) ext4_mb_unload_buddy(&e4b); @@ -2574,6 +2580,67 @@ const struct seq_operations ext4_mb_seq_groups_ops = { .show = ext4_mb_seq_groups_show, }; +int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset) +{ + struct super_block *sb = (struct super_block *)seq->private; + struct ext4_sb_info *sbi = EXT4_SB(sb); + + seq_puts(seq, "mballoc:\n"); + if (!sbi->s_mb_stats) { + seq_puts(seq, "\tmb stats collection turned off.\n"); + seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n"); + return 0; + } + seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs)); + seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success)); + + seq_printf(seq, "\tgroups_scanned: %u\n", atomic_read(&sbi->s_bal_groups_scanned)); + + seq_puts(seq, "\tcr0_stats:\n"); + seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[0])); + seq_printf(seq, "\t\tgroups_considered: %llu\n", + atomic64_read(&sbi->s_bal_cX_groups_considered[0])); + seq_printf(seq, "\t\tuseless_loops: %llu\n", + atomic64_read(&sbi->s_bal_cX_failed[0])); + + seq_puts(seq, "\tcr1_stats:\n"); + seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[1])); + seq_printf(seq, "\t\tgroups_considered: %llu\n", + atomic64_read(&sbi->s_bal_cX_groups_considered[1])); + seq_printf(seq, "\t\tuseless_loops: %llu\n", + atomic64_read(&sbi->s_bal_cX_failed[1])); + + seq_puts(seq, "\tcr2_stats:\n"); + seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[2])); + seq_printf(seq, "\t\tgroups_considered: %llu\n", + atomic64_read(&sbi->s_bal_cX_groups_considered[2])); + seq_printf(seq, "\t\tuseless_loops: %llu\n", + atomic64_read(&sbi->s_bal_cX_failed[2])); + + seq_puts(seq, "\tcr3_stats:\n"); + seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[3])); + seq_printf(seq, "\t\tgroups_considered: %llu\n", + atomic64_read(&sbi->s_bal_cX_groups_considered[3])); + seq_printf(seq, "\t\tuseless_loops: %llu\n", + atomic64_read(&sbi->s_bal_cX_failed[3])); + seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned)); + seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals)); + seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders)); + seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks)); + seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks)); + + seq_printf(seq, "\tbuddies_generated: %u/%u\n", + atomic_read(&sbi->s_mb_buddies_generated), + ext4_get_groups_count(sb)); + seq_printf(seq, "\tbuddies_time_used: %llu\n", + atomic64_read(&sbi->s_mb_generation_time)); + seq_printf(seq, "\tpreallocated: %u\n", + atomic_read(&sbi->s_mb_preallocated)); + seq_printf(seq, "\tdiscarded: %u\n", + atomic_read(&sbi->s_mb_discarded)); + return 0; +} + static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) { int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; @@ -2764,8 +2831,12 @@ static int ext4_mb_init_backend(struct super_block *sb) err_freebuddy: cachep = get_groupinfo_cache(sb->s_blocksize_bits); - while (i-- > 0) - kmem_cache_free(cachep, ext4_get_group_info(sb, i)); + while (i-- > 0) { + struct ext4_group_info *grp = ext4_get_group_info(sb, i); + + if (grp) + kmem_cache_free(cachep, grp); + } i = 
sbi->s_group_info_size; rcu_read_lock(); group_info = rcu_dereference(sbi->s_group_info); @@ -2874,7 +2945,6 @@ int ext4_mb_init(struct super_block *sb) } while (i <= sb->s_blocksize_bits + 1); spin_lock_init(&sbi->s_md_lock); - spin_lock_init(&sbi->s_bal_lock); sbi->s_mb_free_pending = 0; INIT_LIST_HEAD(&sbi->s_freed_data_list); @@ -2973,6 +3043,8 @@ int ext4_mb_release(struct super_block *sb) for (i = 0; i < ngroups; i++) { cond_resched(); grinfo = ext4_get_group_info(sb, i); + if (!grinfo) + continue; mb_group_bb_bitmap_free(grinfo); ext4_lock_group(sb, i); count = ext4_mb_cleanup_pa(grinfo); @@ -3002,17 +3074,18 @@ int ext4_mb_release(struct super_block *sb) atomic_read(&sbi->s_bal_reqs), atomic_read(&sbi->s_bal_success)); ext4_msg(sb, KERN_INFO, - "mballoc: %u extents scanned, %u goal hits, " + "mballoc: %u extents scanned, %u groups scanned, %u goal hits, " "%u 2^N hits, %u breaks, %u lost", atomic_read(&sbi->s_bal_ex_scanned), + atomic_read(&sbi->s_bal_groups_scanned), atomic_read(&sbi->s_bal_goals), atomic_read(&sbi->s_bal_2orders), atomic_read(&sbi->s_bal_breaks), atomic_read(&sbi->s_mb_lost_chunks)); ext4_msg(sb, KERN_INFO, - "mballoc: %lu generated and it took %Lu", - sbi->s_mb_buddies_generated, - sbi->s_mb_generation_time); + "mballoc: %u generated and it took %llu", + atomic_read(&sbi->s_mb_buddies_generated), + atomic64_read(&sbi->s_mb_generation_time)); ext4_msg(sb, KERN_INFO, "mballoc: %u preallocated, %u discarded", atomic_read(&sbi->s_mb_preallocated), @@ -3439,6 +3512,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, struct ext4_allocation_request *ar) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); + struct ext4_super_block *es = sbi->s_es; int bsbits, max; ext4_lblk_t end; loff_t size, start_off; @@ -3619,18 +3693,21 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size); /* define goal start in order to merge */ - if (ar->pright && (ar->lright == (start + size))) { + if (ar->pright && (ar->lright == (start + size)) && + ar->pright >= size && + ar->pright - size >= le32_to_cpu(es->s_first_data_block)) { /* merge to the right */ ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, - &ac->ac_f_ex.fe_group, - &ac->ac_f_ex.fe_start); + &ac->ac_g_ex.fe_group, + &ac->ac_g_ex.fe_start); ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; } - if (ar->pleft && (ar->lleft + 1 == start)) { + if (ar->pleft && (ar->lleft + 1 == start) && + ar->pleft + 1 < ext4_blocks_count(es)) { /* merge to the left */ ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, - &ac->ac_f_ex.fe_group, - &ac->ac_f_ex.fe_start); + &ac->ac_g_ex.fe_group, + &ac->ac_g_ex.fe_start); ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; } @@ -3642,12 +3719,13 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); - if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) { + if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) { atomic_inc(&sbi->s_bal_reqs); atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) atomic_inc(&sbi->s_bal_success); atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); + atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned); if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) atomic_inc(&sbi->s_bal_goals); @@ -3722,6 +3800,7 @@ static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, BUG_ON(start < pa->pa_pstart); BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); 
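The statistics rework running through these mballoc hunks follows one pattern: the pair of counters that used to be updated under s_bal_lock becomes an atomic_inc() plus an atomic64_add(), the spin_lock_init() goes away, and readers such as the new mb_stats seq_file handler sample each counter independently with atomic_read()/atomic64_read(). What is traded away is a consistent cross-counter snapshot, which is acceptable for debug statistics. A minimal userspace model of the conversion, using C11 atomics in place of the kernel's atomic_t/atomic64_t; the struct and function names are illustrative, not kernel symbols:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the converted counters: no lock, just relaxed atomic RMWs. */
struct mb_stats {
	atomic_uint buddies_generated;            /* was under s_bal_lock */
	atomic_uint_least64_t generation_time_ns; /* was under s_bal_lock */
};

static struct mb_stats stats;

/* Hot path: two independent atomic updates, no serialization point. */
static void record_buddy_generated(uint64_t elapsed_ns)
{
	atomic_fetch_add_explicit(&stats.buddies_generated, 1,
				  memory_order_relaxed);
	atomic_fetch_add_explicit(&stats.generation_time_ns, elapsed_ns,
				  memory_order_relaxed);
}

int main(void)
{
	record_buddy_generated(1200);
	record_buddy_generated(800);
	/* Reader side, like the seq_file handler: each counter is read
	 * on its own, so the two values are not a coherent snapshot. */
	printf("generated: %u, time: %llu ns\n",
	       atomic_load_explicit(&stats.buddies_generated,
				    memory_order_relaxed),
	       (unsigned long long)atomic_load_explicit(
			&stats.generation_time_ns, memory_order_relaxed));
	return 0;
}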
BUG_ON(pa->pa_free < len); + BUG_ON(ac->ac_b_ex.fe_len <= 0); pa->pa_free -= len; mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); @@ -3884,6 +3963,8 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, struct ext4_free_data *entry; grp = ext4_get_group_info(sb, group); + if (!grp) + return; n = rb_first(&(grp->bb_free_root)); while (n) { @@ -3911,6 +3992,9 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, int preallocated = 0; int len; + if (!grp) + return; + /* all form of preallocation discards first load group, * so the only competing code is preallocation use. * we don't need any locking here @@ -4046,10 +4130,8 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) pa = ac->ac_pa; if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) { - int winl; - int wins; - int win; - int offs; + int new_bex_start; + int new_bex_end; /* we can't allocate as much as normalizer wants. * so, found space must get proper lstart @@ -4057,26 +4139,40 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); - /* we're limited by original request in that - * logical block must be covered any way - * winl is window we can move our chunk within */ - winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical; + /* + * Use the below logic for adjusting best extent as it keeps + * fragmentation in check while ensuring logical range of best + * extent doesn't overflow out of goal extent: + * + * 1. Check if best ex can be kept at end of goal and still + * cover original start + * 2. Else, check if best ex can be kept at start of goal and + * still cover original start + * 3. Else, keep the best ex at start of original request. 
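The three cases enumerated in the comment reduce to a small pure function: given a goal range that contains the original request and a best-found length no larger than the goal, anchor the best extent at the end of the goal if that still covers the original start, else at the start of the goal, else at the original start itself. A hedged standalone sketch of that decision, working in plain block units with the kernel's cluster-to-block conversions omitted; all names are illustrative:

#include <assert.h>
#include <stdio.h>

/*
 * Model of the adjusted placement in ext4_mb_new_inode_pa().
 * Preconditions mirror the BUG_ONs in the patch: g_start <= o_start,
 * o_start < g_end, and b_len <= g_end - g_start.
 */
static unsigned long place_best_extent(unsigned long g_start,
				       unsigned long g_end,
				       unsigned long o_start,
				       unsigned long b_len)
{
	unsigned long new_start = g_end - b_len;  /* 1. end of goal */

	if (o_start >= new_start)
		return new_start;
	if (o_start < g_start + b_len)            /* 2. start of goal */
		return g_start;
	return o_start;                           /* 3. original start */
}

int main(void)
{
	/* goal [100, 200), original start 180, best extent of 32 blocks */
	unsigned long s = place_best_extent(100, 200, 180, 32);

	assert(s <= 180 && 180 < s + 32);  /* original start is covered */
	assert(100 <= s && s + 32 <= 200); /* stays inside the goal */
	printf("best extent placed at %lu\n", s); /* 168, end of goal */
	return 0;
}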
+ */ + new_bex_end = ac->ac_g_ex.fe_logical + + EXT4_C2B(sbi, ac->ac_g_ex.fe_len); + new_bex_start = new_bex_end - EXT4_C2B(sbi, ac->ac_b_ex.fe_len); + if (ac->ac_o_ex.fe_logical >= new_bex_start) + goto adjust_bex; - /* also, we should cover whole original request */ - wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len); + new_bex_start = ac->ac_g_ex.fe_logical; + new_bex_end = + new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len); + if (ac->ac_o_ex.fe_logical < new_bex_end) + goto adjust_bex; - /* the smallest one defines real window */ - win = min(winl, wins); + new_bex_start = ac->ac_o_ex.fe_logical; + new_bex_end = + new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len); - offs = ac->ac_o_ex.fe_logical % - EXT4_C2B(sbi, ac->ac_b_ex.fe_len); - if (offs && offs < win) - win = offs; +adjust_bex: + ac->ac_b_ex.fe_logical = new_bex_start; - ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - - EXT4_NUM_B2C(sbi, win); BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); + BUG_ON(new_bex_end > (ac->ac_g_ex.fe_logical + + EXT4_C2B(sbi, ac->ac_g_ex.fe_len))); } /* preallocation can change ac_b_ex, thus we store actually @@ -4102,6 +4198,8 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) ei = EXT4_I(ac->ac_inode); grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); + if (!grp) + return; pa->pa_obj_lock = &ei->i_prealloc_lock; pa->pa_inode = ac->ac_inode; @@ -4155,6 +4253,8 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac) atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); + if (!grp) + return; lg = ac->ac_lg; BUG_ON(lg == NULL); @@ -4250,7 +4350,11 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b, trace_ext4_mb_release_group_pa(sb, pa); BUG_ON(pa->pa_deleted == 0); ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); - BUG_ON(group != e4b->bd_group && pa->pa_len != 0); + if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) { + ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu", + e4b->bd_group, group, pa->pa_pstart); + return 0; + } mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); @@ -4279,6 +4383,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, int err; int free = 0; + if (!grp) + return 0; mb_debug(sb, "discard preallocation for group %u\n", group); if (list_empty(&grp->bb_prealloc_list)) goto out_dbg; @@ -4516,6 +4622,9 @@ static inline void ext4_mb_show_pa(struct super_block *sb) struct ext4_prealloc_space *pa; ext4_grpblk_t start; struct list_head *cur; + + if (!grp) + continue; ext4_lock_group(sb, i); list_for_each(cur, &grp->bb_prealloc_list) { pa = list_entry(cur, struct ext4_prealloc_space, @@ -5319,6 +5428,7 @@ static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode, struct buffer_head *bitmap_bh = NULL; struct super_block *sb = inode->i_sb; struct ext4_group_desc *gdp; + struct ext4_group_info *grp; unsigned int overflow; ext4_grpblk_t bit; struct buffer_head *gd_bh; @@ -5344,8 +5454,8 @@ static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode, overflow = 0; ext4_get_group_no_and_offset(sb, block, &block_group, &bit); - if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT( - ext4_get_group_info(sb, block_group)))) + grp = ext4_get_group_info(sb, block_group); + if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) return; /* @@ -5933,6 +6043,8 @@ int ext4_trim_fs(struct 
super_block *sb, struct fstrim_range *range) for (group = first_group; group <= last_group; group++) { grp = ext4_get_group_info(sb, group); + if (!grp) + continue; /* We only do this if the grp has never been initialized */ if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { ret = ext4_mb_init_group(sb, group, GFP_NOFS); diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c index cebea4270817e7c0218cc2f3034449ce0d0ef342..3e8bce19ad16da07419ca481666b30f90718993d 100644 --- a/fs/ext4/mmp.c +++ b/fs/ext4/mmp.c @@ -39,28 +39,36 @@ static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp) * Write the MMP block using REQ_SYNC to try to get the block on-disk * faster. */ -static int write_mmp_block(struct super_block *sb, struct buffer_head *bh) +static int write_mmp_block_thawed(struct super_block *sb, + struct buffer_head *bh) { struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data); - /* - * We protect against freezing so that we don't create dirty buffers - * on frozen filesystem. - */ - sb_start_write(sb); ext4_mmp_csum_set(sb, mmp); lock_buffer(bh); bh->b_end_io = end_buffer_write_sync; get_bh(bh); submit_bh(REQ_OP_WRITE, REQ_SYNC | REQ_META | REQ_PRIO, bh); wait_on_buffer(bh); - sb_end_write(sb); if (unlikely(!buffer_uptodate(bh))) return -EIO; - return 0; } +static int write_mmp_block(struct super_block *sb, struct buffer_head *bh) +{ + int err; + + /* + * We protect against freezing so that we don't create dirty buffers + * on frozen filesystem. + */ + sb_start_write(sb); + err = write_mmp_block_thawed(sb, bh); + sb_end_write(sb); + return err; +} + /* * Read the MMP block. It _must_ be read from disk and hence we clear the * uptodate flag on the buffer. @@ -290,6 +298,7 @@ int ext4_multi_mount_protect(struct super_block *sb, if (mmp_block < le32_to_cpu(es->s_first_data_block) || mmp_block >= ext4_blocks_count(es)) { ext4_warning(sb, "Invalid MMP block in superblock"); + retval = -EINVAL; goto failed; } @@ -315,6 +324,7 @@ int ext4_multi_mount_protect(struct super_block *sb, if (seq == EXT4_MMP_SEQ_FSCK) { dump_mmp_msg(sb, mmp, "fsck is running on the filesystem"); + retval = -EBUSY; goto failed; } @@ -328,6 +338,7 @@ int ext4_multi_mount_protect(struct super_block *sb, if (schedule_timeout_interruptible(HZ * wait_time) != 0) { ext4_warning(sb, "MMP startup interrupted, failing mount\n"); + retval = -ETIMEDOUT; goto failed; } @@ -338,6 +349,7 @@ int ext4_multi_mount_protect(struct super_block *sb, if (seq != le32_to_cpu(mmp->mmp_seq)) { dump_mmp_msg(sb, mmp, "Device is already active on another node."); + retval = -EBUSY; goto failed; } @@ -348,7 +360,11 @@ int ext4_multi_mount_protect(struct super_block *sb, seq = mmp_new_seq(); mmp->mmp_seq = cpu_to_le32(seq); - retval = write_mmp_block(sb, bh); + /* + * On mount / remount we are protected against fs freezing (by s_umount + * semaphore) and grabbing freeze protection upsets lockdep + */ + retval = write_mmp_block_thawed(sb, bh); if (retval) goto failed; @@ -357,6 +373,7 @@ int ext4_multi_mount_protect(struct super_block *sb, */ if (schedule_timeout_interruptible(HZ * wait_time) != 0) { ext4_warning(sb, "MMP startup interrupted, failing mount"); + retval = -ETIMEDOUT; goto failed; } @@ -367,6 +384,7 @@ int ext4_multi_mount_protect(struct super_block *sb, if (seq != le32_to_cpu(mmp->mmp_seq)) { dump_mmp_msg(sb, mmp, "Device is already active on another node."); + retval = -EBUSY; goto failed; } @@ -383,6 +401,7 @@ int ext4_multi_mount_protect(struct super_block *sb, EXT4_SB(sb)->s_mmp_tsk = NULL; ext4_warning(sb, "Unable to create 
kmmpd thread for %s.", sb->s_id); + retval = -ENOMEM; goto failed; } @@ -390,5 +409,5 @@ int ext4_multi_mount_protect(struct super_block *sb, failed: brelse(bh); - return 1; + return retval; } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 4d5d3c65050235aa2911845acf931cb48a1da1a1..7b87f704a6b49042d0dadb90f6f4d73fe11d92fd 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1017,6 +1017,8 @@ void ext4_mark_group_bitmap_corrupted(struct super_block *sb, struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL); int ret; + if (!grp || !gdp) + return; if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) { ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); @@ -4764,9 +4766,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) needs_recovery = (es->s_last_orphan != 0 || ext4_has_feature_journal_needs_recovery(sb)); - if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb)) - if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block))) + if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb)) { + err = ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)); + if (err) goto failed_mount3a; + } /* * The first inode we look at is the journal inode. Don't try @@ -5835,11 +5839,12 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) struct ext4_sb_info *sbi = EXT4_SB(sb); unsigned long old_sb_flags, vfs_flags; struct ext4_mount_options old_opts; - int enable_quota = 0; ext4_group_t g; unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; int err = 0; + int enable_rw = 0; #ifdef CONFIG_QUOTA + int enable_quota = 0; int i, j; char *to_free[EXT4_MAXQUOTAS]; #endif @@ -6033,14 +6038,16 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) if (err) goto restore_opts; - sb->s_flags &= ~SB_RDONLY; - if (ext4_has_feature_mmp(sb)) - if (ext4_multi_mount_protect(sb, - le64_to_cpu(es->s_mmp_block))) { - err = -EROFS; + enable_rw = 1; + if (ext4_has_feature_mmp(sb)) { + err = ext4_multi_mount_protect(sb, + le64_to_cpu(es->s_mmp_block)); + if (err) goto restore_opts; - } + } +#ifdef CONFIG_QUOTA enable_quota = 1; +#endif } } @@ -6074,9 +6081,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) } #ifdef CONFIG_QUOTA - /* Release old quota file names */ - for (i = 0; i < EXT4_MAXQUOTAS; i++) - kfree(old_opts.s_qf_names[i]); if (enable_quota) { if (sb_any_quota_suspended(sb)) dquot_resume(sb, -1); @@ -6086,10 +6090,16 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; } } + /* Release old quota file names */ + for (i = 0; i < EXT4_MAXQUOTAS; i++) + kfree(old_opts.s_qf_names[i]); #endif if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks) ext4_release_system_zone(sb); + if (enable_rw) + sb->s_flags &= ~SB_RDONLY; + if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb)) ext4_stop_mmpd(sbi); @@ -6105,6 +6115,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) return 0; restore_opts: + /* + * If there was a failing r/w to ro transition, we may need to + * re-enable quota + */ + if ((sb->s_flags & SB_RDONLY) && !(old_sb_flags & SB_RDONLY) && + sb_any_quota_suspended(sb)) + dquot_resume(sb, -1); sb->s_flags = old_sb_flags; sbi->s_mount_opt = old_opts.s_mount_opt; sbi->s_mount_opt2 = old_opts.s_mount_opt2; diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c index ce74cde6d8faa738d91432b10f5dd68ae4f761ce..b0bb4a92c9c94bee965d5fbfb341eef6fede3e70 100644 --- a/fs/ext4/sysfs.c +++ b/fs/ext4/sysfs.c @@ -539,6 +539,8 @@ int 
ext4_register_sysfs(struct super_block *sb) ext4_fc_info_show, sb); proc_create_seq_data("mb_groups", S_IRUGO, sbi->s_proc, &ext4_mb_seq_groups_ops, sb); + proc_create_single_data("mb_stats", 0444, sbi->s_proc, + ext4_seq_mb_stats_show, sb); } return 0; } diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 8af72ac28cee249c38ea5079b921f73c151ddf3d..3c6f1b5d0208e6802a71a9e76ec4d63a795fd24c 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -2557,6 +2557,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode, .in_inode = !!entry->e_value_inum, }; struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode); + int needs_kvfree = 0; int error; is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS); @@ -2579,7 +2580,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode, error = -ENOMEM; goto out; } - + needs_kvfree = 1; error = ext4_xattr_inode_get(inode, entry, buffer, value_size); if (error) goto out; @@ -2618,7 +2619,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode, out: kfree(b_entry_name); - if (entry->e_value_inum && buffer) + if (needs_kvfree && buffer) kvfree(buffer); if (is) brelse(is->iloc.bh); diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index cd46a64ace1b3601e2b259b79c1ab3660ed26db5..8ca549cc975e4edc8a774262348e41ddd56c389e 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -309,8 +309,15 @@ static int __f2fs_write_meta_page(struct page *page, trace_f2fs_writepage(page, META); - if (unlikely(f2fs_cp_error(sbi))) + if (unlikely(f2fs_cp_error(sbi))) { + if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) { + ClearPageUptodate(page); + dec_page_count(sbi, F2FS_DIRTY_META); + unlock_page(page); + return 0; + } goto redirty_out; + } if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) goto redirty_out; if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0)) @@ -1283,7 +1290,8 @@ void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type) if (!get_pages(sbi, type)) break; - if (unlikely(f2fs_cp_error(sbi))) + if (unlikely(f2fs_cp_error(sbi) && + !is_sbi_flag_set(sbi, SBI_IS_CLOSE))) break; if (type == F2FS_DIRTY_META) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 14433392227be7c16dfda18d1e48ed98802370b7..fd8722da9c4f6215b860c126b3b31ed48d746553 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -2818,7 +2818,8 @@ int f2fs_write_single_data_page(struct page *page, int *submitted, * don't drop any dirty dentry pages for keeping lastest * directory structure. */ - if (S_ISDIR(inode->i_mode)) + if (S_ISDIR(inode->i_mode) && + !is_sbi_flag_set(sbi, SBI_IS_CLOSE)) goto redirty_out; goto out; } diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 72b109685db47a971bd598325599d884f52154c4..98263180c0ead6295c94e6dc0bc5b4f1e6a08056 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -969,12 +969,20 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, goto out; } + /* + * Copied from ext4_rename: we need to protect against old.inode + * directory getting converted from inline directory format into + * a normal one. 
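As the copied comment says, f2fs_rename() adopts the ext4 convention: when the source is a directory, it stays locked for the whole rename so a concurrent lookup cannot see it halfway through conversion out of the inline format, and the lock is dropped on the success path and on every error path alike. A compact userspace model of that acquire/unwind shape, with a pthread mutex standing in for inode_lock_nested() (the I_MUTEX_NONDIR2 lockdep subclass is not modeled) and the patch's two unlock sites collapsed into one label; all names are illustrative:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct inode_model {
	pthread_mutex_t lock;  /* stands in for inode_lock_nested() */
	bool is_dir;
};

static int rename_model(struct inode_model *old_inode, bool lookup_fails)
{
	int err = 0;

	/* Lock a directory source up front, as the patch does. */
	if (old_inode->is_dir)
		pthread_mutex_lock(&old_inode->lock);

	if (lookup_fails) {
		err = -ENOENT;
		goto out_unlock_old;  /* error paths drop the lock too */
	}

	/* ... the rename work proper would happen here ... */

out_unlock_old:
	if (old_inode->is_dir)
		pthread_mutex_unlock(&old_inode->lock);
	return err;
}

int main(void)
{
	struct inode_model dir = { PTHREAD_MUTEX_INITIALIZER, true };

	printf("success path: %d\n", rename_model(&dir, false));
	printf("error path: %d\n", rename_model(&dir, true));
	return 0;
}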
+ */ + if (S_ISDIR(old_inode->i_mode)) + inode_lock_nested(old_inode, I_MUTEX_NONDIR2); + err = -ENOENT; old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page); if (!old_entry) { if (IS_ERR(old_page)) err = PTR_ERR(old_page); - goto out; + goto out_unlock_old; } if (S_ISDIR(old_inode->i_mode)) { @@ -1082,6 +1090,9 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, f2fs_unlock_op(sbi); + if (S_ISDIR(old_inode->i_mode)) + inode_unlock(old_inode); + if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir)) f2fs_sync_fs(sbi->sb, 1); @@ -1096,6 +1107,9 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, f2fs_put_page(old_dir_page, 0); out_old: f2fs_put_page(old_page, 0); +out_unlock_old: + if (S_ISDIR(old_inode->i_mode)) + inode_unlock(old_inode); out: if (whiteout) iput(whiteout); diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index db28c240dae356ee3e73266a5c3c07d27056f22e..87f8110884663590b79ad2683d934e0f65da3a80 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -405,6 +405,7 @@ static int inode_go_demote_ok(const struct gfs2_glock *gl) static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) { + struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); const struct gfs2_dinode *str = buf; struct timespec64 atime; u16 height, depth; @@ -444,7 +445,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) /* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */ gfs2_set_inode_flags(&ip->i_inode); height = be16_to_cpu(str->di_height); - if (unlikely(height > GFS2_MAX_META_HEIGHT)) + if (unlikely(height > sdp->sd_max_height)) goto corrupt; ip->i_height = (u8)height; diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index c60d5ceb0d31c3cf7d98b46667c674f19b9ae259..7e1d889dcc07a741392f3aa602b943a9859c805c 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -497,7 +497,11 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) if (type == HFSPLUS_FOLDER) { struct hfsplus_cat_folder *folder = &entry.folder; - WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_folder)); + if (fd->entrylength < sizeof(struct hfsplus_cat_folder)) { + pr_err("bad catalog folder entry\n"); + res = -EIO; + goto out; + } hfs_bnode_read(fd->bnode, &entry, fd->entryoffset, sizeof(struct hfsplus_cat_folder)); hfsplus_get_perms(inode, &folder->permissions, 1); @@ -517,7 +521,11 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) } else if (type == HFSPLUS_FILE) { struct hfsplus_cat_file *file = &entry.file; - WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_file)); + if (fd->entrylength < sizeof(struct hfsplus_cat_file)) { + pr_err("bad catalog file entry\n"); + res = -EIO; + goto out; + } hfs_bnode_read(fd->bnode, &entry, fd->entryoffset, sizeof(struct hfsplus_cat_file)); @@ -548,6 +556,7 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) pr_err("bad catalog entry used to create inode\n"); res = -EIO; } +out: return res; } @@ -556,6 +565,7 @@ int hfsplus_cat_write_inode(struct inode *inode) struct inode *main_inode = inode; struct hfs_find_data fd; hfsplus_cat_entry entry; + int res = 0; if (HFSPLUS_IS_RSRC(inode)) main_inode = HFSPLUS_I(inode)->rsrc_inode; @@ -574,7 +584,11 @@ int hfsplus_cat_write_inode(struct inode *inode) if (S_ISDIR(main_inode->i_mode)) { struct hfsplus_cat_folder *folder = &entry.folder; - WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_folder)); + if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) { + pr_err("bad catalog folder 
entry\n"); + res = -EIO; + goto out; + } hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, sizeof(struct hfsplus_cat_folder)); /* simple node checks? */ @@ -599,7 +613,11 @@ int hfsplus_cat_write_inode(struct inode *inode) } else { struct hfsplus_cat_file *file = &entry.file; - WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_file)); + if (fd.entrylength < sizeof(struct hfsplus_cat_file)) { + pr_err("bad catalog file entry\n"); + res = -EIO; + goto out; + } hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, sizeof(struct hfsplus_cat_file)); hfsplus_inode_write_fork(inode, &file->data_fork); @@ -620,5 +638,5 @@ int hfsplus_cat_write_inode(struct inode *inode) set_bit(HFSPLUS_I_CAT_DIRTY, &HFSPLUS_I(inode)->flags); out: hfs_find_exit(&fd); - return 0; + return res; } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 59411f2f6cedf4db9610e96ed658b3a72656928c..00205c2fcb5a08fd0c602e917600b7a6ebe24790 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -118,7 +118,6 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_CSTATE_STARTING, CPUHP_AP_PERF_XTENSA_STARTING, CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, - CPUHP_AP_ARM_SDEI_STARTING, CPUHP_AP_ARM_VFP_STARTING, CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING, diff --git a/include/linux/device.h b/include/linux/device.h index 5dc0f81e4f9d41d9d9b59cd1197fb7c2d61ff6a6..4f7e0c85e11fa71b9b22ffb310cb2e2b712edbfb 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -818,6 +818,7 @@ int device_online(struct device *dev); void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); void device_set_of_node_from_dev(struct device *dev, const struct device *dev2); +void device_set_node(struct device *dev, struct fwnode_handle *fwnode); static inline int dev_num_vf(struct device *dev) { diff --git a/include/linux/dim.h b/include/linux/dim.h index 6c5733981563eadf5f06c59c5dc97df961692b02..f343bc9aa2ec95adaa3ea685c38dd596758d88b5 100644 --- a/include/linux/dim.h +++ b/include/linux/dim.h @@ -236,8 +236,9 @@ void dim_park_tired(struct dim *dim); * * Calculate the delta between two samples (in data rates). * Takes into consideration counter wrap-around. + * Returned boolean indicates whether curr_stats are reliable. */ -void dim_calc_stats(struct dim_sample *start, struct dim_sample *end, +bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end, struct dim_stats *curr_stats); /** diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 41a518336673b496faf7ce0ea2a65068fe6814f2..4e7e72f3da5bd7aad6385eb49ce9b04f65b74e05 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -626,6 +626,23 @@ static inline __be16 vlan_get_protocol(const struct sk_buff *skb) return __vlan_get_protocol(skb, skb->protocol, NULL); } +/* This version of __vlan_get_protocol() also pulls mac header in skb->head */ +static inline __be16 vlan_get_protocol_and_depth(struct sk_buff *skb, + __be16 type, int *depth) +{ + int maclen; + + type = __vlan_get_protocol(skb, type, &maclen); + + if (type) { + if (!pskb_may_pull(skb, maclen)) + type = 0; + else if (depth) + *depth = maclen; + } + return type; +} + /* A getter for the SKB protocol field which will handle VLAN tags consistently * whether VLAN acceleration is enabled or not. 
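The helper added to if_vlan.h exists because __vlan_get_protocol() can report a header depth for bytes that are not yet in the linear skb area; vlan_get_protocol_and_depth() only reports a protocol once pskb_may_pull() has made those maclen bytes accessible, so callers such as br_dev_queue_push_xmit() and skb_network_protocol() never set a header offset past the data they actually hold. A hedged model of the combined effect, parsing VLAN tags on a flat buffer with the bounds check folded into the walk; constants and names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88a8

/* Walk VLAN tags; report the inner ethertype and header depth, or 0
 * when the frame is truncated, the analogue of the pull check. */
static uint16_t get_protocol_and_depth(const uint8_t *frame, size_t len,
				       size_t *depth)
{
	size_t off = 12;  /* destination MAC + source MAC */
	uint16_t type;

	for (;;) {
		if (off + 2 > len)
			return 0;  /* next ethertype is missing */
		type = (uint16_t)(frame[off] << 8 | frame[off + 1]);
		off += 2;
		if (type != ETH_P_8021Q && type != ETH_P_8021AD)
			break;
		off += 2;  /* skip this tag's TCI */
	}
	*depth = off;
	return type;
}

int main(void)
{
	/* one 802.1Q tag (VID 5) around an IPv4 payload */
	uint8_t frame[64] = { 0 };
	size_t depth = 0;
	uint16_t type;

	frame[12] = 0x81; frame[13] = 0x00;  /* TPID 0x8100 */
	frame[14] = 0x00; frame[15] = 0x05;  /* TCI, VID 5 */
	frame[16] = 0x08; frame[17] = 0x00;  /* inner type 0x0800 */

	type = get_protocol_and_depth(frame, sizeof(frame), &depth);
	printf("type 0x%04x at depth %zu\n", type, depth);  /* 0x0800, 18 */
	return 0;
}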
*/ diff --git a/include/linux/printk.h b/include/linux/printk.h index fe7eb2351610d0181caa8c42d0bccaa12bd3f790..344f6da3d4c367134888438dee919fbc42cf60f5 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -623,4 +623,23 @@ static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type, #define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \ print_hex_dump_debug(prefix_str, prefix_type, 16, 1, buf, len, true) +#ifdef CONFIG_PRINTK +extern void __printk_safe_enter(void); +extern void __printk_safe_exit(void); +/* + * The printk_deferred_enter/exit macros are available only as a hack for + * some code paths that need to defer all printk console printing. Interrupts + * must be disabled for the deferred duration. + */ +#define printk_deferred_enter __printk_safe_enter +#define printk_deferred_exit __printk_safe_exit +#else +static inline void printk_deferred_enter(void) +{ +} +static inline void printk_deferred_exit(void) +{ +} +#endif + #endif diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index d609e957a3ec0180d11f3f8bd097d643a69796c3..c02c3bb0fe091dc664aec86a71ce3cf012cfd5c4 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -549,8 +549,10 @@ struct ip_vs_conn { */ struct ip_vs_app *app; /* bound ip_vs_app object */ void *app_data; /* Application private data */ - struct ip_vs_seq in_seq; /* incoming seq. struct */ - struct ip_vs_seq out_seq; /* outgoing seq. struct */ + struct_group(sync_conn_opt, + struct ip_vs_seq in_seq; /* incoming seq. struct */ + struct ip_vs_seq out_seq; /* outgoing seq. struct */ + ); const struct ip_vs_pe *pe; char *pe_data; diff --git a/include/net/sock.h b/include/net/sock.h index 0c35ee2cf92fd87e8386a8b930d3ba4a3eb58a68..07b554644e811fe0824925eec5f325e4184c004c 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2543,7 +2543,7 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, __sock_recv_ts_and_drops(msg, sk, skb); else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP))) sock_write_timestamp(sk, skb->tstamp); - else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP)) + else if (unlikely(sock_read_timestamp(sk) == SK_DEFAULT_STAMP)) sock_write_timestamp(sk, 0); } diff --git a/include/net/tcp.h b/include/net/tcp.h index 08e3a2488779ed08097ace5c6eb98e98ef8f71e2..114ed8a65a887c4a673045a31aebd0743ec8ecd2 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -383,6 +383,7 @@ void tcp_update_metrics(struct sock *sk); void tcp_init_metrics(struct sock *sk); void tcp_metrics_init(void); bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst); +void __tcp_close(struct sock *sk, long timeout); void tcp_close(struct sock *sk, long timeout); void tcp_init_sock(struct sock *sk); void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb); diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c index 8aaaaef99f09fc26440e27e650bb1f01a92e5a8b..f753965726205c7f6b7f59366065048a5e0095d3 100644 --- a/kernel/bpf/bpf_local_storage.c +++ b/kernel/bpf/bpf_local_storage.c @@ -48,11 +48,21 @@ owner_storage(struct bpf_local_storage_map *smap, void *owner) return map->ops->map_owner_storage_ptr(owner); } +static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem) +{ + return !hlist_unhashed_lockless(&selem->snode); +} + static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem) { return !hlist_unhashed(&selem->snode); } +static bool selem_linked_to_map_lockless(const struct 
bpf_local_storage_elem *selem) +{ + return !hlist_unhashed_lockless(&selem->map_node); +} + static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem) { return !hlist_unhashed(&selem->map_node); @@ -140,7 +150,7 @@ static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem) struct bpf_local_storage *local_storage; bool free_local_storage = false; - if (unlikely(!selem_linked_to_storage(selem))) + if (unlikely(!selem_linked_to_storage_lockless(selem))) /* selem has already been unlinked from sk */ return; @@ -167,7 +177,7 @@ void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem) struct bpf_local_storage_map *smap; struct bpf_local_storage_map_bucket *b; - if (unlikely(!selem_linked_to_map(selem))) + if (unlikely(!selem_linked_to_map_lockless(selem))) /* selem has already be unlinked from smap */ return; @@ -365,7 +375,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, err = check_flags(old_sdata, map_flags); if (err) return ERR_PTR(err); - if (old_sdata && selem_linked_to_storage(SELEM(old_sdata))) { + if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) { copy_map_value_locked(&smap->map, old_sdata->data, value, false); return old_sdata; diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c index 952595c678b37299a111e288cb8d4e711759dd14..4e419ca6d61149f40d2f1692f89b1a907bfa4bf4 100644 --- a/kernel/rcu/refscale.c +++ b/kernel/rcu/refscale.c @@ -625,7 +625,7 @@ ref_scale_cleanup(void) static int ref_scale_shutdown(void *arg) { - wait_event(shutdown_wq, shutdown_start); + wait_event_idle(shutdown_wq, shutdown_start); smp_mb(); // Wake before output. ref_scale_cleanup(); diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index ef6570137dcd5411a0f877d2c3c0906049c08d45..401c1f331cafa51a3055c011391605aa5d95f2a0 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -707,9 +707,11 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp) int ndetected = 0; struct task_struct *t; - if (!READ_ONCE(rnp->exp_tasks)) - return 0; raw_spin_lock_irqsave_rcu_node(rnp, flags); + if (!rnp->exp_tasks) { + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + return 0; + } t = list_entry(rnp->exp_tasks->prev, struct task_struct, rcu_node_entry); list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c index f08d9c56f712e76c968c31b4195865fb9d5ce3d7..e77f12bb3c7748fa872a730778be9780e8f22e1e 100644 --- a/lib/cpu_rmap.c +++ b/lib/cpu_rmap.c @@ -232,7 +232,8 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap) for (index = 0; index < rmap->used; index++) { glue = rmap->obj[index]; - irq_set_affinity_notifier(glue->notify.irq, NULL); + if (glue) + irq_set_affinity_notifier(glue->notify.irq, NULL); } cpu_rmap_put(rmap); @@ -268,6 +269,7 @@ static void irq_cpu_rmap_release(struct kref *ref) container_of(ref, struct irq_glue, notify.kref); cpu_rmap_put(glue->rmap); + glue->rmap->obj[glue->index] = NULL; kfree(glue); } @@ -297,6 +299,7 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq) rc = irq_set_affinity_notifier(irq, &glue->notify); if (rc) { cpu_rmap_put(glue->rmap); + rmap->obj[glue->index] = NULL; kfree(glue); } return rc; diff --git a/lib/dim/dim.c b/lib/dim/dim.c index 38045d6d05381ebb89d73369b0c60766505ef9be..e89aaf07bde502ec4def08d1775f925c62de7838 100644 --- a/lib/dim/dim.c +++ b/lib/dim/dim.c @@ -54,7 +54,7 @@ void dim_park_tired(struct dim *dim) } EXPORT_SYMBOL(dim_park_tired); -void dim_calc_stats(struct dim_sample *start, struct dim_sample 
*end, +bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end, struct dim_stats *curr_stats) { /* u32 holds up to 71 minutes, should be enough */ @@ -66,7 +66,7 @@ void dim_calc_stats(struct dim_sample *start, struct dim_sample *end, start->comp_ctr); if (!delta_us) - return; + return false; curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); @@ -79,5 +79,6 @@ void dim_calc_stats(struct dim_sample *start, struct dim_sample *end, else curr_stats->cpe_ratio = 0; + return true; } EXPORT_SYMBOL(dim_calc_stats); diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c index dae3b51ac3d9befb2cb57a8ce77fc6d2a8e1fde1..0e4f3a686f1dec28b9912cbd95ab00637dfa705a 100644 --- a/lib/dim/net_dim.c +++ b/lib/dim/net_dim.c @@ -227,7 +227,8 @@ void net_dim(struct dim *dim, struct dim_sample end_sample) dim->start_sample.event_ctr); if (nevents < DIM_NEVENTS) break; - dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats); + if (!dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats)) + break; if (net_dim_decision(&curr_stats, dim)) { dim->state = DIM_APPLY_NEW_PROFILE; schedule_work(&dim->work); diff --git a/lib/dim/rdma_dim.c b/lib/dim/rdma_dim.c index f7e26c7b4749fc725c9756a4f9899de7a9163ed1..d32c8b105adc96f203bddfbf180b7c6bdaa0c610 100644 --- a/lib/dim/rdma_dim.c +++ b/lib/dim/rdma_dim.c @@ -88,7 +88,8 @@ void rdma_dim(struct dim *dim, u64 completions) nevents = curr_sample->event_ctr - dim->start_sample.event_ctr; if (nevents < DIM_NEVENTS) break; - dim_calc_stats(&dim->start_sample, curr_sample, &curr_stats); + if (!dim_calc_stats(&dim->start_sample, curr_sample, &curr_stats)) + break; if (rdma_dim_decision(&curr_stats, dim)) { dim->state = DIM_APPLY_NEW_PROFILE; schedule_work(&dim->work); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index fefcaf0180a70704a503394987e8285fbddf51ed..6993c6b27dcfa81d59d91a582e534928f7980689 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6058,7 +6058,21 @@ static void __build_all_zonelists(void *data) int nid; int __maybe_unused cpu; pg_data_t *self = data; + unsigned long flags; + /* + * Explicitly disable this CPU's interrupts before taking seqlock + * to prevent any IRQ handler from calling into the page allocator + * (e.g. GFP_ATOMIC) that could hit zonelist_iter_begin and livelock. + */ + local_irq_save(flags); + /* + * Explicitly disable this CPU's synchronous printk() before taking + * seqlock to prevent any printk() from trying to hold port->lock, for + * tty_insert_flip_string_and_push_buffer() on other CPU might be + * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held. 
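The two comments in this hunk carry the whole fix: once write_seqlock() makes zonelist_update_seq odd, anything that runs on this CPU and re-enters the page allocator (a GFP_ATOMIC allocation from an interrupt handler) can spin forever in zonelist_iter_begin(), and a synchronous printk() racing a tty flip on another CPU can deadlock on port->lock, so interrupts are disabled and console printing is deferred before the write side is taken, then released strictly in reverse. A schematic of that nesting with stub guards; this illustrates the ordering only, not the kernel implementation:

#include <stdio.h>

/* Stubs for local_irq_save()/printk_deferred_enter()/write_seqlock();
 * only the nesting order matters here. */
static void irq_save(void)             { puts("1. local_irq_save"); }
static void printk_defer_enter(void)   { puts("2. printk_deferred_enter"); }
static void seqlock_write_lock(void)   { puts("3. write_seqlock"); }
static void seqlock_write_unlock(void) { puts("4. write_sequnlock"); }
static void printk_defer_exit(void)    { puts("5. printk_deferred_exit"); }
static void irq_restore(void)          { puts("6. local_irq_restore"); }

static void rebuild_zonelists_model(void)
{
	irq_save();            /* no IRQ may allocate on this CPU ...    */
	printk_defer_enter();  /* ... and no printk takes port->lock ... */
	seqlock_write_lock();  /* ... while the sequence count is odd    */

	/* the zonelists would be rebuilt here */

	seqlock_write_unlock();
	printk_defer_exit();
	irq_restore();         /* released in exact reverse order */
}

int main(void)
{
	rebuild_zonelists_model();
	return 0;
}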
+ */ + printk_deferred_enter(); write_seqlock(&zonelist_update_seq); #ifdef CONFIG_NUMA @@ -6093,6 +6107,8 @@ static void __build_all_zonelists(void *data) } write_sequnlock(&zonelist_update_seq); + printk_deferred_exit(); + local_irq_restore(flags); } static noinline void __init diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index e28ffadd1371933e83c84a4bfcaf7784de48c44e..4610f3a13966fa4f1379545c8fd1a2cf2f3ec273 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -43,7 +43,7 @@ int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb skb->protocol == htons(ETH_P_8021AD))) { int depth; - if (!__vlan_get_protocol(skb, skb->protocol, &depth)) + if (!vlan_get_protocol_and_depth(skb, skb->protocol, &depth)) goto drop; skb_set_network_header(skb, depth); diff --git a/net/core/datagram.c b/net/core/datagram.c index bc92683fdcdb4fc6fc2693808d0dceeeab24a463..9e77695d1bdc23d62fcdd46de665de4d00f65cdc 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -799,18 +799,21 @@ __poll_t datagram_poll(struct file *file, struct socket *sock, { struct sock *sk = sock->sk; __poll_t mask; + u8 shutdown; sock_poll_wait(file, sock, wait); mask = 0; /* exceptional events? */ - if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue)) + if (READ_ONCE(sk->sk_err) || + !skb_queue_empty_lockless(&sk->sk_error_queue)) mask |= EPOLLERR | (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0); - if (sk->sk_shutdown & RCV_SHUTDOWN) + shutdown = READ_ONCE(sk->sk_shutdown); + if (shutdown & RCV_SHUTDOWN) mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; - if (sk->sk_shutdown == SHUTDOWN_MASK) + if (shutdown == SHUTDOWN_MASK) mask |= EPOLLHUP; /* readable? */ @@ -819,10 +822,12 @@ __poll_t datagram_poll(struct file *file, struct socket *sock, /* Connection-based need to check for termination and startup */ if (connection_based(sk)) { - if (sk->sk_state == TCP_CLOSE) + int state = READ_ONCE(sk->sk_state); + + if (state == TCP_CLOSE) mask |= EPOLLHUP; /* connection hasn't started yet? */ - if (sk->sk_state == TCP_SYN_SENT) + if (state == TCP_SYN_SENT) return mask; } diff --git a/net/core/dev.c b/net/core/dev.c index 413c2a08d79db8c1d56ff990a0fb333e6fc4ba8d..29e6e11c481c64e1cb6094bd024128fc74f66d68 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2628,6 +2628,8 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, bool active = false; unsigned int nr_ids; + WARN_ON_ONCE(index >= dev->num_tx_queues); + if (dev->num_tc) { /* Do not allow XPS on subordinate device directly */ num_tc = dev->num_tc; @@ -3320,7 +3322,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth) type = eth->h_proto; } - return __vlan_get_protocol(skb, type, depth); + return vlan_get_protocol_and_depth(skb, type, depth); } /** diff --git a/net/core/stream.c b/net/core/stream.c index cd60746877b1e509aa78b25ee0dca2082b13d606..422ee97e4f2be1ee47dd04d53fadfe12a1d1e9bd 100644 --- a/net/core/stream.c +++ b/net/core/stream.c @@ -73,8 +73,8 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p) add_wait_queue(sk_sleep(sk), &wait); sk->sk_write_pending++; done = sk_wait_event(sk, timeo_p, - !sk->sk_err && - !((1 << sk->sk_state) & + !READ_ONCE(sk->sk_err) && + !((1 << READ_ONCE(sk->sk_state)) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)), &wait); remove_wait_queue(sk_sleep(sk), &wait); sk->sk_write_pending--; @@ -87,9 +87,9 @@ EXPORT_SYMBOL(sk_stream_wait_connect); * sk_stream_closing - Return 1 if we still have things to send in our buffers. 
* @sk: socket to verify */ -static inline int sk_stream_closing(struct sock *sk) +static int sk_stream_closing(const struct sock *sk) { - return (1 << sk->sk_state) & + return (1 << READ_ONCE(sk->sk_state)) & (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK); } @@ -142,8 +142,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); sk->sk_write_pending++; - sk_wait_event(sk, ¤t_timeo, sk->sk_err || - (sk->sk_shutdown & SEND_SHUTDOWN) || + sk_wait_event(sk, ¤t_timeo, READ_ONCE(sk->sk_err) || + (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) || (sk_stream_memory_free(sk) && !vm_wait), &wait); sk->sk_write_pending--; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index b8671d601978f14c1a845c29a97f14505afb299c..5fd87bd42745ef1babc8ca48d63b3258a58c345e 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -893,7 +893,7 @@ int inet_shutdown(struct socket *sock, int how) EPOLLHUP, even on eg. unconnected UDP sockets -- RR */ fallthrough; default: - sk->sk_shutdown |= how; + WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | how); if (sk->sk_prot->shutdown) sk->sk_prot->shutdown(sk, how); break; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f8cce16dd48c43da725537f5f548d47286da2881..7e133ccfd3deb7324b915a386e9eaf8c567ec18e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -506,6 +506,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) __poll_t mask; struct sock *sk = sock->sk; const struct tcp_sock *tp = tcp_sk(sk); + u8 shutdown; int state; sock_poll_wait(file, sock, wait); @@ -548,9 +549,10 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) * NOTE. Check for TCP_CLOSE is added. The goal is to prevent * blocking on fresh not-connected or disconnected socket. --ANK */ - if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) + shutdown = READ_ONCE(sk->sk_shutdown); + if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) mask |= EPOLLHUP; - if (sk->sk_shutdown & RCV_SHUTDOWN) + if (shutdown & RCV_SHUTDOWN) mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; /* Connected or passive Fast Open socket? 
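A pattern repeated through these socket hunks: tcp_poll() and datagram_poll() run without the socket lock, so a field a writer may flip concurrently, such as sk->sk_shutdown, is loaded exactly once into a local with READ_ONCE() and every later test uses that snapshot; two independent reads of the same field could otherwise disagree within a single poll and produce an inconsistent event mask. A userspace model with a C11 relaxed atomic standing in for the READ_ONCE/WRITE_ONCE pair; the flag and mask values are illustrative:

#include <stdatomic.h>
#include <stdio.h>

#define RCV_SHUTDOWN   1u
#define SEND_SHUTDOWN  2u
#define SHUTDOWN_MASK  3u

#define EPOLLIN    0x001u
#define EPOLLOUT   0x004u
#define EPOLLHUP   0x010u
#define EPOLLRDHUP 0x2000u

static _Atomic unsigned char sk_shutdown;  /* written by shutdown/close */

/* Poll side: one marked load, then consistent tests on the snapshot. */
static unsigned int poll_mask(void)
{
	unsigned char shutdown =
		atomic_load_explicit(&sk_shutdown, memory_order_relaxed);
	unsigned int mask = 0;

	if (shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN;
	if (!(shutdown & SEND_SHUTDOWN))
		mask |= EPOLLOUT;
	return mask;
}

int main(void)
{
	atomic_store_explicit(&sk_shutdown, RCV_SHUTDOWN,
			      memory_order_relaxed);  /* peer sent FIN */
	printf("after RCV_SHUTDOWN: %#x\n", poll_mask());

	atomic_store_explicit(&sk_shutdown, SHUTDOWN_MASK,
			      memory_order_relaxed);  /* full shutdown */
	printf("after SHUTDOWN_MASK: %#x\n", poll_mask());
	return 0;
}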
*/ @@ -566,7 +568,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) if (tcp_stream_is_readable(tp, target, sk)) mask |= EPOLLIN | EPOLLRDNORM; - if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { + if (!(shutdown & SEND_SHUTDOWN)) { if (__sk_stream_is_writeable(sk, 1)) { mask |= EPOLLOUT | EPOLLWRNORM; } else { /* send SIGIO later */ @@ -2510,14 +2512,13 @@ bool tcp_check_oom(struct sock *sk, int shift) return too_many_orphans || out_of_socket_memory; } -void tcp_close(struct sock *sk, long timeout) +void __tcp_close(struct sock *sk, long timeout) { struct sk_buff *skb; int data_was_unread = 0; int state; - lock_sock(sk); - sk->sk_shutdown = SHUTDOWN_MASK; + WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); if (sk->sk_state == TCP_LISTEN) { tcp_set_state(sk, TCP_CLOSE); @@ -2680,6 +2681,12 @@ void tcp_close(struct sock *sk, long timeout) out: bh_unlock_sock(sk); local_bh_enable(); +} + +void tcp_close(struct sock *sk, long timeout) +{ + lock_sock(sk); + __tcp_close(sk, timeout); release_sock(sk); sock_put(sk); } @@ -2777,7 +2784,7 @@ int tcp_disconnect(struct sock *sk, int flags) if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) inet_reset_saddr(sk); - sk->sk_shutdown = 0; + WRITE_ONCE(sk->sk_shutdown, 0); sock_reset_flag(sk, SOCK_DONE); tp->srtt_us = 0; tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); @@ -4166,7 +4173,7 @@ void tcp_done(struct sock *sk) if (req) reqsk_fastopen_remove(sk, req, false); - sk->sk_shutdown = SHUTDOWN_MASK; + WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_state_change(sk); diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 926e29e84b40bbf0c115a8c389ce556e7b247685..d0ca1fc325cd6210336a92a600444ff2b98cfbef 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -262,7 +262,7 @@ static int tcp_bpf_wait_data(struct sock *sk, struct sk_psock *psock, sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); ret = sk_wait_event(sk, &timeo, !list_empty(&psock->ingress_msg) || - !skb_queue_empty(&sk->sk_receive_queue), &wait); + !skb_queue_empty_lockless(&sk->sk_receive_queue), &wait); sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); remove_wait_queue(sk_sleep(sk), &wait); return ret; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 541758cd0b81f3311fccf329f925cfc936cb42b6..b98b7920c402923c1de0082e73c1371c2e1ef1ea 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4323,7 +4323,7 @@ void tcp_fin(struct sock *sk) inet_csk_schedule_ack(sk); - sk->sk_shutdown |= RCV_SHUTDOWN; + WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN); sock_set_flag(sk, SOCK_DONE); switch (sk->sk_state) { @@ -6504,7 +6504,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) break; tcp_set_state(sk, TCP_FIN_WAIT2); - sk->sk_shutdown |= SEND_SHUTDOWN; + WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | SEND_SHUTDOWN); sk_dst_confirm(sk); diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 99a37c411323edcf59733486a07a8a1e1ce6b3a6..01e26698285a05c4e14d293d9fd8b40b41a688b8 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -582,7 +582,8 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout) add_wait_queue(sk_sleep(sk), &wait); while (1) { - if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE, &wait)) + if (sk_wait_event(sk, &timeout, + READ_ONCE(sk->sk_state) == TCP_CLOSE, &wait)) break; rc = -ERESTARTSYS; if (signal_pending(current)) @@ -602,7 +603,8 @@ static bool llc_ui_wait_for_conn(struct sock *sk, long timeout) add_wait_queue(sk_sleep(sk), &wait); while (1) { - if (sk_wait_event(sk, 
&timeout, sk->sk_state != TCP_SYN_SENT, &wait)) + if (sk_wait_event(sk, &timeout, + READ_ONCE(sk->sk_state) != TCP_SYN_SENT, &wait)) break; if (signal_pending(current) || !timeout) break; @@ -621,7 +623,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout) while (1) { rc = 0; if (sk_wait_event(sk, &timeout, - (sk->sk_shutdown & RCV_SHUTDOWN) || + (READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN) || (!llc_data_accept_state(llc->state) && !llc->remote_busy_flag && !llc->p_flag), &wait)) diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 60332fdb6dd4429fb1e22a645476b3954d9f04e3..5b7578adbf0f1ca793769045f0ce49b58f3f2a9e 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -674,9 +674,11 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct) rcu_read_lock(); ct_hook = rcu_dereference(nf_ct_hook); - BUG_ON(ct_hook == NULL); - ct_hook->destroy(nfct); + if (ct_hook) + ct_hook->destroy(nfct); rcu_read_unlock(); + + WARN_ON(!ct_hook); } EXPORT_SYMBOL(nf_conntrack_destroy); diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index daab857c52a807c87e3afc334b105430a98e69cb..fc8db03d3efca089ce91e40fcb8b1fe87271c167 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -603,7 +603,7 @@ static void ip_vs_sync_conn_v0(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { struct ip_vs_sync_conn_options *opt = (struct ip_vs_sync_conn_options *)&s[1]; - memcpy(opt, &cp->in_seq, sizeof(*opt)); + memcpy(opt, &cp->sync_conn_opt, sizeof(*opt)); } m->nr_conns++; diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index e12b52019a550c584ae0436b22f522a7cf9022dc..b613de96ad855d76e8cd9241769e13480b171e15 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -1170,11 +1170,12 @@ static int __init nf_conntrack_standalone_init(void) nf_conntrack_htable_size_user = nf_conntrack_htable_size; #endif + nf_conntrack_init_end(); + ret = register_pernet_subsys(&nf_conntrack_net_ops); if (ret < 0) goto out_pernet; - nf_conntrack_init_end(); return 0; out_pernet: diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index eedb16517f16a36c228189eb1cbef9ae93b93001..651f8ca912af061498fec03b1bc06fa93ac28876 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1992,7 +1992,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, skb_free_datagram(sk, skb); - if (nlk->cb_running && + if (READ_ONCE(nlk->cb_running) && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { ret = netlink_dump(sk); if (ret) { @@ -2304,7 +2304,7 @@ static int netlink_dump(struct sock *sk) if (cb->done) cb->done(cb); - nlk->cb_running = false; + WRITE_ONCE(nlk->cb_running, false); module = cb->module; skb = cb->skb; mutex_unlock(nlk->cb_mutex); @@ -2367,7 +2367,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, goto error_put; } - nlk->cb_running = true; + WRITE_ONCE(nlk->cb_running, true); nlk->dump_done_errno = INT_MAX; mutex_unlock(nlk->cb_mutex); @@ -2653,7 +2653,7 @@ static int netlink_native_seq_show(struct seq_file *seq, void *v) nlk->groups ? 
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index eedb16517f16a36c228189eb1cbef9ae93b93001..651f8ca912af061498fec03b1bc06fa93ac28876 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1992,7 +1992,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,

 	skb_free_datagram(sk, skb);

-	if (nlk->cb_running &&
+	if (READ_ONCE(nlk->cb_running) &&
 	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
 		ret = netlink_dump(sk);
 		if (ret) {
@@ -2304,7 +2304,7 @@ static int netlink_dump(struct sock *sk)
 	if (cb->done)
 		cb->done(cb);

-	nlk->cb_running = false;
+	WRITE_ONCE(nlk->cb_running, false);
 	module = cb->module;
 	skb = cb->skb;
 	mutex_unlock(nlk->cb_mutex);
@@ -2367,7 +2367,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 		goto error_put;
 	}

-	nlk->cb_running = true;
+	WRITE_ONCE(nlk->cb_running, true);
 	nlk->dump_done_errno = INT_MAX;

 	mutex_unlock(nlk->cb_mutex);
@@ -2653,7 +2653,7 @@ static int netlink_native_seq_show(struct seq_file *seq, void *v)
 			   nlk->groups ? (u32)nlk->groups[0] : 0,
 			   sk_rmem_alloc_get(s),
 			   sk_wmem_alloc_get(s),
-			   nlk->cb_running,
+			   READ_ONCE(nlk->cb_running),
 			   refcount_read(&s->sk_refcnt),
 			   atomic_read(&s->sk_drops),
 			   sock_i_ino(s)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 2e766490a739bb1791e18e836af81aa47af93709..3c05414cd3f83fb8033e942facbb121aebfda1d5 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1897,10 +1897,8 @@ static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
 	/* Move network header to the right position for VLAN tagged packets */
 	if (likely(skb->dev->type == ARPHRD_ETHER) &&
 	    eth_type_vlan(skb->protocol) &&
-	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0) {
-		if (pskb_may_pull(skb, depth))
-			skb_set_network_header(skb, depth);
-	}
+	    vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
+		skb_set_network_header(skb, depth);

 	skb_probe_transport_header(skb);
 }
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 84102db5bb31452ed1e8aec94514418e4741d464..149a59ecd299f5f2255f93fb9861512658a26a45 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -64,8 +64,8 @@ static void smc_close_stream_wait(struct smc_sock *smc, long timeout)

 		rc = sk_wait_event(sk, &timeout,
 				   !smc_tx_prepared_sends(&smc->conn) ||
-				   sk->sk_err == ECONNABORTED ||
-				   sk->sk_err == ECONNRESET ||
+				   READ_ONCE(sk->sk_err) == ECONNABORTED ||
+				   READ_ONCE(sk->sk_err) == ECONNRESET ||
 				   smc->conn.killed,
 				   &wait);
 		if (rc)
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index 7f7e983e42b1f44a3d8089865c8c82b51fa52de4..3757aff6c2f0044df3d400c79d252d22782a5a8b 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -203,9 +203,9 @@ int smc_rx_wait(struct smc_sock *smc, long *timeo,
 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 	add_wait_queue(sk_sleep(sk), &wait);
 	rc = sk_wait_event(sk, timeo,
-			   sk->sk_err ||
+			   READ_ONCE(sk->sk_err) ||
 			   cflags->peer_conn_abort ||
-			   sk->sk_shutdown & RCV_SHUTDOWN ||
+			   READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN ||
 			   conn->killed ||
 			   fcrit(conn),
 			   &wait);
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 52ef1fca0b604ce49d7d93da88c765748b14ae08..2429f9fc7e0e70baf90e1a0384c802d54a263365 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -110,8 +110,8 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
 			break; /* at least 1 byte of free & no urgent data */
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 		sk_wait_event(sk, &timeo,
-			      sk->sk_err ||
-			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
+			      READ_ONCE(sk->sk_err) ||
+			      (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) ||
 			      smc_cdc_rxed_any_close(conn) ||
 			      (atomic_read(&conn->sndbuf_space) &&
 			       !conn->urg_tx_pend),
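
Note: in the af_netlink.c hunks above, cb_running is only ever flipped under nlk->cb_mutex, but netlink_recvmsg() and the procfs seq_show read it without that mutex; pairing WRITE_ONCE() with READ_ONCE() documents the lockless readers. The shape of the pattern, with a hypothetical structure (struct dumper is illustrative, not from af_netlink.c):

    /* Sketch only: serialized writer, lockless reader. */
    #include <linux/mutex.h>

    struct dumper {
    	struct mutex	lock;
    	bool		running;
    };

    static void dumper_set_running(struct dumper *d, bool on)
    {
    	mutex_lock(&d->lock);
    	WRITE_ONCE(d->running, on);	/* all writers hold d->lock */
    	mutex_unlock(&d->lock);
    }

    static bool dumper_is_running(const struct dumper *d)
    {
    	return READ_ONCE(d->running);	/* readers take no lock */
    }

The af_packet.c hunk is different in kind: vlan_get_protocol_and_depth() only reports a non-zero depth after pulling the parsed VLAN headers into the linear area, so the caller no longer sets the network header from an unvalidated offset. The smc hunks apply the same _ONCE annotations to sk_err and sk_shutdown inside sk_wait_event() conditions.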
diff --git a/net/socket.c b/net/socket.c
index 8657112a687a4a80205aff2b1d914e9a1a058881..84223419da8620520c2390d2a4b9e6ccedc55f07 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2764,7 +2764,7 @@ static int do_recvmmsg(int fd, struct mmsghdr __user *mmsg,
 		 * error to return on the next call or if the
 		 * app asks about it using getsockopt(SO_ERROR).
 		 */
-		sock->sk->sk_err = -err;
+		WRITE_ONCE(sock->sk->sk_err, -err);
 	}
 out_put:
 	fput_light(sock->file, fput_needed);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 8f3c9fbb991651ba7e0a6e19b6c7214f7e56aaf9..7cf9b40b5c73ba3dc0e206f16a9d050a715ee4a0 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -300,9 +300,9 @@ static void tsk_rej_rx_queue(struct sock *sk, int error)
 		tipc_sk_respond(sk, skb, error);
 }

-static bool tipc_sk_connected(struct sock *sk)
+static bool tipc_sk_connected(const struct sock *sk)
 {
-	return sk->sk_state == TIPC_ESTABLISHED;
+	return READ_ONCE(sk->sk_state) == TIPC_ESTABLISHED;
 }

 /* tipc_sk_type_connectionless - check if the socket is datagram socket
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 54863e68f30405a9d1958bb3105b307cd859a257..7ee3c8b03a39ef849052f18957009e87ca199d65 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -92,7 +92,8 @@ int wait_on_pending_writer(struct sock *sk, long *timeo)
 			break;
 		}

-		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
+		if (sk_wait_event(sk, timeo,
+				  !READ_ONCE(sk->sk_write_pending), &wait))
 			break;
 	}
 	remove_wait_queue(sk_sleep(sk), &wait);
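
Note: the socket.c, tipc and tls hunks above continue the same annotation work. In do_recvmmsg(), `err` holds a negative errno and sk_err is kept positive, hence the store of -err; tipc_sk_connected() additionally becomes const-correct, documenting that the helper only reads the socket. A sketch of that accessor style (conn_is_established() is an illustrative name, not a kernel symbol):

    /* Sketch only: a read-only, lockless state predicate in the
     * style of the tipc change above. */
    static bool conn_is_established(const struct sock *sk)
    {
    	/* const parameter: accidental writes now fail to compile */
    	return READ_ONCE(sk->sk_state) == TIPC_ESTABLISHED;
    }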
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 45aa3801e320b23be8ae3cf2038be9e14360cbb5..d60313daf7ef9e49067d901ea63751195204c4d8 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -529,7 +529,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
 	/* Clear state */
 	unix_state_lock(sk);
 	sock_orphan(sk);
-	sk->sk_shutdown = SHUTDOWN_MASK;
+	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
 	path = u->path;
 	u->path.dentry = NULL;
 	u->path.mnt = NULL;
@@ -547,7 +547,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
 		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
 			unix_state_lock(skpair);
 			/* No more writes */
-			skpair->sk_shutdown = SHUTDOWN_MASK;
+			WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
 			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
 				skpair->sk_err = ECONNRESET;
 			unix_state_unlock(skpair);
@@ -1236,7 +1236,7 @@ static long unix_wait_for_peer(struct sock *other, long timeo)

 	sched = !sock_flag(other, SOCK_DEAD) &&
 		!(other->sk_shutdown & RCV_SHUTDOWN) &&
-		unix_recvq_full(other);
+		unix_recvq_full_lockless(other);

 	unix_state_unlock(other);
@@ -2580,7 +2580,7 @@ static int unix_shutdown(struct socket *sock, int mode)
 	++mode;

 	unix_state_lock(sk);
-	sk->sk_shutdown |= mode;
+	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | mode);
 	other = unix_peer(sk);
 	if (other)
 		sock_hold(other);
@@ -2597,7 +2597,7 @@ static int unix_shutdown(struct socket *sock, int mode)
 		if (mode&SEND_SHUTDOWN)
 			peer_mode |= RCV_SHUTDOWN;
 		unix_state_lock(other);
-		other->sk_shutdown |= peer_mode;
+		WRITE_ONCE(other->sk_shutdown, other->sk_shutdown | peer_mode);
 		unix_state_unlock(other);
 		other->sk_state_change(other);
 		if (peer_mode == SHUTDOWN_MASK)
@@ -2716,16 +2716,18 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
 	struct sock *sk = sock->sk;
 	__poll_t mask;
+	u8 shutdown;

 	sock_poll_wait(file, sock, wait);
 	mask = 0;
+	shutdown = READ_ONCE(sk->sk_shutdown);

 	/* exceptional events? */
 	if (sk->sk_err)
 		mask |= EPOLLERR;
-	if (sk->sk_shutdown == SHUTDOWN_MASK)
+	if (shutdown == SHUTDOWN_MASK)
 		mask |= EPOLLHUP;
-	if (sk->sk_shutdown & RCV_SHUTDOWN)
+	if (shutdown & RCV_SHUTDOWN)
 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;

 	/* readable? */
@@ -2753,18 +2755,20 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
 	struct sock *sk = sock->sk, *other;
 	unsigned int writable;
 	__poll_t mask;
+	u8 shutdown;

 	sock_poll_wait(file, sock, wait);
 	mask = 0;
+	shutdown = READ_ONCE(sk->sk_shutdown);

 	/* exceptional events? */
 	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
 		mask |= EPOLLERR |
 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

-	if (sk->sk_shutdown & RCV_SHUTDOWN)
+	if (shutdown & RCV_SHUTDOWN)
 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
-	if (sk->sk_shutdown == SHUTDOWN_MASK)
+	if (shutdown == SHUTDOWN_MASK)
 		mask |= EPOLLHUP;

 	/* readable? */
diff --git a/samples/bpf/hbm.c b/samples/bpf/hbm.c
index ff4c533dfac2981c1f0f965739b6b8d65f1dea47..8e48489b96ae96bc5573fcbec64e8c1ae10014c8 100644
--- a/samples/bpf/hbm.c
+++ b/samples/bpf/hbm.c
@@ -308,6 +308,7 @@ static int run_bpf_prog(char *prog, int cg_id)
 		fout = fopen(fname, "w");
 		fprintf(fout, "id:%d\n", cg_id);
 		fprintf(fout, "ERROR: Could not lookup queue_stats\n");
+		fclose(fout);
 	} else if (stats_flag && qstats.lastPacketTime >
 		   qstats.firstPacketTime) {
 		long long delta_us = (qstats.lastPacketTime -
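
Note: the af_unix.c poll hunks above snapshot sk_shutdown once into a local `shutdown`, so the EPOLLHUP and EPOLLRDHUP tests cannot observe two different values of a concurrently updated field, and the samples/bpf/hbm.c hunk plugs a FILE leak: the error branch opened the per-cgroup output file but never closed it. Every fopen() needs a matching fclose() on every path; a self-contained userspace illustration (report_error() and the NULL check are illustrative additions, not part of the sample):

    /* Sketch only: the leak pattern fixed in hbm.c above. */
    #include <stdio.h>

    static void report_error(const char *fname, int cg_id)
    {
    	FILE *fout = fopen(fname, "w");

    	if (!fout)
    		return;
    	fprintf(fout, "id:%d\n", cg_id);
    	fprintf(fout, "ERROR: Could not lookup queue_stats\n");
    	fclose(fout);	/* previously missing on this path */
    }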