diff --git a/Makefile b/Makefile
index b7ac9cce8bfd6d929fab6e3729a1ab05b2922fba..6343e149148dafffd1623cdf52bdca2280193eaf 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 189
+SUBLEVEL = 190
 EXTRAVERSION =
 NAME = Dare mighty things
diff --git a/README.OpenSource b/README.OpenSource
index ad1abbaebdb4809caf62a1ed66d3cb2c257aec1c..67edd0a7fdb7b7f0c92f8fc3b07f408f41a2f6f5 100644
--- a/README.OpenSource
+++ b/README.OpenSource
@@ -3,7 +3,7 @@
     "Name": "linux-5.10",
     "License": "GPL-2.0+",
     "License File": "COPYING",
-    "Version Number": "5.10.189",
+    "Version Number": "5.10.190",
     "Owner": "liuyu82@huawei.com",
     "Upstream URL": "https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/log/?h=linux-5.10.y",
     "Description": "linux kernel 5.10"
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 7858ae5d39df7627c9e40dff4853b80e7e79a2ac..7d81992dfafc6a35f64243e0c7113c7d2633a38a 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -45,6 +45,10 @@ aliases {
 		spi1 = &ecspi2;
 		spi2 = &ecspi3;
 		spi3 = &ecspi4;
+		usb0 = &usbotg;
+		usb1 = &usbh1;
+		usb2 = &usbh2;
+		usb3 = &usbh3;
 		usbphy0 = &usbphy1;
 		usbphy1 = &usbphy2;
 	};
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index c184a6d5bc4206e97c04eef6601a973d2560fa0b..5b4dfc62030e8f1376052390f97f8e6e18f97c97 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -39,6 +39,9 @@ aliases {
 		spi1 = &ecspi2;
 		spi2 = &ecspi3;
 		spi3 = &ecspi4;
+		usb0 = &usbotg1;
+		usb1 = &usbotg2;
+		usb2 = &usbh;
 		usbphy0 = &usbphy1;
 		usbphy1 = &usbphy2;
 	};
diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi
index bf5b262b91f9195247324d913e1dff0733487868..3659fd5ecfa620e665edf570dd9d2b8c0f24bc09 100644
--- a/arch/arm/boot/dts/imx6sll.dtsi
+++ b/arch/arm/boot/dts/imx6sll.dtsi
@@ -36,6 +36,8 @@ aliases {
 		spi1 = &ecspi2;
 		spi3 = &ecspi3;
 		spi4 = &ecspi4;
+		usb0 = &usbotg1;
+		usb1 = &usbotg2;
 		usbphy0 = &usbphy1;
 		usbphy1 = &usbphy2;
 	};
@@ -49,20 +51,18 @@ cpu0: cpu@0 {
 			device_type = "cpu";
 			reg = <0>;
 			next-level-cache = <&L2>;
-			operating-points = <
+			operating-points =
 				/* kHz  uV */
-				996000 1275000
-				792000 1175000
-				396000 1075000
-				198000  975000
-			>;
-			fsl,soc-operating-points = <
+				<996000 1275000>,
+				<792000 1175000>,
+				<396000 1075000>,
+				<198000  975000>;
+			fsl,soc-operating-points =
 				/* ARM kHz  SOC-PU uV */
-				996000 1175000
-				792000 1175000
-				396000 1175000
-				198000 1175000
-			>;
+				<996000 1175000>,
+				<792000 1175000>,
+				<396000 1175000>,
+				<198000 1175000>;
 			clock-latency = <61036>; /* two CLK32 periods */
 			#cooling-cells = <2>;
 			clocks = <&clks IMX6SLL_CLK_ARM>,
@@ -552,7 +552,7 @@ usbphy2: usb-phy@20ca000 {
 			reg = <0x020ca000 0x1000>;
 			interrupts = ;
 			clocks = <&clks IMX6SLL_CLK_USBPHY2>;
-			phy-reg_3p0-supply = <&reg_3p0>;
+			phy-3p0-supply = <&reg_3p0>;
 			fsl,anatop = <&anatop>;
 		};
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index c399919943c3488173b47aa26768407eee5b7c86..629c6a7432d9b61f942076eead0a81e70a913fd2 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -49,6 +49,9 @@ aliases {
 		spi2 = &ecspi3;
 		spi3 = &ecspi4;
 		spi4 = &ecspi5;
+		usb0 = &usbotg1;
+		usb1 = &usbotg2;
+		usb2 = &usbh;
 		usbphy0 = &usbphy1;
 		usbphy1 = &usbphy2;
 	};
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
index c40684ad11b8e4d69108fbddb32793a83fdd6296..b40dd0c198028f33c93f44dc6c875bc7c4e3e4c3 100644
--- a/arch/arm/boot/dts/imx6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul.dtsi
@@ -47,6 +47,8 @@ aliases {
 		spi1 = &ecspi2;
 		spi2 = &ecspi3;
 		spi3 = &ecspi4;
+		usb0 = &usbotg1;
+		usb1 = &usbotg2;
 		usbphy0 = &usbphy1;
 		usbphy1 = &usbphy2;
 	};
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index cff875b80b60e241c3b2974bcaa8d5b3225dd217..b0bcfa9094a308093508f36283da16e1a9368d85 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -7,6 +7,12 @@
 #include <dt-bindings/reset/imx7-reset.h>
 
 / {
+	aliases {
+		usb0 = &usbotg1;
+		usb1 = &usbotg2;
+		usb2 = &usbh;
+	};
+
 	cpus {
 		cpu0: cpu@0 {
 			clock-frequency = <996000000>;
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
index 43b39ad9ddceeded13edc3b2c7e6d51f940a1c39..334e781663cc25ccd335f5bc958b093d568e2966 100644
--- a/arch/arm/boot/dts/imx7s.dtsi
+++ b/arch/arm/boot/dts/imx7s.dtsi
@@ -47,6 +47,8 @@ aliases {
 		spi1 = &ecspi2;
 		spi2 = &ecspi3;
 		spi3 = &ecspi4;
+		usb0 = &usbotg1;
+		usb1 = &usbh;
 	};
 
 	cpus {
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
index 46e558ab7729b990fe98eef9df8091cede619f5f..f0e8af12442a4408080aa56a87e8ed6ae8087d1c 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
@@ -129,7 +129,7 @@ &i2c1 {
 	status = "okay";
 	clock-frequency = <100000>;
 	i2c-sda-falling-time-ns = <890>;  /* hcnt */
-	i2c-sdl-falling-time-ns = <890>;  /* lcnt */
+	i2c-scl-falling-time-ns = <890>;  /* lcnt */
 
 	adc@14 {
 		compatible = "lltc,ltc2497";
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
index f9b4a39683cf41cbc8938f216fc61c44916e3cd8..92ac3c86ebd56376d3e43733880ce6e04b6701e5 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts
@@ -162,7 +162,7 @@ &i2c2 {
 	status = "okay";
 	clock-frequency = <100000>;
 	i2c-sda-falling-time-ns = <890>;  /* hcnt */
-	i2c-sdl-falling-time-ns = <890>;  /* lcnt */
+	i2c-scl-falling-time-ns = <890>;  /* lcnt */
 
 	adc@14 {
 		compatible = "lltc,ltc2497";
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index b76cd49d521b9d45dfa442c142e10648ea69b313..db040f34c0046f12cca54799016270d5282a3a3c 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -313,8 +313,7 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
 	start = ALIGN_DOWN(start, page_size);
 	if (altmap) {
 		alt_start = altmap->base_pfn;
-		alt_end = altmap->base_pfn + altmap->reserve +
-			  altmap->free + altmap->alloc + altmap->align;
+		alt_end = altmap->base_pfn + altmap->reserve + altmap->free;
 	}
 
 	pr_debug("vmemmap_free %lx...%lx\n", start, end);
diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
index aff6c33ab0c0837488f687c729e3f56fc38e2f5f..4c58ee7f95ecfa65ae11de96eda76347e747fe56 100644
--- a/arch/riscv/include/asm/mmio.h
+++ b/arch/riscv/include/asm/mmio.h
@@ -101,9 +101,9 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
  * Relaxed I/O memory access primitives. These follow the Device memory
  * ordering rules but do not guarantee any ordering relative to Normal memory
  * accesses. These are defined to order the indicated access (either a read or
- * write) with all other I/O memory accesses. Since the platform specification
- * defines that all I/O regions are strongly ordered on channel 2, no explicit
- * fences are required to enforce this ordering.
+ * write) with all other I/O memory accesses to the same peripheral. Since the
+ * platform specification defines that all I/O regions are strongly ordered on
+ * channel 0, no explicit fences are required to enforce this ordering.
  */
 /* FIXME: These are now the same as asm-generic */
 #define __io_rbr()		do {} while (0)
@@ -125,14 +125,14 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 #endif
 
 /*
- * I/O memory access primitives. Reads are ordered relative to any
- * following Normal memory access. Writes are ordered relative to any prior
- * Normal memory access. The memory barriers here are necessary as RISC-V
+ * I/O memory access primitives. Reads are ordered relative to any following
+ * Normal memory read and delay() loop. Writes are ordered relative to any
+ * prior Normal memory write. The memory barriers here are necessary as RISC-V
  * doesn't define any ordering between the memory space and the I/O space.
  */
 #define __io_br()	do {} while (0)
-#define __io_ar(v)	__asm__ __volatile__ ("fence i,r" : : : "memory")
-#define __io_bw()	__asm__ __volatile__ ("fence w,o" : : : "memory")
+#define __io_ar(v)	({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
+#define __io_bw()	({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
 #define __io_aw()	mmiowb_set_pending()
 
 #define readb(c)	({ u8  __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 5876289e48d8994303ac9800a0566426bd9f0031..2b5e04cc45e414c57515a602edf400bfc095de0c 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -339,8 +339,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 
 	/* Round the lowest possible end address up to a PMD boundary. */
 	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
-	if (end >= TASK_SIZE_MAX)
-		end = TASK_SIZE_MAX;
+	if (end >= DEFAULT_MAP_WINDOW)
+		end = DEFAULT_MAP_WINDOW;
 	end -= len;
 
 	if (end > start) {
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 1c55903cf65c77d36b607ccd7c08853feaec4209..cc71ed3fff349896095fb999e96a3a7d26c59a6f 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -452,4 +452,5 @@
 /* BUG word 2 */
 #define X86_BUG_SRSO			X86_BUG(1*32 + 0) /* AMD SRSO bug */
+#define X86_BUG_DIV0			X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 6a9de5b1fe458d06ea70e0bb2f8f7f7b39217fe5..d7e017b0b4c3b10902dacac052685488639780d6 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -809,10 +809,12 @@ DECLARE_PER_CPU(u64, msr_misc_features_shadow);
 extern u16 amd_get_nb_id(int cpu);
 extern u32 amd_get_nodes_per_socket(void);
 extern bool cpu_has_ibpb_brtype_microcode(void);
+extern void amd_clear_divider(void);
 #else
 static inline u16 amd_get_nb_id(int cpu)		{ return 0; }
 static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
 static inline bool cpu_has_ibpb_brtype_microcode(void)	{ return false; }
+static inline void amd_clear_divider(void)		{ }
 #endif
 
 static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
@@ -862,4 +864,6 @@ enum mds_mitigations {
 	MDS_MITIGATION_VMWERV,
 };
 
+extern bool gds_ucode_mitigated(void);
+
 #endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index ffecf4e9444ea82df2bde7e5e3e3cb24b7de2695..7f351093cd947810b9c4eaabc022923d001b0fc4 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -74,8 +74,13 @@ static const int amd_erratum_1054[] =
 static const int amd_zenbleed[] =
 	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
 			   AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
+			   AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf),
 			   AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
 
+static const int amd_div0[] =
+	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf),
+			   AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf));
+
 static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 {
 	int osvw_id = *erratum++;
@@ -1168,6 +1173,11 @@ static void init_amd(struct cpuinfo_x86 *c)
 	check_null_seg_clears_base(c);
 
 	zenbleed_check(c);
+
+	if (cpu_has_amd_erratum(c, amd_div0)) {
+		pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
+		setup_force_cpu_bug(X86_BUG_DIV0);
+	}
 }
 
 #ifdef CONFIG_X86_32
@@ -1312,3 +1322,13 @@ void amd_check_microcode(void)
 {
 	on_each_cpu(zenbleed_check_cpu, NULL, 1);
 }
+
+/*
+ * Issue a DIV 0/1 insn to clear any division data from previous DIV
+ * operations.
+ */
+void noinstr amd_clear_divider(void)
+{
+	asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
+		     :: "a" (0), "d" (0), "r" (1));
+}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c1f23603091204661f27568825344f950de9fc8e..c1ff75ad1135822d411e161b214a00e22a0d914f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -472,8 +472,6 @@ static bool pku_disabled;
 
 static __always_inline void setup_pku(struct cpuinfo_x86 *c)
 {
-	struct pkru_state *pk;
-
 	/* check the boot processor, plus compile options for PKU: */
 	if (!cpu_feature_enabled(X86_FEATURE_PKU))
 		return;
@@ -484,9 +482,6 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
 		return;
 
 	cr4_set_bits(X86_CR4_PKE);
-	pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
-	if (pk)
-		pk->pkru = init_pkru_value;
 	/*
 	 * Seting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
 	 * cpuid bit to be set.  We need to ensure that we
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 98838b784524e7710d091a367c902c89feee09b1..28f5cc0a9decbe98fbcf6654da7cf71dd8cc7c15 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -198,6 +198,8 @@ DEFINE_IDTENTRY(exc_divide_error)
 {
 	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
 		      FPE_INTDIV, error_get_trap_addr(regs));
+
+	amd_clear_divider();
 }
 
 DEFINE_IDTENTRY(exc_overflow)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 4955cb5cc001686cde9e4c745d39ac36b176ce3f..72ba175cb9d4cec32cb1ebe98095b0f6398054a1 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -524,11 +524,17 @@ INIT_PER_CPU(irq_stack_backing_store);
 
 #ifdef CONFIG_CPU_SRSO
 /*
- * GNU ld cannot do XOR so do: (A | B) - (A & B) in order to compute the XOR
+ * GNU ld cannot do XOR until 2.41.
+ * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
+ *
+ * LLVM lld cannot do XOR until lld-17.
+ * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb
+ *
+ * Instead do: (A | B) - (A & B) in order to compute the XOR
  * of the two function addresses:
  */
-. = ASSERT(((srso_untrain_ret_alias | srso_safe_ret_alias) -
-		(srso_untrain_ret_alias & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) -
+		(ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
 		"SRSO function pair won't alias");
 #endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cf47392005663f8386f81e410c022e9c546076c8..9d3015863e5817b8aa12ac4ef9a62e0f102bb121 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -255,8 +255,6 @@ static struct kmem_cache *x86_fpu_cache;
 
 static struct kmem_cache *x86_emulator_cache;
 
-extern bool gds_ucode_mitigated(void);
-
 /*
  * When called, it means the previous get/set msr reached an invalid msr.
  * Return true if we want to ignore/silent this failed msr access.
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index 8873ed1438a978ec9b7e959556bbc1450161c5ba..379c396127935fb97747cc2624f6ccac6dd2b3da 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -10,7 +10,6 @@
 
 #include <asm/cpufeature.h>		/* boot_cpu_has, ... */
 #include <asm/mmu_context.h>		/* vma_pkey() */
-#include <asm/fpu/internal.h>		/* init_fpstate */
 
 int __execute_only_pkey(struct mm_struct *mm)
 {
@@ -154,7 +153,6 @@ static ssize_t init_pkru_read_file(struct file *file, char __user *user_buf,
 static ssize_t init_pkru_write_file(struct file *file,
 		 const char __user *user_buf, size_t count, loff_t *ppos)
 {
-	struct pkru_state *pk;
 	char buf[32];
 	ssize_t len;
 	u32 new_init_pkru;
@@ -177,10 +175,6 @@ static ssize_t init_pkru_write_file(struct file *file,
 		return -EINVAL;
 
 	WRITE_ONCE(init_pkru_value, new_init_pkru);
-	pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
-	if (!pk)
-		return -EINVAL;
-	pk->pkru = new_init_pkru;
 	return count;
 }
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a38959e912aef716203e9c1b21f46dd49f80d6bf..10eea3463dff3a10bb58a1ea20c7b9ddfd8aba76 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -6867,6 +6867,7 @@ static int __init binder_init(void)
 
 err_alloc_device_names_failed:
 	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
+	binder_alloc_shrinker_exit();
 
 	return ret;
 }
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 3c93e6c05c4d88b74424921b5273aa4310d998df..61406bfa454bdca6936e46ffd247f57483d15a1c 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1090,6 +1090,12 @@ int binder_alloc_shrinker_init(void)
 	return ret;
 }
 
+void binder_alloc_shrinker_exit(void)
+{
+	unregister_shrinker(&binder_shrinker);
+	list_lru_destroy(&binder_alloc_lru);
+}
+
 /**
  * check_buffer() - verify that buffer/offset is safe to access
  * @alloc: binder_alloc for this proc
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 6e8e001381af4ba2c2f73a2ab7bbca1feff5397f..f6052c97bce522a549855826a78b99fc29f3ec75 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -125,6 +125,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 						  int pid);
 extern void binder_alloc_init(struct binder_alloc *alloc);
 extern int binder_alloc_shrinker_init(void);
+extern void binder_alloc_shrinker_exit(void);
 extern void binder_alloc_vma_close(struct binder_alloc *alloc);
 extern struct binder_buffer *
 binder_alloc_prepare_to_free(struct binder_alloc *alloc,
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 54292cdd7808bda4d14280c271b37f943c8d21c2..922ed457db191249f2c0223a822bc76e27f16d22 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -25,8 +25,11 @@ extern u64 pm_runtime_active_time(struct device *dev);
 
 #define WAKE_IRQ_DEDICATED_ALLOCATED	BIT(0)
 #define WAKE_IRQ_DEDICATED_MANAGED	BIT(1)
+#define WAKE_IRQ_DEDICATED_REVERSE	BIT(2)
 #define WAKE_IRQ_DEDICATED_MASK		(WAKE_IRQ_DEDICATED_ALLOCATED | \
-					 WAKE_IRQ_DEDICATED_MANAGED)
+					 WAKE_IRQ_DEDICATED_MANAGED | \
+					 WAKE_IRQ_DEDICATED_REVERSE)
+#define WAKE_IRQ_DEDICATED_ENABLED	BIT(3)
 
 struct wake_irq {
 	struct device *dev;
@@ -39,7 +42,8 @@ extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
 extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
 extern void dev_pm_enable_wake_irq_check(struct device *dev,
 					 bool can_change_status);
-extern void dev_pm_disable_wake_irq_check(struct device *dev);
+extern void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable);
+extern void dev_pm_enable_wake_irq_complete(struct device *dev);
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 360094692d29e3b94d744ca641aff4f66989d0b2..fbbc3ed143f27d337944dfe1736df32e236501aa 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -675,6 +675,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	if (retval)
 		goto fail;
 
+	dev_pm_enable_wake_irq_complete(dev);
+
  no_callback:
 	__update_runtime_status(dev, RPM_SUSPENDED);
 	pm_runtime_deactivate_timer(dev);
@@ -720,7 +722,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	return retval;
 
  fail:
-	dev_pm_disable_wake_irq_check(dev);
+	dev_pm_disable_wake_irq_check(dev, true);
 	__update_runtime_status(dev, RPM_ACTIVE);
 	dev->power.deferred_resume = false;
 	wake_up_all(&dev->power.wait_queue);
@@ -903,7 +905,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
 
 	callback = RPM_GET_CALLBACK(dev, runtime_resume);
 
-	dev_pm_disable_wake_irq_check(dev);
+	dev_pm_disable_wake_irq_check(dev, false);
 	retval = rpm_callback(callback, dev);
 	if (retval) {
 		__update_runtime_status(dev, RPM_SUSPENDED);
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 8e021082dba8ce0a3b43c16353e18670bc9fb704..aea690c64e3946d0cf405331ce966ca78ed94ce9 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -145,24 +145,7 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
 	return IRQ_HANDLED;
 }
 
-/**
- * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
- * @dev: Device entry
- * @irq: Device wake-up interrupt
- *
- * Unless your hardware has separate wake-up interrupts in addition
- * to the device IO interrupts, you don't need this.
- *
- * Sets up a threaded interrupt handler for a device that has
- * a dedicated wake-up interrupt in addition to the device IO
- * interrupt.
- *
- * The interrupt starts disabled, and needs to be managed for
- * the device by the bus code or the device driver using
- * dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq()
- * functions.
- */
-int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag)
 {
 	struct wake_irq *wirq;
 	int err;
@@ -200,7 +183,7 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
 	if (err)
 		goto err_free_irq;
 
-	wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;
+	wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag;
 
 	return err;
 
@@ -213,8 +196,57 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
 
 	return err;
 }
+
+
+/**
+ * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
+ * @dev: Device entry
+ * @irq: Device wake-up interrupt
+ *
+ * Unless your hardware has separate wake-up interrupts in addition
+ * to the device IO interrupts, you don't need this.
+ *
+ * Sets up a threaded interrupt handler for a device that has
+ * a dedicated wake-up interrupt in addition to the device IO
+ * interrupt.
+ *
+ * The interrupt starts disabled, and needs to be managed for
+ * the device by the bus code or the device driver using
+ * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
+ * functions.
+ */
+int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+{
+	return __dev_pm_set_dedicated_wake_irq(dev, irq, 0);
+}
 EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
 
+/**
+ * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt
+ *					   with reverse enable ordering
+ * @dev: Device entry
+ * @irq: Device wake-up interrupt
+ *
+ * Unless your hardware has separate wake-up interrupts in addition
+ * to the device IO interrupts, you don't need this.
+ *
+ * Sets up a threaded interrupt handler for a device that has a dedicated
+ * wake-up interrupt in addition to the device IO interrupt. It sets
+ * the status of WAKE_IRQ_DEDICATED_REVERSE to tell rpm_suspend()
+ * to enable dedicated wake-up interrupt after running the runtime suspend
+ * callback for @dev.
+ *
+ * The interrupt starts disabled, and needs to be managed for
+ * the device by the bus code or the device driver using
+ * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
+ * functions.
+ */
+int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
+{
+	return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE);
+}
+EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);
+
 /**
  * dev_pm_enable_wake_irq - Enable device wake-up interrupt
  * @dev: Device
@@ -285,25 +317,56 @@ void dev_pm_enable_wake_irq_check(struct device *dev,
 		return;
 
 enable:
-	enable_irq(wirq->irq);
+	if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) {
+		enable_irq(wirq->irq);
+		wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
+	}
 }
 
 /**
  * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
  * @dev: Device
+ * @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE
  *
  * Disables wake-up interrupt conditionally based on status.
 * Should be only called from rpm_suspend() and rpm_resume() path.
 */
-void dev_pm_disable_wake_irq_check(struct device *dev)
+void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
 {
 	struct wake_irq *wirq = dev->power.wakeirq;
 
 	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
 		return;
 
-	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
+	if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
+		return;
+
+	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) {
+		wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED;
 		disable_irq_nosync(wirq->irq);
+	}
+}
+
+/**
+ * dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before
+ * @dev: Device using the wake IRQ
+ *
+ * Enable wake IRQ conditionally based on status, mainly used if want to
+ * enable wake IRQ after running ->runtime_suspend() which depends on
+ * WAKE_IRQ_DEDICATED_REVERSE.
+ *
+ * Should be only called from rpm_suspend() path.
+ */
+void dev_pm_enable_wake_irq_complete(struct device *dev)
+{
+	struct wake_irq *wirq = dev->power.wakeirq;
+
+	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
+		return;
+
+	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
+	    wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
+		enable_irq(wirq->irq);
 }
 
 /**
@@ -320,7 +383,7 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)
 
 	if (device_may_wakeup(wirq->dev)) {
 		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
-		    !pm_runtime_status_suspended(wirq->dev))
+		    !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
 			enable_irq(wirq->irq);
 
 		enable_irq_wake(wirq->irq);
@@ -343,7 +406,7 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
 		disable_irq_wake(wirq->irq);
 
 		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
-		    !pm_runtime_status_suspended(wirq->dev))
+		    !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
 			disable_irq_nosync(wirq->irq);
 	}
 }
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c
index e12b754e6398d8cecf2ebaf6cd1444e737341694..60d3c5f09ad675f7a7fd3e2e7dd1242d949b2d34 100644
--- a/drivers/dma/mcf-edma.c
+++ b/drivers/dma/mcf-edma.c
@@ -191,7 +191,13 @@ static int mcf_edma_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
-	chans = pdata->dma_channels;
+	if (!pdata->dma_channels) {
+		dev_info(&pdev->dev, "setting default channel number to 64");
+		chans = 64;
+	} else {
+		chans = pdata->dma_channels;
+	}
+
 	len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
 	mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
 	if (!mcf_edma)
@@ -203,11 +209,6 @@ static int mcf_edma_probe(struct platform_device *pdev)
 	mcf_edma->drvdata = &mcf_data;
 	mcf_edma->big_endian = 1;
 
-	if (!mcf_edma->n_chans) {
-		dev_info(&pdev->dev, "setting default channel number to 64");
-		mcf_edma->n_chans = 64;
-	}
-
 	mutex_init(&mcf_edma->fsl_edma_mutex);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 6f697b3f2c1845c5ec2799c79db54118862df075..627aa9e9bc12a72394342af9d8c28320f2cf1ba6 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -403,6 +403,12 @@ enum desc_status {
 	 * of a channel can be BUSY at any time.
 	 */
 	BUSY,
+	/*
+	 * Pause was called while descriptor was BUSY. Due to hardware
+	 * limitations, only termination is possible for descriptors
+	 * that have been paused.
+	 */
+	PAUSED,
 	/*
 	 * Sitting on the channel work_list but xfer done
 	 * by PL330 core
@@ -2043,7 +2049,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch)
 	list_for_each_entry(desc, &pch->work_list, node) {
 
 		/* If already submitted */
-		if (desc->status == BUSY)
+		if (desc->status == BUSY || desc->status == PAUSED)
 			continue;
 
 		ret = pl330_submit_req(pch->thread, desc);
@@ -2328,6 +2334,7 @@ static int pl330_pause(struct dma_chan *chan)
 {
 	struct dma_pl330_chan *pch = to_pchan(chan);
 	struct pl330_dmac *pl330 = pch->dmac;
+	struct dma_pl330_desc *desc;
 	unsigned long flags;
 
 	pm_runtime_get_sync(pl330->ddma.dev);
@@ -2337,6 +2344,10 @@ static int pl330_pause(struct dma_chan *chan)
 	_stop(pch->thread);
 	spin_unlock(&pl330->lock);
 
+	list_for_each_entry(desc, &pch->work_list, node) {
+		if (desc->status == BUSY)
+			desc->status = PAUSED;
+	}
 	spin_unlock_irqrestore(&pch->lock, flags);
 	pm_runtime_mark_last_busy(pl330->ddma.dev);
 	pm_runtime_put_autosuspend(pl330->ddma.dev);
@@ -2427,7 +2438,7 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	else if (running && desc == running)
 		transferred =
 			pl330_get_current_xferred_count(pch, desc);
-	else if (desc->status == BUSY)
+	else if (desc->status == BUSY || desc->status == PAUSED)
 		/*
 		 * Busy but not running means either just enqueued,
 		 * or finished and not yet marked done
@@ -2444,6 +2455,9 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		case DONE:
 			ret = DMA_COMPLETE;
 			break;
+		case PAUSED:
+			ret = DMA_PAUSED;
+			break;
 		case PREP:
 		case BUSY:
 			ret = DMA_IN_PROGRESS;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 0eb881f2e0d61324e8bf8ab277f8cd2d99b48163..8e729ef85b13e3067a2eb785003fc03ca445c2bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -354,8 +354,11 @@ void dpp3_set_cursor_attributes(
 	int cur_rom_en = 0;
 
 	if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
-		color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA)
-		cur_rom_en = 1;
+		color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
+		if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
+			cur_rom_en = 1;
+		}
+	}
 
 	REG_UPDATE_3(CURSOR0_CONTROL,
 			CUR0_MODE, color_format,
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index b7bb5610dfe211e7bd04ab5a13d7518aa67b762d..e8f07305e279a6aa0b7e18920310ae45fbac6613 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -614,7 +614,13 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 	int ret;
 
 	if (obj->import_attach) {
+		/* Reset both vm_ops and vm_private_data, so we don't end up with
+		 * vm_ops pointing to our implementation if the dma-buf backend
+		 * doesn't set those fields.
+		 */
 		vma->vm_private_data = NULL;
+		vma->vm_ops = NULL;
+
 		ret = dma_buf_mmap(obj->dma_buf, vma, 0);
 
 		/* Drop the reference drm_gem_mmap_obj() acquired.*/
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index b8884272d65e03368257ec55e24bae3a4ccc7079..d2cefca120c4b3052c0c34d7666ab5631cf5e5d5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -947,7 +947,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
 	/* Determine display colour depth for everything except LVDS now,
 	 * DP requires this before mode_valid() is called.
 	 */
-	if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
+	if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
 		nouveau_connector_detect_depth(connector);
 
 	/* Find the native mode if this is a digital panel, if we didn't
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 32bbddc0993e8a455b2583161cb82bf2bd2d16ca..679aff79f4d6b002f27dce6fbcaf560d812d4fcc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -123,6 +123,7 @@ void gk104_grctx_generate_r418800(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gk110_grctx;
 void gk110_grctx_generate_r419eb0(struct gf100_gr *);
+void gk110_grctx_generate_r419f78(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gk110b_grctx;
 extern const struct gf100_grctx_func gk208_grctx;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index 304e9d268bad42e8a8be6831bb80cc78f5f6caad..f894f82548242fa97f085b7d12b42ca446372a91 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -916,7 +916,9 @@ static void
 gk104_grctx_generate_r419f78(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
-	nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
+
+	/* bit 3 set disables loads in fp helper invocations, we need it enabled */
+	nvkm_mask(device, 0x419f78, 0x00000009, 0x00000000);
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index 86547cfc38dce6583efd79205c543fae6385096b..e88740d4e54d446ef3f2b978cead12d05733c890 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -820,6 +820,15 @@ gk110_grctx_generate_r419eb0(struct gf100_gr *gr)
 	nvkm_mask(device, 0x419eb0, 0x00001000, 0x00001000);
 }
 
+void
+gk110_grctx_generate_r419f78(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	/* bit 3 set disables loads in fp helper invocations, we need it enabled */
+	nvkm_mask(device, 0x419f78, 0x00000008, 0x00000000);
+}
+
 const struct gf100_grctx_func
 gk110_grctx = {
 	.main = gf100_grctx_generate_main,
@@ -852,4 +861,5 @@ gk110_grctx = {
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 	.r418800 = gk104_grctx_generate_r418800,
 	.r419eb0 = gk110_grctx_generate_r419eb0,
+	.r419f78 = gk110_grctx_generate_r419f78,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index ebb947bd1446bfda0c9574ef98f40fd0cc483a2c..086e4d49e11211653ff2011ecd8f77671da1bc20 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -101,4 +101,5 @@ gk110b_grctx = {
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 	.r418800 = gk104_grctx_generate_r418800,
 	.r419eb0 = gk110_grctx_generate_r419eb0,
+	.r419f78 = gk110_grctx_generate_r419f78,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index 4d40512b5c998588b532f14c708210640e0e1eda..0bf438c3f7cbc9fe2cde323c44ae6a162d6081cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -566,4 +566,5 @@ gk208_grctx = {
 	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 	.r418800 = gk104_grctx_generate_r418800,
+	.r419f78 = gk110_grctx_generate_r419f78,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 0b3964e6b36e2b773e715f33ffd4cb5272161e63..acdf0932a99e187adbb0863f9acfb0b795255987 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -991,4 +991,5 @@ gm107_grctx = {
 	.r406500 = gm107_grctx_generate_r406500,
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 	.r419e00 = gm107_grctx_generate_r419e00,
+	.r419f78 = gk110_grctx_generate_r419f78,
 };
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index dca4dfdd332d32dbf107df2ea993780ec0ccdbb9..8fe3be20af62f9fbbf56c29371a4f08aa3ba3c57 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -674,7 +674,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 
 	if (bo->pin_count) {
 		*locked = false;
-		*busy = false;
+		if (busy)
+			*busy = false;
 		return false;
 	}
diff --git a/drivers/hwmon/pmbus/bel-pfe.c b/drivers/hwmon/pmbus/bel-pfe.c
index 2c5b853d6c7fc3da69913c2ca4c86b3df1d0f9d2..fc3d9a9159f2e5fd317302e11b37c149ea1f27fd 100644
--- a/drivers/hwmon/pmbus/bel-pfe.c
+++ b/drivers/hwmon/pmbus/bel-pfe.c
@@ -17,12 +17,13 @@
 enum chips {pfe1100, pfe3000};
 
 /*
- * Disable status check for pfe3000 devices, because some devices report
- * communication error (invalid command) for VOUT_MODE command (0x20)
- * although correct VOUT_MODE (0x16) is returned: it leads to incorrect
- * exponent in linear mode.
+ * Disable status check because some devices report communication error
+ * (invalid command) for VOUT_MODE command (0x20) although the correct
+ * VOUT_MODE (0x16) is returned: it leads to incorrect exponent in linear
+ * mode.
+ * This affects both pfe3000 and pfe1100.
  */
-static struct pmbus_platform_data pfe3000_plat_data = {
+static struct pmbus_platform_data pfe_plat_data = {
 	.flags = PMBUS_SKIP_STATUS_CHECK,
 };
 
@@ -94,16 +95,15 @@ static int pfe_pmbus_probe(struct i2c_client *client)
 	int model;
 
 	model = (int)i2c_match_id(pfe_device_id, client)->driver_data;
+	client->dev.platform_data = &pfe_plat_data;
 
 	/*
 	 * PFE3000-12-069RA devices may not stay in page 0 during device
 	 * probe which leads to probe failure (read status word failed).
 	 * So let's set the device to page 0 at the beginning.
 	 */
-	if (model == pfe3000) {
-		client->dev.platform_data = &pfe3000_plat_data;
+	if (model == pfe3000)
 		i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
-	}
 
 	return pmbus_do_probe(client, &pfe_driver_info[model]);
 }
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index e3f507771f17e6d38aa0c5f0dd0d4ac42ef56651..829077bcd09d6147f0879585719687988e5c6728 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -263,7 +263,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
 	platform_set_drvdata(pdev, indio_dev);
 
 	state->ec = ec->ec_dev;
-	state->msg = devm_kzalloc(&pdev->dev,
+	state->msg = devm_kzalloc(&pdev->dev, sizeof(*state->msg) +
 				max((u16)sizeof(struct ec_params_motion_sense),
 				state->ec->max_response), GFP_KERNEL);
 	if (!state->msg)
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 4b41f35668b2027ce841577ea10e768dcf2ef1e2..c74868f016379c062b40f549ebdd1f675ad397d5 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -12348,6 +12348,7 @@ static void free_cntrs(struct hfi1_devdata *dd)
 
 	if (dd->synth_stats_timer.function)
 		del_timer_sync(&dd->synth_stats_timer);
+	cancel_work_sync(&dd->update_cntr_work);
 	ppd = (struct hfi1_pportdata *)(dd + 1);
 	for (i = 0; i < dd->num_pports; i++, ppd++) {
 		kfree(ppd->cntrs);
diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h
index fa09d511a8eda23cfc65ac42c1a998659584d559..baf31258f5c900654565da9f9659bee9e900a412 100644
--- a/drivers/isdn/mISDN/dsp.h
+++ b/drivers/isdn/mISDN/dsp.h
@@ -247,7 +247,7 @@ extern void dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp);
 extern int dsp_cmx_conf(struct dsp *dsp, u32 conf_id);
 extern void dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb);
 extern void dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb);
-extern void dsp_cmx_send(void *arg);
+extern void dsp_cmx_send(struct timer_list *arg);
 extern void dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb);
 extern int dsp_cmx_del_conf_member(struct dsp *dsp);
 extern int dsp_cmx_del_conf(struct dsp_conf *conf);
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 6d2088fbaf69cc3f208114c05d17f7e815c80dec..1b73af5013976378270304311726b4b3b31a676f 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -1625,7 +1625,7 @@ static u16	dsp_count; /* last sample count */
 static int	dsp_count_valid; /* if we have last sample count */
 
 void
-dsp_cmx_send(void *arg)
+dsp_cmx_send(struct timer_list *arg)
 {
 	struct dsp_conf *conf;
 	struct dsp_conf_member *member;
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 038e72a84b3344eb0ff1c91936dfcbfb2f259fea..5b954012e39485afe1416d3ce06a3526b8979911 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -1200,7 +1200,7 @@ static int __init dsp_init(void)
 	}
 
 	/* set sample timer */
-	timer_setup(&dsp_spl_tl, (void *)dsp_cmx_send, 0);
+	timer_setup(&dsp_spl_tl, dsp_cmx_send, 0);
 	dsp_spl_tl.expires = jiffies + dsp_tics;
 	dsp_spl_jiffies = dsp_spl_tl.expires;
 	add_timer(&dsp_spl_tl);
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index fb96bb76eefbe3860f017eb5a1437ace0dcd47a6..c4baf8afbf8dc7d85e8fe086076f04e1905a418e 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -339,13 +339,7 @@ static void moxart_transfer_pio(struct moxart_host *host)
 			return;
 		}
 		for (len = 0; len < remain && len < host->fifo_width;) {
-			/* SCR data must be read in big endian. */
-			if (data->mrq->cmd->opcode == SD_APP_SEND_SCR)
-				*sgp = ioread32be(host->base +
-						  REG_DATA_WINDOW);
-			else
-				*sgp = ioread32(host->base +
-						REG_DATA_WINDOW);
+			*sgp = ioread32(host->base + REG_DATA_WINDOW);
 			sgp++;
 			len += 4;
 		}
diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
index d5813b9abc8e7092a39727a977a7dbb29c85095a..9f934466dd9756c24f1659f4d8f431c7d1600161 100644
--- a/drivers/mtd/nand/raw/fsl_upm.c
+++ b/drivers/mtd/nand/raw/fsl_upm.c
@@ -136,7 +136,7 @@ static int fun_exec_op(struct nand_chip *chip, const struct nand_operation *op,
 	unsigned int i;
 	int ret;
 
-	if (op->cs > NAND_MAX_CHIPS)
+	if (op->cs >= NAND_MAX_CHIPS)
 		return -EINVAL;
 
 	if (check_only)
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index ee3976b7e197e8a711f226a90733b6fd34e963f1..6bb0fca4a91d01ec3649758b6084a87fb7727880 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -1180,7 +1180,6 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
 	struct meson_nfc *nfc = nand_get_controller_data(nand);
 	struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
 	struct mtd_info *mtd = nand_to_mtd(nand);
-	int nsectors = mtd->writesize / 1024;
 	int ret;
 
 	if (!mtd->name) {
@@ -1198,7 +1197,7 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
 	nand->options |= NAND_NO_SUBPAGE_WRITE;
 
 	ret = nand_ecc_choose_conf(nand, nfc->data->ecc_caps,
-				   mtd->oobsize - 2 * nsectors);
+				   mtd->oobsize - 2);
 	if (ret) {
 		dev_err(nfc->dev, "failed to ECC init\n");
 		return -EINVAL;
diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
index 4b799521a427a6e337f9cde788c8d3826f29de7b..dad17fa0b514e2b41b486c63ce482021b3ab58d2 100644
--- a/drivers/mtd/nand/raw/omap_elm.c
+++ b/drivers/mtd/nand/raw/omap_elm.c
@@ -174,17 +174,17 @@ static void elm_load_syndrome(struct elm_info *info,
 			switch (info->bch_type) {
 			case BCH8_ECC:
 				/* syndrome fragment 0 = ecc[9-12B] */
-				val = cpu_to_be32(*(u32 *) &ecc[9]);
+				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[9]);
 				elm_write_reg(info, offset, val);
 
 				/* syndrome fragment 1 = ecc[5-8B] */
 				offset += 4;
-				val = cpu_to_be32(*(u32 *) &ecc[5]);
+				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[5]);
 				elm_write_reg(info, offset, val);
 
 				/* syndrome fragment 2 = ecc[1-4B] */
 				offset += 4;
-				val = cpu_to_be32(*(u32 *) &ecc[1]);
+				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[1]);
 				elm_write_reg(info, offset, val);
 
 				/* syndrome fragment 3 = ecc[0B] */
@@ -194,35 +194,35 @@ static void elm_load_syndrome(struct elm_info *info,
 				break;
 			case BCH4_ECC:
 				/* syndrome fragment 0 = ecc[20-52b] bits */
-				val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |
+				val = ((__force u32)cpu_to_be32(*(u32 *)&ecc[3]) >> 4) |
 					((ecc[2] & 0xf) << 28);
 				elm_write_reg(info, offset, val);
 
 				/* syndrome fragment 1 = ecc[0-20b] bits */
 				offset += 4;
-				val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
+				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 12;
 				elm_write_reg(info, offset, val);
 				break;
 			case BCH16_ECC:
-				val = cpu_to_be32(*(u32 *) &ecc[22]);
+				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[22]);
 				elm_write_reg(info, offset, val);
 				offset += 4;
-				val = cpu_to_be32(*(u32 *) &ecc[18]);
+				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[18]);
 				elm_write_reg(info, offset, val);
 				offset += 4;
-				val = cpu_to_be32(*(u32 *) &ecc[14]);
+				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[14]);
 				elm_write_reg(info, offset, val);
 				offset += 4;
-				val = cpu_to_be32(*(u32 *) &ecc[10]);
+				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[10]);
 				elm_write_reg(info, offset, val);
 				offset += 4;
-				val = cpu_to_be32(*(u32 *) &ecc[6]);
+				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[6]);
 				elm_write_reg(info, offset, val);
 				offset += 4;
-				val = cpu_to_be32(*(u32 *) &ecc[2]);
+				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[2]);
 				elm_write_reg(info, offset, val);
 				offset += 4;
-				val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16;
+				val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 16;
 				elm_write_reg(info, offset, val);
 				break;
 			default:
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index 6fe7bd2a94d2847e26bb4ee9d7f248117dbbcc8f..daa49c0603681a2210369716349bf9b64de1c14e 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -73,7 +73,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
 {
 	struct nand_device *nand = spinand_to_nand(spinand);
 	u8 mbf = 0;
-	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf);
+	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
 
 	switch (status & STATUS_ECC_MASK) {
 	case STATUS_ECC_NO_BITFLIPS:
@@ -92,7 +92,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
 		if (spi_mem_exec_op(spinand->spimem, &op))
 			return nanddev_get_ecc_requirements(nand)->strength;
 
-		mbf >>= 4;
+		mbf = *(spinand->scratchbuf) >> 4;
 
 		if (WARN_ON(mbf > nanddev_get_ecc_requirements(nand)->strength || !mbf))
 			return nanddev_get_ecc_requirements(nand)->strength;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a260740269e9f37ebadaa865c4a859efcf9f5599..bcb019121d8352e253aa6427457f09f5137e6dbd 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4920,7 +4920,9 @@ void bond_setup(struct net_device *bond_dev)
 
 	bond_dev->hw_features = BOND_VLAN_FEATURES |
 				NETIF_F_HW_VLAN_CTAG_RX |
-				NETIF_F_HW_VLAN_CTAG_FILTER;
+				NETIF_F_HW_VLAN_CTAG_FILTER |
+				NETIF_F_HW_VLAN_STAG_RX |
+				NETIF_F_HW_VLAN_STAG_FILTER;
 
 	bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
 #ifdef CONFIG_XFRM_OFFLOAD
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 1ec1709446bab2415a55a731e5b8c2cdf1b7ae47..47f8f66cf7ecdc538ca287631e2251f055fb6832 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -71,6 +71,8 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev);
 static void hclge_sync_mac_table(struct hclge_dev *hdev);
 static void hclge_restore_hw_table(struct hclge_dev *hdev);
 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
+static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
+				      int wait_cnt);
 
 static struct hnae3_ae_algo ae_algo;
 
@@ -6558,6 +6560,8 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
 
 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
 {
+#define HCLGE_LINK_STATUS_WAIT_CNT	3
+
 	struct hclge_desc desc;
 	struct hclge_config_mac_mode_cmd *req =
 		(struct hclge_config_mac_mode_cmd *)desc.data;
@@ -6582,9 +6586,15 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
 
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-	if (ret)
+	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"mac enable fail, ret =%d.\n", ret);
+		return;
+	}
+
+	if (!enable)
+		hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN,
+					   HCLGE_LINK_STATUS_WAIT_CNT);
 }
 
 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
@@ -6647,10 +6657,9 @@ static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
 }
 
-static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
+static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
+				      int wait_cnt)
 {
-#define HCLGE_MAC_LINK_STATUS_NUM  100
-
 	int link_status;
 	int i = 0;
 	int ret;
@@ -6663,13 +6672,15 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
 			return 0;
 
 		msleep(HCLGE_LINK_STATUS_MS);
-	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
+	} while (++i < wait_cnt);
 	return -EBUSY;
 }
 
 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
 					  bool is_phy)
 {
+#define HCLGE_MAC_LINK_STATUS_NUM  100
+
 	int link_ret;
 
 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
@@ -6677,7 +6688,8 @@ static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
 	if (is_phy)
 		hclge_phy_link_status_wait(hdev, link_ret);
 
-	return hclge_mac_link_status_wait(hdev, link_ret);
+	return hclge_mac_link_status_wait(hdev, link_ret,
+					  HCLGE_MAC_LINK_STATUS_NUM);
 }
 
 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 7fe2e47dc83dc8d7cdd1f9f3997fb20a09f6a226..84da6ccaf33950f48826c2952150e5f464b9b7de 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -929,12 +929,22 @@ static int ibmvnic_login(struct net_device *netdev)
 
 static void release_login_buffer(struct ibmvnic_adapter *adapter)
 {
+	if (!adapter->login_buf)
+		return;
+
+	dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token,
+			 adapter->login_buf_sz, DMA_TO_DEVICE);
 	kfree(adapter->login_buf);
 	adapter->login_buf = NULL;
 }
 
 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
 {
+	if (!adapter->login_rsp_buf)
+		return;
+
+	dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token,
+			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
 	kfree(adapter->login_rsp_buf);
 	adapter->login_rsp_buf = NULL;
 }
@@ -3861,11 +3871,14 @@ static int send_login(struct ibmvnic_adapter *adapter)
 	if (rc) {
 		adapter->login_pending = false;
 		netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
-		goto buf_rsp_map_failed;
+		goto buf_send_failed;
 	}
 
 	return 0;
 
+buf_send_failed:
+	dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
+			 DMA_FROM_DEVICE);
 buf_rsp_map_failed:
 	kfree(login_rsp_buffer);
 	adapter->login_rsp_buf = NULL;
@@ -4430,6 +4443,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
 	int num_tx_pools;
 	int num_rx_pools;
 	u64 *size_array;
+	u32 rsp_len;
 	int i;
 
 	/* CHECK: Test/set of login_pending does not need to be atomic
@@ -4441,11 +4455,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
 	}
 	adapter->login_pending = false;
 
-	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
-			 DMA_TO_DEVICE);
-	dma_unmap_single(dev, adapter->login_rsp_buf_token,
-			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
-
 	/* If the number of queues requested can't be allocated by the
 	 * server, the login response will return with code 1. We will need
 	 * to resend the login buffer with fewer queues requested.
@@ -4481,6 +4490,23 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
 		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
 		return -EIO;
 	}
+
+	rsp_len = be32_to_cpu(login_rsp->len);
+	if (be32_to_cpu(login->login_rsp_len) < rsp_len ||
+	    rsp_len <= be32_to_cpu(login_rsp->off_txsubm_subcrqs) ||
+	    rsp_len <= be32_to_cpu(login_rsp->off_rxadd_subcrqs) ||
+	    rsp_len <= be32_to_cpu(login_rsp->off_rxadd_buff_size) ||
+	    rsp_len <= be32_to_cpu(login_rsp->off_supp_tx_desc)) {
+		/* This can happen if a login request times out and there are
+		 * 2 outstanding login requests sent, the LOGIN_RSP crq
+		 * could have been for the older login request. So we are
+		 * parsing the newer response buffer which may be incomplete
+		 */
+		dev_err(dev, "FATAL: Login rsp offsets/lengths invalid\n");
+		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
+		return -EIO;
+	}
+
 	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
 			     be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
 	/* variable buffer sizes are not supported, so just read the
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 3094d20297a9758638f43252a3efc41505bddc68..ee9287d7d1d30ee842c24b9cf46724d3c51c3c9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -211,8 +211,7 @@ static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
 		host_total_vfs = MLX5_GET(query_esw_functions_out, out,
 					  host_params_context.host_total_vfs);
 		kvfree(out);
-		if (host_total_vfs)
-			return host_total_vfs;
+		return host_total_vfs;
 	}
 
 done:
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index ed601a7e46a0cdd24d9594caba1c879c352830c9..ac373510b486e2098cd9ce9949e6b55dd45a10fb 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -1115,8 +1115,6 @@ static struct phy_driver at803x_driver[] = {
 	.flags			= PHY_POLL_CABLE_TEST,
 	.config_init		= at803x_config_init,
 	.link_change_notify	= at803x_link_change_notify,
-	.set_wol		= at803x_set_wol,
-	.get_wol		= at803x_get_wol,
	.suspend		= at803x_suspend,
 	.resume			= at803x_resume,
 	/* PHY_BASIC_FEATURES */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 0e70877c932c7129ced008834cda3ed72d2dc347..191bf0df146619332f3a2d403fc0451ab0c5a47f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1604,7 +1604,7 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
 	if (zerocopy)
 		return false;
 
-	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
+	if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) +
 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
 		return false;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 935cd296887f27db12eb34146a090a6c71f5d079..9f3446d6dde7656a2bbc5167fa8fc68911540e6f 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -604,9 +604,23 @@ static const struct usb_device_id	products[] = {
 	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
			  | USB_DEVICE_ID_MATCH_DEVICE,
 	.idVendor		= 0x04DD,
+	.idProduct		= 0x8005,	/* A-300 */
+	ZAURUS_FAKE_INTERFACE,
+	.driver_info		= 0,
+}, {
+	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
+			  | USB_DEVICE_ID_MATCH_DEVICE,
+	.idVendor		= 0x04DD,
 	.idProduct		= 0x8006,	/* B-500/SL-5600 */
 	ZAURUS_MASTER_INTERFACE,
 	.driver_info		= 0,
+}, {
+	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
+			  | USB_DEVICE_ID_MATCH_DEVICE,
+	.idVendor		= 0x04DD,
+	.idProduct		= 0x8006,	/* B-500/SL-5600 */
+	ZAURUS_FAKE_INTERFACE,
+	.driver_info		= 0,
 }, {
 	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
 			  | USB_DEVICE_ID_MATCH_DEVICE,
@@ -614,6 +628,13 @@ static const struct usb_device_id	products[] = {
 	.idProduct		= 0x8007,	/* C-700 */
 	ZAURUS_MASTER_INTERFACE,
 	.driver_info		= 0,
+}, {
+	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
+			  | USB_DEVICE_ID_MATCH_DEVICE,
+	.idVendor		= 0x04DD,
+	.idProduct		= 0x8007,	/* C-700 */
+	ZAURUS_FAKE_INTERFACE,
+	.driver_info		= 0,
 }, {
 	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
 			  | USB_DEVICE_ID_MATCH_DEVICE,
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 43d70348343b2a79f18a45645484d2f17530c2ec..481a41d879b53e6802e47b4ece844864595f9698 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1738,6 +1738,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 	} else if (!info->in || !info->out)
 		status = usbnet_get_endpoints (dev, udev);
 	else {
+		u8 ep_addrs[3] = {
+			info->in + USB_DIR_IN, info->out + USB_DIR_OUT, 0
+		};
+
 		dev->in = usb_rcvbulkpipe (xdev, info->in);
 		dev->out = usb_sndbulkpipe (xdev, info->out);
 		if (!(info->flags & FLAG_NO_SETINT))
@@ -1747,6 +1751,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 		else
 			status = 0;
 
+		if (status == 0 && !usb_check_bulk_endpoints(udev, ep_addrs))
+			status = -EINVAL;
 	}
 	if (status >= 0 && dev->status)
 		status = init_status (dev, udev);
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 7984f2157d222dbe2971702221dac46b2a370721..df3617c4c44e8ff41b2736f5ac6188b02ca3f92e 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -289,9 +289,23 @@ static const struct usb_device_id	products [] = {
 	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
 			  | USB_DEVICE_ID_MATCH_DEVICE,
 	.idVendor		= 0x04DD,
+	.idProduct		= 0x8005,	/* A-300 */
+	ZAURUS_FAKE_INTERFACE,
+	.driver_info	= (unsigned long)&bogus_mdlm_info,
+}, {
+	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
+			  | USB_DEVICE_ID_MATCH_DEVICE,
+	.idVendor		= 0x04DD,
 	.idProduct		= 0x8006,	/* B-500/SL-5600 */
 	ZAURUS_MASTER_INTERFACE,
 	.driver_info		= ZAURUS_PXA_INFO,
+}, {
+	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
+			  | USB_DEVICE_ID_MATCH_DEVICE,
+	.idVendor		= 0x04DD,
+	.idProduct		= 0x8006,	/* B-500/SL-5600 */
+	ZAURUS_FAKE_INTERFACE,
+	.driver_info	= (unsigned long)&bogus_mdlm_info,
 }, {
 	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
 			  | USB_DEVICE_ID_MATCH_DEVICE,
@@ -299,6 +313,13 @@ static const struct usb_device_id	products [] = {
 	.idProduct		= 0x8007,	/* C-700 */
 	ZAURUS_MASTER_INTERFACE,
 	.driver_info		= ZAURUS_PXA_INFO,
+}, {
+	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
+			  | USB_DEVICE_ID_MATCH_DEVICE,
+	.idVendor		= 0x04DD,
+	.idProduct		= 0x8007,	/* C-700 */
+	ZAURUS_FAKE_INTERFACE,
+	.driver_info	= (unsigned long)&bogus_mdlm_info,
 }, {
 	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
 			  | USB_DEVICE_ID_MATCH_DEVICE,
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
index 5bf7822c53f18ab75f1b256a3e64ab14f161c825..0ba714ca5185cd94124bd121a49117b79b9898bc 100644
--- a/drivers/net/wireguard/allowedips.c
+++ b/drivers/net/wireguard/allowedips.c
@@ -6,7 +6,7 @@
 #include "allowedips.h"
 #include "peer.h"
 
-enum { MAX_ALLOWEDIPS_BITS = 128 };
+enum { MAX_ALLOWEDIPS_DEPTH = 129 };
 
 static struct kmem_cache *node_cache;
 
@@ -42,7 +42,7 @@ static void push_rcu(struct allowedips_node **stack,
 		     struct allowedips_node __rcu *p, unsigned int *len)
 {
 	if (rcu_access_pointer(p)) {
-		if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_BITS))
+		if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_DEPTH))
 			return;
 		stack[(*len)++] = rcu_dereference_raw(p);
 	}
@@ -55,7 +55,7 @@ static void node_free_rcu(struct rcu_head *rcu) static void root_free_rcu(struct rcu_head *rcu) { - struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = { + struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = { container_of(rcu, struct allowedips_node, rcu) }; unsigned int len = 1; @@ -68,7 +68,7 @@ static void root_free_rcu(struct rcu_head *rcu) static void root_remove_peer_lists(struct allowedips_node *root) { - struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = { root }; + struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = { root }; unsigned int len = 1; while (len > 0 && (node = stack[--len])) { diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c index 41db10f9be4985619b21a0922159ad91f87a803b..2c9eec24eec45a157cf4cf1179871c3577e27210 100644 --- a/drivers/net/wireguard/selftest/allowedips.c +++ b/drivers/net/wireguard/selftest/allowedips.c @@ -593,16 +593,20 @@ bool __init wg_allowedips_selftest(void) wg_allowedips_remove_by_peer(&t, a, &mutex); test_negative(4, a, 192, 168, 0, 1); - /* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_BITS) in free_node + /* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_DEPTH) in free_node * if something goes wrong. */ - for (i = 0; i < MAX_ALLOWEDIPS_BITS; ++i) { - part = cpu_to_be64(~(1LLU << (i % 64))); - memset(&ip, 0xff, 16); - memcpy((u8 *)&ip + (i < 64) * 8, &part, 8); + for (i = 0; i < 64; ++i) { + part = cpu_to_be64(~0LLU << i); + memset(&ip, 0xff, 8); + memcpy((u8 *)&ip + 8, &part, 8); + wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex); + memcpy(&ip, &part, 8); + memset((u8 *)&ip + 8, 0, 8); wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex); } - + memset(&ip, 0, 16); + wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex); wg_allowedips_free(&t, &mutex); wg_allowedips_init(&t); diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 81ff3b4c6c1b38098c0a0b8309f6e2d95c21c679..dc1191aa0443e11ff62f11d3f1a791a0c53829b6 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -160,9 +160,9 @@ static void mt76_init_stream_cap(struct mt76_phy *phy, void mt76_set_stream_caps(struct mt76_phy *phy, bool vht) { - if (phy->dev->cap.has_2ghz) + if (phy->cap.has_2ghz) mt76_init_stream_cap(phy, &phy->sband_2g.sband, false); - if (phy->dev->cap.has_5ghz) + if (phy->cap.has_5ghz) mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht); } EXPORT_SYMBOL_GPL(mt76_set_stream_caps); @@ -463,13 +463,13 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, dev_set_drvdata(dev->dev, dev); mt76_phy_init(dev, hw); - if (dev->cap.has_2ghz) { + if (phy->cap.has_2ghz) { ret = mt76_init_sband_2g(dev, rates, n_rates); if (ret) return ret; } - if (dev->cap.has_5ghz) { + if (phy->cap.has_5ghz) { ret = mt76_init_sband_5g(dev, rates + 4, n_rates - 4, vht); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 5a8060790a61f350284b532f42b10789ccb2fe57..16e65020a242d4d189e8088da72d70324e92dde9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -561,6 +561,7 @@ struct mt76_phy { struct mt76_channel_state *chan_state; ktime_t survey_time; + struct mt76_hw_cap cap; struct mt76_sband sband_2g; struct mt76_sband sband_5g; @@ -630,7 +631,6 @@ struct mt76_dev { struct debugfs_blob_wrapper eeprom; struct debugfs_blob_wrapper otp; - struct mt76_hw_cap 
cap; struct mt76_rate_power rate_power; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c index 01f1e0da5ee1e5dd3a7f5ceb0c71de17b7943f4f..a6df733aca4920be53c7f609c95f597c19e45b60 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c @@ -170,7 +170,7 @@ int mt7603_eeprom_init(struct mt7603_dev *dev) } eeprom = (u8 *)dev->mt76.eeprom.data; - dev->mt76.cap.has_2ghz = true; + dev->mphy.cap.has_2ghz = true; memcpy(dev->mt76.macaddr, eeprom + MT_EE_MAC_ADDR, ETH_ALEN); /* Check for 1SS devices */ diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c index bf8ae14121dba5b74a46111169999cf84ae21266..637ef0882436caada555c03dda7a2dea62a7dd78 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c @@ -202,7 +202,7 @@ int mt7615_dma_init(struct mt7615_dev *dev) int ret; /* Increase buffer size to receive large VHT MPDUs */ - if (dev->mt76.cap.has_5ghz) + if (dev->mphy.cap.has_5ghz) rx_buf_size *= 2; mt76_dma_attach(&dev->mt76); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c index e9cdcdc54d5c3e2e08a118d14cf63fba9b4469b5..85f56487feff275864aeb76cbd0705f3d90618aa 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c @@ -100,20 +100,20 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev) if (is_mt7663(&dev->mt76)) { /* dual band */ - dev->mt76.cap.has_2ghz = true; - dev->mt76.cap.has_5ghz = true; + dev->mphy.cap.has_2ghz = true; + dev->mphy.cap.has_5ghz = true; return; } if (is_mt7622(&dev->mt76)) { /* 2GHz only */ - dev->mt76.cap.has_2ghz = true; + dev->mphy.cap.has_2ghz = true; return; } if (is_mt7611(&dev->mt76)) { /* 5GHz only */ - dev->mt76.cap.has_5ghz = true; + dev->mphy.cap.has_5ghz = true; return; } @@ -121,17 +121,17 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev) eeprom[MT_EE_WIFI_CONF]); switch (val) { case MT_EE_5GHZ: - dev->mt76.cap.has_5ghz = true; - break; - case MT_EE_2GHZ: - dev->mt76.cap.has_2ghz = true; + dev->mphy.cap.has_5ghz = true; break; case MT_EE_DBDC: dev->dbdc_support = true; - /* fall through */ + fallthrough; + case MT_EE_2GHZ: + dev->mphy.cap.has_2ghz = true; + break; default: - dev->mt76.cap.has_2ghz = true; - dev->mt76.cap.has_5ghz = true; + dev->mphy.cap.has_2ghz = true; + dev->mphy.cap.has_5ghz = true; break; } } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index 9087607b621e8d30e2800306d586582b60c86657..ebf4c96532d3129a16b0da91a8ec9511ae756324 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c @@ -52,15 +52,15 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev) mt76x02_eeprom_parse_hw_cap(dev); dev_dbg(dev->mt76.dev, "2GHz %d 5GHz %d\n", - dev->mt76.cap.has_2ghz, dev->mt76.cap.has_5ghz); + dev->mphy.cap.has_2ghz, dev->mphy.cap.has_5ghz); if (dev->no_2ghz) { - dev->mt76.cap.has_2ghz = false; + dev->mphy.cap.has_2ghz = false; dev_dbg(dev->mt76.dev, "mask out 2GHz support\n"); } if (is_mt7630(dev)) { - dev->mt76.cap.has_5ghz = false; + dev->mphy.cap.has_5ghz = false; dev_dbg(dev->mt76.dev, "mask out 5GHz support\n"); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c 
b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c index d78866bf41ba37629c859a6dd5c1d81e4014f8d6..0bac39bf3b66d79672f82205d1475fe27bfe2a0d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c @@ -245,7 +245,7 @@ int mt76x0_register_device(struct mt76x02_dev *dev) if (ret) return ret; - if (dev->mt76.cap.has_5ghz) { + if (dev->mphy.cap.has_5ghz) { struct ieee80211_supported_band *sband; sband = &dev->mphy.sband_5g.sband; @@ -253,7 +253,7 @@ int mt76x0_register_device(struct mt76x02_dev *dev) mt76x0_init_txpower(dev, sband); } - if (dev->mt76.cap.has_2ghz) + if (dev->mphy.cap.has_2ghz) mt76x0_init_txpower(dev, &dev->mphy.sband_2g.sband); mt76x02_init_debugfs(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c index 3de33aadf7941bf6ceafc90e66551c2aff56a4de..e91c314cdfac569349a1638d9f8176930987c97f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c @@ -447,11 +447,11 @@ static void mt76x0_phy_ant_select(struct mt76x02_dev *dev) else coex3 |= BIT(4); coex3 |= BIT(3); - if (dev->mt76.cap.has_2ghz) + if (dev->mphy.cap.has_2ghz) wlan |= BIT(6); } else { /* sigle antenna mode */ - if (dev->mt76.cap.has_5ghz) { + if (dev->mphy.cap.has_5ghz) { coex3 |= BIT(3) | BIT(4); } else { wlan |= BIT(6); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c index c54c50fd639a9b4debc4aaadbb693a938f3b5c20..0acabba2d1a50c5653f5866108998dac9f046632 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c @@ -75,14 +75,14 @@ void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev) switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) { case BOARD_TYPE_5GHZ: - dev->mt76.cap.has_5ghz = true; + dev->mphy.cap.has_5ghz = true; break; case BOARD_TYPE_2GHZ: - dev->mt76.cap.has_2ghz = true; + dev->mphy.cap.has_2ghz = true; break; default: - dev->mt76.cap.has_2ghz = true; - dev->mt76.cap.has_5ghz = true; + dev->mphy.cap.has_2ghz = true; + dev->mphy.cap.has_5ghz = true; break; } } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c index e4c5f968f706d8bf2710c6c4a7aafec2c7c90280..5f6c527611f206b6390a7bb7f732e19d0a163931 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c @@ -57,14 +57,14 @@ static void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev) val = FIELD_GET(MT_EE_WIFI_CONF_BAND_SEL, val); switch (val) { case MT_EE_5GHZ: - dev->mt76.cap.has_5ghz = true; + dev->mphy.cap.has_5ghz = true; break; case MT_EE_2GHZ: - dev->mt76.cap.has_2ghz = true; + dev->mphy.cap.has_2ghz = true; break; default: - dev->mt76.cap.has_2ghz = true; - dev->mt76.cap.has_5ghz = true; + dev->mphy.cap.has_2ghz = true; + dev->mphy.cap.has_5ghz = true; break; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index 8f01ca1694bca564faef7d3c776b11f786bf9cf8..99683688a8363477e1f58235f28845c8fad7eaa3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -528,10 +528,9 @@ void mt7915_set_stream_he_caps(struct mt7915_phy *phy) { struct ieee80211_sband_iftype_data *data; struct ieee80211_supported_band *band; - struct mt76_dev *mdev = &phy->dev->mt76; int n; - if 
(mdev->cap.has_2ghz) { + if (phy->mt76->cap.has_2ghz) { data = phy->iftype[NL80211_BAND_2GHZ]; n = mt7915_init_he_caps(phy, NL80211_BAND_2GHZ, data); @@ -540,7 +539,7 @@ void mt7915_set_stream_he_caps(struct mt7915_phy *phy) band->n_iftype_data = n; } - if (mdev->cap.has_5ghz) { + if (phy->mt76->cap.has_5ghz) { data = phy->iftype[NL80211_BAND_5GHZ]; n = mt7915_init_he_caps(phy, NL80211_BAND_5GHZ, data); diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 29156e635ea0ca8529f3fd595951ee710727f010..61c875ab590f0040b2e452b6b1609d1b95ed82ea 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -989,6 +989,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) goto out_cleanup_connect_q; if (!new) { + nvme_start_freeze(&ctrl->ctrl); nvme_start_queues(&ctrl->ctrl); if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) { /* @@ -997,6 +998,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) * to be safe. */ ret = -ENODEV; + nvme_unfreeze(&ctrl->ctrl); goto out_wait_freeze_timed_out; } blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset, @@ -1042,7 +1044,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, bool remove) { if (ctrl->ctrl.queue_count > 1) { - nvme_start_freeze(&ctrl->ctrl); nvme_stop_queues(&ctrl->ctrl); nvme_sync_io_queues(&ctrl->ctrl); nvme_rdma_stop_io_queues(ctrl); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 167bd7ddd0efe1ce660ff27d432e0c36bc08914f..0bee26791ebdc2cf69bbc079e212ddf8e4d7de94 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1859,6 +1859,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) goto out_cleanup_connect_q; if (!new) { + nvme_start_freeze(ctrl); nvme_start_queues(ctrl); if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) { /* @@ -1867,6 +1868,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) * to be safe. 
*/ ret = -ENODEV; + nvme_unfreeze(ctrl); goto out_wait_freeze_timed_out; } blk_mq_update_nr_hw_queues(ctrl->tagset, @@ -1989,7 +1991,6 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, if (ctrl->queue_count <= 1) return; nvme_stop_admin_queue(ctrl); - nvme_start_freeze(ctrl); nvme_stop_queues(ctrl); nvme_sync_io_queues(ctrl); nvme_tcp_stop_io_queues(ctrl); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index b61acbb09be3ba1a0091fa3b5bc49200e149311e..d323f9985c482759dde3c82e9db53df352e6c4c0 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -534,8 +534,7 @@ static void zfcp_fc_adisc_handler(void *data) /* re-init to undo drop from zfcp_fc_adisc() */ port->d_id = ntoh24(adisc_resp->adisc_port_id); - /* port is good, unblock rport without going through erp */ - zfcp_scsi_schedule_rport_register(port); + /* port is still good, nothing to do */ out: atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status); put_device(&port->dev); @@ -595,9 +594,6 @@ void zfcp_fc_link_test_work(struct work_struct *work) int retval; set_worker_desc("zadisc%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */ - get_device(&port->dev); - port->rport_task = RPORT_DEL; - zfcp_scsi_rport_work(&port->rport_work); /* only issue one test command at one time per port */ if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST) diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index 3317a02bcc170da0d87617eccbe575a0fddfda07..0e0a19030c35beb3da09fcc93eae8dbb37f68d65 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -797,19 +797,19 @@ static void sdw_modify_slave_status(struct sdw_slave *slave, if (status == SDW_SLAVE_UNATTACHED) { dev_dbg(&slave->dev, - "%s: initializing completion for Slave %d\n", + "%s: initializing enumeration and init completion for Slave %d\n", __func__, slave->dev_num); - init_completion(&slave->enumeration_complete); - init_completion(&slave->initialization_complete); + reinit_completion(&slave->enumeration_complete); + reinit_completion(&slave->initialization_complete); } else if ((status == SDW_SLAVE_ATTACHED) && (slave->status == SDW_SLAVE_UNATTACHED)) { dev_dbg(&slave->dev, - "%s: signaling completion for Slave %d\n", + "%s: signaling enumeration completion for Slave %d\n", __func__, slave->dev_num); - complete(&slave->enumeration_complete); + complete_all(&slave->enumeration_complete); } slave->status = status; mutex_unlock(&slave->bus->bus_lock); @@ -1734,8 +1734,25 @@ int sdw_handle_slave_status(struct sdw_bus *bus, if (ret) dev_err(slave->bus->dev, "Update Slave status failed:%d\n", ret); - if (attached_initializing) - complete(&slave->initialization_complete); + if (attached_initializing) { + dev_dbg(&slave->dev, + "%s: signaling initialization completion for Slave %d\n", + __func__, slave->dev_num); + + complete_all(&slave->initialization_complete); + + /* + * If the manager became pm_runtime active, the peripherals will be + * restarted and attach, but their pm_runtime status may remain + * suspended. If the 'update_slave_status' callback initiates + * any sort of deferred processing, this processing would not be + * cancelled on pm_runtime suspend. + * To avoid such zombie states, we queue a request to resume. + * This would be a no-op in case the peripheral was being resumed + * by e.g. the ALSA/ASoC framework. 
+ */ + pm_request_resume(&slave->dev); + } } return ret; diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c index c9545a4eff664df1c46ebfd3cf2876a046059445..02446092520c896190fefd6ed7065a931de2e2d6 100644 --- a/drivers/usb/common/usb-conn-gpio.c +++ b/drivers/usb/common/usb-conn-gpio.c @@ -42,6 +42,7 @@ struct usb_conn_info { struct power_supply_desc desc; struct power_supply *charger; + bool initial_detection; }; /* @@ -86,11 +87,13 @@ static void usb_conn_detect_cable(struct work_struct *work) dev_dbg(info->dev, "role %d/%d, gpios: id %d, vbus %d\n", info->last_role, role, id, vbus); - if (info->last_role == role) { + if (!info->initial_detection && info->last_role == role) { dev_warn(info->dev, "repeated role: %d\n", role); return; } + info->initial_detection = false; + if (info->last_role == USB_ROLE_HOST && info->vbus) regulator_disable(info->vbus); @@ -277,6 +280,7 @@ static int usb_conn_probe(struct platform_device *pdev) platform_set_drvdata(pdev, info); /* Perform initial detection */ + info->initial_detection = true; usb_conn_queue_dwork(info, 0); return 0; diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c index 7e4ce0e7e05a778a0c935cc5ffd43680e705c64c..dcc4778d1ae9976f7f8e962e19c780a574b397e5 100644 --- a/drivers/usb/storage/alauda.c +++ b/drivers/usb/storage/alauda.c @@ -318,7 +318,8 @@ static int alauda_get_media_status(struct us_data *us, unsigned char *data) rc = usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe, command, 0xc0, 0, 1, data, 2); - usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]); + if (rc == USB_STOR_XFER_GOOD) + usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]); return rc; } @@ -454,9 +455,14 @@ static int alauda_init_media(struct us_data *us) static int alauda_check_media(struct us_data *us) { struct alauda_info *info = (struct alauda_info *) us->extra; - unsigned char status[2]; + unsigned char *status = us->iobuf; + int rc; - alauda_get_media_status(us, status); + rc = alauda_get_media_status(us, status); + if (rc != USB_STOR_XFER_GOOD) { + status[0] = 0xF0; /* Pretend there's no media */ + status[1] = 0; + } /* Check for no media or door open */ if ((status[0] & 0x80) || ((status[0] & 0x1F) == 0x10) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 7d9b8050b09cd62c0842877930cd20f7c7a9eb4d..9cdf50e2484e1ef73722895e1f464cdc0f02966d 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4138,8 +4138,11 @@ static noinline int find_free_extent(struct btrfs_root *root, ret = 0; } - if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) + if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) { + if (!cache_block_group_error) + cache_block_group_error = -EIO; goto loop; + } bg_ret = NULL; ret = do_allocation(block_group, &ffe_ctl, &bg_ret); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 5fc65a780f83bae66b3035dcec8b2be7cbf2114d..0e266772beaefb43cae6fc999d6b0c7d37bc1481 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4034,11 +4034,12 @@ int btree_write_cache_pages(struct address_space *mapping, free_extent_buffer(eb); /* - * the filesystem may choose to bump up nr_to_write. + * The filesystem may choose to bump up nr_to_write. * We have to make sure to honor the new nr_to_write - * at any time + * at any time. 
*/ - nr_to_write_done = wbc->nr_to_write <= 0; + nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE && + wbc->nr_to_write <= 0); } pagevec_release(&pvec); cond_resched(); diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 87a9e9096421accd243912cce5d3609da4809f06..df1ecb8bfebf7071c9f7bc50893fade5f04e9675 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -4511,7 +4511,7 @@ static void delayed_work(struct work_struct *work) dout("mdsc delayed_work\n"); - if (mdsc->stopping) + if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED) return; mutex_lock(&mdsc->mutex); @@ -4701,7 +4701,7 @@ void send_flush_mdlog(struct ceph_mds_session *s) void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) { dout("pre_umount\n"); - mdsc->stopping = 1; + mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN; ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true); ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index a92e42e8a9f82f6194f940c2342522acaba29e7b..1c958510f00f59eca3497e69e75ad01c269ef907 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -372,6 +372,11 @@ struct cap_wait { int want; }; +enum { + CEPH_MDSC_STOPPING_BEGIN = 1, + CEPH_MDSC_STOPPING_FLUSHED = 2, +}; + /* * mds client state */ diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 08c8d34c9809120426e9879be89f49968b7c92da..f2aff97348bc9d6529553679e137a3376d036b5a 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -1222,6 +1222,16 @@ static void ceph_kill_sb(struct super_block *s) ceph_mdsc_pre_umount(fsc->mdsc); flush_fs_workqueues(fsc); + /* + * Though the kill_anon_super() will finally trigger the + * sync_filesystem() anyway, we still need to do it here + * and then bump the stage of shutdown to stop the work + * queue as early as possible.
+ */ + sync_filesystem(s); + + fsc->mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED; + kill_anon_super(s); fsc->client->extra_mon_dispatch = NULL; diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c index 258b6bb5762a478b650f572780a2c742e69f1e5b..ab091440e8b934464d06c83dff1aee2e6913d7f4 100644 --- a/fs/exfat/balloc.c +++ b/fs/exfat/balloc.c @@ -69,7 +69,7 @@ static int exfat_allocate_bitmap(struct super_block *sb, } sbi->map_sectors = ((need_map_size - 1) >> (sb->s_blocksize_bits)) + 1; - sbi->vol_amap = kmalloc_array(sbi->map_sectors, + sbi->vol_amap = kvmalloc_array(sbi->map_sectors, sizeof(struct buffer_head *), GFP_KERNEL); if (!sbi->vol_amap) return -ENOMEM; @@ -84,7 +84,7 @@ static int exfat_allocate_bitmap(struct super_block *sb, while (j < i) brelse(sbi->vol_amap[j++]); - kfree(sbi->vol_amap); + kvfree(sbi->vol_amap); sbi->vol_amap = NULL; return -EIO; } @@ -138,7 +138,7 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi) for (i = 0; i < sbi->map_sectors; i++) __brelse(sbi->vol_amap[i]); - kfree(sbi->vol_amap); + kvfree(sbi->vol_amap); } int exfat_set_bitmap(struct inode *inode, unsigned int clu) diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c index 6a61aef49c2a1110f172601b2f66c3f3c6fc235b..db735a0d32fc61bdc5aca064896f2fcec0aec2d7 100644 --- a/fs/exfat/dir.c +++ b/fs/exfat/dir.c @@ -152,7 +152,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent 0); *uni_name.name = 0x0; - exfat_get_uniname_from_ext_entry(sb, &dir, dentry, + exfat_get_uniname_from_ext_entry(sb, &clu, i, uni_name.name); exfat_utf16_to_nls(sb, &uni_name, dir_entry->namebuf.lfn, @@ -214,7 +214,10 @@ static void exfat_free_namebuf(struct exfat_dentry_namebuf *nb) exfat_init_namebuf(nb); } -/* skip iterating emit_dots when dir is empty */ +/* + * Before calling dir_emit*(), sbi->s_lock should be released + * because page fault can occur in dir_emit*(). + */ #define ITER_POS_FILLED_DOTS (2) static int exfat_iterate(struct file *filp, struct dir_context *ctx) { @@ -229,11 +232,10 @@ static int exfat_iterate(struct file *filp, struct dir_context *ctx) int err = 0, fake_offset = 0; exfat_init_namebuf(nb); - mutex_lock(&EXFAT_SB(sb)->s_lock); cpos = ctx->pos; if (!dir_emit_dots(filp, ctx)) - goto unlock; + goto out; if (ctx->pos == ITER_POS_FILLED_DOTS) { cpos = 0; @@ -245,16 +247,18 @@ static int exfat_iterate(struct file *filp, struct dir_context *ctx) /* name buffer should be allocated before use */ err = exfat_alloc_namebuf(nb); if (err) - goto unlock; + goto out; get_new: + mutex_lock(&EXFAT_SB(sb)->s_lock); + if (ei->flags == ALLOC_NO_FAT_CHAIN && cpos >= i_size_read(inode)) goto end_of_dir; err = exfat_readdir(inode, &cpos, &de); if (err) { /* - * At least we tried to read a sector. Move cpos to next sector - * position (should be aligned). + * At least we tried to read a sector. + * Move cpos to next sector position (should be aligned). */ if (err == -EIO) { cpos += 1 << (sb->s_blocksize_bits); @@ -277,16 +281,10 @@ static int exfat_iterate(struct file *filp, struct dir_context *ctx) inum = iunique(sb, EXFAT_ROOT_INO); } - /* - * Before calling dir_emit(), sb_lock should be released. - * Because page fault can occur in dir_emit() when the size - * of buffer given from user is larger than one page size. - */ mutex_unlock(&EXFAT_SB(sb)->s_lock); if (!dir_emit(ctx, nb->lfn, strlen(nb->lfn), inum, (de.attr & ATTR_SUBDIR) ? 
DT_DIR : DT_REG)) - goto out_unlocked; - mutex_lock(&EXFAT_SB(sb)->s_lock); + goto out; ctx->pos = cpos; goto get_new; @@ -294,9 +292,8 @@ static int exfat_iterate(struct file *filp, struct dir_context *ctx) if (!cpos && fake_offset) cpos = ITER_POS_FILLED_DOTS; ctx->pos = cpos; -unlock: mutex_unlock(&EXFAT_SB(sb)->s_lock); -out_unlocked: +out: /* * To improve performance, free namebuf after unlock sb_lock. * If namebuf is not allocated, this function do nothing @@ -616,6 +613,10 @@ int exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync) bforget(es->bh[i]); else brelse(es->bh[i]); + + if (IS_DYNAMIC_ES(es)) + kfree(es->bh); + kfree(es); return err; } @@ -851,6 +852,7 @@ struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb, /* byte offset in sector */ off = EXFAT_BLK_OFFSET(byte_offset, sb); es->start_off = off; + es->bh = es->__bh; /* sector offset in cluster */ sec = EXFAT_B_TO_BLK(byte_offset, sb); @@ -870,6 +872,16 @@ struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb, es->num_entries = num_entries; num_bh = EXFAT_B_TO_BLK_ROUND_UP(off + num_entries * DENTRY_SIZE, sb); + if (num_bh > ARRAY_SIZE(es->__bh)) { + es->bh = kmalloc_array(num_bh, sizeof(*es->bh), GFP_KERNEL); + if (!es->bh) { + brelse(bh); + kfree(es); + return NULL; + } + es->bh[0] = bh; + } + for (i = 1; i < num_bh; i++) { /* get the next sector */ if (exfat_is_last_sector_in_cluster(sbi, sec)) { @@ -909,14 +921,19 @@ enum { }; /* - * return values: - * >= 0 : return dir entiry position with the name in dir - * -ENOENT : entry with the name does not exist - * -EIO : I/O error + * @ei: inode info of parent directory + * @p_dir: directory structure of parent directory + * @num_entries:entry size of p_uniname + * @hint_opt: If p_uniname is found, filled with optimized dir/entry + * for traversing cluster chain. 
+ * @return: + * >= 0: file directory entry position where the name exists + * -ENOENT: entry with the name does not exist + * -EIO: I/O error */ int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei, struct exfat_chain *p_dir, struct exfat_uni_name *p_uniname, - int num_entries, unsigned int type) + int num_entries, unsigned int type, struct exfat_hint *hint_opt) { int i, rewind = 0, dentry = 0, end_eidx = 0, num_ext = 0, len; int order, step, name_len = 0; @@ -993,6 +1010,8 @@ int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei, if (entry_type == TYPE_FILE || entry_type == TYPE_DIR) { step = DIRENT_STEP_FILE; + hint_opt->clu = clu.dir; + hint_opt->eidx = i; if (type == TYPE_ALL || type == entry_type) { num_ext = ep->dentry.file.num_ext; step = DIRENT_STEP_STRM; diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h index 07b09af57436fba33ca5033a37ff0ca2b7a73439..11e579a2598d8a851afce10d00519883ef6a163d 100644 --- a/fs/exfat/exfat_fs.h +++ b/fs/exfat/exfat_fs.h @@ -170,10 +170,13 @@ struct exfat_entry_set_cache { bool modified; unsigned int start_off; int num_bh; - struct buffer_head *bh[DIR_CACHE_SIZE]; + struct buffer_head *__bh[DIR_CACHE_SIZE]; + struct buffer_head **bh; unsigned int num_entries; }; +#define IS_DYNAMIC_ES(es) ((es)->__bh != (es)->bh) + struct exfat_dir_entry { struct exfat_chain dir; int entry; @@ -458,7 +461,7 @@ void exfat_update_dir_chksum_with_entry_set(struct exfat_entry_set_cache *es); int exfat_calc_num_entries(struct exfat_uni_name *p_uniname); int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei, struct exfat_chain *p_dir, struct exfat_uni_name *p_uniname, - int num_entries, unsigned int type); + int num_entries, unsigned int type, struct exfat_hint *hint_opt); int exfat_alloc_new_dir(struct inode *inode, struct exfat_chain *clu); int exfat_find_location(struct super_block *sb, struct exfat_chain *p_dir, int entry, sector_t *sector, int *offset); diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c index 1382d816912c8c00b25970c11208a62ee61259c2..bd00afc5e4c16f655d69b16024365c215db5ab02 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -596,6 +596,8 @@ static int exfat_find(struct inode *dir, struct qstr *qname, struct exfat_inode_info *ei = EXFAT_I(dir); struct exfat_dentry *ep, *ep2; struct exfat_entry_set_cache *es; + /* for optimized dir & entry to prevent long traverse of cluster chain */ + struct exfat_hint hint_opt; if (qname->len == 0) return -ENOENT; @@ -619,7 +621,7 @@ static int exfat_find(struct inode *dir, struct qstr *qname, /* search the file name for directories */ dentry = exfat_find_dir_entry(sb, ei, &cdir, &uni_name, - num_entries, TYPE_ALL); + num_entries, TYPE_ALL, &hint_opt); if (dentry < 0) return dentry; /* -error value */ @@ -628,6 +630,11 @@ static int exfat_find(struct inode *dir, struct qstr *qname, info->entry = dentry; info->num_subdirs = 0; + /* adjust cdir to the optimized value */ + cdir.dir = hint_opt.clu; + if (cdir.flags & ALLOC_NO_FAT_CHAIN) + cdir.size -= dentry / sbi->dentries_per_clu; + dentry = hint_opt.eidx; es = exfat_get_dentry_set(sb, &cdir, dentry, ES_2_ENTRIES); if (!es) return -EIO; diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index f06367cfd76417509a55314511fbfabd285c9622..fb2086bcbbf8380ec33fa9fb8f4a962826c38f12 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -68,10 +68,7 @@ struct mb_cache; * second extended-fs super-block data in memory */ struct ext2_sb_info { - unsigned long s_frag_size; /* Size of a fragment in bytes */ - unsigned 
long s_frags_per_block;/* Number of fragments per block */ unsigned long s_inodes_per_block;/* Number of inodes per block */ - unsigned long s_frags_per_group;/* Number of fragments in a group */ unsigned long s_blocks_per_group;/* Number of blocks in a group */ unsigned long s_inodes_per_group;/* Number of inodes in a group */ unsigned long s_itb_per_group; /* Number of inode table blocks per group */ @@ -185,15 +182,6 @@ static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb) #define EXT2_INODE_SIZE(s) (EXT2_SB(s)->s_inode_size) #define EXT2_FIRST_INO(s) (EXT2_SB(s)->s_first_ino) -/* - * Macro-instructions used to manage fragments - */ -#define EXT2_MIN_FRAG_SIZE 1024 -#define EXT2_MAX_FRAG_SIZE 4096 -#define EXT2_MIN_FRAG_LOG_SIZE 10 -#define EXT2_FRAG_SIZE(s) (EXT2_SB(s)->s_frag_size) -#define EXT2_FRAGS_PER_BLOCK(s) (EXT2_SB(s)->s_frags_per_block) - /* * Structure of a blocks group descriptor */ diff --git a/fs/ext2/super.c b/fs/ext2/super.c index ab01ec7ac48c5ae9fbaaa39188e764b4fe3de7a0..a810b9c9e8eb554fc09a8e057f19b71758ca7d16 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -673,10 +673,9 @@ static int ext2_setup_super (struct super_block * sb, es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT); le16_add_cpu(&es->s_mnt_count, 1); if (test_opt (sb, DEBUG)) - ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, " + ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, gc=%lu, " "bpg=%lu, ipg=%lu, mo=%04lx]", EXT2FS_VERSION, EXT2FS_DATE, sb->s_blocksize, - sbi->s_frag_size, sbi->s_groups_count, EXT2_BLOCKS_PER_GROUP(sb), EXT2_INODES_PER_GROUP(sb), @@ -1014,14 +1013,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) } } - sbi->s_frag_size = EXT2_MIN_FRAG_SIZE << - le32_to_cpu(es->s_log_frag_size); - if (sbi->s_frag_size == 0) - goto cantfind_ext2; - sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size; - sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); - sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group); sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb); @@ -1047,11 +1039,10 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) goto failed_mount; } - if (sb->s_blocksize != sbi->s_frag_size) { + if (es->s_log_frag_size != es->s_log_block_size) { ext2_msg(sb, KERN_ERR, - "error: fragsize %lu != blocksize %lu" - "(not supported yet)", - sbi->s_frag_size, sb->s_blocksize); + "error: fragsize log %u != blocksize log %u", + le32_to_cpu(es->s_log_frag_size), sb->s_blocksize_bits); goto failed_mount; } @@ -1061,12 +1052,6 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) sbi->s_blocks_per_group); goto failed_mount; } - if (sbi->s_frags_per_group > sb->s_blocksize * 8) { - ext2_msg(sb, KERN_ERR, - "error: #fragments per group too big: %lu", - sbi->s_frags_per_group); - goto failed_mount; - } if (sbi->s_inodes_per_group < sbi->s_inodes_per_block || sbi->s_inodes_per_group > sb->s_blocksize * 8) { ext2_msg(sb, KERN_ERR, diff --git a/fs/file.c b/fs/file.c index 2feeccec3f4dcff2ec6c21114af23beaa3777d94..d6bc73960e4ac9df2258a4344ea5ab386965887c 100644 --- a/fs/file.c +++ b/fs/file.c @@ -1007,12 +1007,28 @@ unsigned long __fdget_raw(unsigned int fd) return __fget_light(fd, 0); } +/* + * Try to avoid f_pos locking. We only need it if the + * file is marked for FMODE_ATOMIC_POS, and it can be + * accessed multiple ways. 
+ * + * Always do it for directories, because pidfd_getfd() + * can make a file accessible even if it otherwise would + * not be, and for directories this is a correctness + * issue, not a "POSIX requirement". + */ +static inline bool file_needs_f_pos_lock(struct file *file) +{ + return (file->f_mode & FMODE_ATOMIC_POS) && + (file_count(file) > 1 || S_ISDIR(file_inode(file)->i_mode)); +} + unsigned long __fdget_pos(unsigned int fd) { unsigned long v = __fdget(fd); struct file *file = (struct file *)(v & ~3); - if (file && (file->f_mode & FMODE_ATOMIC_POS)) { + if (file && file_needs_f_pos_lock(file)) { v |= FDPUT_POS_UNLOCK; mutex_lock(&file->f_pos_lock); } diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 042f4512e47d9bb1d3e0101341d212a95b110ae6..3a3cdc3706471cdc2f13ab94f19165a9e311400c 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -1103,9 +1103,17 @@ int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty) int __nilfs_mark_inode_dirty(struct inode *inode, int flags) { + struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct buffer_head *ibh; int err; + /* + * Do not dirty inodes after the log writer has been detached + * and its nilfs_root struct has been freed. + */ + if (unlikely(nilfs_purging(nilfs))) + return 0; + err = nilfs_load_inode_block(inode, &ibh); if (unlikely(err)) { nilfs_warn(inode->i_sb, diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 4a910c8a569134ab9fa36266a234670170630876..e4686e30b1a7dad7a864b14d6fcb1cdce7e5a35d 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -2850,6 +2850,7 @@ void nilfs_detach_log_writer(struct super_block *sb) nilfs_segctor_destroy(nilfs->ns_writer); nilfs->ns_writer = NULL; } + set_nilfs_purging(nilfs); /* Force to free the list of dirty files */ spin_lock(&nilfs->ns_inode_lock); @@ -2862,4 +2863,5 @@ void nilfs_detach_log_writer(struct super_block *sb) up_write(&nilfs->ns_segctor_sem); nilfs_dispose_list(nilfs, &garbage_list, 1); + clear_nilfs_purging(nilfs); } diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h index b55cdeb4d16991cd463598b4f4e687533016934d..01914089e76df9fa60b4df47b23f3ca5e4889095 100644 --- a/fs/nilfs2/the_nilfs.h +++ b/fs/nilfs2/the_nilfs.h @@ -29,6 +29,7 @@ enum { THE_NILFS_DISCONTINUED, /* 'next' pointer chain has broken */ THE_NILFS_GC_RUNNING, /* gc process is running */ THE_NILFS_SB_DIRTY, /* super block is dirty */ + THE_NILFS_PURGING, /* disposing dirty files for cleanup */ }; /** @@ -208,6 +209,7 @@ THE_NILFS_FNS(INIT, init) THE_NILFS_FNS(DISCONTINUED, discontinued) THE_NILFS_FNS(GC_RUNNING, gc_running) THE_NILFS_FNS(SB_DIRTY, sb_dirty) +THE_NILFS_FNS(PURGING, purging) /* * Mount option operations diff --git a/fs/open.c b/fs/open.c index 7aec6f14273950536087b123c0c8f526055146f9..dde3415cbc8a9cba6a1ebb1dd89c18dcec86e02f 100644 --- a/fs/open.c +++ b/fs/open.c @@ -1101,7 +1101,7 @@ inline int build_open_flags(const struct open_how *how, struct open_flags *op) lookup_flags |= LOOKUP_IN_ROOT; if (how->resolve & RESOLVE_CACHED) { /* Don't bother even trying for create/truncate/tmpfile open */ - if (flags & (O_TRUNC | O_CREAT | O_TMPFILE)) + if (flags & (O_TRUNC | O_CREAT | __O_TMPFILE)) return -EAGAIN; lookup_flags |= LOOKUP_CACHED; } diff --git a/fs/super.c b/fs/super.c index 7629f9dd031ccdb3ed884a41abfcb2bfad7d3bfa..f9795e72e3bf8bb61e9b0decf25b318756f01e31 100644 --- a/fs/super.c +++ b/fs/super.c @@ -906,6 +906,7 @@ int reconfigure_super(struct fs_context *fc) struct super_block *sb = fc->root->d_sb; int retval; bool remount_ro = false; + bool remount_rw 
= false; bool force = fc->sb_flags & SB_FORCE; if (fc->sb_flags_mask & ~MS_RMT_MASK) @@ -922,7 +923,7 @@ int reconfigure_super(struct fs_context *fc) if (!(fc->sb_flags & SB_RDONLY) && bdev_read_only(sb->s_bdev)) return -EACCES; #endif - + remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb); remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb); } @@ -952,6 +953,14 @@ int reconfigure_super(struct fs_context *fc) if (retval) return retval; } + } else if (remount_rw) { + /* + * We set s_readonly_remount here to protect filesystem's + * reconfigure code from writes from userspace until + * reconfigure finishes. + */ + sb->s_readonly_remount = 1; + smp_wmb(); } if (fc->ops->reconfigure) { diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c index 31f66053e2393bf9beea05f47c75887052fc6a53..e3d1673b8ec9728c265b54dc3752b096f257a663 100644 --- a/fs/sysv/itree.c +++ b/fs/sysv/itree.c @@ -145,6 +145,10 @@ static int alloc_branch(struct inode *inode, */ parent = block_to_cpu(SYSV_SB(inode->i_sb), branch[n-1].key); bh = sb_getblk(inode->i_sb, parent); + if (!bh) { + sysv_free_block(inode->i_sb, branch[n].key); + break; + } lock_buffer(bh); memset(bh->b_data, 0, blocksize); branch[n].bh = bh; diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 358905a8599386f9769c39272170357400488603..6d7ab016127c9e7b8bcaa35bc6e902bcb26a26ff 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -72,6 +72,8 @@ extern ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_gds(struct device *dev, + struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h index cd5b62db908454dc7f7fe2acc9479c467e1a1f67..e63a63aa47a374d27a5cc9da1ce96d7593036d5f 100644 --- a/include/linux/pm_wakeirq.h +++ b/include/linux/pm_wakeirq.h @@ -17,8 +17,8 @@ #ifdef CONFIG_PM extern int dev_pm_set_wake_irq(struct device *dev, int irq); -extern int dev_pm_set_dedicated_wake_irq(struct device *dev, - int irq); +extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq); +extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq); extern void dev_pm_clear_wake_irq(struct device *dev); extern void dev_pm_enable_wake_irq(struct device *dev); extern void dev_pm_disable_wake_irq(struct device *dev); @@ -35,6 +35,11 @@ static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) return 0; } +static inline int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq) +{ + return 0; +} + static inline void dev_pm_clear_wake_irq(struct device *dev) { } diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 2a819be384a78edb2d3ae60e187a0d9977835add..4536a122c4bc55cd48521318b00d58df1b6b4586 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -513,6 +513,9 @@ ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband, if (WARN_ON(iftype >= NL80211_IFTYPE_MAX)) return NULL; + if (iftype == NL80211_IFTYPE_AP_VLAN) + iftype = NL80211_IFTYPE_AP; + for (i = 0; i < sband->n_iftype_data; i++) { const struct ieee80211_sband_iftype_data *data = &sband->iftype_data[i]; diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index fb3c5f6907506b04d160054731818aa69f5736bf..eec29dd6681cabf091e1573098074bb4ce56d324 100644 
--- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1073,6 +1073,29 @@ int __nft_release_basechain(struct nft_ctx *ctx); unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv); +static inline bool nft_use_inc(u32 *use) +{ + if (*use == UINT_MAX) + return false; + + (*use)++; + + return true; +} + +static inline void nft_use_dec(u32 *use) +{ + WARN_ON_ONCE((*use)-- == 0); +} + +/* For error and abort path: restore use counter to previous state. */ +static inline void nft_use_inc_restore(u32 *use) +{ + WARN_ON_ONCE(!nft_use_inc(use)); +} + +#define nft_use_dec_restore nft_use_dec + /** * struct nft_table - nf_tables table * @@ -1150,8 +1173,8 @@ struct nft_object { struct list_head list; struct rhlist_head rhlhead; struct nft_object_hash_key key; - u32 genmask:2, - use:30; + u32 genmask:2; + u32 use; u64 handle; u16 udlen; u8 *udata; @@ -1253,8 +1276,8 @@ struct nft_flowtable { char *name; int hooknum; int ops_len; - u32 genmask:2, - use:30; + u32 genmask:2; + u32 use; u64 handle; /* runtime data below here */ struct list_head hook_list ____cacheline_aligned; diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 99fa0de3c66524c32870dbeb1093da2630247a0f..c555488f1cdc896afae6fd174612c41298bc188d 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -4229,9 +4229,11 @@ static int io_openat2(struct io_kiocb *req, unsigned int issue_flags) if (issue_flags & IO_URING_F_NONBLOCK) { /* * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open, - * it'll always -EAGAIN + * it'll always -EAGAIN. Note that we test for __O_TMPFILE + * because O_TMPFILE includes O_DIRECTORY, which isn't a flag + * we need to force async for. */ - if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE)) + if (req->open.how.flags & (O_TRUNC | O_CREAT | __O_TMPFILE)) return -EAGAIN; op.lookup_flags |= LOOKUP_CACHED; op.open_flag |= O_NONBLOCK; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index f1deefc9bdcdeefa826f11a7ea8264def98112be..e00228cd1d060622d7e5797a977d90629af76183 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1360,7 +1360,7 @@ static void __mark_reg_unknown(const struct bpf_verifier_env *env, reg->type = SCALAR_VALUE; reg->var_off = tnum_unknown; reg->frameno = 0; - reg->precise = env->subprog_cnt > 1 || !env->bpf_capable; + reg->precise = !env->bpf_capable; __mark_reg_unbounded(reg); } @@ -2024,8 +2024,11 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env, /* big hammer: mark all scalars precise in this path. * pop_stack may still get !precise scalars. + * We also skip current state and go straight to first parent state, + * because precision markings in current non-checkpointed state are + * not needed. See why in the comment in __mark_chain_precision below. 
*/ - for (; st; st = st->parent) + for (st = st->parent; st; st = st->parent) { for (i = 0; i <= st->curframe; i++) { func = st->frame[i]; for (j = 0; j < BPF_REG_FP; j++) { @@ -2043,8 +2046,121 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env, reg->precise = true; } } + } +} + +static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st) +{ + struct bpf_func_state *func; + struct bpf_reg_state *reg; + int i, j; + + for (i = 0; i <= st->curframe; i++) { + func = st->frame[i]; + for (j = 0; j < BPF_REG_FP; j++) { + reg = &func->regs[j]; + if (reg->type != SCALAR_VALUE) + continue; + reg->precise = false; + } + for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { + if (!is_spilled_reg(&func->stack[j])) + continue; + reg = &func->stack[j].spilled_ptr; + if (reg->type != SCALAR_VALUE) + continue; + reg->precise = false; + } + } } +/* + * __mark_chain_precision() backtracks BPF program instruction sequence and + * chain of verifier states making sure that register *regno* (if regno >= 0) + * and/or stack slot *spi* (if spi >= 0) are marked as precisely tracked + * SCALARS, as well as any other registers and slots that contribute to + * a tracked state of given registers/stack slots, depending on specific BPF + * assembly instructions (see backtrack_insns() for exact instruction handling + * logic). This backtracking relies on recorded jmp_history and is able to + * traverse entire chain of parent states. This process ends only when all the + * necessary registers/slots and their transitive dependencies are marked as + * precise. + * + * One important and subtle aspect is that precise marks *do not matter* in + * the currently verified state (current state). It is important to understand + * why this is the case. + * + * First, note that current state is the state that is not yet "checkpointed", + * i.e., it is not yet put into env->explored_states, and it has no children + * states as well. It's ephemeral, and can end up either a) being discarded if + * compatible explored state is found at some point or BPF_EXIT instruction is + * reached or b) checkpointed and put into env->explored_states, branching out + * into one or more children states. + * + * In the former case, precise markings in current state are completely + * ignored by state comparison code (see regsafe() for details). Only + * checkpointed ("old") state precise markings are important, and if old + * state's register/slot is precise, regsafe() assumes current state's + * register/slot as precise and checks value ranges exactly and precisely. If + * states turn out to be compatible, current state's necessary precise + * markings and any required parent states' precise markings are enforced + * after the fact with propagate_precision() logic. But it's + * important to realize that in this case, even after marking current state + * registers/slots as precise, we immediately discard current state. So what + * actually matters is any of the precise markings propagated into current + * state's parent states, which are always checkpointed (due to b) case above). + * As such, for scenario a) it doesn't matter if current state has precise + * markings set or not. + * + * Now, for the scenario b), checkpointing and forking into child(ren) + * state(s).
Note that before current state gets to checkpointing step, any + * processed instruction always assumes precise SCALAR register/slot + * knowledge: if precise value or range is useful to prune jump branch, BPF + * verifier takes this opportunity enthusiastically. Similarly, when + * register's value is used to calculate offset or memory address, exact + * knowledge of SCALAR range is assumed, checked, and enforced. So, similar to + * what we mentioned above about state comparison ignoring precise markings, + * BPF verifier ignores and also assumes precise + * markings *at will* during instruction verification process. But as verifier + * assumes precision, it also propagates any precision dependencies across + * parent states, which are not yet finalized, so can be further restricted + * based on new knowledge gained from restrictions enforced by their children + * states. This is so that once those parent states are finalized, i.e., when + * they have no more active children states, state comparison logic in + * is_state_visited() would enforce strict and precise SCALAR ranges, if + * required for correctness. + * + * To build a bit more intuition, note also that once a state is checkpointed, + * the path we took to get to that state is not important. This is a crucial + * property for state pruning. When state is checkpointed and finalized at + * some instruction index, it can be correctly and safely used to "short + * circuit" any *compatible* state that reaches exactly the same instruction + * index. I.e., if we jumped to that instruction from a completely different + * code path than original finalized state was derived from, it doesn't + * matter, current state can be discarded because from that instruction + * forward having a compatible state will ensure we will safely reach the + * exit. States describe preconditions for further exploration, but completely + * forget the history of how we got here. + * + * This also means that even if we needed precise SCALAR range to get to + * finalized state, but from that point forward *that same* SCALAR register is + * never used in a precise context (i.e., its precise value is not needed for + * correctness), it's correct and safe to mark such register as "imprecise" + * (i.e., precise marking set to false). This is what we rely on when we do + * not set precise marking in current state. If no child state requires + * precision for any given SCALAR register, it's safe to dictate that it can + * be imprecise. If any child state does require this register to be precise, + * we'll mark it precise later retroactively during precise markings + * propagation from child state to parent states. + * + * Skipping precise marking setting in current state is a mild version of + * relying on the above observation. But we can utilize this property even + * more aggressively by proactively forgetting any precise marking in the + * current state (which we inherited from the parent state), right before we + * checkpoint it and branch off into new child state. This is done by + * mark_all_scalars_imprecise() to hopefully get more permissive and generic + * finalized states which help in short circuiting more future states.
+ */ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno, int spi) { @@ -2062,6 +2178,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r if (!env->bpf_capable) return 0; + /* Do sanity checks against current state of register and/or stack + * slot, but don't set precise flag in current state, as precision + * tracking in the current state is unnecessary. + */ func = st->frame[frame]; if (regno >= 0) { reg = &func->regs[regno]; @@ -2069,11 +2189,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r WARN_ONCE(1, "backtracing misuse"); return -EFAULT; } - if (!reg->precise) - new_marks = true; - else - reg_mask = 0; - reg->precise = true; + new_marks = true; } while (spi >= 0) { @@ -2086,11 +2202,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r stack_mask = 0; break; } - if (!reg->precise) - new_marks = true; - else - stack_mask = 0; - reg->precise = true; + new_marks = true; break; } @@ -2098,12 +2210,42 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r return 0; if (!reg_mask && !stack_mask) return 0; + for (;;) { DECLARE_BITMAP(mask, 64); u32 history = st->jmp_history_cnt; if (env->log.level & BPF_LOG_LEVEL) verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx); + + if (last_idx < 0) { + /* we are at the entry into subprog, which + * is expected for global funcs, but only if + * requested precise registers are R1-R5 + * (which are global func's input arguments) + */ + if (st->curframe == 0 && + st->frame[0]->subprogno > 0 && + st->frame[0]->callsite == BPF_MAIN_FUNC && + stack_mask == 0 && (reg_mask & ~0x3e) == 0) { + bitmap_from_u64(mask, reg_mask); + for_each_set_bit(i, mask, 32) { + reg = &st->frame[0]->regs[i]; + if (reg->type != SCALAR_VALUE) { + reg_mask &= ~(1u << i); + continue; + } + reg->precise = true; + } + return 0; + } + + verbose(env, "BUG backtracing func entry subprog %d reg_mask %x stack_mask %llx\n", + st->frame[0]->subprogno, reg_mask, stack_mask); + WARN_ONCE(1, "verifier backtracking bug"); + return -EFAULT; + } + for (i = last_idx;;) { if (skip_first) { err = 0; @@ -9283,7 +9425,7 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, if (env->explore_alu_limits) return false; if (rcur->type == SCALAR_VALUE) { - if (!rold->precise && !rcur->precise) + if (!rold->precise) return true; /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && @@ -9815,6 +9957,10 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) env->prev_jmps_processed = env->jmps_processed; env->prev_insn_processed = env->insn_processed; + /* forget precise markings we inherited, see __mark_chain_precision */ + if (env->bpf_capable) + mark_all_scalars_imprecise(env, cur); + /* add new state to the head of linked list */ new = &new_sl->state; err = copy_verifier_state(new, cur); @@ -11891,6 +12037,9 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog) 0 /* frameno */, subprog); + state->first_insn_idx = env->subprog_info[subprog].start; + state->last_insn_idx = -1; + regs = state->frame[state->curframe]->regs; if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { ret = btf_prepare_func_args(env, subprog, regs); diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 95b9d615bf54f333d63223ef15903daeeaac1be1..2a014f115f1e1526a64081cbcc1d7ac3b7c669d9 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c 
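One way to see what the mark_all_scalars_precise() change above does: the loop now starts at st->parent instead of st itself, so the ephemeral current state is never marked, and only the checkpointed ancestors whose markings regsafe() actually consults are touched. A toy model of that parent-only walk; struct state and its fields are illustrative, not verifier code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy model of the verifier-state chain: each state points at its
 * checkpointed parent. The walk starts at st->parent, leaving the
 * not-yet-checkpointed current state unmarked, since its markings are
 * never consulted by state comparison. */
struct state {
	const char *name;
	bool precise;
	struct state *parent;
};

static void mark_all_precise(struct state *st)
{
	for (st = st->parent; st; st = st->parent)	/* skip current */
		st->precise = true;
}

int main(void)
{
	struct state grandparent = { "grandparent", false, NULL };
	struct state parent = { "parent", false, &grandparent };
	struct state cur = { "current", false, &parent };

	mark_all_precise(&cur);

	for (struct state *st = &cur; st; st = st->parent)
		printf("%-12s precise=%d\n", st->name, st->precise);
	return 0;
}

After the call, current stays imprecise while parent and grandparent are marked, mirroring the long comment's point that precision in the not-yet-checkpointed state never matters.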
@@ -970,7 +970,6 @@ static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds); u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) { - int nest_level = this_cpu_inc_return(bpf_event_output_nest_level); struct perf_raw_frag frag = { .copy = ctx_copy, .size = ctx_size, @@ -987,8 +986,12 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, }; struct perf_sample_data *sd; struct pt_regs *regs; + int nest_level; u64 ret; + preempt_disable(); + nest_level = this_cpu_inc_return(bpf_event_output_nest_level); + if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) { ret = -EBUSY; goto out; @@ -1003,6 +1006,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, ret = __bpf_perf_event_output(regs, map, flags, sd); out: this_cpu_dec(bpf_event_output_nest_level); + preempt_enable(); return ret; } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index bed792f0657586e3f7dc26c0a56961286ddb8090..7e99319bd5365026b3a0c983b6a8e2d89be8782c 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -9416,6 +9416,12 @@ void trace_init_global_iter(struct trace_iterator *iter) /* Output in nanoseconds only if we are using a clock in nanoseconds. */ if (trace_clocks[iter->tr->clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; + + /* Can not use kmalloc for iter.temp and iter.fmt */ + iter->temp = static_temp_buf; + iter->temp_size = STATIC_TEMP_BUF_SIZE; + iter->fmt = static_fmt_buf; + iter->fmt_size = STATIC_FMT_BUF_SIZE; } void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) @@ -9449,11 +9455,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) /* Simulate the iterator */ trace_init_global_iter(&iter); - /* Can not use kmalloc for iter.temp and iter.fmt */ - iter.temp = static_temp_buf; - iter.temp_size = STATIC_TEMP_BUF_SIZE; - iter.fmt = static_fmt_buf; - iter.fmt_size = STATIC_FMT_BUF_SIZE; for_each_tracing_cpu(cpu) { atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 1e9fab79e2456fdae2a367d3f414bd353266f603..d594cd501861a8d1fcca60f01656053d2d391113 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -3330,17 +3330,24 @@ static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq) int ret; dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); - ret = wait_for_completion_interruptible(&lreq->reg_commit_wait); + ret = wait_for_completion_killable(&lreq->reg_commit_wait); return ret ?: lreq->reg_commit_error; } -static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq) +static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq, + unsigned long timeout) { - int ret; + long left; dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); - ret = wait_for_completion_interruptible(&lreq->notify_finish_wait); - return ret ?: lreq->notify_finish_error; + left = wait_for_completion_killable_timeout(&lreq->notify_finish_wait, + ceph_timeout_jiffies(timeout)); + if (left <= 0) + left = left ?: -ETIMEDOUT; + else + left = lreq->notify_finish_error; /* completed */ + + return left; } /* @@ -4888,7 +4895,8 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc, linger_submit(lreq); ret = linger_reg_commit_wait(lreq); if (!ret) - ret = linger_notify_finish_wait(lreq); + ret = linger_notify_finish_wait(lreq, + msecs_to_jiffies(2 * timeout * MSEC_PER_SEC)); else dout("lreq %p failed to 
initiate notify %d\n", lreq, ret); diff --git a/net/dccp/output.c b/net/dccp/output.c index 50e6d5699bb2979f9a144e3dc62ef6f84aa02661..d679032f003404c87c9ee0fea265b0af8d8bb45a 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c @@ -185,7 +185,7 @@ unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu) /* And store cached results */ icsk->icsk_pmtu_cookie = pmtu; - dp->dccps_mss_cache = cur_mps; + WRITE_ONCE(dp->dccps_mss_cache, cur_mps); return cur_mps; } diff --git a/net/dccp/proto.c b/net/dccp/proto.c index e946211758c05f08c7be9d4d0441c47c2c13df9d..3293c1e3aa5d5dfdec5ca187ed1ff837a1ba736e 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -639,7 +639,7 @@ static int do_dccp_getsockopt(struct sock *sk, int level, int optname, return dccp_getsockopt_service(sk, len, (__be32 __user *)optval, optlen); case DCCP_SOCKOPT_GET_CUR_MPS: - val = dp->dccps_mss_cache; + val = READ_ONCE(dp->dccps_mss_cache); break; case DCCP_SOCKOPT_AVAILABLE_CCIDS: return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen); @@ -748,7 +748,7 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) trace_dccp_probe(sk, len); - if (len > dp->dccps_mss_cache) + if (len > READ_ONCE(dp->dccps_mss_cache)) return -EMSGSIZE; lock_sock(sk); @@ -781,6 +781,12 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) goto out_discard; } + /* We need to check dccps_mss_cache after socket is locked. */ + if (len > dp->dccps_mss_cache) { + rc = -EMSGSIZE; + goto out_discard; + } + skb_reserve(skb, sk->sk_prot->max_header); rc = memcpy_from_msg(skb_put(skb, len), msg, len); if (rc != 0) diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 4b74c67f13c9d46efa96cd50598bedd595074271..da9a55c68e11ec8d708519ecc8bc60e651ac1f25 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -224,7 +224,7 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu) .un.frag.__unused = 0, .un.frag.mtu = ntohs(mtu), }; - icmph->checksum = ip_compute_csum(icmph, len); + icmph->checksum = csum_fold(skb_checksum(skb, 0, len, 0)); skb_reset_transport_header(skb); niph = skb_push(skb, sizeof(*niph)); diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 76717478f1733fb753c067efaa5dd210320e0261..ac1e51087b1d87e646ae372ee646b099ea5b435d 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -196,7 +196,8 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur, static inline int ndisc_is_useropt(const struct net_device *dev, struct nd_opt_hdr *opt) { - return opt->nd_opt_type == ND_OPT_RDNSS || + return opt->nd_opt_type == ND_OPT_PREFIX_INFO || + opt->nd_opt_type == ND_OPT_RDNSS || opt->nd_opt_type == ND_OPT_DNSSL || opt->nd_opt_type == ND_OPT_CAPTIVE_PORTAL || opt->nd_opt_type == ND_OPT_PREF64 || diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 48d11ffd399538b5ce9cf4531a0ec2876719a808..984855f9eea21a7cdfcd607caf1f1951da6dcf27 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -257,8 +257,10 @@ int nf_tables_bind_chain(const struct nft_ctx *ctx, struct nft_chain *chain) if (chain->bound) return -EBUSY; + if (!nft_use_inc(&chain->use)) + return -EMFILE; + chain->bound = true; - chain->use++; nft_chain_trans_bind(ctx, chain); return 0; @@ -427,7 +429,7 @@ static int nft_delchain(struct nft_ctx *ctx) if (IS_ERR(trans)) return PTR_ERR(trans); - ctx->table->use--; + nft_use_dec(&ctx->table->use); nft_deactivate_next(ctx->net, ctx->chain); return 0; @@ -466,7 +468,7 @@ nf_tables_delrule_deactivate(struct 
nft_ctx *ctx, struct nft_rule *rule) /* You cannot delete the same rule twice */ if (nft_is_active_next(ctx->net, rule)) { nft_deactivate_next(ctx->net, rule); - ctx->chain->use--; + nft_use_dec(&ctx->chain->use); return 0; } return -ENOENT; @@ -594,7 +596,7 @@ static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set) nft_map_deactivate(ctx, set); nft_deactivate_next(ctx->net, set); - ctx->table->use--; + nft_use_dec(&ctx->table->use); return err; } @@ -626,7 +628,7 @@ static int nft_delobj(struct nft_ctx *ctx, struct nft_object *obj) return err; nft_deactivate_next(ctx->net, obj); - ctx->table->use--; + nft_use_dec(&ctx->table->use); return err; } @@ -661,7 +663,7 @@ static int nft_delflowtable(struct nft_ctx *ctx, return err; nft_deactivate_next(ctx->net, flowtable); - ctx->table->use--; + nft_use_dec(&ctx->table->use); return err; } @@ -2185,9 +2187,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, struct nft_rule **rules; int err; - if (table->use == UINT_MAX) - return -EOVERFLOW; - if (nla[NFTA_CHAIN_HOOK]) { struct nft_stats __percpu *stats = NULL; struct nft_chain_hook hook; @@ -2283,6 +2282,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, if (err < 0) goto err_destroy_chain; + if (!nft_use_inc(&table->use)) { + err = -EMFILE; + goto err_use; + } + trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN); if (IS_ERR(trans)) { err = PTR_ERR(trans); @@ -2299,10 +2303,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, goto err_unregister_hook; } - table->use++; - return 0; + err_unregister_hook: + nft_use_dec_restore(&table->use); +err_use: nf_tables_unregister_hook(net, table, chain); err_destroy_chain: nf_tables_chain_destroy(ctx); @@ -3414,9 +3419,6 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, return -EINVAL; handle = nf_tables_alloc_handle(table); - if (chain->use == UINT_MAX) - return -EOVERFLOW; - if (nla[NFTA_RULE_POSITION]) { pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION])); old_rule = __nft_rule_lookup(chain, pos_handle); @@ -3502,16 +3504,21 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, expr = nft_expr_next(expr); } + if (!nft_use_inc(&chain->use)) { + err = -EMFILE; + goto err2; + } + if (nlh->nlmsg_flags & NLM_F_REPLACE) { trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule); if (trans == NULL) { err = -ENOMEM; - goto err2; + goto err_destroy_flow_rule; } err = nft_delrule(&ctx, old_rule); if (err < 0) { nft_trans_destroy(trans); - goto err2; + goto err_destroy_flow_rule; } list_add_tail_rcu(&rule->list, &old_rule->list); @@ -3519,7 +3526,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule); if (!trans) { err = -ENOMEM; - goto err2; + goto err_destroy_flow_rule; } if (nlh->nlmsg_flags & NLM_F_APPEND) { @@ -3535,7 +3542,6 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, } } kvfree(info); - chain->use++; if (nft_net->validate_state == NFT_VALIDATE_DO) return nft_table_validate(net, table); @@ -3549,6 +3555,9 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, } return 0; + +err_destroy_flow_rule: + nft_use_dec_restore(&chain->use); err2: nft_rule_expr_deactivate(&ctx, rule, NFT_TRANS_PREPARE_ERROR); nf_tables_rule_destroy(&ctx, rule); @@ -4464,9 +4473,15 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, alloc_size = sizeof(*set) + size + udlen; if (alloc_size < size || alloc_size > INT_MAX) return 
-ENOMEM; + + if (!nft_use_inc(&table->use)) + return -EMFILE; + set = kvzalloc(alloc_size, GFP_KERNEL); - if (!set) - return -ENOMEM; + if (!set) { + err = -ENOMEM; + goto err_alloc; + } name = nla_strdup(nla[NFTA_SET_NAME], GFP_KERNEL); if (!name) { @@ -4527,7 +4542,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, goto err_set_expr_alloc; list_add_tail_rcu(&set->list, &table->sets); - table->use++; + return 0; err_set_expr_alloc: @@ -4539,6 +4554,9 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, kfree(set->name); err_set_name: kvfree(set); +err_alloc: + nft_use_dec_restore(&table->use); + return err; } @@ -4632,9 +4650,6 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_binding *i; struct nft_set_iter iter; - if (set->use == UINT_MAX) - return -EOVERFLOW; - if (!list_empty(&set->bindings) && nft_set_is_anonymous(set)) return -EBUSY; @@ -4659,10 +4674,12 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, return iter.err; } bind: + if (!nft_use_inc(&set->use)) + return -EMFILE; + binding->chain = ctx->chain; list_add_tail_rcu(&binding->list, &set->bindings); nft_set_trans_bind(ctx, set); - set->use++; return 0; } @@ -4715,7 +4732,7 @@ void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set) nft_clear(ctx->net, set); } - set->use++; + nft_use_inc_restore(&set->use); } EXPORT_SYMBOL_GPL(nf_tables_activate_set); @@ -4731,7 +4748,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, else list_del_rcu(&binding->list); - set->use--; + nft_use_dec(&set->use); break; case NFT_TRANS_PREPARE: if (nft_set_is_anonymous(set)) { @@ -4740,7 +4757,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, nft_deactivate_next(ctx->net, set); } - set->use--; + nft_use_dec(&set->use); return; case NFT_TRANS_ABORT: case NFT_TRANS_RELEASE: @@ -4748,7 +4765,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, set->flags & (NFT_SET_MAP | NFT_SET_OBJECT)) nft_map_deactivate(ctx, set); - set->use--; + nft_use_dec(&set->use); fallthrough; default: nf_tables_unbind_set(ctx, set, binding, @@ -5371,7 +5388,7 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem, nft_set_elem_expr_destroy(&ctx, nft_set_ext_expr(ext)); if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF)) - (*nft_set_ext_obj(ext))->use--; + nft_use_dec(&(*nft_set_ext_obj(ext))->use); kfree(elem); } EXPORT_SYMBOL_GPL(nft_set_elem_destroy); @@ -5545,8 +5562,16 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, set->objtype, genmask); if (IS_ERR(obj)) { err = PTR_ERR(obj); + obj = NULL; + goto err_parse_key_end; + } + + if (!nft_use_inc(&obj->use)) { + err = -EMFILE; + obj = NULL; goto err_parse_key_end; } + nft_set_ext_add(&tmpl, NFT_SET_EXT_OBJREF); } @@ -5611,10 +5636,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, udata->len = ulen - 1; nla_memcpy(&udata->data, nla[NFTA_SET_ELEM_USERDATA], ulen); } - if (obj) { + if (obj) *nft_set_ext_obj(ext) = obj; - obj->use++; - } err = nft_set_elem_expr_setup(ctx, ext, expr); if (err < 0) @@ -5670,14 +5693,14 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, err_element_clash: kfree(trans); err_elem_expr: - if (obj) - obj->use--; - nf_tables_set_elem_destroy(ctx, set, elem.priv); err_parse_data: if (nla[NFTA_SET_ELEM_DATA] != NULL) nft_data_release(&elem.data.val, desc.type); err_parse_key_end: + if (obj) + 
nft_use_dec_restore(&obj->use); + nft_data_release(&elem.key_end.val, NFT_DATA_VALUE); err_parse_key: nft_data_release(&elem.key.val, NFT_DATA_VALUE); @@ -5749,7 +5772,7 @@ void nft_data_hold(const struct nft_data *data, enum nft_data_types type) case NFT_JUMP: case NFT_GOTO: chain = data->verdict.chain; - chain->use++; + nft_use_inc_restore(&chain->use); break; } } @@ -5764,7 +5787,7 @@ static void nft_setelem_data_activate(const struct net *net, if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) nft_data_hold(nft_set_ext_data(ext), set->dtype); if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF)) - (*nft_set_ext_obj(ext))->use++; + nft_use_inc_restore(&(*nft_set_ext_obj(ext))->use); } static void nft_setelem_data_deactivate(const struct net *net, @@ -5776,7 +5799,7 @@ static void nft_setelem_data_deactivate(const struct net *net, if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) nft_data_release(nft_set_ext_data(ext), set->dtype); if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF)) - (*nft_set_ext_obj(ext))->use--; + nft_use_dec(&(*nft_set_ext_obj(ext))->use); } static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, @@ -6243,9 +6266,14 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk, nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla); + if (!nft_use_inc(&table->use)) + return -EMFILE; + type = nft_obj_type_get(net, objtype); - if (IS_ERR(type)) - return PTR_ERR(type); + if (IS_ERR(type)) { + err = PTR_ERR(type); + goto err_type; + } obj = nft_obj_init(&ctx, type, nla[NFTA_OBJ_DATA]); if (IS_ERR(obj)) { @@ -6279,7 +6307,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk, goto err_obj_ht; list_add_tail_rcu(&obj->list, &table->objects); - table->use++; + return 0; err_obj_ht: /* queued in transaction log */ @@ -6295,6 +6323,9 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk, kfree(obj); err_init: module_put(type->owner); +err_type: + nft_use_dec_restore(&table->use); + return err; } @@ -6685,7 +6716,7 @@ void nf_tables_deactivate_flowtable(const struct nft_ctx *ctx, case NFT_TRANS_PREPARE: case NFT_TRANS_ABORT: case NFT_TRANS_RELEASE: - flowtable->use--; + nft_use_dec(&flowtable->use); fallthrough; default: return; @@ -7022,9 +7053,14 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk, nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla); + if (!nft_use_inc(&table->use)) + return -EMFILE; + flowtable = kzalloc(sizeof(*flowtable), GFP_KERNEL); - if (!flowtable) - return -ENOMEM; + if (!flowtable) { + err = -ENOMEM; + goto flowtable_alloc; + } flowtable->table = table; flowtable->handle = nf_tables_alloc_handle(table); @@ -7079,7 +7115,6 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk, goto err5; list_add_tail_rcu(&flowtable->list, &table->flowtables); - table->use++; return 0; err5: @@ -7096,6 +7131,9 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk, kfree(flowtable->name); err1: kfree(flowtable); +flowtable_alloc: + nft_use_dec_restore(&table->use); + return err; } @@ -8281,7 +8319,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) */ if (nft_set_is_anonymous(nft_trans_set(trans)) && !list_empty(&nft_trans_set(trans)->bindings)) - trans->ctx.table->use--; + nft_use_dec(&trans->ctx.table->use); nf_tables_set_notify(&trans->ctx, nft_trans_set(trans), NFT_MSG_NEWSET, GFP_KERNEL); @@ -8465,7 +8503,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_trans_destroy(trans); break; } - trans->ctx.table->use--; + 
nft_use_dec_restore(&trans->ctx.table->use); nft_chain_del(trans->ctx.chain); nf_tables_unregister_hook(trans->ctx.net, trans->ctx.table, @@ -8473,7 +8511,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) } break; case NFT_MSG_DELCHAIN: - trans->ctx.table->use++; + nft_use_inc_restore(&trans->ctx.table->use); nft_clear(trans->ctx.net, trans->ctx.chain); nft_trans_destroy(trans); break; @@ -8482,20 +8520,20 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_trans_destroy(trans); break; } - trans->ctx.chain->use--; + nft_use_dec_restore(&trans->ctx.chain->use); list_del_rcu(&nft_trans_rule(trans)->list); nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans), NFT_TRANS_ABORT); break; case NFT_MSG_DELRULE: - trans->ctx.chain->use++; + nft_use_inc_restore(&trans->ctx.chain->use); nft_clear(trans->ctx.net, nft_trans_rule(trans)); nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans)); nft_trans_destroy(trans); break; case NFT_MSG_NEWSET: - trans->ctx.table->use--; + nft_use_dec_restore(&trans->ctx.table->use); if (nft_trans_set_bound(trans)) { nft_trans_destroy(trans); break; @@ -8503,7 +8541,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) list_del_rcu(&nft_trans_set(trans)->list); break; case NFT_MSG_DELSET: - trans->ctx.table->use++; + nft_use_inc_restore(&trans->ctx.table->use); nft_clear(trans->ctx.net, nft_trans_set(trans)); if (nft_trans_set(trans)->flags & (NFT_SET_MAP | NFT_SET_OBJECT)) nft_map_activate(&trans->ctx, nft_trans_set(trans)); @@ -8533,12 +8571,12 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans)); nft_trans_destroy(trans); } else { - trans->ctx.table->use--; + nft_use_dec_restore(&trans->ctx.table->use); nft_obj_del(nft_trans_obj(trans)); } break; case NFT_MSG_DELOBJ: - trans->ctx.table->use++; + nft_use_inc_restore(&trans->ctx.table->use); nft_clear(trans->ctx.net, nft_trans_obj(trans)); nft_trans_destroy(trans); break; @@ -8547,7 +8585,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_unregister_flowtable_net_hooks(net, &nft_trans_flowtable_hooks(trans)); } else { - trans->ctx.table->use--; + nft_use_dec_restore(&trans->ctx.table->use); list_del_rcu(&nft_trans_flowtable(trans)->list); nft_unregister_flowtable_net_hooks(net, &nft_trans_flowtable(trans)->hook_list); @@ -8558,7 +8596,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) list_splice(&nft_trans_flowtable_hooks(trans), &nft_trans_flowtable(trans)->hook_list); } else { - trans->ctx.table->use++; + nft_use_inc_restore(&trans->ctx.table->use); nft_clear(trans->ctx.net, nft_trans_flowtable(trans)); } nft_trans_destroy(trans); @@ -8990,8 +9028,9 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, if (desc->flags & NFT_DATA_DESC_SETELEM && chain->flags & NFT_CHAIN_BINDING) return -EINVAL; + if (!nft_use_inc(&chain->use)) + return -EMFILE; - chain->use++; data->verdict.chain = chain; break; default: @@ -9011,7 +9050,7 @@ static void nft_verdict_uninit(const struct nft_data *data) case NFT_JUMP: case NFT_GOTO: chain = data->verdict.chain; - chain->use--; + nft_use_dec(&chain->use); break; } } @@ -9180,11 +9219,11 @@ int __nft_release_basechain(struct nft_ctx *ctx) nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain); list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) { list_del(&rule->list); - ctx->chain->use--; 
+ nft_use_dec(&ctx->chain->use); nf_tables_rule_release(ctx, rule); } nft_chain_del(ctx->chain); - ctx->table->use--; + nft_use_dec(&ctx->table->use); nf_tables_chain_destroy(ctx); return 0; @@ -9224,18 +9263,18 @@ static void __nft_release_table(struct net *net, struct nft_table *table) ctx.chain = chain; list_for_each_entry_safe(rule, nr, &chain->rules, list) { list_del(&rule->list); - chain->use--; + nft_use_dec(&chain->use); nf_tables_rule_release(&ctx, rule); } } list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) { list_del(&flowtable->list); - table->use--; + nft_use_dec(&table->use); nf_tables_flowtable_destroy(flowtable); } list_for_each_entry_safe(set, ns, &table->sets, list) { list_del(&set->list); - table->use--; + nft_use_dec(&table->use); if (set->flags & (NFT_SET_MAP | NFT_SET_OBJECT)) nft_map_deactivate(&ctx, set); @@ -9243,13 +9282,13 @@ static void __nft_release_table(struct net *net, struct nft_table *table) } list_for_each_entry_safe(obj, ne, &table->objects, list) { nft_obj_del(obj); - table->use--; + nft_use_dec(&table->use); nft_obj_destroy(&ctx, obj); } list_for_each_entry_safe(chain, nc, &table->chains, list) { ctx.chain = chain; nft_chain_del(chain); - table->use--; + nft_use_dec(&table->use); nf_tables_chain_destroy(&ctx); } list_del(&table->list); diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c index 3a6c84fb2c90dc09e352113e016c0a26e076f272..d868eade601763432e97bc6d7f918d2ccbedca83 100644 --- a/net/netfilter/nft_flow_offload.c +++ b/net/netfilter/nft_flow_offload.c @@ -174,8 +174,10 @@ static int nft_flow_offload_init(const struct nft_ctx *ctx, if (IS_ERR(flowtable)) return PTR_ERR(flowtable); + if (!nft_use_inc(&flowtable->use)) + return -EMFILE; + priv->flowtable = flowtable; - flowtable->use++; return nf_ct_netns_get(ctx->net, ctx->family); } @@ -194,7 +196,7 @@ static void nft_flow_offload_activate(const struct nft_ctx *ctx, { struct nft_flow_offload *priv = nft_expr_priv(expr); - priv->flowtable->use++; + nft_use_inc_restore(&priv->flowtable->use); } static void nft_flow_offload_destroy(const struct nft_ctx *ctx, diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index 6bf1c852e8eaa0191591323f9d9da168a9b5ef20..7d5b63c5a30af77fcc2d9877231785373904f2ad 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c @@ -168,7 +168,7 @@ static void nft_immediate_deactivate(const struct nft_ctx *ctx, nft_immediate_chain_deactivate(ctx, chain, phase); nft_chain_del(chain); chain->bound = false; - chain->table->use--; + nft_use_dec(&chain->table->use); break; } break; @@ -207,7 +207,7 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx, * let the transaction records release this chain and its rules. 
*/ if (chain->bound) { - chain->use--; + nft_use_dec(&chain->use); break; } @@ -215,9 +215,9 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx, chain_ctx = *ctx; chain_ctx.chain = chain; - chain->use--; + nft_use_dec(&chain->use); list_for_each_entry_safe(rule, n, &chain->rules, list) { - chain->use--; + nft_use_dec(&chain->use); list_del(&rule->list); nf_tables_rule_destroy(&chain_ctx, rule); } diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c index 25157d8cc2504885ec6aaccfbfbc2a496fd6ba15..30d0b0a346192175e80755c61228daacbe8f7bb6 100644 --- a/net/netfilter/nft_objref.c +++ b/net/netfilter/nft_objref.c @@ -41,8 +41,10 @@ static int nft_objref_init(const struct nft_ctx *ctx, if (IS_ERR(obj)) return -ENOENT; + if (!nft_use_inc(&obj->use)) + return -EMFILE; + nft_objref_priv(expr) = obj; - obj->use++; return 0; } @@ -71,7 +73,7 @@ static void nft_objref_deactivate(const struct nft_ctx *ctx, if (phase == NFT_TRANS_COMMIT) return; - obj->use--; + nft_use_dec(&obj->use); } static void nft_objref_activate(const struct nft_ctx *ctx, @@ -79,7 +81,7 @@ static void nft_objref_activate(const struct nft_ctx *ctx, { struct nft_object *obj = nft_objref_priv(expr); - obj->use++; + nft_use_inc_restore(&obj->use); } static struct nft_expr_type nft_objref_type; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index c7129616dd530136d9a32ba76caa2a8e92308263..bbdb32acac324f50391ae764a96362c6b0dd388d 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -366,18 +366,20 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status) { union tpacket_uhdr h; + /* WRITE_ONCE() are paired with READ_ONCE() in __packet_get_status */ + h.raw = frame; switch (po->tp_version) { case TPACKET_V1: - h.h1->tp_status = status; + WRITE_ONCE(h.h1->tp_status, status); flush_dcache_page(pgv_to_page(&h.h1->tp_status)); break; case TPACKET_V2: - h.h2->tp_status = status; + WRITE_ONCE(h.h2->tp_status, status); flush_dcache_page(pgv_to_page(&h.h2->tp_status)); break; case TPACKET_V3: - h.h3->tp_status = status; + WRITE_ONCE(h.h3->tp_status, status); flush_dcache_page(pgv_to_page(&h.h3->tp_status)); break; default: @@ -394,17 +396,19 @@ static int __packet_get_status(const struct packet_sock *po, void *frame) smp_rmb(); + /* READ_ONCE() are paired with WRITE_ONCE() in __packet_set_status */ + h.raw = frame; switch (po->tp_version) { case TPACKET_V1: flush_dcache_page(pgv_to_page(&h.h1->tp_status)); - return h.h1->tp_status; + return READ_ONCE(h.h1->tp_status); case TPACKET_V2: flush_dcache_page(pgv_to_page(&h.h2->tp_status)); - return h.h2->tp_status; + return READ_ONCE(h.h2->tp_status); case TPACKET_V3: flush_dcache_page(pgv_to_page(&h.h3->tp_status)); - return h.h3->tp_status; + return READ_ONCE(h.h3->tp_status); default: WARN(1, "TPACKET version not supported.\n"); BUG(); diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c index a61c7bcbc72da4bd022178f02a5ee90b3aa8f514..63f468bf8245cb70914a646eae59c2fd2aade8a8 100644 --- a/tools/testing/radix-tree/regression1.c +++ b/tools/testing/radix-tree/regression1.c @@ -177,7 +177,7 @@ void regression1_test(void) nr_threads = 2; pthread_barrier_init(&worker_barrier, NULL, nr_threads); - threads = malloc(nr_threads * sizeof(pthread_t *)); + threads = malloc(nr_threads * sizeof(*threads)); for (i = 0; i < nr_threads; i++) { arg = i; diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c index 
5861446d077709381f609e356ce754c4d640e251..7996ec07e0bdb0cb6153f2d6bf619bc5a51d72c7 100644 --- a/tools/testing/selftests/bpf/prog_tests/align.c +++ b/tools/testing/selftests/bpf/prog_tests/align.c @@ -2,7 +2,7 @@ #include <test_progs.h> #define MAX_INSNS 512 -#define MAX_MATCHES 16 +#define MAX_MATCHES 24 struct bpf_reg_match { unsigned int line; @@ -267,6 +267,7 @@ static struct bpf_align_test tests[] = { */ BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), @@ -280,6 +281,7 @@ static struct bpf_align_test tests[] = { BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4), BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), @@ -311,44 +313,52 @@ static struct bpf_align_test tests[] = { {15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, {15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* Variable offset is added to R5 packet pointer, - * resulting in auxiliary alignment of 4. + * resulting in auxiliary alignment of 4. To avoid BPF + * verifier's precision backtracking logging + * interfering we also have a no-op R4 = R5 + * instruction to validate R5 state. We also check + * that R4 is what it should be in such case. */ - {18, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {19, "R4_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {19, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* Constant offset is added to R5, resulting in * reg->off of 14. */ - {19, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {20, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off * (14) which is 16. Then the variable offset is 4-byte * aligned, so the total offset is 4-byte aligned and * meets the load's requirements. */ - {23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, - {23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {24, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {24, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* Constant offset is added to R5 packet pointer, * resulting in reg->off value of 14. */ - {26, "R5_w=pkt(id=0,off=14,r=8"}, + {27, "R5_w=pkt(id=0,off=14,r=8"}, /* Variable offset is added to R5, resulting in a - * variable offset of (4n). + * variable offset of (4n). See comment for insn #19 + * for R4 = R5 trick. */ - {27, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {29, "R4_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {29, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* Constant is added to R5 again, setting reg->off to 18. */ - {28, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {30, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* And once more we add a variable; resulting var_off * is still (4n), fixed offset is not changed. * Also, we create a new reg->id.
*/ - {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"}, + {32, "R4_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"}, + {32, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off (18) * which is 20. Then the variable offset is (4n), so * the total offset is 4-byte aligned and meets the * load's requirements. */ - {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, - {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, + {35, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, + {35, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, }, }, { diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c index 3a469099f30d89334ac21cf3e4b53cef798ddeda..e09c5239a5951c51808cfdb426802484f21416dd 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c @@ -29,7 +29,23 @@ static int stop, duration; static bool configure_stack(void) { + char tc_version[128]; char tc_cmd[BUFSIZ]; + char *prog; + FILE *tc; + + /* Check whether tc is built with libbpf. */ + tc = popen("tc -V", "r"); + if (CHECK_FAIL(!tc)) + return false; + if (CHECK_FAIL(!fgets(tc_version, sizeof(tc_version), tc))) + return false; + if (strstr(tc_version, ", libbpf ")) + prog = "test_sk_assign_libbpf.o"; + else + prog = "test_sk_assign.o"; + if (CHECK_FAIL(pclose(tc))) + return false; /* Move to a new networking namespace */ if (CHECK_FAIL(unshare(CLONE_NEWNET))) @@ -46,8 +62,8 @@ configure_stack(void) /* Load qdisc, BPF program */ if (CHECK_FAIL(system("tc qdisc add dev lo clsact"))) return false; - sprintf(tc_cmd, "%s %s %s %s", "tc filter add dev lo ingress bpf", - "direct-action object-file ./test_sk_assign.o", + sprintf(tc_cmd, "%s %s %s %s %s", "tc filter add dev lo ingress bpf", + "direct-action object-file", prog, "section classifier/sk_assign_test", (env.verbosity < VERBOSE_VERY) ? " 2>/dev/null" : "verbose"); if (CHECK(system(tc_cmd), "BPF load failed;", @@ -129,15 +145,12 @@ get_port(int fd) static ssize_t rcv_msg(int srv_client, int type) { - struct sockaddr_storage ss; char buf[BUFSIZ]; - socklen_t slen; if (type == SOCK_STREAM) return read(srv_client, &buf, sizeof(buf)); else - return recvfrom(srv_client, &buf, sizeof(buf), 0, - (struct sockaddr *)&ss, &slen); + return recvfrom(srv_client, &buf, sizeof(buf), 0, NULL, NULL); } static int diff --git a/tools/testing/selftests/bpf/progs/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c index a943d394fd3a04e555e13aa6c108057d8ad477e5..38ab1ce32e57c80a56afb0700799cd2d48d458ba 100644 --- a/tools/testing/selftests/bpf/progs/connect4_prog.c +++ b/tools/testing/selftests/bpf/progs/connect4_prog.c @@ -33,7 +33,7 @@ int _version SEC("version") = 1; -__attribute__ ((noinline)) +__attribute__ ((noinline)) __weak int do_bind(struct bpf_sock_addr *ctx) { struct sockaddr_in sa = {}; diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign.c b/tools/testing/selftests/bpf/progs/test_sk_assign.c index 1ecd987005d2c8d0a3841c05e3de631f4e7bf0a5..77fd42f835fcf3f90b141c61ba42496dcfcd0f26 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_assign.c +++ b/tools/testing/selftests/bpf/progs/test_sk_assign.c @@ -16,6 +16,16 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> +#if defined(IPROUTE2_HAVE_LIBBPF) +/* Use a new-style map definition.
*/ +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __type(key, int); + __type(value, __u64); + __uint(pinning, LIBBPF_PIN_BY_NAME); + __uint(max_entries, 1); +} server_map SEC(".maps"); +#else /* Pin map under /sys/fs/bpf/tc/globals/ */ #define PIN_GLOBAL_NS 2 @@ -35,6 +45,7 @@ struct { .max_elem = 1, .pinning = PIN_GLOBAL_NS, }; +#endif int _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c b/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c new file mode 100644 index 0000000000000000000000000000000000000000..dcf46adfda04113502c64675e18bb80487ec585c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-2.0 +#define IPROUTE2_HAVE_LIBBPF +#include "test_sk_assign.c" diff --git a/tools/testing/selftests/net/forwarding/ethtool.sh b/tools/testing/selftests/net/forwarding/ethtool.sh index dbb9fcf759e0fbad7e1bd8c39a693590719a2e51..aa2eafb7b2437652e1e73745887a4794a7b596f3 100755 --- a/tools/testing/selftests/net/forwarding/ethtool.sh +++ b/tools/testing/selftests/net/forwarding/ethtool.sh @@ -286,6 +286,8 @@ different_speeds_autoneg_on() ethtool -s $h1 autoneg on } +skip_on_veth + trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh b/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh index 4b42dfd4efd1a05bd51738831cac7faf6c30d576..baf831da5366c8fa4c397498c6ccf78e072c1c56 100755 --- a/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh +++ b/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh @@ -95,6 +95,8 @@ no_cable() ip link set dev $swp3 down } +skip_on_veth + setup_prepare tests_run diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 9605e158a0bfced24e6f60ab01f673f45842db64..dfb41db7fbe481ddf60f7b4dab88224827215a6e 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -69,6 +69,17 @@ check_tc_action_hw_stats_support() fi } +skip_on_veth() +{ + local kind=$(ip -j -d link show dev ${NETIFS[p1]} | + jq -r '.[].linkinfo.info_kind') + + if [[ $kind == veth ]]; then + echo "SKIP: Test cannot be run with veth pairs" + exit $ksft_skip + fi +} + if [[ "$(id -u)" -ne 0 ]]; then echo "SKIP: need root privileges" exit 0 @@ -121,6 +132,11 @@ create_netif_veth() for ((i = 1; i <= NUM_NETIFS; ++i)); do local j=$((i+1)) + if [ -z ${NETIFS[p$i]} ]; then + echo "SKIP: Cannot create interface. Name not specified" + exit $ksft_skip + fi + ip link show dev ${NETIFS[p$i]} &> /dev/null if [[ $? -ne 0 ]]; then ip link add ${NETIFS[p$i]} type veth \ diff --git a/tools/testing/selftests/net/forwarding/settings b/tools/testing/selftests/net/forwarding/settings new file mode 100644 index 0000000000000000000000000000000000000000..e7b9417537fbc4626153b72e8f295ab4594c844b --- /dev/null +++ b/tools/testing/selftests/net/forwarding/settings @@ -0,0 +1 @@ +timeout=0 diff --git a/tools/testing/selftests/net/forwarding/tc_flower.sh b/tools/testing/selftests/net/forwarding/tc_flower.sh index b11d8e6b5bc145919111735dcadf9016533481eb..b7cdf75efb5f9732ddb6a9386ec34e97b09948df 100755 --- a/tools/testing/selftests/net/forwarding/tc_flower.sh +++ b/tools/testing/selftests/net/forwarding/tc_flower.sh @@ -49,8 +49,8 @@ match_dst_mac_test() tc_check_packets "dev $h2 ingress" 101 1 check_fail $? 
"Matched on a wrong filter" - tc_check_packets "dev $h2 ingress" 102 1 - check_err $? "Did not match on correct filter" + tc_check_packets "dev $h2 ingress" 102 0 + check_fail $? "Did not match on correct filter" tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower @@ -75,8 +75,8 @@ match_src_mac_test() tc_check_packets "dev $h2 ingress" 101 1 check_fail $? "Matched on a wrong filter" - tc_check_packets "dev $h2 ingress" 102 1 - check_err $? "Did not match on correct filter" + tc_check_packets "dev $h2 ingress" 102 0 + check_fail $? "Did not match on correct filter" tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile index 215e1067f03763075649077b3072f0a560907657..82ceca6aab965d68e718aa19bf144a96cd3e5b7d 100644 --- a/tools/testing/selftests/rseq/Makefile +++ b/tools/testing/selftests/rseq/Makefile @@ -4,8 +4,10 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) CLANG_FLAGS += -no-integrated-as endif +top_srcdir = ../../../.. + CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L$(OUTPUT) -Wl,-rpath=./ \ - $(CLANG_FLAGS) + $(CLANG_FLAGS) -I$(top_srcdir)/tools/include LDLIBS += -lpthread -ldl # Own dependencies because we only want to build against 1st prerequisite, but diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c index 986b9458efb2643e92d1715e52a0febf8beb9b2b..e20191fb40d496a86f5f5719e16f8ad7a0c436ec 100644 --- a/tools/testing/selftests/rseq/rseq.c +++ b/tools/testing/selftests/rseq/rseq.c @@ -29,12 +29,22 @@ #include #include +#include + #include "../kselftest.h" #include "rseq.h" -static const ptrdiff_t *libc_rseq_offset_p; -static const unsigned int *libc_rseq_size_p; -static const unsigned int *libc_rseq_flags_p; +/* + * Define weak versions to play nice with binaries that are statically linked + * against a libc that doesn't support registering its own rseq. + */ +__weak ptrdiff_t __rseq_offset; +__weak unsigned int __rseq_size; +__weak unsigned int __rseq_flags; + +static const ptrdiff_t *libc_rseq_offset_p = &__rseq_offset; +static const unsigned int *libc_rseq_size_p = &__rseq_size; +static const unsigned int *libc_rseq_flags_p = &__rseq_flags; /* Offset from the thread pointer to the rseq area. */ ptrdiff_t rseq_offset; @@ -108,10 +118,19 @@ int rseq_unregister_current_thread(void) static __attribute__((constructor)) void rseq_init(void) { - libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset"); - libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size"); - libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags"); - if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p) { + /* + * If the libc's registered rseq size isn't already valid, it may be + * because the binary is dynamically linked and not necessarily due to + * libc not having registered a restartable sequence. Try to find the + * symbols if that's the case. + */ + if (!*libc_rseq_size_p) { + libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset"); + libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size"); + libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags"); + } + if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p && + *libc_rseq_size_p != 0) { /* rseq registration owned by glibc */ rseq_offset = *libc_rseq_offset_p; rseq_size = *libc_rseq_size_p;
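The weak-symbol scheme in the rseq.c hunk above works because a strong __rseq_offset/__rseq_size/__rseq_flags definition from a statically linked glibc (2.35+) overrides the zero-initialized __weak stubs at link time; for dynamically linked binaries the stubs may stay zero even when libc did register rseq, which is why rseq_init() still falls back to dlsym() when the size reads as 0. A small standalone illustration of the link-time behavior (not the selftest code):

#include <stdio.h>

/*
 * Weak, zero-initialized stand-in: a strong definition of the same
 * symbol elsewhere in a static link replaces it; if nothing provides
 * one, the value stays 0 and can be read as "rseq not registered".
 */
__attribute__((weak)) unsigned int __rseq_size;

int main(void)
{
	if (__rseq_size)
		printf("libc registered rseq, size %u\n", __rseq_size);
	else
		printf("no strong __rseq_size, would fall back to dlsym()\n");
	return 0;
}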
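For reference, the nft_use_inc()/nft_use_dec() helpers that the netfilter hunks earlier in this patch switch to live in include/net/netfilter/nf_tables.h and are not part of this excerpt; they replace bare use++/use-- with checked updates so callers can return -EMFILE instead of wrapping a 32-bit counter, and the *_restore variants re-apply a previously successful operation. A simplified userspace rendering of the idea, not the kernel's exact definitions:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Refuse an increment that would wrap the 32-bit use counter. */
static bool nft_use_inc(uint32_t *use)
{
	if (*use == UINT32_MAX)
		return false;	/* caller reports -EMFILE */
	(*use)++;
	return true;
}

/* An underflow here would indicate unbalanced accounting. */
static void nft_use_dec(uint32_t *use)
{
	assert(*use > 0);
	(*use)--;
}

int main(void)
{
	uint32_t use = UINT32_MAX - 1;

	assert(nft_use_inc(&use));	/* reaches UINT32_MAX */
	assert(!nft_use_inc(&use));	/* refused: would overflow */
	nft_use_dec(&use);
	return 0;
}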
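Two of the networking hunks above (dccps_mss_cache in DCCP and tp_status in af_packet) apply the same pattern: a field written under a lock but read locklessly gets paired WRITE_ONCE()/READ_ONCE() annotations so the compiler cannot tear, fuse, or re-load the access. A standalone sketch of the pattern using C11 relaxed atomics, which give roughly the same guarantee (illustrative; the kernel macros use volatile casts rather than stdatomic):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Field updated by one thread, read locklessly by others. */
static _Atomic uint32_t mss_cache;

static void writer_update(uint32_t new_mss)
{
	/* kernel: WRITE_ONCE(dp->dccps_mss_cache, cur_mps); */
	atomic_store_explicit(&mss_cache, new_mss, memory_order_relaxed);
}

static uint32_t reader_peek(void)
{
	/* kernel: READ_ONCE(dp->dccps_mss_cache); exactly one load */
	return atomic_load_explicit(&mss_cache, memory_order_relaxed);
}

int main(void)
{
	writer_update(1400);
	printf("mss = %u\n", reader_peek());
	return 0;
}

Note that the DCCP hunk still re-checks the value after lock_sock(), since the lockless length check in dccp_sendmsg() can race with a concurrent PMTU update.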