diff --git a/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml b/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml index 229af98b1d3054ae78c8525823188f7386d4789e..7cd88bc3a67d7054e85b8b1bb82dfb97c6f30408 100644 --- a/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml +++ b/Documentation/devicetree/bindings/clock/xlnx,versal-clk.yaml @@ -16,8 +16,6 @@ description: | reads required input clock frequencies from the devicetree and acts as clock provider for all clock consumers of PS clocks. -select: false - properties: compatible: const: xlnx,versal-clk diff --git a/Makefile b/Makefile index e57bc9b434d0d9ee59bf90fe57d879e9b81e9f06..0081cf02e163ad085fb87183d529b1f98cc15470 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 10 -SUBLEVEL = 194 +SUBLEVEL = 196 EXTRAVERSION = NAME = Dare mighty things diff --git a/README.OpenSource b/README.OpenSource index fb065d5bf6f9b5092606c637817077d9fce8e4bb..657f957bdbb06b7f88d2665aadd27378c689b41e 100644 --- a/README.OpenSource +++ b/README.OpenSource @@ -3,7 +3,7 @@ "Name": "linux-5.10", "License": "GPL-2.0+", "License File": "COPYING", - "Version Number": "5.10.194", + "Version Number": "5.10.196", "Owner": "liuyu82@huawei.com", "Upstream URL": "https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/log/?h=linux-5.10.y", "Description": "linux kernel 5.10" diff --git a/arch/arm/boot/dts/bcm4708-linksys-ea6500-v2.dts b/arch/arm/boot/dts/bcm4708-linksys-ea6500-v2.dts index cd797b4202ad83cfe5503a3bc2596092221ad2c3..01c48faabfade70fc319fb61ef9998450e4357a1 100644 --- a/arch/arm/boot/dts/bcm4708-linksys-ea6500-v2.dts +++ b/arch/arm/boot/dts/bcm4708-linksys-ea6500-v2.dts @@ -19,7 +19,8 @@ chosen { memory@0 { device_type = "memory"; - reg = <0x00000000 0x08000000>; + reg = <0x00000000 0x08000000>, + <0x88000000 0x08000000>; }; gpio-keys { diff --git a/arch/arm/boot/dts/exynos4210-i9100.dts b/arch/arm/boot/dts/exynos4210-i9100.dts index ecc9d4dc707e45c5cc58e18b18b597f89f402d0e..d186b93144e38ab9fab37ea9c8a2338eed119915 100644 --- a/arch/arm/boot/dts/exynos4210-i9100.dts +++ b/arch/arm/boot/dts/exynos4210-i9100.dts @@ -170,8 +170,8 @@ lcd@0 { power-on-delay = <10>; reset-delay = <10>; - panel-width-mm = <90>; - panel-height-mm = <154>; + panel-width-mm = <56>; + panel-height-mm = <93>; display-timings { timing { diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h index 63e0b92a5fbb069bc232a93e32e17a39d6df9bd1..5882c0e29331ec900165b2cbfbe7e3a17f03449a 100644 --- a/arch/arm64/include/asm/sdei.h +++ b/arch/arm64/include/asm/sdei.h @@ -17,6 +17,9 @@ #include +DECLARE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event); +DECLARE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event); + extern unsigned long sdei_exit_mode; /* Software Delegated Exception entry point from firmware*/ @@ -29,6 +32,9 @@ asmlinkage void __sdei_asm_entry_trampoline(unsigned long event_num, unsigned long pc, unsigned long pstate); +/* Abort a running handler. Context is discarded. */ +void __sdei_handler_abort(void); + /* * The above entry point does the minimum to call C code. This function does * anything else, before calling the driver. 
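The entry.S, sdei.c and smp.c hunks that follow record which SDEI event each CPU is currently handling, so that the crash/kexec path can ask the firmware to finish ("abort") a still-running handler and regain working interrupts in the crash kernel. The fragment below is only an illustrative C model of that bookkeeping, not the kernel's assembly fast path; the per-CPU arrays, handler_* helpers and firmware_complete_and_resume() are invented stand-ins for this sketch.

#include <stddef.h>
#include <stdio.h>

#define NR_CPUS 4

struct sdei_registered_event { int event_num; };

/* Per-CPU records of the event currently being handled (modelled as arrays). */
static struct sdei_registered_event *active_normal[NR_CPUS];
static struct sdei_registered_event *active_critical[NR_CPUS];

/* Hypothetical stand-in for the COMPLETE_AND_RESUME call issued to firmware. */
static void firmware_complete_and_resume(void)
{
	printf("handler completed with firmware\n");
}

/* Handler entry/exit stubs: remember and forget the in-progress event. */
static void handler_enter(int cpu, struct sdei_registered_event *ev, int critical)
{
	(critical ? active_critical : active_normal)[cpu] = ev;
}

static void handler_exit(int cpu, int critical)
{
	(critical ? active_critical : active_normal)[cpu] = NULL;
}

/*
 * Crash path: if this CPU is still inside an SDEI handler, finish it with
 * the firmware so the crash kernel can take interrupts again.  Critical
 * events are checked first, mirroring the patch, since a critical event
 * can interrupt a normal one.
 */
static void handler_abort(int cpu)
{
	if (active_critical[cpu]) {
		firmware_complete_and_resume();
		active_critical[cpu] = NULL;
	}
	if (active_normal[cpu]) {
		firmware_complete_and_resume();
		active_normal[cpu] = NULL;
	}
}

int main(void)
{
	struct sdei_registered_event ev = { .event_num = 0 };

	handler_enter(0, &ev, 0);	/* CPU 0 takes a normal-priority event */
	handler_abort(0);		/* crash path cleans it up */
	handler_exit(0, 0);		/* already cleared by the abort */
	return 0;
}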
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 55e477f73158d6f8647c0eecc278fe8ed03d3b32..a94acea770c7c5e6acf21a2b265584980a803bff 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -1137,9 +1137,13 @@ SYM_CODE_START(__sdei_asm_handler) mov x19, x1 -#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK) + /* Store the registered-event for crash_smp_send_stop() */ ldrb w4, [x19, #SDEI_EVENT_PRIORITY] -#endif + cbnz w4, 1f + adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6 + b 2f +1: adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6 +2: str x19, [x5] #ifdef CONFIG_VMAP_STACK /* @@ -1204,6 +1208,14 @@ SYM_CODE_START(__sdei_asm_handler) ldr_l x2, sdei_exit_mode + /* Clear the registered-event seen by crash_smp_send_stop() */ + ldrb w3, [x4, #SDEI_EVENT_PRIORITY] + cbnz w3, 1f + adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6 + b 2f +1: adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6 +2: str xzr, [x5] + alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0 sdei_handler_exit exit_mode=x2 alternative_else_nop_endif @@ -1214,4 +1226,15 @@ alternative_else_nop_endif #endif SYM_CODE_END(__sdei_asm_handler) NOKPROBE(__sdei_asm_handler) + +SYM_CODE_START(__sdei_handler_abort) + mov_q x0, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME + adr x1, 1f + ldr_l x2, sdei_exit_mode + sdei_handler_exit exit_mode=x2 + // exit the handler and jump to the next instruction. + // Exit will stomp x0-x17, PSTATE, ELR_ELx, and SPSR_ELx. +1: ret +SYM_CODE_END(__sdei_handler_abort) +NOKPROBE(__sdei_handler_abort) #endif /* CONFIG_ARM_SDE_INTERFACE */ diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index 793c46d6a44791c11eee1600956ec3f35c54050f..0083f5afa51db67b853aedae3dab723d25abece8 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -38,6 +38,9 @@ DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr); DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr); #endif +DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event); +DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event); + static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu) { unsigned long *p; diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index feee5a3cd128834988a5f504acfbdc27f62b2803..ae0977b632a18ed36594ce8d8fc4f66698667bc0 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -1072,10 +1072,8 @@ void crash_smp_send_stop(void) * If this cpu is the only one alive at this point in time, online or * not, there are no stop messages to be sent around, so just back out. 
*/ - if (num_other_online_cpus() == 0) { - sdei_mask_local_cpu(); - return; - } + if (num_other_online_cpus() == 0) + goto skip_ipi; cpumask_copy(&mask, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &mask); @@ -1094,7 +1092,9 @@ void crash_smp_send_stop(void) pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", cpumask_pr_args(&mask)); +skip_ipi: sdei_mask_local_cpu(); + sdei_handler_abort(); } bool smp_crash_stop_failed(void) diff --git a/arch/parisc/include/asm/led.h b/arch/parisc/include/asm/led.h index 6de13d08a3886b4dbef50cb5a9e5367715fc816a..b70b9094fb7cd2a314dabef5597a8d31fdf939b2 100644 --- a/arch/parisc/include/asm/led.h +++ b/arch/parisc/include/asm/led.h @@ -11,8 +11,8 @@ #define LED1 0x02 #define LED0 0x01 /* bottom (or furthest left) LED */ -#define LED_LAN_TX LED0 /* for LAN transmit activity */ -#define LED_LAN_RCV LED1 /* for LAN receive activity */ +#define LED_LAN_RCV LED0 /* for LAN receive activity */ +#define LED_LAN_TX LED1 /* for LAN transmit activity */ #define LED_DISK_IO LED2 /* for disk activity */ #define LED_HEARTBEAT LED3 /* heartbeat */ diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 6e2a8176b0dd087234013231fbb866ce4a163556..40135be979652a8779d38041070f9575b06a2625 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h @@ -97,7 +97,6 @@ struct cpuinfo_parisc { unsigned long cpu_loc; /* CPU location from PAT firmware */ unsigned int state; struct parisc_device *dev; - unsigned long loops_per_jiffy; }; extern struct system_cpuinfo_parisc boot_cpu_data; diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index 176ef00bdd15e36d56dff44fe35296e8246d9e76..ccdbcfdfe4e21b3499b0d08478b6f1f8f620c224 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c @@ -163,7 +163,6 @@ static int __init processor_probe(struct parisc_device *dev) if (cpuid) memset(p, 0, sizeof(struct cpuinfo_parisc)); - p->loops_per_jiffy = loops_per_jiffy; p->dev = dev; /* Save IODC data in case we need it */ p->hpa = dev->hpa.start; /* save CPU hpa */ p->cpuid = cpuid; /* save CPU id */ @@ -373,10 +372,18 @@ int show_cpuinfo (struct seq_file *m, void *v) { unsigned long cpu; + char cpu_name[60], *p; + + /* strip PA path from CPU name to not confuse lscpu */ + strlcpy(cpu_name, per_cpu(cpu_data, 0).dev->name, sizeof(cpu_name)); + p = strrchr(cpu_name, '['); + if (p) + *(--p) = 0; for_each_online_cpu(cpu) { - const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); #ifdef CONFIG_SMP + const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); + if (0 == cpuinfo->hpa) continue; #endif @@ -421,8 +428,7 @@ show_cpuinfo (struct seq_file *m, void *v) seq_printf(m, "model\t\t: %s - %s\n", boot_cpu_data.pdc.sys_model_name, - cpuinfo->dev ? 
- cpuinfo->dev->name : "Unknown"); + cpu_name); seq_printf(m, "hversion\t: 0x%08x\n" "sversion\t: 0x%08x\n", @@ -433,8 +439,8 @@ show_cpuinfo (struct seq_file *m, void *v) show_cache_info(m); seq_printf(m, "bogomips\t: %lu.%02lu\n", - cpuinfo->loops_per_jiffy / (500000 / HZ), - (cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100); + loops_per_jiffy / (500000 / HZ), + loops_per_jiffy / (5000 / HZ) % 100); seq_printf(m, "software id\t: %ld\n\n", boot_cpu_data.pdc.model.sw_id); diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 6da06905ddce524d963ba6c31507591d9c0cd4b4..c469e8848d659d1d7be24d51950ebefdfed3ca8f 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -501,6 +501,8 @@ static struct attribute_group ipl_ccw_attr_group_lpar = { static struct attribute *ipl_unknown_attrs[] = { &sys_ipl_type_attr.attr, + &sys_ipl_secure_attr.attr, + &sys_ipl_has_secure_attr.attr, NULL, }; diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c index bac8a058ebd7cd4cad1065700ed6a3fbb52862fc..05bd42dde107bb10e2a264b7a02840327529483e 100644 --- a/arch/sh/boards/mach-ap325rxa/setup.c +++ b/arch/sh/boards/mach-ap325rxa/setup.c @@ -530,7 +530,7 @@ static int __init ap325rxa_devices_setup(void) device_initialize(&ap325rxa_ceu_device.dev); dma_declare_coherent_memory(&ap325rxa_ceu_device.dev, ceu_dma_membase, ceu_dma_membase, - ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1); + CEU_BUFFER_MEMORY_SIZE); platform_device_add(&ap325rxa_ceu_device); diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index bab91a99124e1fb47b65c7695749f56436f45c20..9730a992dab3378ce141a5842c4f08ee549c5a3b 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c @@ -1454,15 +1454,13 @@ static int __init arch_setup(void) device_initialize(&ecovec_ceu_devices[0]->dev); dma_declare_coherent_memory(&ecovec_ceu_devices[0]->dev, ceu0_dma_membase, ceu0_dma_membase, - ceu0_dma_membase + - CEU_BUFFER_MEMORY_SIZE - 1); + CEU_BUFFER_MEMORY_SIZE); platform_device_add(ecovec_ceu_devices[0]); device_initialize(&ecovec_ceu_devices[1]->dev); dma_declare_coherent_memory(&ecovec_ceu_devices[1]->dev, ceu1_dma_membase, ceu1_dma_membase, - ceu1_dma_membase + - CEU_BUFFER_MEMORY_SIZE - 1); + CEU_BUFFER_MEMORY_SIZE); platform_device_add(ecovec_ceu_devices[1]); gpiod_add_lookup_table(&cn12_power_gpiod_table); diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c index eeb5ce341efdd705cb22cc6bdbec6956ac9364a8..4a1caa3e7cf5afd40a333143fbfcad463f00eb92 100644 --- a/arch/sh/boards/mach-kfr2r09/setup.c +++ b/arch/sh/boards/mach-kfr2r09/setup.c @@ -603,7 +603,7 @@ static int __init kfr2r09_devices_setup(void) device_initialize(&kfr2r09_ceu_device.dev); dma_declare_coherent_memory(&kfr2r09_ceu_device.dev, ceu_dma_membase, ceu_dma_membase, - ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1); + CEU_BUFFER_MEMORY_SIZE); platform_device_add(&kfr2r09_ceu_device); diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c index 6703a2122c0d6bc3fabee59daa1347a57870ab9a..bd4ccd9f8dd0691098c80613e5ceb4caa38a33ac 100644 --- a/arch/sh/boards/mach-migor/setup.c +++ b/arch/sh/boards/mach-migor/setup.c @@ -604,7 +604,7 @@ static int __init migor_devices_setup(void) device_initialize(&migor_ceu_device.dev); dma_declare_coherent_memory(&migor_ceu_device.dev, ceu_dma_membase, ceu_dma_membase, - ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1); + CEU_BUFFER_MEMORY_SIZE); 
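The mach-ap325rxa, ecovec24, kfr2r09 and migor hunks (and the se/7724 one just below) all correct the same call: the final argument of dma_declare_coherent_memory() is the length of the region in bytes, not the address of its last byte, so passing ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1 handed the function an address-sized value where a size was expected. A minimal sketch of the corrected calling convention, assuming the v5.10 prototype from <linux/dma-map-ops.h>; the ceu_declare_buffer() wrapper is invented for illustration.

#include <linux/device.h>
#include <linux/dma-map-ops.h>	/* dma_declare_coherent_memory() in v5.10 */

/* Illustrative helper: declare a CEU buffer region for one platform device. */
static int ceu_declare_buffer(struct device *dev, phys_addr_t base, size_t size)
{
	/*
	 * The old call shape passed an end address as the length:
	 *   dma_declare_coherent_memory(dev, base, base, base + size - 1);
	 * The prototype takes (dev, phys_addr, device_addr, size), so the
	 * region length itself belongs in the last slot.
	 */
	return dma_declare_coherent_memory(dev, base, base, size);
}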
platform_device_add(&migor_ceu_device); diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index 8d6541ba01865b3e0c42de965c90910c97b0f590..edc7712e4a8043fbe5543faf691942862da58654 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c @@ -940,15 +940,13 @@ static int __init devices_setup(void) device_initialize(&ms7724se_ceu_devices[0]->dev); dma_declare_coherent_memory(&ms7724se_ceu_devices[0]->dev, ceu0_dma_membase, ceu0_dma_membase, - ceu0_dma_membase + - CEU_BUFFER_MEMORY_SIZE - 1); + CEU_BUFFER_MEMORY_SIZE); platform_device_add(ms7724se_ceu_devices[0]); device_initialize(&ms7724se_ceu_devices[1]->dev); dma_declare_coherent_memory(&ms7724se_ceu_devices[1]->dev, ceu1_dma_membase, ceu1_dma_membase, - ceu1_dma_membase + - CEU_BUFFER_MEMORY_SIZE - 1); + CEU_BUFFER_MEMORY_SIZE); platform_device_add(ms7724se_ceu_devices[1]); return platform_add_devices(ms7724se_devices, diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index 8eefa3386d8cefd655e4b5c3f8e98e446d2a356e..331474296e6f1c83d4e4d8aca7c15bfa1cc98bbd 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -95,12 +95,6 @@ static inline int cpu_has_svm(const char **msg) return 0; } - if (boot_cpu_data.extended_cpuid_level < SVM_CPUID_FUNC) { - if (msg) - *msg = "can't execute cpuid_8000000a"; - return 0; - } - if (!boot_cpu_has(X86_FEATURE_SVM)) { if (msg) *msg = "svm not available"; diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 7fd56df8b91944cd586e361eca840dac4110c1f9..b8135c38f584f6ddb0f51ffb1a22a235bdb84032 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -128,6 +128,15 @@ int x509_check_for_self_signed(struct x509_certificate *cert) goto out; } + ret = -EKEYREJECTED; + if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0) + goto out; + + if (cert->unsupported_sig) { + ret = 0; + goto out; + } + ret = public_key_verify_signature(cert->pub, cert->sig); if (ret < 0) { if (ret == -ENOPKG) { diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 9bdb5bd5fda63cb7295dcf9462cec57d7922ec4b..8678e162181f4decfb49be9bd9ec49978a2910f3 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -1457,33 +1457,35 @@ static struct platform_driver ghes_platform_driver = { .remove = ghes_remove, }; -static int __init ghes_init(void) +void __init ghes_init(void) { int rc; + sdei_init(); + if (acpi_disabled) - return -ENODEV; + return; switch (hest_disable) { case HEST_NOT_FOUND: - return -ENODEV; + return; case HEST_DISABLED: pr_info(GHES_PFX "HEST is not enabled!\n"); - return -EINVAL; + return; default: break; } if (ghes_disable) { pr_info(GHES_PFX "GHES is not enabled!\n"); - return -EINVAL; + return; } ghes_nmi_init_cxt(); rc = platform_driver_register(&ghes_platform_driver); if (rc) - goto err; + return; rc = apei_osc_setup(); if (rc == 0 && osc_sb_apei_support_acked) @@ -1494,9 +1496,4 @@ static int __init ghes_init(void) pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n"); else pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n"); - - return 0; -err: - return rc; } -device_initcall(ghes_init); diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 5e14288fcabe904e493e7b68af3968cccddc5f80..60dfe63301d008aba4b2644928b2a39240ec71f4 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -1252,6 +1252,8 @@ static int __init acpi_init(void) 
pci_mmcfg_late_init(); acpi_iort_init(); + acpi_hest_init(); + ghes_init(); acpi_scan_init(); acpi_ec_init(); acpi_debugfs_init(); diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index c12b5fb3e8fbaae9c8343730c450d6ba9ebd571a..d972ea057a0351a101b50e1fa824f2a39b4df932 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -20,8 +20,6 @@ #include #include #include -#include /* for acpi_hest_init() */ - #include "internal.h" #define ACPI_PCI_ROOT_CLASS "pci_bridge" @@ -950,7 +948,6 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root, void __init acpi_pci_root_init(void) { - acpi_hest_init(); if (acpi_pci_disabled) return; diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c index 34cb104f6b43e5cf237f0c93a4ae9ea87e7bb3c9..bc30e2f305beb93131239bbf7a99f80519c10602 100644 --- a/drivers/ata/pata_ftide010.c +++ b/drivers/ata/pata_ftide010.c @@ -570,6 +570,7 @@ static struct platform_driver pata_ftide010_driver = { }; module_platform_driver(pata_ftide010_driver); +MODULE_DESCRIPTION("low level driver for Faraday Technology FTIDE010"); MODULE_AUTHOR("Linus Walleij "); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c index f793564f3d7876b663709385ae90bd4e93e11b79..6fd54e968d10ad0667a4e20c3d3c987444c88524 100644 --- a/drivers/ata/sata_gemini.c +++ b/drivers/ata/sata_gemini.c @@ -435,6 +435,7 @@ static struct platform_driver gemini_sata_driver = { }; module_platform_driver(gemini_sata_driver); +MODULE_DESCRIPTION("low level driver for Cortina Systems Gemini SATA bridge"); MODULE_AUTHOR("Linus Walleij "); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c index 7d69b740b9f93efb80a9261ba86a03c974ff227e..fe8ecd6eaa4d17eb43732e32ae0a2f148700c788 100644 --- a/drivers/bus/mhi/host/pm.c +++ b/drivers/bus/mhi/host/pm.c @@ -490,6 +490,10 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, u32 in_reset = -1; unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms); + /* Skip MHI RESET if in RDDM state */ + if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM) + goto skip_mhi_reset; + dev_dbg(dev, "Triggering MHI Reset in device\n"); mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); @@ -515,6 +519,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); } +skip_mhi_reset: dev_dbg(dev, "Waiting for all pending event ring processing to complete\n"); mhi_event = mhi_cntrl->mhi_event; diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c index aba36e4217d2d846ae04ec6b7664cd7caee1d44d..e46311c2e63ea872b7fa897ca5656d4a4195e03e 100644 --- a/drivers/clk/imx/clk-pll14xx.c +++ b/drivers/clk/imx/clk-pll14xx.c @@ -60,8 +60,6 @@ static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = { PLL_1443X_RATE(650000000U, 325, 3, 2, 0), PLL_1443X_RATE(594000000U, 198, 2, 2, 0), PLL_1443X_RATE(519750000U, 173, 2, 2, 16384), - PLL_1443X_RATE(393216000U, 262, 2, 3, 9437), - PLL_1443X_RATE(361267200U, 361, 3, 3, 17511), }; struct imx_pll14xx_clk imx_1443x_pll = { diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c index 8bed02a748aba8f1cb49e0583a49bc88b55e432a..470a277603a92db374bebb82725fac0d7ca2b11f 100644 --- a/drivers/clk/qcom/gcc-mdm9615.c +++ b/drivers/clk/qcom/gcc-mdm9615.c @@ -58,7 +58,7 @@ static struct clk_regmap pll0_vote = { .enable_mask = 
BIT(0), .hw.init = &(struct clk_init_data){ .name = "pll0_vote", - .parent_names = (const char *[]){ "pll8" }, + .parent_names = (const char *[]){ "pll0" }, .num_parents = 1, .ops = &clk_pll_vote_ops, }, diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c index 4153150e20db52e18b01f2e12f75760064ddd8ad..f644c5e325fb2def542ce0e9000ec2ba1c040130 100644 --- a/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -434,7 +434,11 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv) if (ret) return ERR_PTR(ret); - table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1, sizeof(*table), + /* + * We allocate space for the 5 different P-STATES AVS, + * plus extra space for a terminating element. + */ + table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1 + 1, sizeof(*table), GFP_KERNEL); if (!table) return ERR_PTR(-ENOMEM); diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 2f2a426a6cd5c33c93b39c5a65685f719d6fd3f8..37fde13b80dde7499ea8fdcd9e252da07ef129f8 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -564,9 +564,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) } for_each_sg(rctx->sg, tsg, rctx->nents, i) { + sg[0] = *tsg; len = sg->length; - sg[0] = *tsg; if (sg_is_last(sg)) { if (hdev->dma_mode == 1) { len = (ALIGN(sg->length, 16) - 16); diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index c08968c5ddf8c458c35fc1fc5c5c84412aad4bdb..807c5320dc0fff01267fac488fca15992d0d85f7 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -72,6 +72,7 @@ config ARM_SCPI_POWER_DOMAIN config ARM_SDE_INTERFACE bool "ARM Software Delegated Exception Interface (SDEI)" depends on ARM64 + depends on ACPI_APEI_GHES help The Software Delegated Exception Interface (SDEI) is an ARM standard for registering callbacks from the platform firmware diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 5a877d76078f7271997281050bab8d39a9e5d4de..68e55ca7491e5250577e43342adba5c9ad653fa1 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -1063,14 +1063,14 @@ static bool __init sdei_present_acpi(void) return true; } -static int __init sdei_init(void) +void __init sdei_init(void) { struct platform_device *pdev; int ret; ret = platform_driver_register(&sdei_driver); if (ret || !sdei_present_acpi()) - return ret; + return; pdev = platform_device_register_simple(sdei_driver.driver.name, 0, NULL, 0); @@ -1080,17 +1080,8 @@ static int __init sdei_init(void) pr_info("Failed to register ACPI:SDEI platform device %d\n", ret); } - - return ret; } -/* - * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register - * its events. ACPI is initialised from a subsys_initcall(), GHES is initialised - * by device_initcall(). We want to be called in the middle. - */ -subsys_initcall_sync(sdei_init); - int sdei_event_handler(struct pt_regs *regs, struct sdei_registered_event *arg) { @@ -1118,3 +1109,22 @@ int sdei_event_handler(struct pt_regs *regs, return err; } NOKPROBE_SYMBOL(sdei_event_handler); + +void sdei_handler_abort(void) +{ + /* + * If the crash happened in an SDEI event handler then we need to + * finish the handler with the firmware so that we can have working + * interrupts in the crash kernel. 
+ */ + if (__this_cpu_read(sdei_active_critical_event)) { + pr_warn("still in SDEI critical event context, attempting to finish handler.\n"); + __sdei_handler_abort(); + __this_cpu_write(sdei_active_critical_event, NULL); + } + if (__this_cpu_read(sdei_active_normal_event)) { + pr_warn("still in SDEI normal event context, attempting to finish handler.\n"); + __sdei_handler_abort(); + __this_cpu_write(sdei_active_normal_event, NULL); + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c index 855682590c1bb2f872451bbce2376f603024097a..fd08177de595ca2835a5ffbcb72da55378601535 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c @@ -206,8 +206,9 @@ struct mpcc *mpc1_insert_plane( /* check insert_above_mpcc exist in tree->opp_list */ struct mpcc *temp_mpcc = tree->opp_list; - while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc) - temp_mpcc = temp_mpcc->mpcc_bot; + if (temp_mpcc != insert_above_mpcc) + while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc) + temp_mpcc = temp_mpcc->mpcc_bot; if (temp_mpcc == NULL) return NULL; } diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index d988533d4af5f4a43936ed1bbfacdd8a6643f85c..627d578175cf958ec9eb530aede76fa4f5e43a11 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -327,7 +327,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync, * - Delta for CEIL: delta_from_mid_point_in_us_1 * - Delta for FLOOR: delta_from_mid_point_in_us_2 */ - if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) { + if (mid_point_frames_ceil && + (last_render_time_in_us / mid_point_frames_ceil) < + in_out_vrr->min_duration_in_us) { /* Check for out of range. * If using CEIL produces a value that is out of range, * then we are forced to use FLOOR. @@ -374,8 +376,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync, /* Either we've calculated the number of frames to insert, * or we need to insert min duration frames */ - if (last_render_time_in_us / frames_to_insert < - in_out_vrr->min_duration_in_us){ + if (frames_to_insert && + (last_render_time_in_us / frames_to_insert) < + in_out_vrr->min_duration_in_us){ frames_to_insert -= (frames_to_insert > 1) ? 
1 : 0; } diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 8902c2f84bf99ab0def4b95870cfcc1816d66b22..1bc05bf0f2320c0060af593ef0dc683dcdc59ba3 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -290,7 +290,7 @@ static void ast_init_dram_reg(struct drm_device *dev) ; } while (ast_read32(ast, 0x10100) != 0xa8); } else {/* AST2100/1100 */ - if (ast->chip == AST2100 || ast->chip == 2200) + if (ast->chip == AST2100 || ast->chip == AST2200) dram_reg_info = ast2100_dram_table_data; else dram_reg_info = ast1100_dram_table_data; diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 0201f9b5f87e77d15df2d709686fdad2c11fa644..0d31a0db305d5ae2a19992d2c0766f6e8af8f66f 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -636,9 +636,18 @@ static void ggtt_set_host_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; + unsigned long offset = index; GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); + if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) { + offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT); + mm->ggtt_mm.host_ggtt_aperture[offset] = entry->val64; + } else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) { + offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT); + mm->ggtt_mm.host_ggtt_hidden[offset] = entry->val64; + } + pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu); } @@ -1953,6 +1962,21 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu) return ERR_PTR(-ENOMEM); } + mm->ggtt_mm.host_ggtt_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64)); + if (!mm->ggtt_mm.host_ggtt_aperture) { + vfree(mm->ggtt_mm.virtual_ggtt); + vgpu_free_mm(mm); + return ERR_PTR(-ENOMEM); + } + + mm->ggtt_mm.host_ggtt_hidden = vzalloc((vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64)); + if (!mm->ggtt_mm.host_ggtt_hidden) { + vfree(mm->ggtt_mm.host_ggtt_aperture); + vfree(mm->ggtt_mm.virtual_ggtt); + vgpu_free_mm(mm); + return ERR_PTR(-ENOMEM); + } + return mm; } @@ -1980,6 +2004,8 @@ void _intel_vgpu_mm_release(struct kref *mm_ref) invalidate_ppgtt_mm(mm); } else { vfree(mm->ggtt_mm.virtual_ggtt); + vfree(mm->ggtt_mm.host_ggtt_aperture); + vfree(mm->ggtt_mm.host_ggtt_hidden); } vgpu_free_mm(mm); @@ -2845,19 +2871,39 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old) } /** - * intel_vgpu_reset_gtt - reset the all GTT related status - * @vgpu: a vGPU + * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries + * @gvt: intel gvt device * - * This function is called from vfio core to reset reset all - * GTT related status, including GGTT, PPGTT, scratch page. + * This function is called at driver resume stage to restore + * GGTT entries of every vGPU. * */ -void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu) +void intel_gvt_restore_ggtt(struct intel_gvt *gvt) { - /* Shadow pages are only created when there is no page - * table tracking data, so remove page tracking data after - * removing the shadow pages. 
- */ - intel_vgpu_destroy_all_ppgtt_mm(vgpu); - intel_vgpu_reset_ggtt(vgpu, true); + struct intel_vgpu *vgpu; + struct intel_vgpu_mm *mm; + int id; + gen8_pte_t pte; + u32 idx, num_low, num_hi, offset; + + /* Restore dirty host ggtt for all vGPUs */ + idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { + mm = vgpu->gtt.ggtt_mm; + + num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; + offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; + for (idx = 0; idx < num_low; idx++) { + pte = mm->ggtt_mm.host_ggtt_aperture[idx]; + if (pte & _PAGE_PRESENT) + write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte); + } + + num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; + offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; + for (idx = 0; idx < num_hi; idx++) { + pte = mm->ggtt_mm.host_ggtt_hidden[idx]; + if (pte & _PAGE_PRESENT) + write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte); + } + } } diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 52d0d88abd86a02f06cc8098e6fa832297715c9a..89ffb52cafa04527d17cca0639b3bf4da02f8e76 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -164,6 +164,9 @@ struct intel_vgpu_mm { } ppgtt_mm; struct { void *virtual_ggtt; + /* Save/restore for PM */ + u64 *host_ggtt_aperture; + u64 *host_ggtt_hidden; struct list_head partial_pte_list; } ggtt_mm; }; @@ -212,7 +215,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old); void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu); int intel_gvt_init_gtt(struct intel_gvt *gvt); -void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu); void intel_gvt_clean_gtt(struct intel_gvt *gvt); struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, @@ -280,5 +282,6 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes); void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu); +void intel_gvt_restore_ggtt(struct intel_gvt *gvt); #endif /* _GVT_GTT_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 5c9ef8e58a08796a415bc3ac454f268a9b5a37a1..87f22a88925ce2521b56ceb2e0d245973418d15a 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -405,6 +405,15 @@ int intel_gvt_init_device(struct drm_i915_private *i915) return ret; } +int +intel_gvt_pm_resume(struct intel_gvt *gvt) +{ + intel_gvt_restore_fence(gvt); + intel_gvt_restore_mmio(gvt); + intel_gvt_restore_ggtt(gvt); + return 0; +} + int intel_gvt_register_hypervisor(struct intel_gvt_mpt *m) { diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index a81cf0f01e78eec1f510b434b49875f8720f4b49..b3d6355dd797dd2d35f5546dfadba146133c28d4 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -255,6 +255,8 @@ struct intel_gvt_mmio { #define F_CMD_ACCESS (1 << 3) /* This reg has been accessed by a VM */ #define F_ACCESSED (1 << 4) +/* This reg requires save & restore during host PM suspend/resume */ +#define F_PM_SAVE (1 << 5) /* This reg could be accessed by unaligned address */ #define F_UNALIGN (1 << 6) /* This reg is in GVT's mmio save-restor list and in hardware @@ -685,6 +687,7 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); void intel_gvt_debugfs_init(struct intel_gvt *gvt); void intel_gvt_debugfs_clean(struct intel_gvt *gvt); +int intel_gvt_pm_resume(struct intel_gvt *gvt); #include "trace.h" #include "mpt.h" diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 
606e6c315fe244d3b35bb4dbd10989a5b86cf28b..55ce7aaabf893ce888b24d1cdd23f403457c167b 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -3135,9 +3135,10 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS, - NULL, gen9_trtte_write); - MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write); + MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE, + NULL, gen9_trtte_write); + MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE, + NULL, gen9_trtt_chicken_write); MMIO_D(_MMIO(0x46430), D_SKL_PLUS); @@ -3686,3 +3687,40 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) : intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes); } + +void intel_gvt_restore_fence(struct intel_gvt *gvt) +{ + struct intel_vgpu *vgpu; + int i, id; + + idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { + mmio_hw_access_pre(gvt->gt); + for (i = 0; i < vgpu_fence_sz(vgpu); i++) + intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i))); + mmio_hw_access_post(gvt->gt); + } +} + +static inline int mmio_pm_restore_handler(struct intel_gvt *gvt, + u32 offset, void *data) +{ + struct intel_vgpu *vgpu = data; + struct drm_i915_private *dev_priv = gvt->gt->i915; + + if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE) + I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); + + return 0; +} + +void intel_gvt_restore_mmio(struct intel_gvt *gvt) +{ + struct intel_vgpu *vgpu; + int id; + + idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { + mmio_hw_access_pre(gvt->gt); + intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu); + mmio_hw_access_post(gvt->gt); + } +} diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index cc4812648bf4a5ea7a71b79af4e84141fc82f0b1..9e862dc73579bb0ab848df67abbc8b3396a37309 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -104,4 +104,8 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes); + +void intel_gvt_restore_fence(struct intel_gvt *gvt); +void intel_gvt_restore_mmio(struct intel_gvt *gvt); + #endif diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 99fe8aef1c67f09d9a40beddf077418d4c9b2d5e..4e70c1a9ef2ed969a37d8a4eeb402a3089a185a7 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -24,6 +24,7 @@ #include "i915_drv.h" #include "i915_vgpu.h" #include "intel_gvt.h" +#include "gvt/gvt.h" /** * DOC: Intel GVT-g host support @@ -147,3 +148,17 @@ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv) intel_gvt_clean_device(dev_priv); } + +/** + * intel_gvt_resume - GVT resume routine wapper + * + * @dev_priv: drm i915 private * + * + * This function is called at the i915 driver resume stage to restore required + * HW status for GVT so that vGPU can continue running after resumed. 
+ */ +void intel_gvt_resume(struct drm_i915_private *dev_priv) +{ + if (intel_gvt_active(dev_priv)) + intel_gvt_pm_resume(dev_priv->gvt); +} diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h index 502fad8a8652c81c6721ea6f37512e421987c276..d7d3fb6186fdd141c93d46142db66e5552fdc0bb 100644 --- a/drivers/gpu/drm/i915/intel_gvt.h +++ b/drivers/gpu/drm/i915/intel_gvt.h @@ -33,6 +33,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv); void intel_gvt_clean_device(struct drm_i915_private *dev_priv); int intel_gvt_init_host(void); void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv); +void intel_gvt_resume(struct drm_i915_private *dev_priv); #else static inline int intel_gvt_init(struct drm_i915_private *dev_priv) { @@ -46,6 +47,10 @@ static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv) static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) { } + +static inline void intel_gvt_resume(struct drm_i915_private *dev_priv) +{ +} #endif #endif /* _INTEL_GVT_H_ */ diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index d18ca119929e1f054b8e37e872a941e19b56d775..b28302836b2e9eb438090ef991a17c171c5d928d 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -2512,8 +2512,6 @@ backlog_store(struct mddev *mddev, const char *buf, size_t len) mddev_destroy_serial_pool(mddev, NULL, false); } else if (backlog && !mddev->serial_info_pool) { /* serial_info_pool is needed since backlog is not zero */ - struct md_rdev *rdev; - rdev_for_each(rdev, mddev) mddev_create_serial_pool(mddev, rdev, false); } diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 64c8c177d008286c5455b23cecc0c7b8d11260cd..e170c545fec50205ee6dcac29d539ef4adc30cac 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -1040,6 +1040,14 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl, cpu_relax(); } while (time_after(limit, jiffies)); + /* + * do a final check after time out in case the CPU was busy and the driver + * did not get enough time to perform the polling to avoid false alarms + */ + val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); + if ((val & mask) == expected_val) + return 0; + dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n", expected_val, val & mask); @@ -1429,19 +1437,33 @@ static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i, const u8 *oob, int sas, int sector_1k) { int tbytes = sas << sector_1k; - int j; + int j, k = 0; + u32 last = 0xffffffff; + u8 *plast = (u8 *)&last; /* Adjust OOB values for 1K sector size */ if (sector_1k && (i & 0x01)) tbytes = max(0, tbytes - (int)ctrl->max_oob); tbytes = min_t(int, tbytes, ctrl->max_oob); - for (j = 0; j < tbytes; j += 4) + /* + * tbytes may not be multiple of words. Make sure we don't read out of + * the boundary and stop at last word. 
+ */ + for (j = 0; (j + 3) < tbytes; j += 4) oob_reg_write(ctrl, j, (oob[j + 0] << 24) | (oob[j + 1] << 16) | (oob[j + 2] << 8) | (oob[j + 3] << 0)); + + /* handle the remaing bytes */ + while (j < tbytes) + plast[k++] = oob[j++]; + + if (tbytes & 0x3) + oob_reg_write(ctrl, (tbytes & ~0x3), (__force u32)cpu_to_be32(last)); + return tbytes; } @@ -1543,7 +1565,17 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd) dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr); - BUG_ON(ctrl->cmd_pending != 0); + /* + * If we came here through _panic_write and there is a pending + * command, try to wait for it. If it times out, rather than + * hitting BUG_ON, just return so we don't crash while crashing. + */ + if (oops_in_progress) { + if (ctrl->cmd_pending && + bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0)) + return; + } else + BUG_ON(ctrl->cmd_pending != 0); ctrl->cmd_pending = cmd; ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0); diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index c03d76c1086865ed296140201df6d18fff80c3ee..4362fe0f346d2ca202e18a7ece77a558294628a1 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -1691,6 +1691,18 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port, #define BYTES_PER_KBIT (1000LL / 8) +static int sja1105_find_cbs_shaper(struct sja1105_private *priv, + int port, int prio) +{ + int i; + + for (i = 0; i < priv->info->num_cbs_shapers; i++) + if (priv->cbs[i].port == port && priv->cbs[i].prio == prio) + return i; + + return -1; +} + static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv) { int i; @@ -1725,14 +1737,20 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port, { struct sja1105_private *priv = ds->priv; struct sja1105_cbs_entry *cbs; + s64 port_transmit_rate_kbps; int index; if (!offload->enable) return sja1105_delete_cbs_shaper(priv, port, offload->queue); - index = sja1105_find_unused_cbs_shaper(priv); - if (index < 0) - return -ENOSPC; + /* The user may be replacing an existing shaper */ + index = sja1105_find_cbs_shaper(priv, port, offload->queue); + if (index < 0) { + /* That isn't the case - see if we can allocate a new one */ + index = sja1105_find_unused_cbs_shaper(priv); + if (index < 0) + return -ENOSPC; + } cbs = &priv->cbs[index]; cbs->port = port; @@ -1742,9 +1760,17 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port, */ cbs->credit_hi = offload->hicredit; cbs->credit_lo = abs(offload->locredit); - /* User space is in kbits/sec, hardware in bytes/sec */ - cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT; - cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT); + /* User space is in kbits/sec, while the hardware in bytes/sec times + * link speed. Since the given offload->sendslope is good only for the + * current link speed anyway, and user space is likely to reprogram it + * when that changes, don't even bother to track the port's link speed, + * but deduce the port transmit rate from idleslope - sendslope. 
+ */ + port_transmit_rate_kbps = offload->idleslope - offload->sendslope; + cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT, + port_transmit_rate_kbps); + cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT), + port_transmit_rate_kbps); /* Convert the negative values from 64-bit 2's complement * to 32-bit 2's complement (for the case of 0x80000000 whose * negative is still negative). diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index cd0d7a546957a105874d36cdc27f8e766b0be767..d35f4b2b480e6af139beb6e786531b6a2109163e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -704,7 +704,9 @@ static int hns3_get_link_ksettings(struct net_device *netdev, hns3_get_ksettings(h, cmd); break; case HNAE3_MEDIA_TYPE_FIBER: - if (module_type == HNAE3_MODULE_TYPE_CR) + if (module_type == HNAE3_MODULE_TYPE_UNKNOWN) + cmd->base.port = PORT_OTHER; + else if (module_type == HNAE3_MODULE_TYPE_CR) cmd->base.port = PORT_DA; else cmd->base.port = PORT_FIBRE; diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index e6d2800a8abc5767c970d95ca7df6c2bb90f6160..da0e3897e6831d4cea3fffdd25e5ef468a2a8259 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -34,11 +34,11 @@ struct igb_adapter; /* TX/RX descriptor defines */ #define IGB_DEFAULT_TXD 256 #define IGB_DEFAULT_TX_WORK 128 -#define IGB_MIN_TXD 80 +#define IGB_MIN_TXD 64 #define IGB_MAX_TXD 4096 #define IGB_DEFAULT_RXD 256 -#define IGB_MIN_RXD 80 +#define IGB_MIN_RXD 64 #define IGB_MAX_RXD 4096 #define IGB_DEFAULT_ITR 3 /* dynamic */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 1143800c889ac73362a206f1e2f9f320e039619b..01176c86be1254a1c867e994e2558547ec647e48 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3857,8 +3857,9 @@ static void igb_probe_vfs(struct igb_adapter *adapter) struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; - /* Virtualization features not supported on i210 family. */ - if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) + /* Virtualization features not supported on i210 and 82580 family. 
*/ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211) || + (hw->mac.type == e1000_82580)) return; /* Of the below we really only want the effect of getting diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index 975eb47ee04dff81fcd3a98e0998fa7833d4f9cf..b39fca9827dc2733afd3f8ff48d68e13713083d4 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -39,11 +39,11 @@ enum latency_range { /* Tx/Rx descriptor defines */ #define IGBVF_DEFAULT_TXD 256 #define IGBVF_MAX_TXD 4096 -#define IGBVF_MIN_TXD 80 +#define IGBVF_MIN_TXD 64 #define IGBVF_DEFAULT_RXD 256 #define IGBVF_MAX_RXD 4096 -#define IGBVF_MIN_RXD 80 +#define IGBVF_MIN_RXD 64 #define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ #define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 33f64c80335d3569fcfa30bc9f1cb618437f840c..31af08ceb36b92bdd6c70e997fe34649db4e521c 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -319,11 +319,11 @@ static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc) /* TX/RX descriptor defines */ #define IGC_DEFAULT_TXD 256 #define IGC_DEFAULT_TX_WORK 128 -#define IGC_MIN_TXD 80 +#define IGC_MIN_TXD 64 #define IGC_MAX_TXD 4096 #define IGC_DEFAULT_RXD 256 -#define IGC_MIN_RXD 80 +#define IGC_MIN_RXD 64 #define IGC_MAX_RXD 4096 /* Supported Rx Buffer Sizes */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 8b7f3003557109d46e3ca75a817cb8c93f906812..3eb2c05361e804c0e8d3420a8b6f6c3518e150f3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -989,6 +989,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED; u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED; u32 tsync_rx_mtrl = PTP_EV_PORT << 16; + u32 aflags = adapter->flags; bool is_l2 = false; u32 regval; @@ -1009,20 +1010,20 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, case HWTSTAMP_FILTER_NONE: tsync_rx_ctl = 0; tsync_rx_mtrl = 0; - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + aflags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG; - adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG; - adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: @@ -1036,8 +1037,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; is_l2 = true; config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; - adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | + IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); break; case 
HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_NTP_ALL: @@ -1048,7 +1049,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, if (hw->mac.type >= ixgbe_mac_X550) { tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL; config->rx_filter = HWTSTAMP_FILTER_ALL; - adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; + aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; break; } fallthrough; @@ -1059,8 +1060,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, * Delay_Req messages and hardware does not support * timestamping all packets => return error */ - adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | - IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); config->rx_filter = HWTSTAMP_FILTER_NONE; return -ERANGE; } @@ -1092,8 +1091,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, IXGBE_TSYNCRXCTL_TYPE_ALL | IXGBE_TSYNCRXCTL_TSIP_UT_EN; config->rx_filter = HWTSTAMP_FILTER_ALL; - adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; - adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER; + aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; + aflags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER; is_l2 = true; break; default: @@ -1126,6 +1125,9 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, IXGBE_WRITE_FLUSH(hw); + /* configure adapter flags only when HW is actually configured */ + adapter->flags = aflags; + /* clear TX/RX time stamp registers, just to be sure */ ixgbe_ptp_clear_tx_timestamp(adapter); IXGBE_READ_REG(hw, IXGBE_RXSTMPH); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 68c5ed8716c84a46504f6f0c4388629778ebed4d..e0e6275b3e20cb8decc42f23c80fd56365461fc0 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5201,6 +5201,11 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev, break; case ETHTOOL_GRXCLSRLALL: for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) { + if (loc == info->rule_cnt) { + ret = -EMSGSIZE; + break; + } + if (port->rfs_rules[i]) rules[loc++] = i; } diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index a8319295f1ab201e4627621456353b217870fab4..aa9e616cc1d59313375bff4f934c30e982b7ce84 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2013,6 +2013,9 @@ static int mtk_hwlro_get_fdir_all(struct net_device *dev, int i; for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + if (mac->hwlro_ip[i]) { rule_locs[cnt] = i; cnt++; diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index f9a79d67d6d4ffc17fa37751ffb4daf3a2148725..cc7c86debfa271bb9529d79fb06de48ad988f8b8 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -2439,6 +2439,9 @@ static int r8152_poll(struct napi_struct *napi, int budget) struct r8152 *tp = container_of(napi, struct r8152, napi); int work_done; + if (!budget) + return 0; + work_done = rx_bottom(tp, budget); if (work_done < budget) { diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 222fbd8016e59465f865d97e682cc22e05032959..d0e66a732b215b683beac24168d035ba9f99e8aa 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -284,6 +284,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); struct veth_rq *rq = NULL; + int ret = NETDEV_TX_OK; struct net_device *rcv; int length = skb->len; bool rcv_xdp 
= false; @@ -310,6 +311,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) } else { drop: atomic64_inc(&priv->dropped); + ret = NET_XMIT_DROP; } if (rcv_xdp) @@ -317,7 +319,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) rcu_read_unlock(); - return NETDEV_TX_OK; + return ret; } static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes) diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index 4854120fc095409bb24920435ee552511f801ec0..81e5e7a20b94ee3f4d1d5a9df10e7d1429a26fef 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c @@ -56,8 +56,8 @@ static int led_type __read_mostly = -1; static unsigned char lastleds; /* LED state from most recent update */ static unsigned int led_heartbeat __read_mostly = 1; -static unsigned int led_diskio __read_mostly = 1; -static unsigned int led_lanrxtx __read_mostly = 1; +static unsigned int led_diskio __read_mostly; +static unsigned int led_lanrxtx __read_mostly; static char lcd_text[32] __read_mostly; static char lcd_text_default[32] __read_mostly; static int lcd_no_led_support __read_mostly = 0; /* KittyHawk doesn't support LED on its LCD */ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 1193c81f889642ff69c1060799ac5a1c23f4ef7c..c0d11348119153bc34f5b31a4e6ed37f2e334d01 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3578,7 +3578,7 @@ static void quirk_no_bus_reset(struct pci_dev *dev) */ static void quirk_nvidia_no_bus_reset(struct pci_dev *dev) { - if ((dev->device & 0xffc0) == 0x2340 || dev->device == 0x1eb8) + if ((dev->device & 0xffc0) == 0x2340) quirk_no_bus_reset(dev); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 44caada37b71f698bb4c5396b9b324a3dabf5744..18b85ae84ed79aa07445d7eb3c3b5ca68da5a20f 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1625,7 +1625,6 @@ static int chv_pinctrl_probe(struct platform_device *pdev) const struct intel_pinctrl_soc_data *soc_data; struct intel_community *community; struct device *dev = &pdev->dev; - struct acpi_device *adev = ACPI_COMPANION(dev); struct intel_pinctrl *pctrl; acpi_status status; int ret, irq; @@ -1688,7 +1687,7 @@ static int chv_pinctrl_probe(struct platform_device *pdev) if (ret) return ret; - status = acpi_install_address_space_handler(adev->handle, + status = acpi_install_address_space_handler(ACPI_HANDLE(dev), community->acpi_space_id, chv_pinctrl_mmio_access_handler, NULL, pctrl); @@ -1705,7 +1704,7 @@ static int chv_pinctrl_remove(struct platform_device *pdev) struct intel_pinctrl *pctrl = platform_get_drvdata(pdev); const struct intel_community *community = &pctrl->communities[0]; - acpi_remove_address_space_handler(ACPI_COMPANION(&pdev->dev), + acpi_remove_address_space_handler(ACPI_HANDLE(&pdev->dev), community->acpi_space_id, chv_pinctrl_mmio_access_handler); diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c index 64d22ecf3cddd71c96b0191185997ea4d5b66c33..194f3205e55974ba6eeb5cc28defa681b5cfce4a 100644 --- a/drivers/platform/mellanox/mlxbf-tmfifo.c +++ b/drivers/platform/mellanox/mlxbf-tmfifo.c @@ -56,6 +56,7 @@ struct mlxbf_tmfifo; * @vq: pointer to the virtio virtqueue * @desc: current descriptor of the pending packet * @desc_head: head descriptor of the pending packet + * @drop_desc: dummy desc for packet dropping * @cur_len: processed 
length of the current descriptor * @rem_len: remaining length of the pending packet * @pkt_len: total length of the pending packet @@ -72,6 +73,7 @@ struct mlxbf_tmfifo_vring { struct virtqueue *vq; struct vring_desc *desc; struct vring_desc *desc_head; + struct vring_desc drop_desc; int cur_len; int rem_len; u32 pkt_len; @@ -83,6 +85,14 @@ struct mlxbf_tmfifo_vring { struct mlxbf_tmfifo *fifo; }; +/* Check whether vring is in drop mode. */ +#define IS_VRING_DROP(_r) ({ \ + typeof(_r) (r) = (_r); \ + (r->desc_head == &r->drop_desc ? true : false); }) + +/* A stub length to drop maximum length packet. */ +#define VRING_DROP_DESC_MAX_LEN GENMASK(15, 0) + /* Interrupt types. */ enum { MLXBF_TM_RX_LWM_IRQ, @@ -195,7 +205,7 @@ static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = { static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr"; /* Maximum L2 header length. */ -#define MLXBF_TMFIFO_NET_L2_OVERHEAD 36 +#define MLXBF_TMFIFO_NET_L2_OVERHEAD (ETH_HLEN + VLAN_HLEN) /* Supported virtio-net features. */ #define MLXBF_TMFIFO_NET_FEATURES \ @@ -243,6 +253,7 @@ static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo, vring->align = SMP_CACHE_BYTES; vring->index = i; vring->vdev_id = tm_vdev->vdev.id.device; + vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN; dev = &tm_vdev->vdev.dev; size = vring_size(vring->num, vring->align); @@ -348,7 +359,7 @@ static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring, return len; } -static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring) +static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring *vring) { struct vring_desc *desc_head; u32 len = 0; @@ -577,19 +588,25 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring, if (vring->cur_len + sizeof(u64) <= len) { /* The whole word. */ - if (is_rx) - memcpy(addr + vring->cur_len, &data, sizeof(u64)); - else - memcpy(&data, addr + vring->cur_len, sizeof(u64)); + if (!IS_VRING_DROP(vring)) { + if (is_rx) + memcpy(addr + vring->cur_len, &data, + sizeof(u64)); + else + memcpy(&data, addr + vring->cur_len, + sizeof(u64)); + } vring->cur_len += sizeof(u64); } else { /* Leftover bytes. */ - if (is_rx) - memcpy(addr + vring->cur_len, &data, - len - vring->cur_len); - else - memcpy(&data, addr + vring->cur_len, - len - vring->cur_len); + if (!IS_VRING_DROP(vring)) { + if (is_rx) + memcpy(addr + vring->cur_len, &data, + len - vring->cur_len); + else + memcpy(&data, addr + vring->cur_len, + len - vring->cur_len); + } vring->cur_len = len; } @@ -606,13 +623,14 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring, * flag is set. */ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring, - struct vring_desc *desc, + struct vring_desc **desc, bool is_rx, bool *vring_change) { struct mlxbf_tmfifo *fifo = vring->fifo; struct virtio_net_config *config; struct mlxbf_tmfifo_msg_hdr hdr; int vdev_id, hdr_len; + bool drop_rx = false; /* Read/Write packet header. 
*/ if (is_rx) { @@ -632,8 +650,8 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring, if (ntohs(hdr.len) > __virtio16_to_cpu(virtio_legacy_is_little_endian(), config->mtu) + - MLXBF_TMFIFO_NET_L2_OVERHEAD) - return; + MLXBF_TMFIFO_NET_L2_OVERHEAD) + drop_rx = true; } else { vdev_id = VIRTIO_ID_CONSOLE; hdr_len = 0; @@ -648,16 +666,25 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring, if (!tm_dev2) return; - vring->desc = desc; + vring->desc = *desc; vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX]; *vring_change = true; } + + if (drop_rx && !IS_VRING_DROP(vring)) { + if (vring->desc_head) + mlxbf_tmfifo_release_pkt(vring); + *desc = &vring->drop_desc; + vring->desc_head = *desc; + vring->desc = *desc; + } + vring->pkt_len = ntohs(hdr.len) + hdr_len; } else { /* Network virtio has an extra header. */ hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ? sizeof(struct virtio_net_hdr) : 0; - vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc); + vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, *desc); hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ? VIRTIO_ID_NET : VIRTIO_ID_CONSOLE; hdr.len = htons(vring->pkt_len - hdr_len); @@ -690,15 +717,23 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring, /* Get the descriptor of the next packet. */ if (!vring->desc) { desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx); - if (!desc) - return false; + if (!desc) { + /* Drop next Rx packet to avoid stuck. */ + if (is_rx) { + desc = &vring->drop_desc; + vring->desc_head = desc; + vring->desc = desc; + } else { + return false; + } + } } else { desc = vring->desc; } /* Beginning of a packet. Start to Rx/Tx packet header. */ if (vring->pkt_len == 0) { - mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change); + mlxbf_tmfifo_rxtx_header(vring, &desc, is_rx, &vring_change); (*avail)--; /* Return if new packet is for another ring. */ @@ -724,17 +759,24 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring, vring->rem_len -= len; /* Get the next desc on the chain. */ - if (vring->rem_len > 0 && + if (!IS_VRING_DROP(vring) && vring->rem_len > 0 && (virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) { idx = virtio16_to_cpu(vdev, desc->next); desc = &vr->desc[idx]; goto mlxbf_tmfifo_desc_done; } - /* Done and release the pending packet. */ - mlxbf_tmfifo_release_pending_pkt(vring); + /* Done and release the packet. */ desc = NULL; fifo->vring[is_rx] = NULL; + if (!IS_VRING_DROP(vring)) { + mlxbf_tmfifo_release_pkt(vring); + } else { + vring->pkt_len = 0; + vring->desc_head = NULL; + vring->desc = NULL; + return false; + } /* * Make sure the load/store are in order before @@ -914,7 +956,7 @@ static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev) /* Release the pending packet. 
*/ if (vring->desc) - mlxbf_tmfifo_release_pending_pkt(vring); + mlxbf_tmfifo_release_pkt(vring); vq = vring->vq; if (vq) { vring->vq = NULL; diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c index 522f862eca5268255c05637e6a49fea07a7d7e22..504a8f506195a38e2339aa728d4d381db9a6ba10 100644 --- a/drivers/pwm/pwm-lpc32xx.c +++ b/drivers/pwm/pwm-lpc32xx.c @@ -51,10 +51,10 @@ static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, if (duty_cycles > 255) duty_cycles = 255; - val = readl(lpc32xx->base + (pwm->hwpwm << 2)); + val = readl(lpc32xx->base); val &= ~0xFFFF; val |= (period_cycles << 8) | duty_cycles; - writel(val, lpc32xx->base + (pwm->hwpwm << 2)); + writel(val, lpc32xx->base); return 0; } @@ -69,9 +69,9 @@ static int lpc32xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) if (ret) return ret; - val = readl(lpc32xx->base + (pwm->hwpwm << 2)); + val = readl(lpc32xx->base); val |= PWM_ENABLE; - writel(val, lpc32xx->base + (pwm->hwpwm << 2)); + writel(val, lpc32xx->base); return 0; } @@ -81,9 +81,9 @@ static void lpc32xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip); u32 val; - val = readl(lpc32xx->base + (pwm->hwpwm << 2)); + val = readl(lpc32xx->base); val &= ~PWM_ENABLE; - writel(val, lpc32xx->base + (pwm->hwpwm << 2)); + writel(val, lpc32xx->base); clk_disable_unprepare(lpc32xx->clk); } @@ -121,9 +121,9 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev) lpc32xx->chip.base = -1; /* If PWM is disabled, configure the output to the default value */ - val = readl(lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2)); + val = readl(lpc32xx->base); val &= ~PWM_PIN_LEVEL; - writel(val, lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2)); + writel(val, lpc32xx->base); ret = pwmchip_add(&lpc32xx->chip); if (ret < 0) { diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 3b9eda311c273eddd3b3171af50da836052cc34e..b518009715eebae000d907c6bc5112bc30429e3f 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -399,6 +399,7 @@ static int zcdn_create(const char *name) ZCRYPT_NAME "_%d", (int) MINOR(devt)); nodename[sizeof(nodename)-1] = '\0'; if (dev_set_name(&zcdndev->device, nodename)) { + kfree(zcdndev); rc = -EINVAL; goto unlockout; } diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 12e27ee8c5c73b29ab5df1302cadefa5e5c336f4..f919da0bbf75e9394603b5239fa74a7f8e76499e 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -3028,8 +3028,6 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) vha->flags.difdix_supported = 1; ql_dbg(ql_dbg_user, vha, 0x7082, "Registered for DIF/DIX type 1 and 3 protection.\n"); - if (ql2xenabledif == 1) - prot = SHOST_DIX_TYPE0_PROTECTION; scsi_host_set_prot(vha->host, prot | SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 00b4d033b07a945ecaf980f190883585169d0793..8e9ffbec6643fa8f6f1fee3ba46127de25965774 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -18,7 +18,7 @@ * | Queue Command and IO tracing | 0x3074 | 0x300b | * | | | 0x3027-0x3028 | * | | | 0x303d-0x3041 | - * | | | 0x302d,0x3033 | + * | | | 0x302e,0x3033 | * | | | 0x3036,0x3038 | * | | | 0x303a | * | DPC Thread | 0x4023 | 0x4002,0x4013 | @@ -112,8 +112,13 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, 
uint32_t *ram, uint32_t stat; ulong i, j, timer = 6000000; int rval = QLA_FUNCTION_FAILED; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + + if (qla_pci_disconnected(vha, reg)) + return rval; + for (i = 0; i < ram_dwords; i += dwords, addr += dwords) { if (i + dwords > ram_dwords) dwords = ram_dwords - i; @@ -137,6 +142,9 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, while (timer--) { udelay(5); + if (qla_pci_disconnected(vha, reg)) + return rval; + stat = rd_reg_dword(&reg->host_status); /* Check for pending interrupts. */ if (!(stat & HSRX_RISC_INT)) @@ -191,9 +199,13 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram, uint32_t dwords = qla2x00_gid_list_size(ha) / 4; uint32_t stat; ulong i, j, timer = 6000000; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + if (qla_pci_disconnected(vha, reg)) + return rval; + for (i = 0; i < ram_dwords; i += dwords, addr += dwords) { if (i + dwords > ram_dwords) dwords = ram_dwords - i; @@ -215,8 +227,10 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram, ha->flags.mbox_int = 0; while (timer--) { udelay(5); - stat = rd_reg_dword(&reg->host_status); + if (qla_pci_disconnected(vha, reg)) + return rval; + stat = rd_reg_dword(&reg->host_status); /* Check for pending interrupts. */ if (!(stat & HSRX_RISC_INT)) continue; diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 06b0ad2b51bb4d736487858f337a69f81c939931..6645b69fc2a0f36a64bc1107162e7bbaa2f0deb4 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -396,6 +396,7 @@ typedef union { } b; } port_id_t; #define INVALID_PORT_ID 0xFFFFFF +#define ISP_REG16_DISCONNECT 0xFFFF static inline le_id_t be_id_to_le(be_id_t id) { @@ -3848,6 +3849,13 @@ struct qla_hw_data_stat { u32 num_mpi_reset; }; +/* refer to pcie_do_recovery reference */ +typedef enum { + QLA_PCI_RESUME, + QLA_PCI_ERR_DETECTED, + QLA_PCI_MMIO_ENABLED, + QLA_PCI_SLOT_RESET, +} pci_error_state_t; /* * Qlogic host adapter specific data structure.
*/ @@ -4192,7 +4200,6 @@ struct qla_hw_data { uint8_t aen_mbx_count; atomic_t num_pend_mbx_stage1; atomic_t num_pend_mbx_stage2; - atomic_t num_pend_mbx_stage3; uint16_t frame_payload_size; uint32_t login_retry_count; @@ -4586,6 +4593,7 @@ struct qla_hw_data { #define DEFAULT_ZIO_THRESHOLD 5 struct qla_hw_data_stat stat; + pci_error_state_t pci_error_state; }; struct active_regions { @@ -4706,7 +4714,7 @@ typedef struct scsi_qla_host { #define FX00_CRITEMP_RECOVERY 25 #define FX00_HOST_INFO_RESEND 26 #define QPAIR_ONLINE_CHECK_NEEDED 27 -#define SET_NVME_ZIO_THRESHOLD_NEEDED 28 +#define DO_EEH_RECOVERY 28 #define DETECT_SFP_CHANGE 29 #define N2N_LOGIN_NEEDED 30 #define IOCB_WORK_ACTIVE 31 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 7e5ee31581d61c78e2a86fd459ea631761876122..8ef2de6822de9a5de32099464f60581d2fe62c48 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -222,6 +222,7 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); extern void qla2x00_disable_board_on_pci_error(struct work_struct *); +extern void qla_eeh_work(struct work_struct *); extern void qla2x00_sp_compl(srb_t *sp, int); extern void qla2xxx_qpair_sp_free_dma(srb_t *sp); extern void qla2xxx_qpair_sp_compl(srb_t *sp, int); @@ -233,6 +234,8 @@ int qla24xx_post_relogin_work(struct scsi_qla_host *vha); void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *); void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, struct purex_item *pkt); +void qla_pci_set_eeh_busy(struct scsi_qla_host *); +void qla_schedule_eeh_work(struct scsi_qla_host *); /* * Global Functions in qla_mid.c source file. diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 422ff67038d17e33e2e5cd38c93a10fcd3b17800..a8d2c06285c2450411d5f5d65aa244fdb3b8f4e4 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -483,6 +483,7 @@ static void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea) { struct fc_port *fcport = ea->fcport; + unsigned long flags; ql_dbg(ql_dbg_disc, vha, 0x20d2, "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n", @@ -497,9 +498,15 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea) ql_dbg(ql_dbg_disc, vha, 0x2066, "%s %8phC: adisc fail: post delete\n", __func__, ea->fcport->port_name); + + spin_lock_irqsave(&vha->work_lock, flags); /* deleted = 0 & logout_on_delete = force fw cleanup */ - fcport->deleted = 0; + if (fcport->deleted == QLA_SESS_DELETED) + fcport->deleted = 0; + fcport->logout_on_delete = 1; + spin_unlock_irqrestore(&vha->work_lock, flags); + qlt_schedule_sess_for_deletion(ea->fcport); return; } @@ -1405,7 +1412,6 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); ea->fcport->login_gen++; - ea->fcport->deleted = 0; ea->fcport->logout_on_delete = 1; if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) { @@ -4336,15 +4342,16 @@ qla2x00_init_rings(scsi_qla_host_t *vha) memcpy(ha->port_name, ha->init_cb->port_name, WWN_SIZE); } + QLA_FW_STARTED(ha); rval = qla2x00_init_firmware(vha, ha->init_cb_size); next_check: if (rval) { + QLA_FW_STOPPED(ha); ql_log(ql_log_fatal, vha, 0x00d2, "Init Firmware **** FAILED ****.\n"); } else { ql_dbg(ql_dbg_init, vha, 0x00d3, "Init Firmware -- success.\n"); - QLA_FW_STARTED(ha); vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0; } @@ 
-5639,6 +5646,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) void qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) { + unsigned long flags; + if (IS_SW_RESV_ADDR(fcport->d_id)) return; @@ -5648,7 +5657,11 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT); fcport->login_retry = vha->hw->login_retry_count; fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); + + spin_lock_irqsave(&vha->work_lock, flags); fcport->deleted = 0; + spin_unlock_irqrestore(&vha->work_lock, flags); + if (vha->hw->current_topology == ISP_CFG_NL) fcport->logout_on_delete = 0; else @@ -6913,14 +6926,15 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) } /* purge MBox commands */ - if (atomic_read(&ha->num_pend_mbx_stage3)) { + spin_lock_irqsave(&ha->hardware_lock, flags); + if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) { clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); complete(&ha->mbx_intr_comp); } + spin_unlock_irqrestore(&ha->hardware_lock, flags); i = 0; - while (atomic_read(&ha->num_pend_mbx_stage3) || - atomic_read(&ha->num_pend_mbx_stage2) || + while (atomic_read(&ha->num_pend_mbx_stage2) || atomic_read(&ha->num_pend_mbx_stage1)) { msleep(20); i++; @@ -6969,22 +6983,18 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) } spin_unlock_irqrestore(&ha->vport_slock, flags); - if (!ha->flags.eeh_busy) { - /* Make sure for ISP 82XX IO DMA is complete */ - if (IS_P3P_TYPE(ha)) { - qla82xx_chip_reset_cleanup(vha); - ql_log(ql_log_info, vha, 0x00b4, - "Done chip reset cleanup.\n"); - - /* Done waiting for pending commands. - * Reset the online flag. - */ - vha->flags.online = 0; - } + /* Make sure for ISP 82XX IO DMA is complete */ + if (IS_P3P_TYPE(ha)) { + qla82xx_chip_reset_cleanup(vha); + ql_log(ql_log_info, vha, 0x00b4, + "Done chip reset cleanup.\n"); - /* Requeue all commands in outstanding command list. */ - qla2x00_abort_all_cmds(vha, DID_RESET << 16); + /* Done waiting for pending commands. Reset online flag */ + vha->flags.online = 0; } + + /* Requeue all commands in outstanding command list. 
*/ + qla2x00_abort_all_cmds(vha, DID_RESET << 16); /* memory barrier */ wmb(); } @@ -7012,6 +7022,12 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) if (vha->flags.online) { qla2x00_abort_isp_cleanup(vha); + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x803f, + "ISP Abort - ISP reg disconnect, exiting.\n"); + return status; + } + if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) { ha->flags.chip_reset_done = 1; vha->flags.online = 1; @@ -7052,8 +7068,18 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) ha->isp_ops->get_flash_version(vha, req->ring); + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x803f, + "ISP Abort - ISP reg disconnect pre nvram config, exiting.\n"); + return status; + } ha->isp_ops->nvram_config(vha); + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x803f, + "ISP Abort - ISP reg disconnect post nvmram config, exiting.\n"); + return status; + } if (!qla2x00_restart_isp(vha)) { clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 7e8b59a0954bbc31a312322eca3e518d937d54e7..47ee5b9f2a55c88b3001e6ca6e0827fe07b5f9dc 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -435,3 +435,49 @@ qla_put_iocbs(struct qla_qpair *qp, struct iocb_resource *iores) } iores->res_type = RESOURCE_NONE; } + +#define ISP_REG_DISCONNECT 0xffffffffU +/************************************************************************** + * qla2x00_isp_reg_stat + * + * Description: + * Read the host status register of ISP before aborting the command. + * + * Input: + * ha = pointer to host adapter structure. + * + * + * Returns: + * Either true or false. + * + * Note: Return true if there is register disconnect. + **************************************************************************/ +static inline +uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha) +{ + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; + + if (IS_P3P_TYPE(ha)) + return ((rd_reg_dword(&reg82->host_int)) == ISP_REG_DISCONNECT); + else + return ((rd_reg_dword(&reg->host_status)) == + ISP_REG_DISCONNECT); +} + +static inline +bool qla_pci_disconnected(struct scsi_qla_host *vha, + struct device_reg_24xx __iomem *reg) +{ + uint32_t stat; + bool ret = false; + + stat = rd_reg_dword(&reg->host_status); + if (stat == 0xffffffff) { + ql_log(ql_log_info, vha, 0x8041, + "detected PCI disconnect.\n"); + qla_schedule_eeh_work(vha); + ret = true; + } + return ret; +} diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 54fc0afbc02ac199a35308cf9a9b529ab3af2e9d..1752a620317108ea41f8fe8ae0ab5af0df610a48 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -1644,8 +1644,14 @@ qla24xx_start_scsi(srb_t *sp) goto queuing_error; if (req->cnt < (req_cnt + 2)) { - cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : - rd_reg_dword_relaxed(req->req_q_out); + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = *req->out_ptr; + } else { + cnt = rd_reg_dword_relaxed(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) + goto queuing_error; + } + if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else @@ -1836,8 +1842,13 @@ qla24xx_dif_start_scsi(srb_t *sp) goto queuing_error; if (req->cnt < (req_cnt + 2)) { - cnt = IS_SHADOW_REG_CAPABLE(ha) ?
*req->out_ptr : - rd_reg_dword_relaxed(req->req_q_out); + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = *req->out_ptr; + } else { + cnt = rd_reg_dword_relaxed(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) + goto queuing_error; + } if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else @@ -1911,6 +1922,7 @@ qla24xx_dif_start_scsi(srb_t *sp) qla_put_iocbs(sp->qpair, &sp->iores); spin_unlock_irqrestore(&ha->hardware_lock, flags); + return QLA_FUNCTION_FAILED; } @@ -1978,8 +1990,14 @@ qla2xxx_start_scsi_mq(srb_t *sp) goto queuing_error; if (req->cnt < (req_cnt + 2)) { - cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : - rd_reg_dword_relaxed(req->req_q_out); + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = *req->out_ptr; + } else { + cnt = rd_reg_dword_relaxed(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) + goto queuing_error; + } + if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else @@ -2185,8 +2203,14 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp) goto queuing_error; if (req->cnt < (req_cnt + 2)) { - cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : - rd_reg_dword_relaxed(req->req_q_out); + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = *req->out_ptr; + } else { + cnt = rd_reg_dword_relaxed(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) + goto queuing_error; + } + if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else @@ -2263,6 +2287,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp) qla_put_iocbs(sp->qpair, &sp->iores); spin_unlock_irqrestore(&qpair->qp_lock, flags); + return QLA_FUNCTION_FAILED; } @@ -2307,6 +2332,11 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) cnt = qla2x00_debounce_register( ISP_REQ_Q_OUT(ha, &reg->isp)); + if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) { + qla_schedule_eeh_work(vha); + return NULL; + } + if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else @@ -3711,6 +3741,9 @@ qla2x00_start_sp(srb_t *sp) void *pkt; unsigned long flags; + if (vha->hw->flags.eeh_busy) + return -EIO; + spin_lock_irqsave(qp->qp_lock_ptr, flags); pkt = __qla2x00_alloc_iocbs(sp->qpair, sp); if (!pkt) { @@ -3928,8 +3961,14 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) /* Check for room on request queue. */ if (req->cnt < req_cnt + 2) { - cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : - rd_reg_dword_relaxed(req->req_q_out); + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = *req->out_ptr; + } else { + cnt = rd_reg_dword_relaxed(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) + goto queuing_error; + } + if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else @@ -3968,5 +4007,6 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) qla2x00_start_iocbs(vha, req); queuing_error: spin_unlock_irqrestore(&ha->hardware_lock, flags); + return rval; } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 7ea73ad845de6c3d32ea57d1dcee5587ae9112fe..fd0beb194e351857a072734ba46d550899412682 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -269,12 +269,7 @@ qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) && !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) && !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) { - /* - * Schedule this (only once) on the default system - * workqueue so that all the adapter workqueues and the - * DPC thread can be shutdown cleanly.
- */ - schedule_work(&vha->hw->board_disable); + qla_schedule_eeh_work(vha); } return true; } else @@ -982,8 +977,12 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) unsigned long flags; fc_port_t *fcport = NULL; - if (!vha->hw->flags.fw_started) + if (!vha->hw->flags.fw_started) { + ql_log(ql_log_warn, vha, 0x50ff, + "Dropping AEN - %04x %04x %04x %04x.\n", + mb[0], mb[1], mb[2], mb[3]); return; + } /* Setup to process RIO completion. */ handle_cnt = 0; @@ -1639,8 +1638,6 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) case MBA_TEMPERATURE_ALERT: ql_dbg(ql_dbg_async, vha, 0x505e, "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]); - if (mb[1] == 0x12) - schedule_work(&ha->board_disable); break; case MBA_TRANS_INSERT: @@ -3139,7 +3136,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) case CS_PORT_BUSY: case CS_INCOMPLETE: case CS_PORT_UNAVAILABLE: - case CS_TIMEOUT: case CS_RESET: /* diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 6ff720d8961d0edfd6e86040f34f69ac8d7bc378..21ba7100ff67601d916abcdf050487e4f11721f3 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -167,7 +167,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) /* check if ISP abort is active and return cmd with timeout */ if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || - test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) && + test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) || + ha->flags.eeh_busy) && !is_rom_cmd(mcp->mb[0])) { ql_log(ql_log_info, vha, 0x1005, "Cmd 0x%x aborted with timeout since ISP Abort is pending\n", @@ -268,7 +269,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) spin_unlock_irqrestore(&ha->hardware_lock, flags); wait_time = jiffies; - atomic_inc(&ha->num_pend_mbx_stage3); if (!wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ)) { ql_dbg(ql_dbg_mbx, vha, 0x117a, @@ -283,7 +283,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) spin_unlock_irqrestore(&ha->hardware_lock, flags); atomic_dec(&ha->num_pend_mbx_stage2); - atomic_dec(&ha->num_pend_mbx_stage3); rval = QLA_ABORTED; goto premature_exit; } @@ -293,11 +292,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) ha->flags.mbox_busy = 0; spin_unlock_irqrestore(&ha->hardware_lock, flags); atomic_dec(&ha->num_pend_mbx_stage2); - atomic_dec(&ha->num_pend_mbx_stage3); rval = QLA_ABORTED; goto premature_exit; } - atomic_dec(&ha->num_pend_mbx_stage3); if (time_after(jiffies, wait_time + 5 * HZ)) ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n", diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 8b0c8f9bdef08e8f52fad57e9da87aa703f25380..6dad7787f20de242b66e39d92109f17cf2da442d 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -397,8 +397,13 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp) } req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); if (req->cnt < (req_cnt + 2)) { - cnt = IS_SHADOW_REG_CAPABLE(ha) ? 
*req->out_ptr : - rd_reg_dword_relaxed(req->req_q_out); + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = *req->out_ptr; + } else { + cnt = rd_reg_dword_relaxed(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) + goto queuing_error; + } if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; @@ -535,6 +540,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp) queuing_error: spin_unlock_irqrestore(&qpair->qp_lock, flags); + return rval; } @@ -604,7 +610,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, rval = qla2x00_start_nvme_mq(sp); if (rval != QLA_SUCCESS) { - ql_log(ql_log_warn, vha, 0x212d, + ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d, "qla2x00_start_nvme_mq failed = %d\n", rval); sp->priv = NULL; priv->sp = NULL; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index cbc5af26303a34211a8c46e1bfc58461fd1df7c2..8d199deaf3b124a02a043b43f7fb915a0ce715dc 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -879,8 +879,8 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) goto qc24_fail_command; } - if (!fcport) { - cmd->result = DID_NO_CONNECT << 16; + if (!fcport || fcport->deleted) { + cmd->result = DID_IMM_RETRY << 16; goto qc24_fail_command; } @@ -961,11 +961,18 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, goto qc24_fail_command; } - if (!fcport) { + if (!qpair->online) { + ql_dbg(ql_dbg_io, vha, 0x3077, + "qpair not online. eeh_busy=%d.\n", ha->flags.eeh_busy); cmd->result = DID_NO_CONNECT << 16; goto qc24_fail_command; } + if (!fcport || fcport->deleted) { + cmd->result = DID_IMM_RETRY << 16; + goto qc24_fail_command; + } + if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) { if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || atomic_read(&base_vha->loop_state) == LOOP_DEAD) { @@ -1190,35 +1197,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha) return return_status; } -#define ISP_REG_DISCONNECT 0xffffffffU -/************************************************************************** -* qla2x00_isp_reg_stat -* -* Description: -* Read the host status register of ISP before aborting the command. -* -* Input: -* ha = pointer to host adapter structure. -* -* -* Returns: -* Either true or false. -* -* Note: Return true if there is register disconnect. 
-**************************************************************************/ -static inline -uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha) -{ - struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; - struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; - - if (IS_P3P_TYPE(ha)) - return ((rd_reg_dword(&reg82->host_int)) == ISP_REG_DISCONNECT); - else - return ((rd_reg_dword(&reg->host_status)) == - ISP_REG_DISCONNECT); -} - /************************************************************************** * qla2xxx_eh_abort * @@ -1253,6 +1231,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x8042, "PCI/Register disconnect, exiting.\n"); + qla_pci_set_eeh_busy(vha); return FAILED; } @@ -1444,6 +1423,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x803e, "PCI/Register disconnect, exiting.\n"); + qla_pci_set_eeh_busy(vha); return FAILED; } @@ -1460,6 +1440,7 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd) if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x803f, "PCI/Register disconnect, exiting.\n"); + qla_pci_set_eeh_busy(vha); return FAILED; } @@ -1495,6 +1476,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x8040, "PCI/Register disconnect, exiting.\n"); + qla_pci_set_eeh_busy(vha); return FAILED; } @@ -1572,7 +1554,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x8041, "PCI/Register disconnect, exiting.\n"); - schedule_work(&ha->board_disable); + qla_pci_set_eeh_busy(vha); return SUCCESS; } @@ -2866,7 +2848,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->max_exchg = FW_MAX_EXCHANGES_CNT; atomic_set(&ha->num_pend_mbx_stage1, 0); atomic_set(&ha->num_pend_mbx_stage2, 0); - atomic_set(&ha->num_pend_mbx_stage3, 0); atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD); ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD; @@ -3141,6 +3122,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) host->max_id = ha->max_fibre_devices; host->cmd_per_lun = 3; host->unique_id = host->host_no; + + if (ql2xenabledif && ql2xenabledif != 2) { + ql_log(ql_log_warn, base_vha, 0x302d, + "Invalid value for ql2xenabledif, resetting it to default (2)\n"); + ql2xenabledif = 2; + } + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) host->max_cmd_len = 32; else @@ -3373,8 +3361,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) base_vha->flags.difdix_supported = 1; ql_dbg(ql_dbg_init, base_vha, 0x00f1, "Registering for DIF/DIX type 1 and 3 protection.\n"); - if (ql2xenabledif == 1) - prot = SHOST_DIX_TYPE0_PROTECTION; if (ql2xprotmask) scsi_host_set_prot(host, ql2xprotmask); else @@ -6672,6 +6658,9 @@ qla2x00_do_dpc(void *data) schedule(); + if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags)) + qla_pci_set_eeh_busy(base_vha); + if (!base_vha->flags.init_done || ha->flags.mbox_busy) goto end_loop; @@ -6968,28 +6957,23 @@ qla2x00_do_dpc(void *data) mutex_unlock(&ha->mq_lock); } - if (test_and_clear_bit(SET_NVME_ZIO_THRESHOLD_NEEDED, - &base_vha->dpc_flags)) { + if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, + &base_vha->dpc_flags)) { + u16 threshold = ha->nvme_last_rptd_aen + ha->last_zio_threshold; + + if (threshold > ha->orig_fw_xcb_count) + threshold = ha->orig_fw_xcb_count; + ql_log(ql_log_info, base_vha, 0xffffff, - "nvme: SET ZIO Activity exchange threshold to %d.\n", -
ha->nvme_last_rptd_aen); - if (qla27xx_set_zio_threshold(base_vha, - ha->nvme_last_rptd_aen)) { + "SET ZIO Activity exchange threshold to %d.\n", + threshold); + if (qla27xx_set_zio_threshold(base_vha, threshold)) { ql_log(ql_log_info, base_vha, 0xffffff, - "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n", - ha->nvme_last_rptd_aen); + "Unable to SET ZIO Activity exchange threshold to %d.\n", + threshold); } } - if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, - &base_vha->dpc_flags)) { - ql_log(ql_log_info, base_vha, 0xffffff, - "SET ZIO Activity exchange threshold to %d.\n", - ha->last_zio_threshold); - qla27xx_set_zio_threshold(base_vha, - ha->last_zio_threshold); - } - if (!IS_QLAFX00(ha)) qla2x00_do_dpc_all_vps(base_vha); @@ -7205,14 +7189,13 @@ qla2x00_timer(struct timer_list *t) index = atomic_read(&ha->nvme_active_aen_cnt); if (!vha->vp_idx && (index != ha->nvme_last_rptd_aen) && - (index >= DEFAULT_ZIO_THRESHOLD) && ha->zio_mode == QLA_ZIO_MODE_6 && !ha->flags.host_shutting_down) { + ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt); ql_log(ql_log_info, vha, 0x3002, "nvme: Sched: Set ZIO exchange threshold to %d.\n", ha->nvme_last_rptd_aen); - ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt); - set_bit(SET_NVME_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); + set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); start_dpc++; } @@ -7385,6 +7368,8 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha) int i; unsigned long flags; + ql_dbg(ql_dbg_aer, vha, 0x9000, + "%s\n", __func__); ha->chip_reset++; ha->base_qpair->chip_reset = ha->chip_reset; @@ -7394,28 +7379,16 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha) ha->base_qpair->chip_reset; } - /* purge MBox commands */ - if (atomic_read(&ha->num_pend_mbx_stage3)) { - clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); - complete(&ha->mbx_intr_comp); - } - - i = 0; - - while (atomic_read(&ha->num_pend_mbx_stage3) || - atomic_read(&ha->num_pend_mbx_stage2) || - atomic_read(&ha->num_pend_mbx_stage1)) { - msleep(20); - i++; - if (i > 50) - break; - } - - ha->flags.purge_mbox = 0; + /* + * purge mailbox might take a while. 
Slot Reset/chip reset + * will take care of the purge + */ mutex_lock(&ha->mq_lock); + ha->base_qpair->online = 0; list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) qpair->online = 0; + wmb(); mutex_unlock(&ha->mq_lock); qla2x00_mark_all_devices_lost(vha); @@ -7452,14 +7425,17 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { scsi_qla_host_t *vha = pci_get_drvdata(pdev); struct qla_hw_data *ha = vha->hw; + pci_ers_result_t ret = PCI_ERS_RESULT_NEED_RESET; - ql_dbg(ql_dbg_aer, vha, 0x9000, - "PCI error detected, state %x.\n", state); + ql_log(ql_log_warn, vha, 0x9000, + "PCI error detected, state %x.\n", state); + ha->pci_error_state = QLA_PCI_ERR_DETECTED; if (!atomic_read(&pdev->enable_cnt)) { ql_log(ql_log_info, vha, 0xffff, "PCI device is disabled,state %x\n", state); - return PCI_ERS_RESULT_NEED_RESET; + ret = PCI_ERS_RESULT_NEED_RESET; + goto out; } switch (state) { @@ -7469,11 +7445,12 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } - return PCI_ERS_RESULT_CAN_RECOVER; + ret = PCI_ERS_RESULT_CAN_RECOVER; + break; case pci_channel_io_frozen: - ha->flags.eeh_busy = 1; - qla_pci_error_cleanup(vha); - return PCI_ERS_RESULT_NEED_RESET; + qla_pci_set_eeh_busy(vha); + ret = PCI_ERS_RESULT_NEED_RESET; + break; case pci_channel_io_perm_failure: ha->flags.pci_channel_io_perm_failure = 1; qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); @@ -7481,9 +7458,12 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } - return PCI_ERS_RESULT_DISCONNECT; + ret = PCI_ERS_RESULT_DISCONNECT; } - return PCI_ERS_RESULT_NEED_RESET; +out: + ql_dbg(ql_dbg_aer, vha, 0x600d, + "PCI error detected returning [%x].\n", ret); + return ret; } static pci_ers_result_t @@ -7497,6 +7477,10 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; + ql_log(ql_log_warn, base_vha, 0x9000, + "mmio enabled\n"); + + ha->pci_error_state = QLA_PCI_MMIO_ENABLED; if (IS_QLA82XX(ha)) return PCI_ERS_RESULT_RECOVERED; @@ -7520,10 +7504,11 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) ql_log(ql_log_info, base_vha, 0x9003, "RISC paused -- mmio_enabled, Dumping firmware.\n"); qla2xxx_dump_fw(base_vha); - - return PCI_ERS_RESULT_NEED_RESET; - } else - return PCI_ERS_RESULT_RECOVERED; + } + /* set PCI_ERS_RESULT_NEED_RESET to trigger call to qla2xxx_pci_slot_reset */ + ql_dbg(ql_dbg_aer, base_vha, 0x600d, + "mmio enabled returning.\n"); + return PCI_ERS_RESULT_NEED_RESET; } static pci_ers_result_t @@ -7535,9 +7520,10 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev) int rc; struct qla_qpair *qpair = NULL; - ql_dbg(ql_dbg_aer, base_vha, 0x9004, - "Slot Reset.\n"); + ql_log(ql_log_warn, base_vha, 0x9004, + "Slot Reset.\n"); + ha->pci_error_state = QLA_PCI_SLOT_RESET; /* Workaround: qla2xxx driver which access hardware earlier * needs error state to be pci_channel_io_online. * Otherwise mailbox command timesout. 
@@ -7571,16 +7557,24 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev) qpair->online = 1; mutex_unlock(&ha->mq_lock); + ha->flags.eeh_busy = 0; base_vha->flags.online = 1; set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); - if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS) - ret = PCI_ERS_RESULT_RECOVERED; + ha->isp_ops->abort_isp(base_vha); clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); + if (qla2x00_isp_reg_stat(ha)) { + ha->flags.eeh_busy = 1; + qla_pci_error_cleanup(base_vha); + ql_log(ql_log_warn, base_vha, 0x9005, + "Device unable to recover from PCI error.\n"); + } else { + ret = PCI_ERS_RESULT_RECOVERED; + } exit_slot_reset: ql_dbg(ql_dbg_aer, base_vha, 0x900e, - "slot_reset return %x.\n", ret); + "Slot Reset returning %x.\n", ret); return ret; } @@ -7592,16 +7586,55 @@ qla2xxx_pci_resume(struct pci_dev *pdev) struct qla_hw_data *ha = base_vha->hw; int ret; - ql_dbg(ql_dbg_aer, base_vha, 0x900f, - "pci_resume.\n"); + ql_log(ql_log_warn, base_vha, 0x900f, + "Pci Resume.\n"); - ha->flags.eeh_busy = 0; ret = qla2x00_wait_for_hba_online(base_vha); if (ret != QLA_SUCCESS) { ql_log(ql_log_fatal, base_vha, 0x9002, "The device failed to resume I/O from slot/link_reset.\n"); } + ha->pci_error_state = QLA_PCI_RESUME; + ql_dbg(ql_dbg_aer, base_vha, 0x600d, + "Pci Resume returning.\n"); +} + +void qla_pci_set_eeh_busy(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + bool do_cleanup = false; + unsigned long flags; + + if (ha->flags.eeh_busy) + return; + + spin_lock_irqsave(&base_vha->work_lock, flags); + if (!ha->flags.eeh_busy) { + ha->flags.eeh_busy = 1; + do_cleanup = true; + } + spin_unlock_irqrestore(&base_vha->work_lock, flags); + + if (do_cleanup) + qla_pci_error_cleanup(base_vha); +} + +/* + * this routine will schedule a task to pause IO from interrupt context + * if caller sees a PCIE error event (register read = 0xf's) + */ +void qla_schedule_eeh_work(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + + if (ha->flags.eeh_busy) + return; + + set_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags); + qla2xxx_wake_dpc(base_vha); } static void diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index ecb30c2738b8bc606598d038a133cd6b32cdc180..fdb424501da5b82e62d4c8260a74d2115327e622 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1044,10 +1044,6 @@ void qlt_free_session_done(struct work_struct *work) (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO); } - spin_lock_irqsave(&vha->work_lock, flags); - sess->flags &= ~FCF_ASYNC_SENT; - spin_unlock_irqrestore(&vha->work_lock, flags); - spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (sess->se_sess) { sess->se_sess = NULL; @@ -1057,7 +1053,6 @@ void qlt_free_session_done(struct work_struct *work) qla2x00_set_fcport_disc_state(sess, DSC_DELETED); sess->fw_login_state = DSC_LS_PORT_UNAVAIL; - sess->deleted = QLA_SESS_DELETED; if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) { vha->fcport_count--; @@ -1109,10 +1104,15 @@ void qlt_free_session_done(struct work_struct *work) sess->explicit_logout = 0; spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - sess->free_pending = 0; qla2x00_dfs_remove_rport(vha, sess); + spin_lock_irqsave(&vha->work_lock, flags); + sess->flags &= ~FCF_ASYNC_SENT; + sess->deleted = QLA_SESS_DELETED; + sess->free_pending = 0; + spin_unlock_irqrestore(&vha->work_lock, flags); + 
ql_dbg(ql_dbg_disc, vha, 0xf001, "Unregistration of sess %p %8phC finished fcp_cnt %d\n", sess, sess->port_name, vha->fcport_count); @@ -1161,12 +1161,12 @@ void qlt_unreg_sess(struct fc_port *sess) * management from being sent. */ sess->flags |= FCF_ASYNC_SENT; + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; spin_unlock_irqrestore(&sess->vha->work_lock, flags); if (sess->se_sess) vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); - sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND); sess->last_rscn_gen = sess->rscn_gen; sess->last_login_gen = sess->login_gen; diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c index 3aaab71d1b2c1e52dbe66eb05b15c72332f91911..dbc8b4c93190383b98a4465899b906c27508c538 100644 --- a/drivers/soc/qcom/qmi_encdec.c +++ b/drivers/soc/qcom/qmi_encdec.c @@ -534,8 +534,8 @@ static int qmi_decode_string_elem(struct qmi_elem_info *ei_array, decoded_bytes += rc; } - if (string_len > temp_ei->elem_len) { - pr_err("%s: String len %d > Max Len %d\n", + if (string_len >= temp_ei->elem_len) { + pr_err("%s: String len %d >= Max Len %d\n", __func__, string_len, temp_ei->elem_len); return -ETOOSMALL; } else if (string_len > tlv_len) { diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c index f4e7f4d78b565bb0c66f7118f5ab4cd813d77749..7994b46592b99d063ddc98505d68506493e31a1c 100644 --- a/drivers/usb/typec/bus.c +++ b/drivers/usb/typec/bus.c @@ -152,12 +152,20 @@ EXPORT_SYMBOL_GPL(typec_altmode_exit); * * Notifies the partner of @adev about Attention command. */ -void typec_altmode_attention(struct typec_altmode *adev, u32 vdo) +int typec_altmode_attention(struct typec_altmode *adev, u32 vdo) { - struct typec_altmode *pdev = &to_altmode(adev)->partner->adev; + struct altmode *partner = to_altmode(adev)->partner; + struct typec_altmode *pdev; + + if (!partner) + return -ENODEV; + + pdev = &partner->adev; if (pdev->ops && pdev->ops->attention) pdev->ops->attention(pdev, vdo); + + return 0; } EXPORT_SYMBOL_GPL(typec_altmode_attention); diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c index 069affa5cb1eeada22d27f2711aab1e349b8c775..e34e46df802435772e369c363dc21d9851f7656a 100644 --- a/drivers/usb/typec/tcpm/tcpci.c +++ b/drivers/usb/typec/tcpm/tcpci.c @@ -475,6 +475,10 @@ static int tcpci_init(struct tcpc_dev *tcpc) if (time_after(jiffies, timeout)) return -ETIMEDOUT; + ret = tcpci_write16(tcpci, TCPC_FAULT_STATUS, TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT); + if (ret < 0) + return ret; + /* Handle vendor init */ if (tcpci->data->init) { ret = tcpci->data->init(tcpci, tcpci->data); diff --git a/drivers/usb/typec/tcpm/tcpci.h b/drivers/usb/typec/tcpm/tcpci.h index 5ef07a56d67aa3dfd3df9e98c15d984c1b236c46..95ce89139c6ef862587d2205ef73383c78d8a0da 100644 --- a/drivers/usb/typec/tcpm/tcpci.h +++ b/drivers/usb/typec/tcpm/tcpci.h @@ -84,6 +84,7 @@ #define TCPC_POWER_STATUS_VBUS_PRES BIT(2) #define TCPC_FAULT_STATUS 0x1f +#define TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT BIT(7) #define TCPC_ALERT_EXTENDED 0x21 diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index cf0e6a80815ae6ec9476c4feec43966e9876508a..ac3953a0fa2913e46be1a3327d320d4e3d5055dc 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -1395,7 +1395,8 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port, } break; case ADEV_ATTENTION: - typec_altmode_attention(adev, p[1]); + if (typec_altmode_attention(adev, p[1])) + tcpm_log(port, "typec_altmode_attention 
no port partner altmode"); break; } } diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c index 5c5c99f7979e35c7165cd0643ec4565120db887b..30ec5b6845335ec9c4056dd82453b112af086e33 100644 --- a/drivers/video/backlight/gpio_backlight.c +++ b/drivers/video/backlight/gpio_backlight.c @@ -87,8 +87,7 @@ static int gpio_backlight_probe(struct platform_device *pdev) /* Not booted with device tree or no phandle link to the node */ bl->props.power = def_value ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; - else if (gpiod_get_direction(gbl->gpiod) == 0 && - gpiod_get_value_cansleep(gbl->gpiod) == 0) + else if (gpiod_get_value_cansleep(gbl->gpiod) == 0) bl->props.power = FB_BLANK_POWERDOWN; else bl->props.power = FB_BLANK_UNBLANK; diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c index ba33b4dce0dfab25a780b618528b97beb495a725..04aac2ad53822cd2d2008e2676b83398db3e41bd 100644 --- a/drivers/video/fbdev/ep93xx-fb.c +++ b/drivers/video/fbdev/ep93xx-fb.c @@ -474,7 +474,6 @@ static int ep93xxfb_probe(struct platform_device *pdev) if (!info) return -ENOMEM; - info->dev = &pdev->dev; platform_set_drvdata(pdev, info); fbi = info->par; fbi->mach_info = mach_info; diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c index 9b2173f765c8cf94ccb2d29261332149f57b8375..fb7fae750181bf3738299408241ee0db2f5f8e27 100644 --- a/drivers/watchdog/intel-mid_wdt.c +++ b/drivers/watchdog/intel-mid_wdt.c @@ -203,3 +203,4 @@ module_platform_driver(mid_wdt_driver); MODULE_AUTHOR("David Cohen "); MODULE_DESCRIPTION("Watchdog Driver for Intel MID platform"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:intel_mid_wdt"); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index eb294d55a96a307a20eda973dab79b3d1bd26931..bf0f71413826956e442113ce5a2504a1cd6f0caf 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2505,11 +2505,10 @@ static int validate_super(struct btrfs_fs_info *fs_info, ret = -EINVAL; } - if (memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid, - BTRFS_FSID_SIZE)) { + if (memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) { btrfs_err(fs_info, "superblock fsid doesn't match fsid of fs_devices: %pU != %pU", - fs_info->super_copy->fsid, fs_info->fs_devices->fsid); + sb->fsid, fs_info->fs_devices->fsid); ret = -EINVAL; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index abd67f984fbcf0ae91600a0e628cc302b4272fc1..d23047b23005cb4ea53feeb9e5444c8b15f70050 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -301,10 +301,11 @@ static noinline int join_transaction(struct btrfs_fs_info *fs_info, spin_unlock(&fs_info->trans_lock); /* - * If we are ATTACH, we just want to catch the current transaction, - * and commit it. If there is no transaction, just return ENOENT. + * If we are ATTACH or TRANS_JOIN_NOSTART, we just want to catch the + * current transaction, and commit it. If there is no transaction, just + * return ENOENT. 
*/ - if (type == TRANS_ATTACH) + if (type == TRANS_ATTACH || type == TRANS_JOIN_NOSTART) return -ENOENT; /* diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c index 28735e8c5e2069bf497412b78350caa8076eb110..5f2e2fa2ba09084b7ac6d20ef72d4c7f8a5fbf6e 100644 --- a/fs/dlm/plock.c +++ b/fs/dlm/plock.c @@ -466,7 +466,8 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count, } } else { list_for_each_entry(iter, &recv_list, list) { - if (!iter->info.wait) { + if (!iter->info.wait && + iter->info.fsid == info.fsid) { op = iter; break; } @@ -478,8 +479,7 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count, if (info.wait) WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK); else - WARN_ON(op->info.fsid != info.fsid || - op->info.number != info.number || + WARN_ON(op->info.number != info.number || op->info.owner != info.owner || op->info.optype != info.optype); diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 4efe71efe1277c5fdc01ce4e1f6624424c50237b..bdbf130416c738df12971575f4723d2d5d1802e2 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -903,11 +903,11 @@ unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group) } /* - * This function returns the number of file system metadata clusters at + * This function returns the number of file system metadata blocks at * the beginning of a block group, including the reserved gdt blocks. */ -static unsigned ext4_num_base_meta_clusters(struct super_block *sb, - ext4_group_t block_group) +unsigned int ext4_num_base_meta_blocks(struct super_block *sb, + ext4_group_t block_group) { struct ext4_sb_info *sbi = EXT4_SB(sb); unsigned num; @@ -925,8 +925,15 @@ static unsigned ext4_num_base_meta_clusters(struct super_block *sb, } else { /* For META_BG_BLOCK_GROUPS */ num += ext4_bg_num_gdb(sb, block_group); } - return EXT4_NUM_B2C(sbi, num); + return num; } + +static unsigned int ext4_num_base_meta_clusters(struct super_block *sb, + ext4_group_t block_group) +{ + return EXT4_NUM_B2C(EXT4_SB(sb), ext4_num_base_meta_blocks(sb, block_group)); +} + /** * ext4_inode_to_goal_block - return a hint for block allocation * @inode: inode for block allocation diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index 663649ff86b7be9216b2ae0122216cd5872a8bee..82c42eee46101f23c2466a684a42181ee309356e 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c @@ -215,7 +215,6 @@ int ext4_setup_system_zone(struct super_block *sb) struct ext4_system_blocks *system_blks; struct ext4_group_desc *gdp; ext4_group_t i; - int flex_size = ext4_flex_bg_size(sbi); int ret; system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL); @@ -223,12 +222,13 @@ int ext4_setup_system_zone(struct super_block *sb) return -ENOMEM; for (i=0; i < ngroups; i++) { + unsigned int meta_blks = ext4_num_base_meta_blocks(sb, i); + cond_resched(); - if (ext4_bg_has_super(sb, i) && - ((i < 5) || ((i % flex_size) == 0))) { + if (meta_blks != 0) { ret = add_system_zone(system_blks, ext4_group_first_block_no(sb, i), - ext4_bg_num_gdb(sb, i) + 1, 0); + meta_blks, 0); if (ret) goto err; } diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 4c72b2a5238f0ad5eeeab4abe144ee02432d5588..2326449795e6f0e755f32858a4dd750c28ababa0 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -2988,6 +2988,8 @@ extern const char *ext4_decode_error(struct super_block *sb, int errno, extern void ext4_mark_group_bitmap_corrupted(struct super_block *sb, ext4_group_t block_group, unsigned int flags); +extern unsigned int ext4_num_base_meta_blocks(struct 
super_block *sb, + ext4_group_t block_group); extern __printf(7, 8) void __ext4_error(struct super_block *, const char *, unsigned int, bool, diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c index d5294e663df5010db6d2829385f281fc6ff376a1..14e99ffa57af9a26f3850ec27df0f08c00c44719 100644 --- a/fs/fuse/readdir.c +++ b/fs/fuse/readdir.c @@ -243,8 +243,16 @@ static int fuse_direntplus_link(struct file *file, dput(dentry); dentry = alias; } - if (IS_ERR(dentry)) + if (IS_ERR(dentry)) { + if (!IS_ERR(inode)) { + struct fuse_inode *fi = get_fuse_inode(inode); + + spin_lock(&fi->lock); + fi->nlookup--; + spin_unlock(&fi->lock); + } return PTR_ERR(dentry); + } } if (fc->readdirplus_auto) set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state); diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index fbc7304bed56b59d52b8f40e304ac9d4e1b872e2..018af6ec97b4002eb2d5a1bd8da5e63b07005bea 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -509,13 +509,31 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter, return result; } +static void nfs_direct_add_page_head(struct list_head *list, + struct nfs_page *req) +{ + struct nfs_page *head = req->wb_head; + + if (!list_empty(&head->wb_list) || !nfs_lock_request(head)) + return; + if (!list_empty(&head->wb_list)) { + nfs_unlock_request(head); + return; + } + list_add(&head->wb_list, list); + kref_get(&head->wb_kref); + kref_get(&head->wb_kref); +} + static void nfs_direct_join_group(struct list_head *list, struct inode *inode) { struct nfs_page *req, *subreq; list_for_each_entry(req, list, wb_list) { - if (req->wb_head != req) + if (req->wb_head != req) { + nfs_direct_add_page_head(&req->wb_list, req); continue; + } subreq = req->wb_this_page; if (subreq == req) continue; diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c index 537b80d693f1ef8f9d71de664f9a87c5f0ca176a..d4829f3f22935f71718477d91901402364e7659d 100644 --- a/fs/nfs/pnfs_dev.c +++ b/fs/nfs/pnfs_dev.c @@ -152,7 +152,7 @@ nfs4_get_device_info(struct nfs_server *server, set_bit(NFS_DEVICEID_NOCACHE, &d->flags); out_free_pages: - for (i = 0; i < max_pages; i++) + while (--i >= 0) __free_page(pages[i]); kfree(pages); out_free_pdev: diff --git a/fs/proc/base.c b/fs/proc/base.c index a8a64b7aca62f227f75732fe1e32578ee191d851..d8e5fb88acdf9777815d476cc8371f58e6b97b72 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -3725,7 +3725,8 @@ static int proc_tid_comm_permission(struct inode *inode, int mask) } static const struct inode_operations proc_tid_comm_inode_operations = { - .permission = proc_tid_comm_permission, + .setattr = proc_setattr, + .permission = proc_tid_comm_permission, }; /* diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c index b6183f1f4ebcf06ead573ddd44d39a5ebd931ffd..a0fa3820ef2ab54fd89c7847072ab6ac7bbf76e4 100644 --- a/fs/pstore/ram_core.c +++ b/fs/pstore/ram_core.c @@ -504,7 +504,7 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig, sig ^= PERSISTENT_RAM_SIG; if (prz->buffer->sig == sig) { - if (buffer_size(prz) == 0) { + if (buffer_size(prz) == 0 && buffer_start(prz) == 0) { pr_debug("found existing empty buffer\n"); return 0; } diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 3e11190b7118ec06698540cc6ebb02998ba17e39..499a27372a40b41d41a3e6a4458af35fcc770052 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -695,7 +695,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block, struct kernel_lb_addr eloc, tmpeloc; int c = 1; loff_t lbcount = 0, b_off = 0; - udf_pblk_t newblocknum, newblock; + udf_pblk_t 
newblocknum, newblock = 0;
 	sector_t offset = 0;
 	int8_t etype;
 	struct udf_inode_info *iinfo = UDF_I(inode);
@@ -798,7 +798,6 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
 		ret = udf_do_extend_file(inode, &prev_epos, laarr, hole_len);
 		if (ret < 0) {
 			*err = ret;
-			newblock = 0;
 			goto out_free;
 		}
 		c = 0;
@@ -861,7 +860,6 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
 				goal, err);
 		if (!newblocknum) {
 			*err = -ENOSPC;
-			newblock = 0;
 			goto out_free;
 		}
 		if (isBeyondEOF)
diff --git a/fs/verity/signature.c b/fs/verity/signature.c
index d066b1b9f027d29c3b9e1d5491c5bb3e11244b35..006378d85ff04b95c7387578c5b9044819516bfa 100644
--- a/fs/verity/signature.c
+++ b/fs/verity/signature.c
@@ -99,6 +99,22 @@ int fsverity_verify_signature(struct fsverity_info *vi,
 		return -EBADMSG;
 	}
 
+	if (fsverity_keyring->keys.nr_leaves_on_tree == 0) {
+		/*
+		 * The ".fs-verity" keyring is empty, due to builtin signatures
+		 * being supported by the kernel but not actually being used.
+		 * In this case, verify_pkcs7_signature() would always return an
+		 * error, usually ENOKEY. It could also be EBADMSG if the
+		 * PKCS#7 is malformed, but that isn't very important to
+		 * distinguish. So, just skip to ENOKEY to avoid the attack
+		 * surface of the PKCS#7 parser, which would otherwise be
+		 * reachable by any task able to execute FS_IOC_ENABLE_VERITY.
+		 */
+		fsverity_err(inode,
+			     "fs-verity keyring is empty, rejecting signed file!");
+		return -ENOKEY;
+	}
+
 	d = kzalloc(sizeof(*d) + hash_alg->digest_size, GFP_KERNEL);
 	if (!d)
 		return -ENOMEM;
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
index 680f80960c3dcf36f6dac83c4061cce7f5103f1b..a6ac2e8b72da86d52100af29ce8df316edf8636b 100644
--- a/include/acpi/apei.h
+++ b/include/acpi/apei.h
@@ -27,14 +27,16 @@ extern int hest_disable;
 extern int erst_disable;
 #ifdef CONFIG_ACPI_APEI_GHES
 extern bool ghes_disable;
+void __init ghes_init(void);
 #else
 #define ghes_disable 1
+static inline void ghes_init(void) { }
 #endif
 
 #ifdef CONFIG_ACPI_APEI
 void __init acpi_hest_init(void);
 #else
-static inline void acpi_hest_init(void) { return; }
+static inline void acpi_hest_init(void) { }
 #endif
 
 typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data);
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
index 0a241c5c911d8caa7436e3139d22432ae00cfa34..255701e1251b4ac242456693f998e3940a36851c 100644
--- a/include/linux/arm_sdei.h
+++ b/include/linux/arm_sdei.h
@@ -46,9 +46,13 @@ int sdei_unregister_ghes(struct ghes *ghes);
 /* For use by arch code when CPU hotplug notifiers are not appropriate. */
 int sdei_mask_local_cpu(void);
 int sdei_unmask_local_cpu(void);
+void __init sdei_init(void);
+void sdei_handler_abort(void);
 #else
 static inline int sdei_mask_local_cpu(void) { return 0; }
 static inline int sdei_unmask_local_cpu(void) { return 0; }
+static inline void sdei_init(void) { }
+static inline void sdei_handler_abort(void) { }
 #endif /* CONFIG_ARM_SDE_INTERFACE */
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index e147ea6794670888fbf949e9cee5dc38e16d8c3e..91db78e67edccadca6a92c179e24aeb985c64362 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -52,6 +52,10 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
 	case ARPHRD_NONE:
 	case ARPHRD_RAWIP:
 	case ARPHRD_PIMREG:
+	/* PPP adds its l2 header automatically in ppp_start_xmit().
+	 * This makes it look like an l3 device to __bpf_redirect() and tcf_mirred_init().
+	 */
+	case ARPHRD_PPP:
 		return false;
 	default:
 		return true;
diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h
index 5e0a7b7647c3bcebafe632df3dc2360744cf8eaa..22b8ee8f03114156649a037b9b1d0b4fc8dc45d7 100644
--- a/include/linux/usb/typec_altmode.h
+++ b/include/linux/usb/typec_altmode.h
@@ -67,7 +67,7 @@ struct typec_altmode_ops {
 
 int typec_altmode_enter(struct typec_altmode *altmode, u32 *vdo);
 int typec_altmode_exit(struct typec_altmode *altmode);
-void typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
+int typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
 int typec_altmode_vdm(struct typec_altmode *altmode,
 		      const u32 header, const u32 *vdo, int count);
 int typec_altmode_notify(struct typec_altmode *altmode, unsigned long conf,
diff --git a/include/net/ip.h b/include/net/ip.h
index 8d1173577fb5c9a309a07daa11deb24c3116a850..9be2efe00f2c0043033cd4ef8b921799ab876d23 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -56,6 +56,7 @@ struct inet_skb_parm {
 #define IPSKB_FRAG_PMTU		BIT(6)
 #define IPSKB_L3SLAVE		BIT(7)
 #define IPSKB_NOPOLICY		BIT(8)
+#define IPSKB_MULTIPATH		BIT(9)
 
 	u16			frag_max_size;
 };
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 1ddd401a8981f6089f14698b749a8ec6d1405d91..58d8e6260aa1389eee610391964937d1f079d2cd 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -455,15 +455,14 @@ static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
 		tstats->tx_packets++;
 		u64_stats_update_end(&tstats->syncp);
 		put_cpu_ptr(tstats);
+		return;
+	}
+
+	if (pkt_len < 0) {
+		DEV_STATS_INC(dev, tx_errors);
+		DEV_STATS_INC(dev, tx_aborted_errors);
 	} else {
-		struct net_device_stats *err_stats = &dev->stats;
-
-		if (pkt_len < 0) {
-			err_stats->tx_errors++;
-			err_stats->tx_aborted_errors++;
-		} else {
-			err_stats->tx_dropped++;
-		}
+		DEV_STATS_INC(dev, tx_dropped);
 	}
 }
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 4c8f97a6da5a7ea18416f96efe256de93003a5f4..47d644de0e47c21ac418c87fc6f3852459023fdd 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -1249,7 +1249,7 @@ static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val)
 	return 0;
 }
 
-static inline int ip6_sock_set_addr_preferences(struct sock *sk, bool val)
+static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val)
 {
 	int ret;
 
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index 81485c1a9879e14ae1b791284bce45adae3aa11c..fe8594a0396cae42953b5bdb7699309e76190332 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -176,6 +176,16 @@ static void io_worker_ref_put(struct io_wq *wq)
 		complete(&wq->worker_done);
 }
 
+bool io_wq_worker_stopped(void)
+{
+	struct io_worker *worker = current->pf_io_worker;
+
+	if (WARN_ON_ONCE(!io_wq_current_is_worker()))
+		return true;
+
+	return test_bit(IO_WQ_BIT_EXIT, &worker->wqe->wq->state);
+}
+
 static void io_worker_cancel_cb(struct io_worker *worker)
 {
 	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
diff --git a/io_uring/io-wq.h b/io_uring/io-wq.h
index bf5c4c533760574f87d0cab165be2f4c60ecbff3..48721cbd5f40bf2ae9102b2b0f35c53670cc8548 100644
--- a/io_uring/io-wq.h
+++ b/io_uring/io-wq.h
@@ -129,6 +129,7 @@ void io_wq_hash_work(struct io_wq_work *work, void *val);
 
 int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
 int io_wq_max_workers(struct io_wq *wq, int *new_count);
+bool io_wq_worker_stopped(void);
 
 static inline bool io_wq_is_hashed(struct io_wq_work *work)
 {
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index c555488f1cdc896afae6fd174612c41298bc188d..32cca265a7fb20958a39ca9ea9d7f09afb07f5af 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2665,6 +2665,11 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 			break;
 		}
 		ret = io_do_iopoll(ctx, &nr_events, min);
+
+		if (task_sigpending(current)) {
+			ret = -EINTR;
+			goto out;
+		}
 	} while (!ret && nr_events < min && !need_resched());
 out:
 	mutex_unlock(&ctx->uring_lock);
@@ -5571,6 +5576,7 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
 	if (ret > 0)
 		return;
 
+	io_tw_lock(req->ctx, locked);
 	io_poll_remove_entries(req);
 	spin_lock(&ctx->completion_lock);
 	hash_del(&req->hash_node);
@@ -6896,7 +6902,8 @@ static void io_wq_submit_work(struct io_wq_work *work)
 		 */
 		if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL))
 			break;
-
+		if (io_wq_worker_stopped())
+			break;
 		/*
 		 * If REQ_F_NOWAIT is set, then don't wait or retry with
 		 * poll. -EAGAIN is final for that case.
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index fbe13cfdb85b0c906a9f80bc8e689fa78a20f295..d3e0ae1168069c26db6a8415405e16449d2f0dab 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -8890,7 +8890,7 @@ static struct trace_array *trace_array_create(const char *name)
 	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
 		goto out_free_tr;
 
-	if (!alloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
 		goto out_free_tr;
 
 	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
@@ -9736,7 +9736,7 @@ __init static int tracer_alloc_buffers(void)
 	if (trace_create_savedcmd() < 0)
 		goto out_free_temp_buffer;
 
-	if (!alloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
 		goto out_free_savedcmd;
 
 	/* TODO: make the number of buffers hot pluggable with CPUS */
diff --git a/lib/idr.c b/lib/idr.c
index 8331b44dd39e33ed0ffbc351a7ad2913ccfc6ab2..da36054c3ca02058dcfa3338c712ba664b79b13a 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(idr_alloc);
  * @end: The maximum ID (exclusive).
  * @gfp: Memory allocation flags.
  *
- * Allocates an unused ID in the range specified by @nextid and @end.  If
+ * Allocates an unused ID in the range specified by @start and @end.  If
  * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
  * callers to use @start + N as @end as long as N is within integer range.
  * The search for an unused ID will start at the last ID allocated and will
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index 3ca717f1139774db6b64df470dee4938bba2d7b4..75638404ed573f02a88dc8ddd9bf5ede3659b759 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -86,7 +86,7 @@ static int __init test_pages(int *total_failures)
 	int failures = 0, num_tests = 0;
 	int i;
 
-	for (i = 0; i < 10; i++)
+	for (i = 0; i <= MAX_ORDER; i++)
 		num_tests += do_alloc_pages_order(i, &failures);
 
 	REPORT_FAILURES_IN_FN();
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index b8d082f557183a6223526f29099efcc114d5b8a7..3d5192177560db4a73687b5e322dfb19be43feb1 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1589,8 +1589,7 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
 	memset(&keys, 0, sizeof(keys));
 	__skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
-			   &keys, NULL, 0, 0, 0,
-			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+			   &keys, NULL, 0, 0, 0, 0);
 
 	return __flow_hash_from_keys(&keys, &hashrnd);
 }
diff --git a/net/core/sock.c b/net/core/sock.c
index 365e67a765f2329946dae258190201b4754d1197..c5b9d1fc3b0f484755a7b984569be1be127c9834 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -690,7 +690,8 @@ bool sk_mc_loop(struct sock *sk)
 		return false;
 	if (!sk)
 		return true;
-	switch (sk->sk_family) {
+	/* IPV6_ADDRFORM can change sk->sk_family under us. */
+	switch (READ_ONCE(sk->sk_family)) {
 	case AF_INET:
 		return inet_sk(sk)->mc_loop;
 #if IS_ENABLED(CONFIG_IPV6)
@@ -2314,9 +2315,9 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 		if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
 			break;
-		if (sk->sk_shutdown & SEND_SHUTDOWN)
+		if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
 			break;
-		if (sk->sk_err)
+		if (READ_ONCE(sk->sk_err))
 			break;
 		timeo = schedule_timeout(timeo);
 	}
@@ -2344,7 +2345,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 			goto failure;
 
 		err = -EPIPE;
-		if (sk->sk_shutdown & SEND_SHUTDOWN)
+		if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
 			goto failure;
 
 		if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index a2a8b952b3c55bf9e79253102d26591f34155cc9..398dc3e47d0c80b76fb76648d7059ba4008c6fa5 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -243,12 +243,17 @@ static int dccp_v4_err(struct sk_buff *skb, u32 info)
 	int err;
 	struct net *net = dev_net(skb->dev);
 
-	/* Only need dccph_dport & dccph_sport which are the first
-	 * 4 bytes in dccp header.
+	/* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x,
+	 * which is in byte 7 of the dccp header.
 	 * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us.
+	 *
+	 * Later on, we want to access the sequence number fields, which are
+	 * beyond 8 bytes, so we have to pskb_may_pull() ourselves.
 	 */
-	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
-	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
+	dh = (struct dccp_hdr *)(skb->data + offset);
+	if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
+		return -EINVAL;
+
+	iph = (struct iphdr *)skb->data;
 	dh = (struct dccp_hdr *)(skb->data + offset);
 
 	sk = __inet_lookup_established(net, &dccp_hashinfo,
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 64e91783860df3b6c20019cab84752cbfa635d91..bfe11e96af7c904fa1c3d444749b524561f94bdd 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -67,7 +67,7 @@ static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
 static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			u8 type, u8 code, int offset, __be32 info)
 {
-	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
+	const struct ipv6hdr *hdr;
 	const struct dccp_hdr *dh;
 	struct dccp_sock *dp;
 	struct ipv6_pinfo *np;
@@ -76,12 +76,17 @@ static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	__u64 seq;
 	struct net *net = dev_net(skb->dev);
 
-	/* Only need dccph_dport & dccph_sport which are the first
-	 * 4 bytes in dccp header.
+	/* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x,
+	 * which is in byte 7 of the dccp header.
 	 * Our caller (icmpv6_notify()) already pulled 8 bytes for us.
+	 *
+	 * Later on, we want to access the sequence number fields, which are
+	 * beyond 8 bytes, so we have to pskb_may_pull() ourselves.
 	 */
-	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
-	BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
+	dh = (struct dccp_hdr *)(skb->data + offset);
+	if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
+		return -EINVAL;
+
+	hdr = (const struct ipv6hdr *)skb->data;
 	dh = (struct dccp_hdr *)(skb->data + offset);
 
 	sk = __inet6_lookup_established(net, &dccp_hashinfo,
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index aec48e670fb6954f7045cfb3ec308576f820015e..2a02cb2edec2f406fedf14cf98b6152c2ef19d1a 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -531,6 +531,7 @@ static int fill_frame_info(struct hsr_frame_info *frame,
 		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
 		/* FIXME: */
 		netdev_warn_once(skb->dev, "VLAN not yet supported");
+		return -EINVAL;
 	}
 
 	frame->is_from_san = false;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 88b6120878cd9abfb3b9fe0423e5dbf0ffdd6375..da1ca8081c0357282b7461c5ac5dca57909be2fd 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -351,14 +351,14 @@ static void __inet_del_ifa(struct in_device *in_dev,
 {
 	struct in_ifaddr *promote = NULL;
 	struct in_ifaddr *ifa, *ifa1;
-	struct in_ifaddr *last_prim;
+	struct in_ifaddr __rcu **last_prim;
 	struct in_ifaddr *prev_prom = NULL;
 	int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
 
 	ASSERT_RTNL();
 
 	ifa1 = rtnl_dereference(*ifap);
-	last_prim = rtnl_dereference(in_dev->ifa_list);
+	last_prim = ifap;
 	if (in_dev->dead)
 		goto no_promotions;
 
@@ -372,7 +372,7 @@ static void __inet_del_ifa(struct in_device *in_dev,
 	while ((ifa = rtnl_dereference(*ifap1)) != NULL) {
 		if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
 		    ifa1->ifa_scope <= ifa->ifa_scope)
-			last_prim = ifa;
+			last_prim = &ifa->ifa_next;
 
 		if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
 		    ifa1->ifa_mask != ifa->ifa_mask ||
@@ -436,9 +436,9 @@ static void __inet_del_ifa(struct in_device *in_dev,
 
 			rcu_assign_pointer(prev_prom->ifa_next, next_sec);
 
-			last_sec = rtnl_dereference(last_prim->ifa_next);
+			last_sec = rtnl_dereference(*last_prim);
 			rcu_assign_pointer(promote->ifa_next, last_sec);
-			rcu_assign_pointer(last_prim->ifa_next, promote);
+			rcu_assign_pointer(*last_prim, promote);
 		}
 
 		promote->ifa_flags &= ~IFA_F_SECONDARY;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 4e94796ccdbd144c5748e78c70b2484acea1a324..ed20d6ac10dc2a337fc69526a437a71a2c52d4a1 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -278,7 +278,8 @@ void fib_release_info(struct fib_info *fi)
 				hlist_del(&nexthop_nh->nh_hash);
 			} endfor_nexthops(fi)
 		}
-		fi->fib_dead = 1;
+		/* Paired with READ_ONCE() from fib_table_lookup() */
+		WRITE_ONCE(fi->fib_dead, 1);
 		fib_info_put(fi);
 	}
 	spin_unlock_bh(&fib_info_lock);
@@ -1599,6 +1600,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
 link_it:
 	ofi = fib_find_info(fi);
 	if (ofi) {
+		/* fib_table_lookup() should not see @fi yet. */
 		fi->fib_dead = 1;
 		free_fib_info(fi);
 		ofi->fib_treeref++;
@@ -1637,6 +1639,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
 
 failure:
 	if (fi) {
+		/* fib_table_lookup() should not see @fi yet. */
 		fi->fib_dead = 1;
 		free_fib_info(fi);
 	}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index d11fb16234a6af17aa65349a58ae79acbaf49d7a..456240d2adc118c779ed5e3a9295faff38f908a3 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1534,7 +1534,8 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
 			}
 			if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
 				continue;
-			if (fi->fib_dead)
+			/* Paired with WRITE_ONCE() in fib_release_info() */
+			if (READ_ONCE(fi->fib_dead))
 				continue;
 			if (fa->fa_info->fib_scope < flp->flowi4_scope)
 				continue;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 72f249be44a096e51d2ca2678af679f494888bc6..569b64f3e1dcdc790c4592c58776f5b9e5e30b57 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -577,7 +577,8 @@ static void ip_sublist_rcv_finish(struct list_head *head)
 static struct sk_buff *ip_extract_route_hint(const struct net *net,
 					     struct sk_buff *skb, int rt_type)
 {
-	if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST)
+	if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST ||
+	    IPCB(skb)->flags & IPSKB_MULTIPATH)
 		return NULL;
 
 	return skb;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index b70572477394778e8948263fbf15ad7e78346360..445b1a2966d7902ca94cdc5cafe4b9e3142cb05c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2068,6 +2068,7 @@ static int ip_mkroute_input(struct sk_buff *skb,
 		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);
 
 		fib_select_multipath(res, h);
+		IPCB(skb)->flags |= IPSKB_MULTIPATH;
 	}
 #endif
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 9b414681500a51e3acfc8152f9cb82a981173d28..0eafe26c05f7717bc87960d7957dd630e6beb248 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1359,7 +1359,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
 	 * idev->desync_factor if it's larger
 	 */
 	cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
-	max_desync_factor = min_t(__u32,
+	max_desync_factor = min_t(long,
 				  idev->cnf.max_desync_factor,
 				  cnf_temp_preferred_lft - regen_advance);
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 32b516ab9c47561b1952b2dca0a860c457d1a2b1..39b3c7fbf9f660a72ce185e5e748c47040864c11 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1064,15 +1064,18 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 out_error:
 	kcm_push(kcm);
 
-	if (copied && sock->type == SOCK_SEQPACKET) {
+	if (sock->type == SOCK_SEQPACKET) {
 		/* Wrote some bytes before encountering an
 		 * error, return partial success.
 		 */
-		goto partial_message;
-	}
-
-	if (head != kcm->seq_skb)
+		if (copied)
+			goto partial_message;
+		if (head != kcm->seq_skb)
+			kfree_skb(head);
+	} else {
 		kfree_skb(head);
+		kcm->seq_skb = NULL;
+	}
 
 	err = sk_stream_error(sk, msg->msg_flags, err);
@@ -1982,6 +1985,8 @@ static __net_exit void kcm_exit_net(struct net *net)
 	 * that all multiplexors and psocks have been destroyed.
 	 */
 	WARN_ON(!list_empty(&knet->mux_list));
+
+	mutex_destroy(&knet->mutex);
 }
 
 static struct pernet_operations kcm_net_ops = {
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index 4f6b5b6fba3ed81340133e22b46141319fd05968..a5b63158f081c8d6935199b8d9c5241de7d520e8 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -61,6 +61,7 @@ struct fq_pie_sched_data {
 	struct pie_params p_params;
 	u32 ecn_prob;
 	u32 flows_cnt;
+	u32 flows_cursor;
 	u32 quantum;
 	u32 memory_limit;
 	u32 new_flow_count;
@@ -378,21 +379,31 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
 static void fq_pie_timer(struct timer_list *t)
 {
 	struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
+	unsigned long next, tupdate;
 	struct Qdisc *sch = q->sch;
 	spinlock_t *root_lock; /* to lock qdisc for probability calculations */
-	u32 idx;
+	int max_cnt, i;
 
 	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
 	spin_lock(root_lock);
 
-	for (idx = 0; idx < q->flows_cnt; idx++)
-		pie_calculate_probability(&q->p_params, &q->flows[idx].vars,
-					  q->flows[idx].backlog);
-
-	/* reset the timer to fire after 'tupdate' jiffies. */
-	if (q->p_params.tupdate)
-		mod_timer(&q->adapt_timer, jiffies + q->p_params.tupdate);
+	/* Limit this expensive loop to 2048 flows per round. */
+	max_cnt = min_t(int, q->flows_cnt - q->flows_cursor, 2048);
+	for (i = 0; i < max_cnt; i++) {
+		pie_calculate_probability(&q->p_params,
+					  &q->flows[q->flows_cursor].vars,
+					  q->flows[q->flows_cursor].backlog);
+		q->flows_cursor++;
+	}
+	tupdate = q->p_params.tupdate;
+	next = 0;
+	if (q->flows_cursor >= q->flows_cnt) {
+		q->flows_cursor = 0;
+		next = tupdate;
+	}
+	if (tupdate)
+		mod_timer(&q->adapt_timer, jiffies + next);
 
 	spin_unlock(root_lock);
 }
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 982a87b3e11f8f6edde79b3767629f0599789e1e..963b94517ec20ff88607bce4e3ffc4154b54a563 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -284,7 +284,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
 		assoc->init_retries, assoc->shutdown_retries,
 		assoc->rtx_data_chunks,
 		refcount_read(&sk->sk_wmem_alloc),
-		sk->sk_wmem_queued,
+		READ_ONCE(sk->sk_wmem_queued),
 		sk->sk_sndbuf,
 		sk->sk_rcvbuf);
 	seq_printf(seq, "\n");
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index fa4d31b507f297aca7bb71ec4e7d1828caf8dc71..68d53e3f0d07aa3a75b3b86f6f1e120ff18caec9 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -68,7 +68,7 @@
 #include
 
 /* Forward declarations for internal helper functions. */
-static bool sctp_writeable(struct sock *sk);
+static bool sctp_writeable(const struct sock *sk);
 static void sctp_wfree(struct sk_buff *skb);
 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
 				size_t msg_len);
@@ -138,7 +138,7 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
 	refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
 	asoc->sndbuf_used += chunk->skb->truesize + sizeof(struct sctp_chunk);
-	sk->sk_wmem_queued += chunk->skb->truesize + sizeof(struct sctp_chunk);
+	sk_wmem_queued_add(sk, chunk->skb->truesize + sizeof(struct sctp_chunk));
 	sk_mem_charge(sk, chunk->skb->truesize);
 }
@@ -8900,7 +8900,7 @@ static void sctp_wfree(struct sk_buff *skb)
 	struct sock *sk = asoc->base.sk;
 
 	sk_mem_uncharge(sk, skb->truesize);
-	sk->sk_wmem_queued -= skb->truesize + sizeof(struct sctp_chunk);
+	sk_wmem_queued_add(sk, -(skb->truesize + sizeof(struct sctp_chunk)));
 	asoc->sndbuf_used -= skb->truesize + sizeof(struct sctp_chunk);
 	WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk),
 				      &sk->sk_wmem_alloc));
@@ -9055,9 +9055,9 @@ void sctp_write_space(struct sock *sk)
  * UDP-style sockets or TCP-style sockets, this code should work.
  *  - Daisy
  */
-static bool sctp_writeable(struct sock *sk)
+static bool sctp_writeable(const struct sock *sk)
 {
-	return sk->sk_sndbuf > sk->sk_wmem_queued;
+	return READ_ONCE(sk->sk_sndbuf) > READ_ONCE(sk->sk_wmem_queued);
 }
 
 /* Wait for an association to go into ESTABLISHED state. If timeout is 0,
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index e84241ff4ac4f5fc76c31b42bb8a27b975c87f42..ab9ecdd1af0ac4b082fa9adceaa6c71e66aa4bf3 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -1101,6 +1101,7 @@ void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
 {
 	struct smc_link_group *lgr, *n;
 
+	spin_lock_bh(&smc_lgr_list.lock);
 	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
 		struct smc_link *link;
 
@@ -1115,6 +1116,7 @@ void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
 		if (link)
 			smc_llc_add_link_local(link);
 	}
+	spin_unlock_bh(&smc_lgr_list.lock);
 }
 
 /* link is down - switch connections to alternate link,
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 8d941cbba5cb7a0bc5799d4d4a4ad884e7296d3a..237488b1b58b6323c801f2b60a900c6d1784ae3a 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -587,7 +587,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
 	 *	What the above comment does talk about? --ANK(980817)
 	 */
 
-	if (unix_tot_inflight)
+	if (READ_ONCE(unix_tot_inflight))
 		unix_gc();		/* Garbage collect fds */
 }
diff --git a/net/unix/scm.c b/net/unix/scm.c
index aa27a02478dc1a7e4022f77e6ea7ac55f40b95c7..e8e2a00bb0f58512a82eaa577d456a7f8b50754c 100644
--- a/net/unix/scm.c
+++ b/net/unix/scm.c
@@ -63,7 +63,7 @@ void unix_inflight(struct user_struct *user, struct file *fp)
 		/* Paired with READ_ONCE() in wait_for_unix_gc() */
 		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
 	}
-	user->unix_inflight++;
+	WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
 	spin_unlock(&unix_gc_lock);
 }
 
@@ -84,7 +84,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
 		/* Paired with READ_ONCE() in wait_for_unix_gc() */
 		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
 	}
-	user->unix_inflight--;
+	WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
 	spin_unlock(&unix_gc_lock);
 }
 
@@ -98,7 +98,7 @@ static inline bool too_many_unix_fds(struct task_struct *p)
 {
 	struct user_struct *user = current_user();
 
-	if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
+	if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
 		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
 	return false;
 }
diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c
index 748da578b418c4acb53f454e17e2370d2050ceb6..d1f5bcff4b62d68d83e6134b0c883876619f07a2 100644
--- a/scripts/kconfig/preprocess.c
+++ b/scripts/kconfig/preprocess.c
@@ -396,6 +396,9 @@ static char *eval_clause(const char *str, size_t len, int argc, char *argv[])
 
 		p++;
 	}
+
+	if (new_argc >= FUNCTION_MAX_ARGS)
+		pperror("too many function arguments");
 	new_argv[new_argc++] = prev;
 
 	/*
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 7c64134472c7750744c216a1240eea1ea7dc6cb7..ee30372f771335bf9bdfd2ece698db895912fffe 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1743,6 +1743,7 @@ int cmd_top(int argc, const char **argv)
 	top.session = perf_session__new(NULL, false, NULL);
 	if (IS_ERR(top.session)) {
 		status = PTR_ERR(top.session);
+		top.session = NULL;
 		goto out_delete_evlist;
 	}
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index b0e1880cf992b15150ac645595aba7e86d4367e1..f2586e46d53e8dc9254a65f22bed4422cfb9304d 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -407,11 +407,6 @@ static bool hist_browser__selection_has_children(struct hist_browser *browser)
 	return container_of(ms, struct callchain_list, ms)->has_children;
 }
 
-static bool hist_browser__he_selection_unfolded(struct hist_browser *browser)
-{
-	return browser->he_selection ? browser->he_selection->unfolded : false;
-}
-
 static bool hist_browser__selection_unfolded(struct hist_browser *browser)
 {
 	struct hist_entry *he = browser->he_selection;
@@ -584,8 +579,8 @@ static int hierarchy_set_folding(struct hist_browser *hb, struct hist_entry *he,
 	return n;
 }
 
-static void __hist_entry__set_folding(struct hist_entry *he,
-				      struct hist_browser *hb, bool unfold)
+static void hist_entry__set_folding(struct hist_entry *he,
+				    struct hist_browser *hb, bool unfold)
 {
 	hist_entry__init_have_children(he);
 	he->unfolded = unfold ? he->has_children : false;
@@ -603,34 +598,12 @@ static void __hist_entry__set_folding(struct hist_entry *he,
 		he->nr_rows = 0;
 }
 
-static void hist_entry__set_folding(struct hist_entry *he,
-				    struct hist_browser *browser, bool unfold)
-{
-	double percent;
-
-	percent = hist_entry__get_percent_limit(he);
-	if (he->filtered || percent < browser->min_pcnt)
-		return;
-
-	__hist_entry__set_folding(he, browser, unfold);
-
-	if (!he->depth || unfold)
-		browser->nr_hierarchy_entries++;
-	if (he->leaf)
-		browser->nr_callchain_rows += he->nr_rows;
-	else if (unfold && !hist_entry__has_hierarchy_children(he, browser->min_pcnt)) {
-		browser->nr_hierarchy_entries++;
-		he->has_no_entry = true;
-		he->nr_rows = 1;
-	} else
-		he->has_no_entry = false;
-}
-
 static void
 __hist_browser__set_folding(struct hist_browser *browser, bool unfold)
 {
 	struct rb_node *nd;
 	struct hist_entry *he;
+	double percent;
 
 	nd = rb_first_cached(&browser->hists->entries);
 	while (nd) {
@@ -640,6 +613,21 @@ __hist_browser__set_folding(struct hist_browser *browser, bool unfold)
 		nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD);
 
 		hist_entry__set_folding(he, browser, unfold);
+
+		percent = hist_entry__get_percent_limit(he);
+		if (he->filtered || percent < browser->min_pcnt)
+			continue;
+
+		if (!he->depth || unfold)
+			browser->nr_hierarchy_entries++;
+		if (he->leaf)
+			browser->nr_callchain_rows += he->nr_rows;
+		else if (unfold && !hist_entry__has_hierarchy_children(he, browser->min_pcnt)) {
+			browser->nr_hierarchy_entries++;
+			he->has_no_entry = true;
+			he->nr_rows = 1;
+		} else
+			he->has_no_entry = false;
 	}
 }
 
@@ -659,8 +647,10 @@ static void hist_browser__set_folding_selected(struct hist_browser *browser, boo
 	if (!browser->he_selection)
 		return;
 
-	hist_entry__set_folding(browser->he_selection, browser, unfold);
-	browser->b.nr_entries = hist_browser__nr_entries(browser);
+	if (unfold == browser->he_selection->unfolded)
+		return;
+
+	hist_browser__toggle_fold(browser);
 }
 
 static void ui_browser__warn_lost_events(struct ui_browser *browser)
@@ -731,8 +721,8 @@ static int hist_browser__handle_hotkey(struct hist_browser *browser, bool warn_l
 		hist_browser__set_folding(browser, true);
 		break;
 	case 'e':
-		/* Expand the selected entry. */
-		hist_browser__set_folding_selected(browser, !hist_browser__he_selection_unfolded(browser));
+		/* Toggle expand/collapse the selected entry. */
+		hist_browser__toggle_fold(browser);
 		break;
 	case 'H':
 		browser->show_headers = !browser->show_headers;
@@ -1778,7 +1768,7 @@ static void hists_browser__hierarchy_headers(struct hist_browser *browser)
 	hists_browser__scnprintf_hierarchy_headers(browser, headers,
 						   sizeof(headers));
 
-	ui_browser__gotorc(&browser->b, 0, 0);
+	ui_browser__gotorc_title(&browser->b, 0, 0);
 	ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
 	ui_browser__write_nstring(&browser->b, headers, browser->b.width + 1);
 }
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 4aaaf23b4878b514a781493ea284112518e994e8..1f56a29802e46276a9b71bc663d1fe3395953737 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1718,8 +1718,11 @@ static int symbol__disassemble_bpf(struct symbol *sym,
 	perf_exe(tpath, sizeof(tpath));
 
 	bfdf = bfd_openr(tpath, NULL);
-	assert(bfdf);
-	assert(bfd_check_format(bfdf, bfd_object));
+	if (bfdf == NULL)
+		abort();
+
+	if (!bfd_check_format(bfdf, bfd_object))
+		abort();
 
 	s = open_memstream(&buf, &buf_size);
 	if (!s) {
@@ -1767,7 +1770,8 @@ static int symbol__disassemble_bpf(struct symbol *sym,
 #else
 	disassemble = disassembler(bfdf);
 #endif
-	assert(disassemble);
+	if (disassemble == NULL)
+		abort();
 
 	fflush(s);
 	do {
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index be850e9f88520dcaafa8fac94b79975159188aa5..dd06770b43f1c2d0df7a941ade650d14485f39fd 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -3987,7 +3987,8 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
 			     union perf_event *event,
 			     struct evlist **pevlist)
 {
-	u32 i, ids, n_ids;
+	u32 i, n_ids;
+	u64 *ids;
 	struct evsel *evsel;
 	struct evlist *evlist = *pevlist;
 
@@ -4003,9 +4004,8 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
 
 	evlist__add(evlist, evsel);
 
-	ids = event->header.size;
-	ids -= (void *)&event->attr.id - (void *)event;
-	n_ids = ids / sizeof(u64);
+	n_ids = event->header.size - sizeof(event->header) - event->attr.attr.size;
+	n_ids = n_ids / sizeof(u64);
 	/*
 	 * We don't have the cpu and thread maps on the header, so
 	 * for allocating the perf_sample_id table we fake 1 cpu and
@@ -4014,8 +4014,9 @@ int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
 	if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
 		return -ENOMEM;
 
+	ids = (void *)&event->attr.attr + event->attr.attr.size;
 	for (i = 0; i < n_ids; i++) {
-		perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, event->attr.id[i]);
+		perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, ids[i]);
 	}
 
 	return 0;
diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh
index cc9c846585f05de2bd6670dc64931ab3f553d351..83616f0779a7e6e9948fc2a7f5da7403bacc13bf 100644
--- a/tools/testing/selftests/kselftest/runner.sh
+++ b/tools/testing/selftests/kselftest/runner.sh
@@ -33,9 +33,10 @@ tap_timeout()
 {
 	# Make sure tests will time out if utility is available.
 	if [ -x /usr/bin/timeout ] ; then
-		/usr/bin/timeout --foreground "$kselftest_timeout" "$1"
+		/usr/bin/timeout --foreground "$kselftest_timeout" \
+			/usr/bin/timeout "$kselftest_timeout" $1
 	else
-		"$1"
+		$1
 	fi
 }
 
@@ -65,17 +66,25 @@ run_one()
 	TEST_HDR_MSG="selftests: $DIR: $BASENAME_TEST"
 	echo "# $TEST_HDR_MSG"
-	if [ ! -x "$TEST" ]; then
-		echo -n "# Warning: file $TEST is "
-		if [ ! -e "$TEST" ]; then
-			echo "missing!"
-		else
-			echo "not executable, correct this."
-		fi
+	if [ ! -e "$TEST" ]; then
+		echo "# Warning: file $TEST is missing!"
 		echo "not ok $test_num $TEST_HDR_MSG"
 	else
+		cmd="./$BASENAME_TEST"
+		if [ ! -x "$TEST" ]; then
+			echo "# Warning: file $TEST is not executable"
+
+			if [ $(head -n 1 "$TEST" | cut -c -2) = "#!" ]
+			then
+				interpreter=$(head -n 1 "$TEST" | cut -c 3-)
+				cmd="$interpreter ./$BASENAME_TEST"
+			else
+				echo "not ok $test_num $TEST_HDR_MSG"
+				return
+			fi
+		fi
 		cd `dirname $TEST` > /dev/null
-		((((( tap_timeout ./$BASENAME_TEST 2>&1; echo $? >&3) |
+		((((( tap_timeout "$cmd" 2>&1; echo $? >&3) |
 			tap_prefix >&4) 3>&1) |
 			(read xs; exit $xs)) 4>>"$logfile" &&
 		echo "ok $test_num $TEST_HDR_MSG") ||