diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst index fd913ae64ea87a909595396b059a7b98138abd47..131f93a69d92bc5ef9487f0789916d0df694f655 100644 --- a/Documentation/admin-guide/cgroup-v1/memory.rst +++ b/Documentation/admin-guide/cgroup-v1/memory.rst @@ -82,6 +82,8 @@ Brief summary of control files. memory.swappiness set/show swappiness parameter of vmscan (See sysctl's vm.swappiness) memory.move_charge_at_immigrate set/show controls of moving charges + This knob is deprecated and shouldn't be + used. memory.oom_control set/show oom controls. memory.numa_stat show the number of memory usage per numa node @@ -740,8 +742,15 @@ NOTE2: It is recommended to set the soft limit always below the hard limit, otherwise the hard limit will take precedence. -8. Move charges at task migration -================================= +8. Move charges at task migration (DEPRECATED!) +=============================================== + +THIS IS DEPRECATED! + +It's expensive and unreliable! It's better practice to launch workload +tasks directly from inside their target cgroup. Use dedicated workload +cgroups to allow fine-grained policy adjustments without having to +move physical pages between control domains. Users can move charges associated with a task along with task migration, that is, uncharge task's pages from the old cgroup and charge them to the new cgroup. diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst index 7e061ed449aaab9ebfbb4aff73640021aa55e848..0fba3758d0da89273376b126717f8e031cf18630 100644 --- a/Documentation/admin-guide/hw-vuln/spectre.rst +++ b/Documentation/admin-guide/hw-vuln/spectre.rst @@ -479,8 +479,16 @@ Spectre variant 2 On Intel Skylake-era systems the mitigation covers most, but not all, cases. See :ref:`[3] <spec_ref3>` for more details. - On CPUs with hardware mitigation for Spectre variant 2 (e.g. Enhanced - IBRS on x86), retpoline is automatically disabled at run time. + On CPUs with hardware mitigation for Spectre variant 2 (e.g. IBRS + or enhanced IBRS on x86), retpoline is automatically disabled at run time. + + Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at + boot, by setting the IBRS bit, and they're automatically protected against + Spectre v2 variant attacks, including cross-thread branch target injections + on SMT systems (STIBP). In other words, eIBRS enables STIBP too. + + Legacy IBRS systems clear the IBRS bit on exit to userspace and + therefore explicitly enable STIBP to retain that protection. The retpoline mitigation is turned on by default on vulnerable CPUs. It can be forced on or off by the administrator @@ -504,9 +512,12 @@ Spectre variant 2 For Spectre variant 2 mitigation, individual user programs can be compiled with return trampolines for indirect branches. This protects them from consuming poisoned entries in the branch - target buffer left by malicious software. Alternatively, the - programs can disable their indirect branch speculation via prctl() - (See :ref:`Documentation/userspace-api/spec_ctrl.rst <set_spec_ctrl>`). + target buffer left by malicious software. + + On legacy IBRS systems, at return to userspace, implicit STIBP is disabled + because the kernel clears the IBRS bit. In this case, the userspace programs + can disable indirect branch speculation via prctl() (See + :ref:`Documentation/userspace-api/spec_ctrl.rst <set_spec_ctrl>`).
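As a minimal illustration of the prctl() interface referenced above (an illustrative sketch, not part of the patch itself; it assumes a kernel with speculation control support, with the PR_* constants coming from linux/prctl.h)::

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    int main(void)
    {
            /* Disable indirect branch speculation for this task; on x86
             * this turns on STIBP while the task runs and uses IBPB when
             * switching to/from it, as described below. */
            if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
                      PR_SPEC_DISABLE, 0, 0)) {
                    perror("PR_SET_SPECULATION_CTRL");
                    return 1;
            }

            /* The GET form returns a bitmask of PR_SPEC_* flags. */
            int state = prctl(PR_GET_SPECULATION_CTRL,
                              PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
            printf("indirect branch speculation state: %d\n", state);
            return 0;
    }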
On x86, this will turn on STIBP to guard against attacks from the sibling thread when the user program is running, and use IBPB to flush the branch target buffer when switching to/from the program. diff --git a/Documentation/admin-guide/kdump/gdbmacros.txt b/Documentation/admin-guide/kdump/gdbmacros.txt index 82aecdcae8a6c182d0bebea90492b79a199510a8..030de95e3e6b2464f130ecd2f2e93f8ce804d151 100644 --- a/Documentation/admin-guide/kdump/gdbmacros.txt +++ b/Documentation/admin-guide/kdump/gdbmacros.txt @@ -312,10 +312,10 @@ define dmesg set var $prev_flags = $info->flags end - set var $id = ($id + 1) & $id_mask if ($id == $end_id) loop_break end + set var $id = ($id + 1) & $id_mask end end document dmesg diff --git a/Documentation/dev-tools/gdb-kernel-debugging.rst b/Documentation/dev-tools/gdb-kernel-debugging.rst index 4756f6b3a04e54d6b09254809fc5b6ce84499d42..10cdd990b63d00cd56a38c7db1853e514965d285 100644 --- a/Documentation/dev-tools/gdb-kernel-debugging.rst +++ b/Documentation/dev-tools/gdb-kernel-debugging.rst @@ -39,6 +39,10 @@ Setup this mode. In this case, you should build the kernel with CONFIG_RANDOMIZE_BASE disabled if the architecture supports KASLR. +- Build the gdb scripts (required on kernels v5.1 and above):: + + make scripts_gdb + - Enable the gdb stub of QEMU/KVM, either - at VM startup time by appending "-s" to the QEMU command line diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 2b4b64797191fb96d1ab62491f7f655b07bad14c..08295f488d057e1016a7b8160cb3c5233a5fcb93 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -4031,6 +4031,18 @@ not holding a previously reported uncorrected error). :Parameters: struct kvm_s390_cmma_log (in, out) :Returns: 0 on success, a negative value on error +Errors: + + ====== ============================================================= + ENOMEM not enough memory can be allocated to complete the task + ENXIO if CMMA is not enabled + EINVAL if KVM_S390_CMMA_PEEK is not set but migration mode was not enabled + EINVAL if KVM_S390_CMMA_PEEK is not set but dirty tracking has been + disabled (and thus migration mode was automatically disabled) + EFAULT if the userspace address is invalid or if no page table is + present for the addresses (e.g. when using hugepages). + ====== ============================================================= + This ioctl is used to get the values of the CMMA bits on the s390 architecture. It is meant to be used in two scenarios: @@ -4111,12 +4123,6 @@ mask is unused. values points to the userspace buffer where the result will be stored. -This ioctl can fail with -ENOMEM if not enough memory can be allocated to -complete the task, with -ENXIO if CMMA is not enabled, with -EINVAL if -KVM_S390_CMMA_PEEK is not set but migration mode was not enabled, with --EFAULT if the userspace address is invalid or if no page table is -present for the addresses (e.g. when using hugepages). - 4.108 KVM_S390_SET_CMMA_BITS ---------------------------- diff --git a/Documentation/virt/kvm/devices/vm.rst b/Documentation/virt/kvm/devices/vm.rst index 60acc39e0e937c52d5e98c160ccf5f8fb9825264..147efec626e5218825e36d7e4035e07e4892d86b 100644 --- a/Documentation/virt/kvm/devices/vm.rst +++ b/Documentation/virt/kvm/devices/vm.rst @@ -302,6 +302,10 @@ Allows userspace to start migration mode, needed for PGSTE migration. Setting this attribute when migration mode is already active will have no effects. +Dirty tracking must be enabled on all memslots, else -EINVAL is returned. 
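For illustration, a minimal sketch of starting migration mode (not part of the patch itself; it assumes an s390 host, a VM file descriptor obtained via KVM_CREATE_VM, and the UAPI definitions that linux/kvm.h pulls in)::

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Start migration mode; every memslot must already have been
     * registered with KVM_MEM_LOG_DIRTY_PAGES, otherwise the ioctl
     * fails with EINVAL as described above. */
    static int start_migration_mode(int vm_fd)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_S390_VM_MIGRATION,
                    .attr = KVM_S390_VM_MIGRATION_START,
            };

            if (ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr)) {
                    perror("KVM_SET_DEVICE_ATTR");
                    return -1;
            }
            return 0;
    }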
When +dirty tracking is disabled on any memslot, migration mode is automatically +stopped. + :Parameters: none :Returns: -ENOMEM if there is not enough free memory to start migration mode; -EINVAL if the state of the VM is invalid (e.g. no memory defined); diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index 8b0f81a58b948b4735d3e8207b5601bffc0dcaf9..751d3197ca766c4d6c7bde82cee27383f327c20b 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -235,7 +235,21 @@ do_entIF(unsigned long type, struct pt_regs *regs) { int signo, code; - if ((regs->ps & ~IPL_MAX) == 0) { + if (type == 3) { /* FEN fault */ + /* Irritating users can call PAL_clrfen to disable the + FPU for the process. The kernel will then trap in + do_switch_stack and undo_switch_stack when we try + to save and restore the FP registers. + + Given that GCC by default generates code that uses the + FP registers, PAL_clrfen is not useful except for DoS + attacks. So turn the bleeding FPU back on and be done + with it. */ + current_thread_info()->pcb.flags |= 1; + __reload_thread(&current_thread_info()->pcb); + return; + } + if (!user_mode(regs)) { if (type == 1) { const unsigned int *data = (const unsigned int *) regs->pc; @@ -368,20 +382,6 @@ do_entIF(unsigned long type, struct pt_regs *regs) } break; - case 3: /* FEN fault */ - /* Irritating users can call PAL_clrfen to disable the - FPU for the process. The kernel will then trap in - do_switch_stack and undo_switch_stack when we try - to save and restore the FP registers. - - Given that GCC by default generates code that uses the - FP registers, PAL_clrfen is not useful except for DoS - attacks. So turn the bleeding FPU back on and be done - with it. */ - current_thread_info()->pcb.flags |= 1; - __reload_thread(&current_thread_info()->pcb); - return; - case 5: /* illoc */ default: /* unexpected instruction-fault type */ ; diff --git a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi index 021d9fc1b49235c75342ba8d1d949f5f77fc4f25..27a1a895266559190e4708321d9c896cf89f9aee 100644 --- a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi +++ b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi @@ -10,7 +10,7 @@ / { thermal-zones { cpu_thermal: cpu-thermal { - thermal-sensors = <&tmu 0>; + thermal-sensors = <&tmu>; polling-delay-passive = <0>; polling-delay = <0>; trips { diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi index a1e54449f33f011f0a9d1181464e5991ee754b5b..41f0e64b1365ecfb6a654aef5b42711e2bfe5414 100644 --- a/arch/arm/boot/dts/exynos4.dtsi +++ b/arch/arm/boot/dts/exynos4.dtsi @@ -605,7 +605,7 @@ i2c_8: i2c@138e0000 { status = "disabled"; hdmi_i2c_phy: hdmiphy@38 { - compatible = "exynos4210-hdmiphy"; + compatible = "samsung,exynos4210-hdmiphy"; reg = <0x38>; }; }; diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi index fddc661ded28f6664d4c7cb5c645fb83b7773876..448e1b153a01b4d0108c5685f846901bc9991096 100644 --- a/arch/arm/boot/dts/exynos4210.dtsi +++ b/arch/arm/boot/dts/exynos4210.dtsi @@ -382,7 +382,6 @@ &cpu_alert2 { &cpu_thermal { polling-delay-passive = <0>; polling-delay = <0>; - thermal-sensors = <&tmu 0>; }; &gic { diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index bd2d8835dd3695b02660b60aba6d40cf207ccfc8..62051e600b325914971630dc666913a9be1294bb 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi @@ -1109,7 +1109,7 @@ timer { &cpu_thermal { polling-delay-passive = <0>; polling-delay = 
<0>; - thermal-sensors = <&tmu 0>; + thermal-sensors = <&tmu>; cooling-maps { map0 { diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts index bd1d8499a108b658c8a0c1f9d753036946161f39..147c077e8855191aded667ff2fde9460b5eac0b3 100644 --- a/arch/arm/boot/dts/exynos5410-odroidxu.dts +++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts @@ -116,7 +116,6 @@ &clock_audss { }; &cpu0_thermal { - thermal-sensors = <&tmu_cpu0 0>; polling-delay-passive = <0>; polling-delay = <0>; diff --git a/arch/arm/boot/dts/exynos5422-odroidhc1.dts b/arch/arm/boot/dts/exynos5422-odroidhc1.dts index 88f5c150a30a17df075a166472572e6724cd42d4..a1871d4a0f2a9fd7ae47d96a50d5ffe45f7f9489 100644 --- a/arch/arm/boot/dts/exynos5422-odroidhc1.dts +++ b/arch/arm/boot/dts/exynos5422-odroidhc1.dts @@ -29,7 +29,7 @@ blueled { thermal-zones { cpu0_thermal: cpu0-thermal { - thermal-sensors = <&tmu_cpu0 0>; + thermal-sensors = <&tmu_cpu0>; trips { cpu0_alert0: cpu-alert-0 { temperature = <70000>; /* millicelsius */ @@ -84,7 +84,7 @@ map1 { }; }; cpu1_thermal: cpu1-thermal { - thermal-sensors = <&tmu_cpu1 0>; + thermal-sensors = <&tmu_cpu1>; trips { cpu1_alert0: cpu-alert-0 { temperature = <70000>; @@ -128,7 +128,7 @@ map1 { }; }; cpu2_thermal: cpu2-thermal { - thermal-sensors = <&tmu_cpu2 0>; + thermal-sensors = <&tmu_cpu2>; trips { cpu2_alert0: cpu-alert-0 { temperature = <70000>; @@ -172,7 +172,7 @@ map1 { }; }; cpu3_thermal: cpu3-thermal { - thermal-sensors = <&tmu_cpu3 0>; + thermal-sensors = <&tmu_cpu3>; trips { cpu3_alert0: cpu-alert-0 { temperature = <70000>; @@ -216,7 +216,7 @@ map1 { }; }; gpu_thermal: gpu-thermal { - thermal-sensors = <&tmu_gpu 0>; + thermal-sensors = <&tmu_gpu>; trips { gpu_alert0: gpu-alert-0 { temperature = <70000>; diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi index 5da2d81e3be274744c534c32f967a24791983e3a..099ed4384be891123fe3dbec8617c67b8ab73fa8 100644 --- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi +++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi @@ -50,7 +50,7 @@ fan0: pwm-fan { thermal-zones { cpu0_thermal: cpu0-thermal { - thermal-sensors = <&tmu_cpu0 0>; + thermal-sensors = <&tmu_cpu0>; polling-delay-passive = <250>; polling-delay = <0>; trips { @@ -139,7 +139,7 @@ cpu0_cooling_map4: map4 { }; }; cpu1_thermal: cpu1-thermal { - thermal-sensors = <&tmu_cpu1 0>; + thermal-sensors = <&tmu_cpu1>; polling-delay-passive = <250>; polling-delay = <0>; trips { @@ -212,7 +212,7 @@ cpu1_cooling_map4: map4 { }; }; cpu2_thermal: cpu2-thermal { - thermal-sensors = <&tmu_cpu2 0>; + thermal-sensors = <&tmu_cpu2>; polling-delay-passive = <250>; polling-delay = <0>; trips { @@ -285,7 +285,7 @@ cpu2_cooling_map4: map4 { }; }; cpu3_thermal: cpu3-thermal { - thermal-sensors = <&tmu_cpu3 0>; + thermal-sensors = <&tmu_cpu3>; polling-delay-passive = <250>; polling-delay = <0>; trips { @@ -358,7 +358,7 @@ cpu3_cooling_map4: map4 { }; }; gpu_thermal: gpu-thermal { - thermal-sensors = <&tmu_gpu 0>; + thermal-sensors = <&tmu_gpu>; polling-delay-passive = <250>; polling-delay = <0>; trips { diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h index 25fa651c937d5b24315d519ccce9210af8cfc7e1..ebdf4d910af2f1429832115bb3e937c1d4d1f90b 100644 --- a/arch/mips/include/asm/syscall.h +++ b/arch/mips/include/asm/syscall.h @@ -38,7 +38,7 @@ static inline bool mips_syscall_is_indirect(struct task_struct *task, static inline long syscall_get_nr(struct task_struct *task, struct pt_regs 
*regs) { - return current_thread_info()->syscall; + return task_thread_info(task)->syscall; } static inline void mips_syscall_update_nr(struct task_struct *task, diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index aae24dc75df61bfed0c062822f60645bb783f380..0f7e7a68d57bb20add0c33f81c1f4f94d3247286 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -241,6 +241,7 @@ static void pop_kprobe(struct kprobe_ctlblk *kcb) { __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); kcb->kprobe_status = kcb->prev_kprobe.status; + kcb->prev_kprobe.kp = NULL; } NOKPROBE_SYMBOL(pop_kprobe); @@ -402,12 +403,11 @@ static int post_kprobe_handler(struct pt_regs *regs) if (!p) return 0; + resume_execution(p, regs); if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) { kcb->kprobe_status = KPROBE_HIT_SSDONE; p->post_handler(p, regs, 0); } - - resume_execution(p, regs); pop_kprobe(kcb); preempt_enable_no_resched(); diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 9505bdb0aa54499e154a2322c8503c7a2e2e391d..d7291eb0d0c070739aee2ffbcfbc7423ed726faf 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -188,5 +188,6 @@ SECTIONS DISCARDS /DISCARD/ : { *(.eh_frame) + *(.interp) } } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 59db85fb63e1cb0015bfb9cfc4c728a76e617b36..7ffc73ba220fbe34111fec5ae674d1b3f58ba131 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -5012,6 +5012,23 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, /* When we are protected, we should not change the memory slots */ if (kvm_s390_pv_get_handle(kvm)) return -EINVAL; + + if (!kvm->arch.migration_mode) + return 0; + + /* + * Turn off migration mode when: + * - userspace creates a new memslot with dirty logging off, + * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and + * dirty logging is turned off. + * Migration mode expects dirty page logging being enabled to store + * its dirty bitmap. + */ + if (change != KVM_MR_DELETE && + !(mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) + WARN(kvm_s390_vm_stop_migration(kvm), + "Failed to stop migration mode"); + return 0; } diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 5060956b8e7d61181452d47d998d181ef1877f0f..1bc42ce2659908778078528a67c169c4c719f6a0 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c @@ -289,15 +289,17 @@ segment_overlaps_others (struct dcss_segment *seg) /* * real segment loading function, called from segment_load + * Must return either an error code < 0, or the segment type code >= 0 */ static int __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *end) { unsigned long start_addr, end_addr, dummy; struct dcss_segment *seg; - int rc, diag_cc; + int rc, diag_cc, segtype; start_addr = end_addr = 0; + segtype = -1; seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA); if (seg == NULL) { rc = -ENOMEM; @@ -326,9 +328,9 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long seg->res_name[8] = '\0'; strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name)); seg->res->name = seg->res_name; - rc = seg->vm_segtype; - if (rc == SEG_TYPE_SC || - ((rc == SEG_TYPE_SR || rc == SEG_TYPE_ER) && !do_nonshared)) + segtype = seg->vm_segtype; + if (segtype == SEG_TYPE_SC || + ((segtype == SEG_TYPE_SR || segtype == SEG_TYPE_ER) && !do_nonshared)) seg->res->flags |= IORESOURCE_READONLY; /* Check for overlapping resources before adding the mapping. 
*/ @@ -386,7 +388,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long out_free: kfree(seg); out: - return rc; + return rc < 0 ? rc : segtype; } /* diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index f73327397b898a3b0ec7cb801bd16070324b6894..509cc0262fdc254338aa164675c070d88576ac31 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -131,7 +131,7 @@ static inline unsigned int x86_cpuid_family(void) int __init microcode_init(void); extern void __init load_ucode_bsp(void); extern void load_ucode_ap(void); -void reload_early_microcode(void); +void reload_early_microcode(unsigned int cpu); extern bool get_builtin_firmware(struct cpio_data *cd, const char *name); extern bool initrd_gone; void microcode_bsp_resume(void); @@ -139,7 +139,7 @@ void microcode_bsp_resume(void); static inline int __init microcode_init(void) { return 0; }; static inline void __init load_ucode_bsp(void) { } static inline void load_ucode_ap(void) { } -static inline void reload_early_microcode(void) { } +static inline void reload_early_microcode(unsigned int cpu) { } static inline void microcode_bsp_resume(void) { } static inline bool get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; } diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h index 7063b5a43220a96a363f97e3567fabd0fbc8be70..a645b25ee442a7ccc936fb903e7e9a3ad65f2411 100644 --- a/arch/x86/include/asm/microcode_amd.h +++ b/arch/x86/include/asm/microcode_amd.h @@ -47,12 +47,12 @@ struct microcode_amd { extern void __init load_ucode_amd_bsp(unsigned int family); extern void load_ucode_amd_ap(unsigned int family); extern int __init save_microcode_in_initrd_amd(unsigned int family); -void reload_ucode_amd(void); +void reload_ucode_amd(unsigned int cpu); #else static inline void __init load_ucode_amd_bsp(unsigned int family) {} static inline void load_ucode_amd_ap(unsigned int family) {} static inline int __init save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } -static inline void reload_ucode_amd(void) {} +static inline void reload_ucode_amd(unsigned int cpu) {} #endif #endif /* _ASM_X86_MICROCODE_AMD_H */ diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h index 04c17be9b5fdab86845999cc594634558fec18e3..bc5b4d788c08dbc8f47bb8d72c7f267982e716e6 100644 --- a/arch/x86/include/asm/reboot.h +++ b/arch/x86/include/asm/reboot.h @@ -25,6 +25,8 @@ void __noreturn machine_real_restart(unsigned int type); #define MRR_BIOS 0 #define MRR_APM 1 +void cpu_emergency_disable_virtualization(void); + typedef void (*nmi_shootdown_cb)(int, struct pt_regs*); void nmi_panic_self_stop(struct pt_regs *regs); void nmi_shootdown_cpus(nmi_shootdown_cb callback); diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index fda3e7747c2238f2d8f1b81594026b81c631caae..8eefa3386d8cefd655e4b5c3f8e98e446d2a356e 100644 --- a/arch/x86/include/asm/virtext.h +++ b/arch/x86/include/asm/virtext.h @@ -120,7 +120,21 @@ static inline void cpu_svm_disable(void) wrmsrl(MSR_VM_HSAVE_PA, 0); rdmsrl(MSR_EFER, efer); - wrmsrl(MSR_EFER, efer & ~EFER_SVME); + if (efer & EFER_SVME) { + /* + * Force GIF=1 prior to disabling SVM to ensure INIT and NMI + * aren't blocked, e.g. if a fatal error occurred between CLGI + * and STGI. Note, STGI may #UD if SVM is disabled from NMI + * context between reading EFER and executing STGI. 
In that + * case, GIF must already be set, otherwise the NMI would have + * been blocked, so just eat the fault. + */ + asm_volatile_goto("1: stgi\n\t" + _ASM_EXTABLE(1b, %l[fault]) + ::: "memory" : fault); +fault: + wrmsrl(MSR_EFER, efer & ~EFER_SVME); + } } /** Makes sure SVM is disabled, if it is supported on the CPU diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 234a96f25248dbb32dd3c68f133500dfd0d003c6..d3bce6d380ed6a6f6848b012e4897c1cd0aaf199 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -55,7 +55,9 @@ struct cont_desc { }; static u32 ucode_new_rev; -static u8 amd_ucode_patch[PATCH_MAX_SIZE]; + +/* One blob per node. */ +static u8 amd_ucode_patch[MAX_NUMNODES][PATCH_MAX_SIZE]; /* * Microcode patch container file is prepended to the initrd in cpio @@ -429,7 +431,7 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch); #else new_rev = &ucode_new_rev; - patch = &amd_ucode_patch; + patch = &amd_ucode_patch[0]; #endif desc.cpuid_1_eax = cpuid_1_eax; @@ -548,8 +550,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax) apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false); } -static enum ucode_state -load_microcode_amd(bool save, u8 family, const u8 *data, size_t size); +static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) { @@ -567,19 +568,19 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) if (!desc.mc) return -EINVAL; - ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size); + ret = load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size); if (ret > UCODE_UPDATED) return -EINVAL; return 0; } -void reload_ucode_amd(void) +void reload_ucode_amd(unsigned int cpu) { - struct microcode_amd *mc; u32 rev, dummy __always_unused; + struct microcode_amd *mc; - mc = (struct microcode_amd *)amd_ucode_patch; + mc = (struct microcode_amd *)amd_ucode_patch[cpu_to_node(cpu)]; rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); @@ -845,9 +846,10 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, return UCODE_OK; } -static enum ucode_state -load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) +static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size) { + struct cpuinfo_x86 *c; + unsigned int nid, cpu; struct ucode_patch *p; enum ucode_state ret; @@ -860,22 +862,22 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) return ret; } - p = find_patch(0); - if (!p) { - return ret; - } else { - if (boot_cpu_data.microcode >= p->patch_id) - return ret; + for_each_node(nid) { + cpu = cpumask_first(cpumask_of_node(nid)); + c = &cpu_data(cpu); - ret = UCODE_NEW; - } + p = find_patch(cpu); + if (!p) + continue; - /* save BSP's matching patch for early load */ - if (!save) - return ret; + if (c->microcode >= p->patch_id) + continue; - memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); - memcpy(amd_ucode_patch, p->data, min_t(u32, p->size, PATCH_MAX_SIZE)); + ret = UCODE_NEW; + + memset(&amd_ucode_patch[nid], 0, PATCH_MAX_SIZE); + memcpy(&amd_ucode_patch[nid], p->data, min_t(u32, p->size, PATCH_MAX_SIZE)); + } return ret; } @@ -901,12 +903,11 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, { char fw_name[36] = "amd-ucode/microcode_amd.bin"; struct cpuinfo_x86 *c = &cpu_data(cpu); 
- bool bsp = c->cpu_index == boot_cpu_data.cpu_index; enum ucode_state ret = UCODE_NFOUND; const struct firmware *fw; /* reload ucode container only on the boot cpu */ - if (!refresh_fw || !bsp) + if (!refresh_fw) return UCODE_OK; if (c->x86 >= 0x15) @@ -921,7 +922,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, if (!verify_container(fw->data, fw->size, false)) goto fw_release; - ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size); + ret = load_microcode_amd(c->x86, fw->data, fw->size); fw_release: release_firmware(fw); diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 36583bc4b88c30d9c5a2cd6593ab6ce3e791f790..24254d141178945f75ba7596280773abbb0ea082 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -315,7 +315,7 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) #endif } -void reload_early_microcode(void) +void reload_early_microcode(unsigned int cpu) { int vendor, family; @@ -329,7 +329,7 @@ void reload_early_microcode(void) break; case X86_VENDOR_AMD: if (family >= 0x10) - reload_ucode_amd(); + reload_ucode_amd(cpu); break; default: break; @@ -707,7 +707,7 @@ void microcode_bsp_resume(void) if (uci->valid && uci->mc) microcode_ops->apply_microcode(cpu); else if (!uci->mc) - reload_early_microcode(); + reload_early_microcode(cpu); } static struct syscore_ops mc_syscore_ops = { diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index b1deacbeb266934b76c7d46b556784d00ae8cb91..a932a07d002538ead40b174ffe9fabc772af1c94 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -37,7 +37,6 @@ #include #include #include -#include <asm/virtext.h> #include #include #include @@ -94,15 +93,6 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs) */ cpu_crash_vmclear_loaded_vmcss(); - /* Disable VMX or SVM if needed. - * - * We need to disable virtualization on all CPUs. - * Having VMX or SVM enabled on any CPU may break rebooting - * after the kdump kernel has finished its task. - */ - cpu_emergency_vmxoff(); - cpu_emergency_svm_disable(); - /* * Disable Intel PT to stop its logging */ @@ -161,12 +151,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs) */ cpu_crash_vmclear_loaded_vmcss(); - /* Booting kdump kernel with VMX or SVM enabled won't work, - * because (among other limitations) we can't disable paging - * with the virt flags. 
- */ - cpu_emergency_vmxoff(); - cpu_emergency_svm_disable(); + cpu_emergency_disable_virtualization(); /* * Disable Intel PT to stop its logging diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 3d620149200647a8e27ab4bb5c40ae26e248175d..e37e5e82481a59a1601001d0f7bf5f8d87bebedf 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -46,8 +46,8 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) /* This function only handles jump-optimized kprobe */ if (kp && kprobe_optimized(kp)) { op = container_of(kp, struct optimized_kprobe, kp); - /* If op->list is not empty, op is under optimizing */ - if (list_empty(&op->list)) + /* If op is optimized or under unoptimizing */ + if (list_empty(&op->list) || optprobe_queued_unopt(op)) goto found; } } @@ -346,7 +346,7 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op) for (i = 1; i < op->optinsn.size; i++) { p = get_kprobe(op->kp.addr + i); - if (p && !kprobe_disabled(p)) + if (p && !kprobe_disarmed(p)) return -EEXIST; } diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index df3514835b3563522fdf93a2e9e36aabb2234b69..4d8c0e2581500f1f269556aa6c5a92974777abc5 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -528,33 +528,29 @@ static inline void kb_wait(void) } } -static void vmxoff_nmi(int cpu, struct pt_regs *regs) -{ - cpu_emergency_vmxoff(); -} +static inline void nmi_shootdown_cpus_on_restart(void); -/* Use NMIs as IPIs to tell all CPUs to disable virtualization */ -static void emergency_vmx_disable_all(void) +static void emergency_reboot_disable_virtualization(void) { /* Just make sure we won't change CPUs while doing this */ local_irq_disable(); /* - * Disable VMX on all CPUs before rebooting, otherwise we risk hanging - * the machine, because the CPU blocks INIT when it's in VMX root. + * Disable virtualization on all CPUs before rebooting to avoid hanging + * the system, as VMX and SVM block INIT when running in the host. * * We can't take any locks and we may be on an inconsistent state, so - * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt. + * use NMIs as IPIs to tell the other CPUs to disable VMX/SVM and halt. * - * Do the NMI shootdown even if VMX if off on _this_ CPU, as that - * doesn't prevent a different CPU from being in VMX root operation. + * Do the NMI shootdown even if virtualization is off on _this_ CPU, as + * other CPUs may have virtualization enabled. */ - if (cpu_has_vmx()) { - /* Safely force _this_ CPU out of VMX root operation. */ - __cpu_emergency_vmxoff(); + if (cpu_has_vmx() || cpu_has_svm(NULL)) { + /* Safely force _this_ CPU out of VMX/SVM operation. */ + cpu_emergency_disable_virtualization(); - /* Halt and exit VMX root operation on the other CPUs. */ - nmi_shootdown_cpus(vmxoff_nmi); + /* Disable VMX/SVM and halt on other CPUs. */ + nmi_shootdown_cpus_on_restart(); } } @@ -590,7 +586,7 @@ static void native_machine_emergency_restart(void) unsigned short mode; if (reboot_emergency) - emergency_vmx_disable_all(); + emergency_reboot_disable_virtualization(); tboot_shutdown(TB_SHUTDOWN_REBOOT); @@ -795,6 +791,17 @@ void machine_crash_shutdown(struct pt_regs *regs) /* This is the CPU performing the emergency shutdown work. */ int crashing_cpu = -1; +/* + * Disable virtualization, i.e. VMX or SVM, to ensure INIT is recognized during + * reboot. VMX blocks INIT if the CPU is post-VMXON, and SVM blocks INIT if + * GIF=0, i.e. 
if the crash occurred between CLGI and STGI. + */ +void cpu_emergency_disable_virtualization(void) +{ + cpu_emergency_vmxoff(); + cpu_emergency_svm_disable(); +} + #if defined(CONFIG_SMP) static nmi_shootdown_cb shootdown_callback; @@ -817,7 +824,14 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs) return NMI_HANDLED; local_irq_disable(); - shootdown_callback(cpu, regs); + if (shootdown_callback) + shootdown_callback(cpu, regs); + + /* + * Prepare the CPU for reboot _after_ invoking the callback so that the + * callback can safely use virtualization instructions, e.g. VMCLEAR. + */ + cpu_emergency_disable_virtualization(); atomic_dec(&waiting_for_crash_ipi); /* Assume hlt works */ @@ -828,18 +842,32 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs) return NMI_HANDLED; } -/* - * Halt all other CPUs, calling the specified function on each of them +/** + * nmi_shootdown_cpus - Stop other CPUs via NMI + * @callback: Optional callback to be invoked from the NMI handler + * + * The NMI handler on the remote CPUs invokes @callback, if not + * NULL, first and then disables virtualization to ensure that + * INIT is recognized during reboot. * - * This function can be used to halt all other CPUs on crash - * or emergency reboot time. The function passed as parameter - * will be called inside a NMI handler on all CPUs. + * nmi_shootdown_cpus() can only be invoked once. After the first + * invocation all other CPUs are stuck in crash_nmi_callback() and + * cannot respond to a second NMI. */ void nmi_shootdown_cpus(nmi_shootdown_cb callback) { unsigned long msecs; + local_irq_disable(); + /* + * Avoid certain doom if a shootdown already occurred; re-registering + * the NMI handler will cause list corruption, modifying the callback + * will do who knows what, etc... + */ + if (WARN_ON_ONCE(crash_ipi_issued)) + return; + /* Make a note of crashing cpu. Will be used in NMI callback. */ crashing_cpu = safe_smp_processor_id(); @@ -867,7 +895,17 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback) msecs--; } - /* Leave the nmi callback set */ + /* + * Leave the nmi callback set, shootdown is a one-time thing. Clearing + * the callback could result in a NULL pointer dereference if a CPU + * (finally) responds after the timeout expires. 
+ */ +} + +static inline void nmi_shootdown_cpus_on_restart(void) +{ + if (!crash_ipi_issued) + nmi_shootdown_cpus(NULL); } /* @@ -897,6 +935,8 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback) /* No other CPUs to shoot down */ } +static inline void nmi_shootdown_cpus_on_restart(void) { } + void run_crash_ipi_callback(struct pt_regs *regs) { } diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index eff4ce3b10da7185cb770e35b6006516e72830db..95758ae120bafa608ed69e573a68ca0c2bb577dc 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -32,7 +32,7 @@ #include #include #include -#include <asm/virtext.h> +#include <asm/reboot.h> /* * Some notes on x86 processor bugs affecting SMP operation: @@ -122,7 +122,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) if (raw_smp_processor_id() == atomic_read(&stopping_cpu)) return NMI_HANDLED; - cpu_emergency_vmxoff(); + cpu_emergency_disable_virtualization(); stop_this_cpu(NULL); return NMI_HANDLED; @@ -134,7 +134,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) DEFINE_IDTENTRY_SYSVEC(sysvec_reboot) { ack_APIC_irq(); - cpu_emergency_vmxoff(); + cpu_emergency_disable_virtualization(); stop_this_cpu(NULL); } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 260727eaa6b9629d1cd7cf3df2286c28e43043ff..21189804524a77f591cdfd6e0f6912e373da6df7 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2115,10 +2115,14 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) break; case APIC_SELF_IPI: - if (apic_x2apic_mode(apic)) - kvm_apic_send_ipi(apic, APIC_DEST_SELF | (val & APIC_VECTOR_MASK), 0); - else + /* + * Self-IPI exists only when x2APIC is enabled. Bits 7:0 hold + * the vector, everything else is reserved. + */ + if (!apic_x2apic_mode(apic) || (val & ~APIC_VECTOR_MASK)) ret = 1; + else + kvm_apic_send_ipi(apic, APIC_DEST_SELF | val, 0); break; default: ret = 1; diff --git a/drivers/block/brd.c b/drivers/block/brd.c index cc49a921339f778aaf948b52f92dd797677203d8..11078e1663683ad16a69d2e08fb9303867264d20 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -80,11 +80,9 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) } /* - * Look up and return a brd's page for a given sector. - * If one does not exist, allocate an empty page, and insert that. Then - * return it. 
*/ -static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) +static int brd_insert_page(struct brd_device *brd, sector_t sector) { pgoff_t idx; struct page *page; @@ -92,7 +90,7 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) page = brd_lookup_page(brd, sector); if (page) - return page; + return 0; /* * Must use NOIO because we don't want to recurse back into the @@ -101,11 +99,11 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM; page = alloc_page(gfp_flags); if (!page) - return NULL; + return -ENOMEM; if (radix_tree_preload(GFP_NOIO)) { __free_page(page); - return NULL; + return -ENOMEM; } spin_lock(&brd->brd_lock); @@ -120,8 +118,7 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) spin_unlock(&brd->brd_lock); radix_tree_preload_end(); - - return page; + return 0; } /* @@ -174,16 +171,17 @@ static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n) { unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; size_t copy; + int ret; copy = min_t(size_t, n, PAGE_SIZE - offset); - if (!brd_insert_page(brd, sector)) - return -ENOSPC; + ret = brd_insert_page(brd, sector); + if (ret) + return ret; if (copy < n) { sector += copy >> SECTOR_SHIFT; - if (!brd_insert_page(brd, sector)) - return -ENOSPC; + ret = brd_insert_page(brd, sector); } - return 0; + return ret; } /* diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 340b1df365f72372ffa1827749eda9c155796313..932d4bb8e403534f2b061071101e18923d8ee32b 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -5369,8 +5369,7 @@ static void rbd_dev_release(struct device *dev) module_put(THIS_MODULE); } -static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc, - struct rbd_spec *spec) +static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec) { struct rbd_device *rbd_dev; @@ -5415,9 +5414,6 @@ static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc, rbd_dev->dev.parent = &rbd_root_dev; device_initialize(&rbd_dev->dev); - rbd_dev->rbd_client = rbdc; - rbd_dev->spec = spec; - return rbd_dev; } @@ -5430,12 +5426,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, { struct rbd_device *rbd_dev; - rbd_dev = __rbd_dev_create(rbdc, spec); + rbd_dev = __rbd_dev_create(spec); if (!rbd_dev) return NULL; - rbd_dev->opts = opts; - /* get an id and fill in device name */ rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0, minor_to_rbd_dev_id(1 << MINORBITS), @@ -5452,6 +5446,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, /* we have a ref from do_rbd_add() */ __module_get(THIS_MODULE); + rbd_dev->rbd_client = rbdc; + rbd_dev->spec = spec; + rbd_dev->opts = opts; + dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id); return rbd_dev; @@ -6812,7 +6810,7 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth) goto out_err; } - parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec); + parent = __rbd_dev_create(rbd_dev->parent_spec); if (!parent) { ret = -ENOMEM; goto out_err; @@ -6822,8 +6820,8 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth) * Images related by parent/child relationships always share * rbd_client and spec/parent_spec, so bump their refcounts. 
*/ - __rbd_get_client(rbd_dev->rbd_client); - rbd_spec_get(rbd_dev->parent_spec); + parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client); + parent->spec = rbd_spec_get(rbd_dev->parent_spec); __set_bit(RBD_DEV_FLAG_READONLY, &parent->flags); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 3d905fda9b29a4576564766b3ac06fb8eb6d50c2..2695ece47eb0e242d5999a1832b2f1b780816ad2 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -393,6 +393,10 @@ static const struct usb_device_id blacklist_table[] = { { USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01), .driver_info = BTUSB_IGNORE }, + /* Realtek 8821CE Bluetooth devices */ + { USB_DEVICE(0x13d3, 0x3529), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + /* Realtek 8822CE Bluetooth devices */ { USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 4771397495130533a2a15c90208809d6a4905da2..0f2bac24e564dccc917970d0dede2bd6df962bea 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -92,7 +92,7 @@ #define SSIF_WATCH_WATCHDOG_TIMEOUT msecs_to_jiffies(250) enum ssif_intf_state { - SSIF_NORMAL, + SSIF_IDLE, SSIF_GETTING_FLAGS, SSIF_GETTING_EVENTS, SSIF_CLEARING_FLAGS, @@ -100,8 +100,8 @@ enum ssif_intf_state { /* FIXME - add watchdog stuff. */ }; -#define SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_NORMAL \ - && (ssif)->curr_msg == NULL) +#define IS_SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_IDLE \ + && (ssif)->curr_msg == NULL) /* * Indexes into stats[] in ssif_info below. @@ -348,9 +348,9 @@ static void return_hosed_msg(struct ssif_info *ssif_info, /* * Must be called with the message lock held. This will release the - * message lock. Note that the caller will check SSIF_IDLE and start a - * new operation, so there is no need to check for new messages to - * start in here. + * message lock. Note that the caller will check IS_SSIF_IDLE and + * start a new operation, so there is no need to check for new + * messages to start in here. */ static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags) { @@ -367,7 +367,7 @@ static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags) if (start_send(ssif_info, msg, 3) != 0) { /* Error, just go to normal state. 
*/ - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; } } @@ -382,7 +382,7 @@ static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags) mb[0] = (IPMI_NETFN_APP_REQUEST << 2); mb[1] = IPMI_GET_MSG_FLAGS_CMD; if (start_send(ssif_info, mb, 2) != 0) - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; } static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags, @@ -393,7 +393,7 @@ static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags, flags = ipmi_ssif_lock_cond(ssif_info, &oflags); ssif_info->curr_msg = NULL; - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); ipmi_free_smi_msg(msg); } @@ -407,7 +407,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags) msg = ipmi_alloc_smi_msg(); if (!msg) { - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); return; } @@ -430,7 +430,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info, msg = ipmi_alloc_smi_msg(); if (!msg) { - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); return; } @@ -448,9 +448,9 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info, /* * Must be called with the message lock held. This will release the - * message lock. Note that the caller will check SSIF_IDLE and start a - * new operation, so there is no need to check for new messages to - * start in here. + * message lock. Note that the caller will check IS_SSIF_IDLE and + * start a new operation, so there is no need to check for new + * messages to start in here. */ static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags) { @@ -466,7 +466,7 @@ static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags) /* Events available. */ start_event_fetch(ssif_info, flags); else { - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); } } @@ -579,7 +579,7 @@ static void watch_timeout(struct timer_list *t) if (ssif_info->watch_timeout) { mod_timer(&ssif_info->watch_timer, jiffies + ssif_info->watch_timeout); - if (SSIF_IDLE(ssif_info)) { + if (IS_SSIF_IDLE(ssif_info)) { start_flag_fetch(ssif_info, flags); /* Releases lock */ return; } @@ -782,7 +782,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, } switch (ssif_info->ssif_state) { - case SSIF_NORMAL: + case SSIF_IDLE: ipmi_ssif_unlock_cond(ssif_info, flags); if (!msg) break; @@ -800,7 +800,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, * Error fetching flags, or invalid length, * just give up for now. 
*/ - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); dev_warn(&ssif_info->client->dev, "Error getting flags: %d %d, %x\n", @@ -835,7 +835,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, "Invalid response clearing flags: %x %x\n", data[0], data[1]); } - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; ipmi_ssif_unlock_cond(ssif_info, flags); break; @@ -913,7 +913,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, } flags = ipmi_ssif_lock_cond(ssif_info, &oflags); - if (SSIF_IDLE(ssif_info) && !ssif_info->stopping) { + if (IS_SSIF_IDLE(ssif_info) && !ssif_info->stopping) { if (ssif_info->req_events) start_event_fetch(ssif_info, flags); else if (ssif_info->req_flags) @@ -1087,7 +1087,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags) unsigned long oflags; restart: - if (!SSIF_IDLE(ssif_info)) { + if (!IS_SSIF_IDLE(ssif_info)) { ipmi_ssif_unlock_cond(ssif_info, flags); return; } @@ -1310,7 +1310,7 @@ static void shutdown_ssif(void *send_info) dev_set_drvdata(&ssif_info->client->dev, NULL); /* make sure the driver is not looking for flags any more. */ - while (ssif_info->ssif_state != SSIF_NORMAL) + while (ssif_info->ssif_state != SSIF_IDLE) schedule_timeout(1); ssif_info->stopping = true; @@ -1882,7 +1882,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) } spin_lock_init(&ssif_info->lock); - ssif_info->ssif_state = SSIF_NORMAL; + ssif_info->ssif_state = SSIF_IDLE; timer_setup(&ssif_info->retry_timer, retry_timeout, 0); timer_setup(&ssif_info->watch_timer, watch_timeout, 0); diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c index c1d379bd7af330741b1065603df4ba1d1d15d946..a02777c93c07bcbcecc3cb5a95ee8227efe52528 100644 --- a/drivers/dax/bus.c +++ b/drivers/dax/bus.c @@ -398,8 +398,8 @@ static void unregister_dev_dax(void *dev) dev_dbg(dev, "%s\n", __func__); kill_dev_dax(dev_dax); - free_dev_dax_ranges(dev_dax); device_del(dev); + free_dev_dax_ranges(dev_dax); put_device(dev); } diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c index b4368c5b6a0cc9df10cff1a2b6145e0a72bfdced..27d669f8b5f3acd8a2ef40ed7ca6ddfd02980849 100644 --- a/drivers/dax/kmem.c +++ b/drivers/dax/kmem.c @@ -114,7 +114,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax) if (rc) { dev_warn(dev, "mapping%d: %#llx-%#llx memory add failed\n", i, range.start, range.end); - release_resource(res); + remove_resource(res); kfree(res); data->res[i] = NULL; if (mapped) @@ -159,7 +159,7 @@ static int dev_dax_kmem_remove(struct dev_dax *dev_dax) rc = remove_memory(dev_dax->target_node, range.start, range_len(&range)); if (rc == 0) { - release_resource(data->res[i]); + remove_resource(data->res[i]); kfree(data->res[i]); data->res[i] = NULL; success++; diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c index 916f26adc5955b8c021e607001558ec4028146b1..922c079d13c8a6e6b40c31e5881fd0cd2771583f 100644 --- a/drivers/firmware/google/framebuffer-coreboot.c +++ b/drivers/firmware/google/framebuffer-coreboot.c @@ -43,9 +43,7 @@ static int framebuffer_probe(struct coreboot_device *dev) fb->green_mask_pos == formats[i].green.offset && fb->green_mask_size == formats[i].green.length && fb->blue_mask_pos == formats[i].blue.offset && - fb->blue_mask_size == formats[i].blue.length && - fb->reserved_mask_pos == formats[i].transp.offset && - fb->reserved_mask_size == 
formats[i].transp.length) + fb->blue_mask_size == formats[i].blue.length) pdata.format = formats[i].name; } if (!pdata.format) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index fbe15f4b75fd52c87fdcd9359c992b3a0c8739f6..dbdf0e210522c3e3ab330852a45b302550d93157 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2051,12 +2051,14 @@ static int dm_resume(void *handle) drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); + if (!aconnector->dc_link) + continue; + /* * this is the case when traversing through already created * MST connectors, should be skipped */ - if (aconnector->dc_link && - aconnector->dc_link->type == dc_connection_mst_branch) + if (aconnector->dc_link->type == dc_connection_mst_branch) continue; mutex_lock(&aconnector->hpd_lock); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 99887bcfada0430b3941521ee85e5d40c39cc558..7e0a55aa2b180cd685fbfe12737bb7be86782687 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -616,6 +616,7 @@ static bool dc_construct_ctx(struct dc *dc, dc_ctx->perf_trace = dc_perf_trace_create(); if (!dc_ctx->perf_trace) { + kfree(dc_ctx); ASSERT_CRITICAL(false); return false; } diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index ce739ba45c551a7c83990afd58be12d592cbe13b..8768073794fbf618d2c81116763ed3d1a25e4c6c 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -278,6 +278,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Lenovo IdeaPad Duet 3 10IGL5 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, }, { /* Lenovo Yoga Book X90F / X91F / X91L */ .matches = { /* Non exact match to match all versions */ diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 51e8318cc8ff478519d22136082cc119fde314f0..5a76aa1389173de141b9d0ba2ba9ea4666468377 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -1913,6 +1913,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) /* setup workqueue */ msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0); + if (!msm_host->workqueue) + return -ENOMEM; + INIT_WORK(&msm_host->err_work, dsi_err_worker); INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker); diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index eeccf40bae41667adb5d5f7e2156200f21840a62..1b1ddc5fe6dcc0614566d581716ad799dd01170e 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -1444,22 +1444,26 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) { struct dsi_data *dsi = s->private; unsigned long flags; - struct dsi_irq_stats stats; + struct dsi_irq_stats *stats; + + stats = kmalloc(sizeof(*stats), GFP_KERNEL); + if (!stats) + return -ENOMEM; spin_lock_irqsave(&dsi->irq_stats_lock, flags); - stats = dsi->irq_stats; + *stats = dsi->irq_stats; memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats)); dsi->irq_stats.last_reset = jiffies; 
spin_unlock_irqrestore(&dsi->irq_stats_lock, flags); seq_printf(s, "period %u ms\n", - jiffies_to_msecs(jiffies - stats.last_reset)); + jiffies_to_msecs(jiffies - stats->last_reset)); - seq_printf(s, "irqs %d\n", stats.irq_count); + seq_printf(s, "irqs %d\n", stats->irq_count); #define PIS(x) \ - seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]); + seq_printf(s, "%-20s %10d\n", #x, stats->dsi_irqs[ffs(DSI_IRQ_##x)-1]); seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1); PIS(VC0); @@ -1483,10 +1487,10 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) #define PIS(x) \ seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \ - stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \ - stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \ - stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \ - stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]); + stats->vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \ + stats->vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \ + stats->vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \ + stats->vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]); seq_printf(s, "-- VC interrupts --\n"); PIS(CS); @@ -1502,7 +1506,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) #define PIS(x) \ seq_printf(s, "%-20s %10d\n", #x, \ - stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]); + stats->cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]); seq_printf(s, "-- CIO interrupts --\n"); PIS(ERRSYNCESC1); @@ -1527,6 +1531,8 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) PIS(ULPSACTIVENOT_ALL1); #undef PIS + kfree(stats); + return 0; } #endif diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 8287410f471fb88ecf5f458b3b0baad4fd4a0c62..131f425c363a571d5b004b9599292da4f0892d3e 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1022,6 +1022,7 @@ void radeon_atombios_fini(struct radeon_device *rdev) { if (rdev->mode_info.atom_context) { kfree(rdev->mode_info.atom_context->scratch); + kfree(rdev->mode_info.atom_context->iio); } kfree(rdev->mode_info.atom_context); rdev->mode_info.atom_context = NULL; diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c index 403af68fa4400d46acdc5ae61837ddaa2d5f84b1..7ea26e5fbcb28e008dc4473a82768a861c861503 100644 --- a/drivers/gpu/drm/tiny/ili9486.c +++ b/drivers/gpu/drm/tiny/ili9486.c @@ -43,6 +43,7 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par, size_t num) { struct spi_device *spi = mipi->spi; + unsigned int bpw = 8; void *data = par; u32 speed_hz; int i, ret; @@ -56,8 +57,6 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par, * The displays are Raspberry Pi HATs and connected to the 8-bit only * SPI controller, so 16-bit command and parameters need byte swapping * before being transferred as 8-bit on the big endian SPI bus. - * Pixel data bytes have already been swapped before this function is - * called. 
*/ buf[0] = cpu_to_be16(*cmd); gpiod_set_value_cansleep(mipi->dc, 0); @@ -71,12 +70,18 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par, for (i = 0; i < num; i++) buf[i] = cpu_to_be16(par[i]); num *= 2; - speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num); data = buf; } + /* + * Check whether pixel data bytes needs to be swapped or not + */ + if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes) + bpw = 16; + gpiod_set_value_cansleep(mipi->dc, 1); - ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, data, num); + speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num); + ret = mipi_dbi_spi_transfer(spi, speed_hz, bpw, data, num); free: kfree(buf); diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index f4e2e6937758952ed499e6adbaf89ed147221945..1f60a381ae63eb258e8d710e8220de4ddf6cd0a7 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -933,6 +933,7 @@ static const char *keys[KEY_MAX + 1] = { [KEY_VOICECOMMAND] = "VoiceCommand", [KEY_EMOJI_PICKER] = "EmojiPicker", [KEY_DICTATE] = "Dictate", + [KEY_MICMUTE] = "MicrophoneMute", [KEY_BRIGHTNESS_MIN] = "BrightnessMin", [KEY_BRIGHTNESS_MAX] = "BrightnessMax", [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto", diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 75a4d8d6bb0fd34217e6731c109ebbb65731a0cc..3399953256d85e62ab0b5a4522429e13b0a9074a 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -675,6 +675,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel break; } + if ((usage->hid & 0xf0) == 0xa0) { /* SystemControl */ + switch (usage->hid & 0xf) { + case 0x9: map_key_clear(KEY_MICMUTE); break; + default: goto ignore; + } + break; + } + if ((usage->hid & 0xf0) == 0xb0) { /* SC - Display */ switch (usage->hid & 0xf) { case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break; diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 66b1051620390b6ccb84ecf21394e1ad1da5e742..f5ea8e1d844528692e76beb3e0fdda2be934baa1 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -3763,6 +3763,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) bool connected; unsigned int connect_mask = HID_CONNECT_DEFAULT; struct hidpp_ff_private_data data; + bool will_restart = false; /* report_fixup needs drvdata to be set before we call hid_parse */ hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL); @@ -3818,6 +3819,10 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) return ret; } + if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT || + hidpp->quirks & HIDPP_QUIRK_UNIFYING) + will_restart = true; + INIT_WORK(&hidpp->work, delayed_work_cb); mutex_init(&hidpp->send_mutex); init_waitqueue_head(&hidpp->wait); @@ -3832,7 +3837,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) * Plain USB connections need to actually call start and open * on the transport driver to allow incoming data. */ - ret = hid_hw_start(hdev, 0); + ret = hid_hw_start(hdev, will_restart ? 
0 : connect_mask); if (ret) { hid_err(hdev, "hw start failed\n"); goto hid_hw_start_fail; @@ -3869,6 +3874,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) hidpp->wireless_feature_index = 0; else if (ret) goto hid_hw_init_fail; + ret = 0; } if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) { @@ -3883,19 +3889,21 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) hidpp_connect_event(hidpp); - /* Reset the HID node state */ - hid_device_io_stop(hdev); - hid_hw_close(hdev); - hid_hw_stop(hdev); + if (will_restart) { + /* Reset the HID node state */ + hid_device_io_stop(hdev); + hid_hw_close(hdev); + hid_hw_stop(hdev); - if (hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) - connect_mask &= ~HID_CONNECT_HIDINPUT; + if (hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) + connect_mask &= ~HID_CONNECT_HIDINPUT; - /* Now export the actual inputs and hidraw nodes to the world */ - ret = hid_hw_start(hdev, connect_mask); - if (ret) { - hid_err(hdev, "%s:hid_hw_start returned error\n", __func__); - goto hid_hw_start_fail; + /* Now export the actual inputs and hidraw nodes to the world */ + ret = hid_hw_start(hdev, connect_mask); + if (ret) { + hid_err(hdev, "%s:hid_hw_start returned error\n", __func__); + goto hid_hw_start_fail; + } } if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) { diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 42b84ebff057928c6602d2ba64b3337115488a30..eaae5de2ab6167cde4e49b3be0cede39c5b75205 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -550,66 +550,49 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx) ida_free(&pdata->ida, indx - BASE_SYSFS_ATTR_NO); } -static int coretemp_probe(struct platform_device *pdev) +static int coretemp_device_add(int zoneid) { - struct device *dev = &pdev->dev; + struct platform_device *pdev; struct platform_data *pdata; + int err; /* Initialize the per-zone data structures */ - pdata = devm_kzalloc(dev, sizeof(struct platform_data), GFP_KERNEL); + pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; - pdata->pkg_id = pdev->id; + pdata->pkg_id = zoneid; ida_init(&pdata->ida); - platform_set_drvdata(pdev, pdata); - pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME, - pdata, NULL); - return PTR_ERR_OR_ZERO(pdata->hwmon_dev); -} - -static int coretemp_remove(struct platform_device *pdev) -{ - struct platform_data *pdata = platform_get_drvdata(pdev); - int i; + pdev = platform_device_alloc(DRVNAME, zoneid); + if (!pdev) { + err = -ENOMEM; + goto err_free_pdata; + } - for (i = MAX_CORE_DATA - 1; i >= 0; --i) - if (pdata->core_data[i]) - coretemp_remove_core(pdata, i); + err = platform_device_add(pdev); + if (err) + goto err_put_dev; - ida_destroy(&pdata->ida); + platform_set_drvdata(pdev, pdata); + zone_devices[zoneid] = pdev; return 0; -} -static struct platform_driver coretemp_driver = { - .driver = { - .name = DRVNAME, - }, - .probe = coretemp_probe, - .remove = coretemp_remove, -}; +err_put_dev: + platform_device_put(pdev); +err_free_pdata: + kfree(pdata); + return err; +} -static struct platform_device *coretemp_device_add(unsigned int cpu) +static void coretemp_device_remove(int zoneid) { - int err, zoneid = topology_logical_die_id(cpu); - struct platform_device *pdev; - - if (zoneid < 0) - return ERR_PTR(-ENOMEM); - - pdev = platform_device_alloc(DRVNAME, zoneid); - if (!pdev) - return ERR_PTR(-ENOMEM); - - err = platform_device_add(pdev); - if (err) { - platform_device_put(pdev); - 
return ERR_PTR(err); - } + struct platform_device *pdev = zone_devices[zoneid]; + struct platform_data *pdata = platform_get_drvdata(pdev); - zone_devices[zoneid] = pdev; - return pdev; + ida_destroy(&pdata->ida); + kfree(pdata); + platform_device_unregister(pdev); } static int coretemp_cpu_online(unsigned int cpu) @@ -633,7 +616,10 @@ static int coretemp_cpu_online(unsigned int cpu) if (!cpu_has(c, X86_FEATURE_DTHERM)) return -ENODEV; - if (!pdev) { + pdata = platform_get_drvdata(pdev); + if (!pdata->hwmon_dev) { + struct device *hwmon; + /* Check the microcode version of the CPU */ if (chk_ucode_version(cpu)) return -EINVAL; @@ -644,9 +630,11 @@ static int coretemp_cpu_online(unsigned int cpu) * online. So, initialize per-pkg data structures and * then bring this core online. */ - pdev = coretemp_device_add(cpu); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); + hwmon = hwmon_device_register_with_groups(&pdev->dev, DRVNAME, + pdata, NULL); + if (IS_ERR(hwmon)) + return PTR_ERR(hwmon); + pdata->hwmon_dev = hwmon; /* * Check whether pkgtemp support is available. @@ -656,7 +644,6 @@ static int coretemp_cpu_online(unsigned int cpu) coretemp_add_core(pdev, cpu, 1); } - pdata = platform_get_drvdata(pdev); /* * Check whether a thread sibling is already online. If not add the * interface for this CPU core. @@ -675,18 +662,14 @@ static int coretemp_cpu_offline(unsigned int cpu) struct temp_data *tdata; int i, indx = -1, target; - /* - * Don't execute this on suspend as the device remove locks - * up the machine. - */ + /* No need to tear down any interfaces for suspend */ if (cpuhp_tasks_frozen) return 0; /* If the physical CPU device does not exist, just return */ - if (!pdev) - return 0; - pd = platform_get_drvdata(pdev); + if (!pd->hwmon_dev) + return 0; for (i = 0; i < NUM_REAL_CORES; i++) { if (pd->cpu_map[i] == topology_core_id(cpu)) { @@ -718,13 +701,14 @@ static int coretemp_cpu_offline(unsigned int cpu) } /* - * If all cores in this pkg are offline, remove the device. This - * will invoke the platform driver remove function, which cleans up - * the rest. + * If all cores in this pkg are offline, remove the interface. */ + tdata = pd->core_data[PKG_SYSFS_ATTR_NO]; if (cpumask_empty(&pd->cpumask)) { - zone_devices[topology_logical_die_id(cpu)] = NULL; - platform_device_unregister(pdev); + if (tdata) + coretemp_remove_core(pd, PKG_SYSFS_ATTR_NO); + hwmon_device_unregister(pd->hwmon_dev); + pd->hwmon_dev = NULL; return 0; } @@ -732,7 +716,6 @@ static int coretemp_cpu_offline(unsigned int cpu) * Check whether this core is the target for the package * interface. We need to assign it to some other cpu. 
*/ - tdata = pd->core_data[PKG_SYSFS_ATTR_NO]; if (tdata && tdata->cpu == cpu) { target = cpumask_first(&pd->cpumask); mutex_lock(&tdata->update_lock); @@ -751,7 +734,7 @@ static enum cpuhp_state coretemp_hp_online; static int __init coretemp_init(void) { - int err; + int i, err; /* * CPUID.06H.EAX[0] indicates whether the CPU has thermal @@ -767,20 +750,22 @@ static int __init coretemp_init(void) if (!zone_devices) return -ENOMEM; - err = platform_driver_register(&coretemp_driver); - if (err) - goto outzone; + for (i = 0; i < max_zones; i++) { + err = coretemp_device_add(i); + if (err) + goto outzone; + } err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/coretemp:online", coretemp_cpu_online, coretemp_cpu_offline); if (err < 0) - goto outdrv; + goto outzone; coretemp_hp_online = err; return 0; -outdrv: - platform_driver_unregister(&coretemp_driver); outzone: + while (i--) + coretemp_device_remove(i); kfree(zone_devices); return err; } @@ -788,8 +773,11 @@ module_init(coretemp_init) static void __exit coretemp_exit(void) { + int i; + cpuhp_remove_state(coretemp_hp_online); - platform_driver_unregister(&coretemp_driver); + for (i = 0; i < max_zones; i++) + coretemp_device_remove(i); kfree(zone_devices); } module_exit(coretemp_exit) diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 9b2aec30980108ea5cad4f56066a7ed4d99b45e0..f98ad4366301b5681c85057e1c3edc31efd6f22f 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -1883,6 +1883,7 @@ static void process_deferred_bios(struct work_struct *ws) else commit_needed = process_bio(cache, bio) || commit_needed; + cond_resched(); } if (commit_needed) @@ -1905,6 +1906,7 @@ static void requeue_deferred_bios(struct cache *cache) while ((bio = bio_list_pop(&bios))) { bio->bi_status = BLK_STS_DM_REQUEUE; bio_endio(bio); + cond_resched(); } } @@ -1945,6 +1947,8 @@ static void check_migrations(struct work_struct *ws) r = mg_start(cache, op, NULL); if (r) break; + + cond_resched(); } } diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index a2cc9e45cbba1639ede3cf51d324a4ec4685d2f1..36a4ef51ecaa8103597d689ce68fad2f598041f0 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -301,8 +301,11 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) */ bio_for_each_segment(bvec, bio, iter) { if (bio_iter_len(bio, iter) > corrupt_bio_byte) { - char *segment = (page_address(bio_iter_page(bio, iter)) - + bio_iter_offset(bio, iter)); + char *segment; + struct page *page = bio_iter_page(bio, iter); + if (unlikely(page == ZERO_PAGE(0))) + break; + segment = (page_address(page) + bio_iter_offset(bio, iter)); segment[corrupt_bio_byte] = fc->corrupt_bio_value; DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " "(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n", @@ -359,9 +362,11 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) /* * Corrupt matching writes. 
*/ - if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) { - if (all_corrupt_bio_flags_match(bio, fc)) - corrupt_bio_data(bio, fc); + if (fc->corrupt_bio_byte) { + if (fc->corrupt_bio_rw == WRITE) { + if (all_corrupt_bio_flags_match(bio, fc)) + corrupt_bio_data(bio, fc); + } goto map_bio; } @@ -387,13 +392,14 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, return DM_ENDIO_DONE; if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) { - if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) && - all_corrupt_bio_flags_match(bio, fc)) { - /* - * Corrupt successful matching READs while in down state. - */ - corrupt_bio_data(bio, fc); - + if (fc->corrupt_bio_byte) { + if ((fc->corrupt_bio_rw == READ) && + all_corrupt_bio_flags_match(bio, fc)) { + /* + * Corrupt successful matching READs while in down state. + */ + corrupt_bio_data(bio, fc); + } } else if (!test_bit(DROP_WRITES, &fc->flags) && !test_bit(ERROR_WRITES, &fc->flags)) { /* diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index f3c519e18a12f990b9fc0570b348fe6c5183d852..c890bb3e5185208e61d13a0b52826fa417f9e517 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2217,6 +2217,7 @@ static void process_thin_deferred_bios(struct thin_c *tc) throttle_work_update(&pool->throttle); dm_pool_issue_prefetches(pool->pmd); } + cond_resched(); } blk_finish_plug(&plug); } @@ -2299,6 +2300,7 @@ static void process_thin_deferred_cells(struct thin_c *tc) else pool->process_cell(tc, cell); } + cond_resched(); } while (!list_empty(&cells)); } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index c52c15056229d94bdccb2a3f76730c3651ca50cd..c3819388b9a457d156b8bb77e98b82d1e0f07306 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2383,6 +2383,7 @@ static void dm_wq_work(struct work_struct *work) break; submit_bio_noacct(bio); + cond_resched(); } } diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c index 2fe4a0bd028449602ba9daee1a5c3cd238c9b2fe..d6838c8ebd7e859f0c818d79a0a1a1e633032f40 100644 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c @@ -1831,6 +1831,9 @@ static void cio2_pci_remove(struct pci_dev *pci_dev) v4l2_device_unregister(&cio2->v4l2_dev); media_device_cleanup(&cio2->media_dev); mutex_destroy(&cio2->lock); + + pm_runtime_forbid(&pci_dev->dev); + pm_runtime_get_noresume(&pci_dev->dev); } static int __maybe_unused cio2_runtime_suspend(struct device *dev) diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index 2c256d455c9f3d55d5d86f30da7e8fe068858bdb..342215231932144ccb4bee5459c0adfc4429c84c 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -2424,6 +2424,15 @@ void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size, erase->size_mask = (1 << erase->size_shift) - 1; } +/** + * spi_nor_mask_erase_type() - mask out a SPI NOR erase type + * @erase: pointer to a structure that describes a SPI NOR erase type + */ +void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase) +{ + erase->size = 0; +} + /** * spi_nor_init_uniform_erase_map() - Initialize uniform erase map * @map: the erase map of the SPI NOR diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h index 6f62ee861231ae3ff66e860f2e4bd2ced9f93a0c..788775bb67958d8529ffa7782bc477a49b00fb9b 100644 --- a/drivers/mtd/spi-nor/core.h +++ b/drivers/mtd/spi-nor/core.h @@ -424,6 +424,7 @@ void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode, void 
spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size, u8 opcode); +void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase); struct spi_nor_erase_region * spi_nor_region_next(struct spi_nor_erase_region *region); void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map, diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c index 08de2a2b44520f4b489917816eb2bd068668dbab..9dc0528ea88426ecaed0e912b43f0b4e341843c6 100644 --- a/drivers/mtd/spi-nor/sfdp.c +++ b/drivers/mtd/spi-nor/sfdp.c @@ -852,7 +852,7 @@ spi_nor_init_non_uniform_erase_map(struct spi_nor *nor, */ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) if (!(regions_erase_type & BIT(erase[i].idx))) - spi_nor_set_erase_type(&erase[i], 0, 0xFF); + spi_nor_mask_erase_type(&erase[i]); return 0; } @@ -1063,7 +1063,7 @@ static int spi_nor_parse_4bait(struct spi_nor *nor, erase_type[i].opcode = (dwords[1] >> erase_type[i].idx * 8) & 0xFF; else - spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF); + spi_nor_mask_erase_type(&erase_type[i]); } /* diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c index 199e7e031d7d90d8dc12be691b730d32424a6d39..3b3cb3a6e2e89acdf77b9e0697c0fd61d0d7b752 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c @@ -1671,6 +1671,11 @@ static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv) val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1); val8 &= ~BIT(0); rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8); + + /* + * Fix transmission failure of rtl8192e. + */ + rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00); } struct rtl8xxxu_fileops rtl8192eu_fops = { diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 376782b7aba81aa58f733667f59482722af220d7..deef1c09de31976fe6a9693b05cbdc7723dffb74 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -5908,7 +5908,6 @@ static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed) { struct rtl8xxxu_priv *priv = hw->priv; struct device *dev = &priv->udev->dev; - u16 val16; int ret = 0, channel; bool ht40; @@ -5918,14 +5917,6 @@ static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed) __func__, hw->conf.chandef.chan->hw_value, changed, hw->conf.chandef.width); - if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) { - val16 = ((hw->conf.long_frame_max_tx_count << - RETRY_LIMIT_LONG_SHIFT) & RETRY_LIMIT_LONG_MASK) | - ((hw->conf.short_frame_max_tx_count << - RETRY_LIMIT_SHORT_SHIFT) & RETRY_LIMIT_SHORT_MASK); - rtl8xxxu_write16(priv, REG_RETRY_LIMIT, val16); - } - if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { switch (hw->conf.chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c index 578b387100d9bb5343ffe162f0e89a197756b3ed..d2e2b101978f8fe9656234b2a9101ebf976393e6 100644 --- a/drivers/pinctrl/pinctrl-at91-pio4.c +++ b/drivers/pinctrl/pinctrl-at91-pio4.c @@ -1081,8 +1081,8 @@ static int atmel_pinctrl_probe(struct platform_device *pdev) pin_desc[i].number = i; /* Pin naming convention: P(bank_name)(bank_pin_number). 
*/ - pin_desc[i].name = kasprintf(GFP_KERNEL, "P%c%d", - bank + 'A', line); + pin_desc[i].name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "P%c%d", + bank + 'A', line); group->name = group_names[i] = pin_desc[i].name; group->pin = pin_desc[i].number; diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index 9015486e38c18fd12a3ba54b231bdbc7d5733390..52ecd47c18e2d6be482c7114a6decbdb1f8d91ef 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -1891,7 +1891,7 @@ static int at91_gpio_probe(struct platform_device *pdev) } for (i = 0; i < chip->ngpio; i++) - names[i] = kasprintf(GFP_KERNEL, "pio%c%d", alias_idx + 'A', i); + names[i] = devm_kasprintf(&pdev->dev, GFP_KERNEL, "pio%c%d", alias_idx + 'A', i); chip->names = (const char *const *)names; diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c index 7b8ec8c0bd1511a283ef4619867f98be59260c3c..660e179a82a2c18237c4a3d6f71224499c265fc0 100644 --- a/drivers/regulator/max77802-regulator.c +++ b/drivers/regulator/max77802-regulator.c @@ -95,9 +95,11 @@ static int max77802_set_suspend_disable(struct regulator_dev *rdev) { unsigned int val = MAX77802_OFF_PWRREQ; struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev); - int id = rdev_get_id(rdev); + unsigned int id = rdev_get_id(rdev); int shift = max77802_get_opmode_shift(id); + if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode))) + return -EINVAL; max77802->opmode[id] = val; return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, rdev->desc->enable_mask, val << shift); @@ -111,7 +113,7 @@ static int max77802_set_suspend_disable(struct regulator_dev *rdev) static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev); - int id = rdev_get_id(rdev); + unsigned int id = rdev_get_id(rdev); unsigned int val; int shift = max77802_get_opmode_shift(id); @@ -128,6 +130,9 @@ static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode) return -EINVAL; } + if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode))) + return -EINVAL; + max77802->opmode[id] = val; return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, rdev->desc->enable_mask, val << shift); @@ -136,8 +141,10 @@ static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode) static unsigned max77802_get_mode(struct regulator_dev *rdev) { struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev); - int id = rdev_get_id(rdev); + unsigned int id = rdev_get_id(rdev); + if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode))) + return -EINVAL; return max77802_map_mode(max77802->opmode[id]); } @@ -161,10 +168,13 @@ static int max77802_set_suspend_mode(struct regulator_dev *rdev, unsigned int mode) { struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev); - int id = rdev_get_id(rdev); + unsigned int id = rdev_get_id(rdev); unsigned int val; int shift = max77802_get_opmode_shift(id); + if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode))) + return -EINVAL; + /* * If the regulator has been disabled for suspend * then is invalid to try setting a suspend mode. 
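The max77802 hunks surrounding this point (and the s5m8767 hunk further down) all follow one pattern: the regulator id becomes unsigned int, and every opmode[]/regulators[] access is preceded by a WARN_ON_ONCE(id >= ARRAY_SIZE(...)) bound check, so a negative or oversized id can no longer index past the array. A minimal compilable sketch of that guard, with invented names (opmode, set_opmode) standing in for the driver's tables:

    #include <errno.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* Invented stand-in for the driver's per-regulator opmode table. */
    static unsigned int opmode[10];

    static int set_opmode(unsigned int id, unsigned int val)
    {
            /*
             * An unsigned id makes a negative value wrap to a huge one,
             * so this single compare also catches "id < 0"; reject it
             * instead of writing past the end of opmode[].
             */
            if (id >= ARRAY_SIZE(opmode)) {
                    fprintf(stderr, "set_opmode: id %u out of range\n", id);
                    return -EINVAL;
            }
            opmode[id] = val;
            return 0;
    }

    int main(void)
    {
            printf("id 3 -> %d\n", set_opmode(3, 1));    /* accepted */
            printf("id 12 -> %d\n", set_opmode(12, 1));  /* rejected */
            return 0;
    }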
@@ -210,9 +220,11 @@ static int max77802_set_suspend_mode(struct regulator_dev *rdev, static int max77802_enable(struct regulator_dev *rdev) { struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev); - int id = rdev_get_id(rdev); + unsigned int id = rdev_get_id(rdev); int shift = max77802_get_opmode_shift(id); + if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode))) + return -EINVAL; if (max77802->opmode[id] == MAX77802_OFF_PWRREQ) max77802->opmode[id] = MAX77802_OPMODE_NORMAL; @@ -541,7 +553,7 @@ static int max77802_pmic_probe(struct platform_device *pdev) for (i = 0; i < MAX77802_REG_MAX; i++) { struct regulator_dev *rdev; - int id = regulators[i].id; + unsigned int id = regulators[i].id; int shift = max77802_get_opmode_shift(id); int ret; @@ -559,10 +571,12 @@ static int max77802_pmic_probe(struct platform_device *pdev) * the hardware reports OFF as the regulator operating mode. * Default to operating mode NORMAL in that case. */ - if (val == MAX77802_STATUS_OFF) - max77802->opmode[id] = MAX77802_OPMODE_NORMAL; - else - max77802->opmode[id] = val; + if (id < ARRAY_SIZE(max77802->opmode)) { + if (val == MAX77802_STATUS_OFF) + max77802->opmode[id] = MAX77802_OPMODE_NORMAL; + else + max77802->opmode[id] = val; + } rdev = devm_regulator_register(&pdev->dev, &regulators[i], &config); diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c index 35269f998210534ecc77c9432eb035cc8d174bc7..754c6fcc6e642f5494c5ee600e66ada567a703ca 100644 --- a/drivers/regulator/s5m8767.c +++ b/drivers/regulator/s5m8767.c @@ -923,10 +923,14 @@ static int s5m8767_pmic_probe(struct platform_device *pdev) for (i = 0; i < pdata->num_regulators; i++) { const struct sec_voltage_desc *desc; - int id = pdata->regulators[i].id; + unsigned int id = pdata->regulators[i].id; int enable_reg, enable_val; struct regulator_dev *rdev; + BUILD_BUG_ON(ARRAY_SIZE(regulators) != ARRAY_SIZE(reg_voltage_map)); + if (WARN_ON_ONCE(id >= ARRAY_SIZE(regulators))) + continue; + desc = reg_voltage_map[id]; if (desc) { regulators[id].n_voltages = diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c index 6dc955ecab80f98c319bf67b500bdf32399f2209..968128b78e59c2b00e42d5680dccb7320e80fddf 100644 --- a/drivers/remoteproc/mtk_scp_ipi.c +++ b/drivers/remoteproc/mtk_scp_ipi.c @@ -164,21 +164,21 @@ int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len, WARN_ON(len > sizeof(send_obj->share_buf)) || WARN_ON(!buf)) return -EINVAL; - mutex_lock(&scp->send_lock); - ret = clk_prepare_enable(scp->clk); if (ret) { dev_err(scp->dev, "failed to enable clock\n"); - goto unlock_mutex; + return ret; } + mutex_lock(&scp->send_lock); + /* Wait until SCP receives the last command */ timeout = jiffies + msecs_to_jiffies(2000); do { if (time_after(jiffies, timeout)) { dev_err(scp->dev, "%s: IPI timeout!\n", __func__); ret = -ETIMEDOUT; - goto clock_disable; + goto unlock_mutex; } } while (readl(scp->reg_base + scp->data->host_to_scp_reg)); @@ -205,10 +205,9 @@ int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len, ret = 0; } -clock_disable: - clk_disable_unprepare(scp->clk); unlock_mutex: mutex_unlock(&scp->send_lock); + clk_disable_unprepare(scp->clk); return ret; } diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c index b45ee2cb2c0449cbb9e5a12524864bee6491665a..3417eef0aca3fd383369570ca1bc9f444f1ba69e 100644 --- a/drivers/rtc/rtc-pm8xxx.c +++ b/drivers/rtc/rtc-pm8xxx.c @@ -219,7 +219,6 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{ int rc, i; u8 value[NUM_8_BIT_RTC_REGS]; - unsigned int ctrl_reg; unsigned long secs, irq_flags; struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); const struct pm8xxx_rtc_regs *regs = rtc_dd->regs; @@ -231,6 +230,11 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) secs >>= 8; } + rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl, + regs->alarm_en, 0); + if (rc) + return rc; + spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); rc = regmap_bulk_write(rtc_dd->regmap, regs->alarm_rw, value, @@ -240,19 +244,11 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) goto rtc_rw_fail; } - rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg); - if (rc) - goto rtc_rw_fail; - - if (alarm->enabled) - ctrl_reg |= regs->alarm_en; - else - ctrl_reg &= ~regs->alarm_en; - - rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg); - if (rc) { - dev_err(dev, "Write to RTC alarm control register failed\n"); - goto rtc_rw_fail; + if (alarm->enabled) { + rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl, + regs->alarm_en, regs->alarm_en); + if (rc) + goto rtc_rw_fail; } dev_dbg(dev, "Alarm Set for h:m:s=%ptRt, y-m-d=%ptRdr\n", diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c index fb04470d7d4bbb2b77b3655ee880839e7de45d98..6e7c230d308f730e680dfa93bf1eaf1b7bf45a4f 100644 --- a/drivers/thermal/intel/intel_powerclamp.c +++ b/drivers/thermal/intel/intel_powerclamp.c @@ -57,6 +57,7 @@ static unsigned int target_mwait; static struct dentry *debug_dir; +static bool poll_pkg_cstate_enable; /* user selected target */ static unsigned int set_target_ratio; @@ -262,6 +263,9 @@ static unsigned int get_compensation(int ratio) { unsigned int comp = 0; + if (!poll_pkg_cstate_enable) + return 0; + /* we only use compensation if all adjacent ones are good */ if (ratio == 1 && cal_data[ratio].confidence >= CONFIDENCE_OK && @@ -534,7 +538,8 @@ static int start_power_clamp(void) control_cpu = cpumask_first(cpu_online_mask); clamping = true; - schedule_delayed_work(&poll_pkg_cstate_work, 0); + if (poll_pkg_cstate_enable) + schedule_delayed_work(&poll_pkg_cstate_work, 0); /* start one kthread worker per online cpu */ for_each_online_cpu(cpu) { @@ -603,11 +608,15 @@ static int powerclamp_get_max_state(struct thermal_cooling_device *cdev, static int powerclamp_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state) { - if (true == clamping) - *state = pkg_cstate_ratio_cur; - else + if (clamping) { + if (poll_pkg_cstate_enable) + *state = pkg_cstate_ratio_cur; + else + *state = set_target_ratio; + } else { /* to save power, do not poll idle ratio while not clamping */ *state = -1; /* indicates invalid state */ + } return 0; } @@ -732,6 +741,9 @@ static int __init powerclamp_init(void) goto exit_unregister; } + if (topology_max_packages() == 1 && topology_max_die_per_package() == 1) + poll_pkg_cstate_enable = true; + cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL, &powerclamp_cooling_ops); if (IS_ERR(cooling_dev)) { diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 844db4652dd1d9247219afe60126648a415d5fbd..8fdd34ff20ef541c1f60b7116e21778024b25c55 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -859,12 +859,13 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon, bool no_cached_open = tcon->nohandlecache; struct cached_fid *cfid = NULL; - oparms.tcon = tcon; - oparms.desired_access = FILE_READ_ATTRIBUTES; - oparms.disposition = 
FILE_OPEN; - oparms.create_options = cifs_create_options(cifs_sb, 0); - oparms.fid = &fid; - oparms.reconnect = false; + oparms = (struct cifs_open_parms) { + .tcon = tcon, + .desired_access = FILE_READ_ATTRIBUTES, + .disposition = FILE_OPEN, + .create_options = cifs_create_options(cifs_sb, 0), + .fid = &fid, + }; if (no_cached_open) { rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c index 0d736bf971465373b117d5ec5d9b8ea31009beeb..6a61aef49c2a1110f172601b2f66c3f3c6fc235b 100644 --- a/fs/exfat/dir.c +++ b/fs/exfat/dir.c @@ -106,7 +106,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent clu.dir = ei->hint_bmap.clu; } - while (clu_offset > 0) { + while (clu_offset > 0 && clu.dir != EXFAT_EOF_CLUSTER) { if (exfat_get_next_cluster(sb, &(clu.dir))) return -EIO; @@ -240,10 +240,7 @@ static int exfat_iterate(struct file *filp, struct dir_context *ctx) fake_offset = 1; } - if (cpos & (DENTRY_SIZE - 1)) { - err = -ENOENT; - goto unlock; - } + cpos = round_up(cpos, DENTRY_SIZE); /* name buffer should be allocated before use */ err = exfat_alloc_namebuf(nb); diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h index 0d139c7d150d9b6741befcd0d407e7dba340f8e5..07b09af57436fba33ca5033a37ff0ca2b7a73439 100644 --- a/fs/exfat/exfat_fs.h +++ b/fs/exfat/exfat_fs.h @@ -42,7 +42,7 @@ enum { #define ES_2_ENTRIES 2 #define ES_ALL_ENTRIES 0 -#define DIR_DELETED 0xFFFF0321 +#define DIR_DELETED 0xFFFFFFF7 /* type values */ #define TYPE_UNUSED 0x0000 diff --git a/fs/exfat/file.c b/fs/exfat/file.c index c819e8427ea577f1734c783986fbddc19445b721..819f47278305e9463023df56d58a5381fb2a7739 100644 --- a/fs/exfat/file.c +++ b/fs/exfat/file.c @@ -250,8 +250,7 @@ void exfat_truncate(struct inode *inode, loff_t size) else mark_inode_dirty(inode); - inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> - inode->i_blkbits; + inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9; write_size: aligned_size = i_size_read(inode); if (aligned_size & (blocksize - 1)) { diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index 2a9f6a80584ee3d63d9325b3f540d5457fa9a217..4bd73820a4ac0620936cd2eb9f9ff21184474363 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -242,8 +242,7 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset, return err; } /* end of if != DIR_DELETED */ - inode->i_blocks += - num_to_be_allocated << sbi->sect_per_clus_bits; + inode->i_blocks += EXFAT_CLU_TO_B(num_to_be_allocated, sbi) >> 9; /* * Move *clu pointer along FAT chains (hole care) because the @@ -600,8 +599,7 @@ static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info) exfat_save_attr(inode, info->attr); - inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> - inode->i_blkbits; + inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9; inode->i_mtime = info->mtime; inode->i_ctime = info->mtime; ei->i_crtime = info->crtime; diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c index 935f6005090091342c109456c50c0e5a27b8bc5b..1382d816912c8c00b25970c11208a62ee61259c2 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -398,7 +398,7 @@ static int exfat_find_empty_entry(struct inode *inode, ei->i_size_ondisk += sbi->cluster_size; ei->i_size_aligned += sbi->cluster_size; ei->flags = p_dir->flags; - inode->i_blocks += 1 << sbi->sect_per_clus_bits; + inode->i_blocks += sbi->cluster_size >> 9; } return dentry; diff --git a/fs/exfat/super.c b/fs/exfat/super.c index 
ba70ed1c980490cf593232722fe1e579459e0e7d..62d79af257a909c94cc4bde7936b68126ada5088 100644 --- a/fs/exfat/super.c +++ b/fs/exfat/super.c @@ -364,8 +364,7 @@ static int exfat_read_root(struct inode *inode) inode->i_op = &exfat_dir_inode_operations; inode->i_fop = &exfat_dir_operations; - inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> - inode->i_blkbits; + inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9; ei->i_pos = ((loff_t)sbi->root_dir << 32) | 0xffffffff; ei->i_size_aligned = i_size_read(inode); ei->i_size_ondisk = i_size_read(inode); diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index d9826897135cb244d39075bbdefa708b70d89ba6..24caf5c693b4393c47b084774e6dbc8d2060ce74 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1418,6 +1418,13 @@ static struct inode *ext4_xattr_inode_create(handle_t *handle, uid_t owner[2] = { i_uid_read(inode), i_gid_read(inode) }; int err; + if (inode->i_sb->s_root == NULL) { + ext4_warning(inode->i_sb, + "refuse to create EA inode when umounting"); + WARN_ON(1); + return ERR_PTR(-EINVAL); + } + /* * Let the next inode be the goal, so we try and allocate the EA inode * in the same group, or nearby one. diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index c31bc50d79b95d8b82aa9c3c301c6fa3c2c9769b..4e0374c9ca8d7d2e987c80f8f8a2066819983a77 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -721,7 +721,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) } if (fio->io_wbc && !is_read_io(fio->op)) - wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE); + wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE); __attach_io_flag(fio); bio_set_op_attrs(bio, fio->op, fio->op_flags); @@ -929,7 +929,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio) } if (fio->io_wbc) - wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE); + wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE); inc_page_count(fio->sbi, WB_DATA_TYPE(page)); @@ -1003,7 +1003,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio) } if (fio->io_wbc) - wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE); + wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE); io->last_block_in_bio = fio->new_blkaddr; f2fs_trace_ios(fio, 0); diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index f97c23ec93ce5b539627458fae648f65c07c2c08..4e794c1390cc14df0cb5cb9d9bc1962d7e4ece14 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -420,18 +420,17 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage, dentry_blk = page_address(page); + /* + * Start by zeroing the full block, to ensure that all unused space is + * zeroed and no uninitialized memory is leaked to disk. + */ + memset(dentry_blk, 0, F2FS_BLKSIZE); + make_dentry_ptr_inline(dir, &src, inline_dentry); make_dentry_ptr_block(dir, &dst, dentry_blk); /* copy data from inline dentry block to new dentry block */ memcpy(dst.bitmap, src.bitmap, src.nr_bitmap); - memset(dst.bitmap + src.nr_bitmap, 0, dst.nr_bitmap - src.nr_bitmap); - /* - * we do not need to zero out remainder part of dentry and filename - * field, since we have used bitmap for marking the usage status of - * them, besides, we can also ignore copying/zeroing reserved space - * of dentry block, because them haven't been used so far. 
- */ memcpy(dst.dentry, src.dentry, SIZE_OF_DIR_ENTRY * src.max); memcpy(dst.filename, src.filename, src.max * F2FS_SLOT_LEN); diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 9820aad4b19a85d8202efa0ec87f0d0279733831..e01b6a2d12d30642f0fecf6b4363a16cdbce10f1 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -145,8 +145,10 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp) return -EIO; error = gfs2_find_jhead(sdp->sd_jdesc, &head, false); - if (error || gfs2_withdrawn(sdp)) + if (error) { + gfs2_consist(sdp); return error; + } if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) { gfs2_consist(sdp); @@ -158,7 +160,9 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp) gfs2_log_pointers_init(sdp, head.lh_blkno); error = gfs2_quota_init(sdp); - if (!error && !gfs2_withdrawn(sdp)) + if (!error && gfs2_withdrawn(sdp)) + error = -EIO; + if (!error) set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); return error; } diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c index c0a73a6ffb28bd9c6201e3469f5e8314924ff336..397e02a566970fdf14a80f2bd377f1fe159c4f90 100644 --- a/fs/hfs/bnode.c +++ b/fs/hfs/bnode.c @@ -281,6 +281,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid) tree->node_hash[hash] = node; tree->node_hash_cnt++; } else { + hfs_bnode_get(node2); spin_unlock(&tree->hash_lock); kfree(node); wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags)); diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 9f776bdba2696bacc96b0942544fd7950c7a89f2..6d9cce22106c94cfe99b1aa6b39dafdb031c06ec 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -984,36 +984,28 @@ do_get_write_access(handle_t *handle, struct journal_head *jh, * ie. locked but not dirty) or tune2fs (which may actually have * the buffer dirtied, ugh.) */ - if (buffer_dirty(bh)) { + if (buffer_dirty(bh) && jh->b_transaction) { + warn_dirty_buffer(bh); /* - * First question: is this buffer already part of the current - * transaction or the existing committing transaction? - */ - if (jh->b_transaction) { - J_ASSERT_JH(jh, - jh->b_transaction == transaction || - jh->b_transaction == - journal->j_committing_transaction); - if (jh->b_next_transaction) - J_ASSERT_JH(jh, jh->b_next_transaction == - transaction); - warn_dirty_buffer(bh); - } - /* - * In any case we need to clean the dirty flag and we must - * do it under the buffer lock to be sure we don't race - * with running write-out. + * We need to clean the dirty flag and we must do it under the + * buffer lock to be sure we don't race with running write-out. */ JBUFFER_TRACE(jh, "Journalling dirty buffer"); clear_buffer_dirty(bh); + /* + * The buffer is going to be added to BJ_Reserved list now and + * nothing guarantees jbd2_journal_dirty_metadata() will be + * ever called for it. So we need to set jbddirty bit here to + * make sure the buffer is dirtied and written out when the + * journaling machinery is done with it. 
+ */ set_buffer_jbddirty(bh); } - unlock_buffer(bh); - error = -EROFS; if (is_handle_aborted(handle)) { spin_unlock(&jh->b_state_lock); + unlock_buffer(bh); goto out; } error = 0; @@ -1023,8 +1015,10 @@ do_get_write_access(handle_t *handle, struct journal_head *jh, * b_next_transaction points to it */ if (jh->b_transaction == transaction || - jh->b_next_transaction == transaction) + jh->b_next_transaction == transaction) { + unlock_buffer(bh); goto done; + } /* * this is the first time this transaction is touching this buffer, @@ -1048,10 +1042,24 @@ do_get_write_access(handle_t *handle, struct journal_head *jh, */ smp_wmb(); spin_lock(&journal->j_list_lock); + if (test_clear_buffer_dirty(bh)) { + /* + * Execute buffer dirty clearing and jh->b_transaction + * assignment under journal->j_list_lock locked to + * prevent bh being removed from checkpoint list if + * the buffer is in an intermediate state (not dirty + * and jh->b_transaction is NULL). + */ + JBUFFER_TRACE(jh, "Journalling dirty buffer"); + set_buffer_jbddirty(bh); + } __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved); spin_unlock(&journal->j_list_lock); + unlock_buffer(bh); goto done; } + unlock_buffer(bh); + /* * If there is already a copy-out version of this buffer, then we don't * need to make another one diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index 758d9661ef1e4a01bcdd104899b89fdaa5f8939b..98e77ea957ff3945836aff55fca02e36615ab50e 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c @@ -107,14 +107,6 @@ static int __ocfs2_move_extent(handle_t *handle, */ replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED; - ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), - context->et.et_root_bh, - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret) { - mlog_errno(ret); - goto out; - } - ret = ocfs2_split_extent(handle, &context->et, path, index, &replace_rec, context->meta_ac, &context->dealloc); @@ -123,8 +115,6 @@ static int __ocfs2_move_extent(handle_t *handle, goto out; } - ocfs2_journal_dirty(handle, context->et.et_root_bh); - context->new_phys_cpos = new_p_cpos; /* @@ -446,7 +436,7 @@ static int ocfs2_find_victim_alloc_group(struct inode *inode, bg = (struct ocfs2_group_desc *)gd_bh->b_data; if (vict_blkno < (le64_to_cpu(bg->bg_blkno) + - le16_to_cpu(bg->bg_bits))) { + (le16_to_cpu(bg->bg_bits) << bits_per_unit))) { *ret_bh = gd_bh; *vict_bit = (vict_blkno - blkno) >> @@ -561,6 +551,7 @@ static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh, last_free_bits++; if (last_free_bits == move_len) { + i -= move_len; *goal_bit = i; *phys_cpos = base_cpos + i; break; @@ -1032,18 +1023,19 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp) context->range = &range; + /* + * ok, the default threshold for the defragmentation + * is 1M, since our maximum clustersize was 1M also. + * any thought? + */ + if (!range.me_threshold) + range.me_threshold = 1024 * 1024; + + if (range.me_threshold > i_size_read(inode)) + range.me_threshold = i_size_read(inode); + if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) { context->auto_defrag = 1; - /* - * ok, the default theshold for the defragmentation - * is 1M, since our maximum clustersize was 1M also. - * any thought?
- */ - if (!range.me_threshold) - range.me_threshold = 1024 * 1024; - - if (range.me_threshold > i_size_read(inode)) - range.me_threshold = i_size_read(inode); if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG) context->partial = 1; diff --git a/fs/udf/file.c b/fs/udf/file.c index ad8eefad27d7f031d937aef7fdd5bb09083c2712..e283a62701b838065cca5a4d3318f8038dee3885 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -147,26 +147,24 @@ static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from) goto out; down_write(&iinfo->i_data_sem); - if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { - loff_t end = iocb->ki_pos + iov_iter_count(from); - - if (inode->i_sb->s_blocksize < - (udf_file_entry_alloc_offset(inode) + end)) { - err = udf_expand_file_adinicb(inode); - if (err) { - inode_unlock(inode); - udf_debug("udf_expand_adinicb: err=%d\n", err); - return err; - } - } else { - iinfo->i_lenAlloc = max(end, inode->i_size); - up_write(&iinfo->i_data_sem); + if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB && + inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) + + iocb->ki_pos + iov_iter_count(from))) { + err = udf_expand_file_adinicb(inode); + if (err) { + inode_unlock(inode); + udf_debug("udf_expand_adinicb: err=%d\n", err); + return err; } } else up_write(&iinfo->i_data_sem); retval = __generic_file_write_iter(iocb, from); out: + down_write(&iinfo->i_data_sem); + if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB && retval > 0) + iinfo->i_lenAlloc = inode->i_size; + up_write(&iinfo->i_data_sem); inode_unlock(inode); if (retval > 0) { diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 2132bfab67f35d55b9cb26158231443b601c6442..81876284a83c0e7d2f05fd727d84ada1adc5eb50 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -525,8 +525,10 @@ static int udf_do_extend_file(struct inode *inode, } if (fake) { - udf_add_aext(inode, last_pos, &last_ext->extLocation, - last_ext->extLength, 1); + err = udf_add_aext(inode, last_pos, &last_ext->extLocation, + last_ext->extLength, 1); + if (err < 0) + goto out_err; count++; } else { struct kernel_lb_addr tmploc; @@ -560,7 +562,7 @@ static int udf_do_extend_file(struct inode *inode, err = udf_add_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); if (err) - return err; + goto out_err; count++; } if (new_block_bytes) { @@ -569,7 +571,7 @@ static int udf_do_extend_file(struct inode *inode, err = udf_add_aext(inode, last_pos, &last_ext->extLocation, last_ext->extLength, 1); if (err) - return err; + goto out_err; count++; } @@ -583,6 +585,11 @@ static int udf_do_extend_file(struct inode *inode, return -EIO; return count; +out_err: + /* Remove extents we've created so far */ + udf_clear_extent_cache(inode); + udf_truncate_extents(inode); + return err; } /* Extend the final block of the file to final_block_len bytes */ @@ -797,19 +804,17 @@ static sector_t inode_getblk(struct inode *inode, sector_t block, c = 0; offset = 0; count += ret; - /* We are not covered by a preallocated extent? */ - if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) != - EXT_NOT_RECORDED_ALLOCATED) { - /* Is there any real extent? - otherwise we overwrite - * the fake one... */ - if (count) - c = !c; - laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | - inode->i_sb->s_blocksize; - memset(&laarr[c].extLocation, 0x00, - sizeof(struct kernel_lb_addr)); - count++; - } + /* + * Is there any real extent? - otherwise we overwrite the fake + * one... 
+ */ + if (count) + c = !c; + laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | + inode->i_sb->s_blocksize; + memset(&laarr[c].extLocation, 0x00, + sizeof(struct kernel_lb_addr)); + count++; endnum = c + 1; lastblock = 1; } else { @@ -1086,23 +1091,8 @@ static void udf_merge_extents(struct inode *inode, struct kernel_long_ad *laarr, blocksize - 1) >> blocksize_bits)))) { if (((li->extLength & UDF_EXTENT_LENGTH_MASK) + - (lip1->extLength & UDF_EXTENT_LENGTH_MASK) + - blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) { - lip1->extLength = (lip1->extLength - - (li->extLength & - UDF_EXTENT_LENGTH_MASK) + - UDF_EXTENT_LENGTH_MASK) & - ~(blocksize - 1); - li->extLength = (li->extLength & - UDF_EXTENT_FLAG_MASK) + - (UDF_EXTENT_LENGTH_MASK + 1) - - blocksize; - lip1->extLocation.logicalBlockNum = - li->extLocation.logicalBlockNum + - ((li->extLength & - UDF_EXTENT_LENGTH_MASK) >> - blocksize_bits); - } else { + (lip1->extLength & UDF_EXTENT_LENGTH_MASK) + + blocksize - 1) <= UDF_EXTENT_LENGTH_MASK) { li->extLength = lip1->extLength + (((li->extLength & UDF_EXTENT_LENGTH_MASK) + @@ -1393,6 +1383,7 @@ static int udf_read_inode(struct inode *inode, bool hidden_inode) ret = -EIO; goto out; } + iinfo->i_hidden = hidden_inode; iinfo->i_unique = 0; iinfo->i_lenEAttr = 0; iinfo->i_lenExtents = 0; @@ -1728,8 +1719,12 @@ static int udf_update_inode(struct inode *inode, int do_sync) if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0) fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1); - else - fe->fileLinkCount = cpu_to_le16(inode->i_nlink); + else { + if (iinfo->i_hidden) + fe->fileLinkCount = cpu_to_le16(0); + else + fe->fileLinkCount = cpu_to_le16(inode->i_nlink); + } fe->informationLength = cpu_to_le64(inode->i_size); @@ -1900,8 +1895,13 @@ struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino, if (!inode) return ERR_PTR(-ENOMEM); - if (!(inode->i_state & I_NEW)) + if (!(inode->i_state & I_NEW)) { + if (UDF_I(inode)->i_hidden != hidden_inode) { + iput(inode); + return ERR_PTR(-EFSCORRUPTED); + } return inode; + } memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr)); err = udf_read_inode(inode, hidden_inode); diff --git a/fs/udf/super.c b/fs/udf/super.c index 3448098e547681001c6a24ea76031b3ecc464583..4af9ce34ee8047ddbff8285b0cfbfc581756d4c6 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -147,6 +147,7 @@ static struct inode *udf_alloc_inode(struct super_block *sb) ei->i_next_alloc_goal = 0; ei->i_strat4096 = 0; ei->i_streamdir = 0; + ei->i_hidden = 0; init_rwsem(&ei->i_data_sem); ei->cached_extent.lstart = -1; spin_lock_init(&ei->i_extent_cache_lock); diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h index 06ff7006b822738ef73845b28fe68ae34f9890bf..312b7c9ef10e230df91bd03192b848181b6d8c5e 100644 --- a/fs/udf/udf_i.h +++ b/fs/udf/udf_i.h @@ -44,7 +44,8 @@ struct udf_inode_info { unsigned i_use : 1; /* unallocSpaceEntry */ unsigned i_strat4096 : 1; unsigned i_streamdir : 1; - unsigned reserved : 25; + unsigned i_hidden : 1; /* hidden system inode */ + unsigned reserved : 24; __u8 *i_data; struct kernel_lb_addr i_locStreamdir; __u64 i_lenStreams; diff --git a/include/linux/ima.h b/include/linux/ima.h index 8fa7bcfb2da2c0e3d98ca6e03abed1f26193bcf4..cd8483fa703eaf844412a2add188b01ed967f3ca 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -18,7 +18,8 @@ extern int ima_bprm_check(struct linux_binprm *bprm); extern int ima_file_check(struct file *file, int mask); extern void ima_post_create_tmpfile(struct inode *inode); extern void ima_file_free(struct file *file); 
-extern int ima_file_mmap(struct file *file, unsigned long prot); +extern int ima_file_mmap(struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags); extern int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot); extern int ima_load_data(enum kernel_load_data_id id, bool contents); extern int ima_post_load_data(char *buf, loff_t size, @@ -70,7 +71,8 @@ static inline void ima_file_free(struct file *file) return; } -static inline int ima_file_mmap(struct file *file, unsigned long prot) +static inline int ima_file_mmap(struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags) { return 0; } diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 4dbebd319b6f5d3117449ba5e3087b19b7cc22a1..18b7c40ffb37adce316d7cdb0c248712933dcbbd 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -342,6 +342,8 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table, size_t *length, loff_t *ppos); #endif extern void wait_for_kprobe_optimizer(void); +bool optprobe_queued_unopt(struct optimized_kprobe *op); +bool kprobe_disarmed(struct kprobe *p); #else static inline void wait_for_kprobe_optimizer(void) { } #endif /* CONFIG_OPTPROBES */ diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index c1e4940091321a954585255fac248b06ff1ba057..1bca428d2b45be96fd9011cba957ec0cbcac78de 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -486,6 +486,7 @@ struct io_poll_iocb { struct file *file; struct wait_queue_head *head; __poll_t events; + int retries; struct wait_queue_entry wait; }; @@ -2460,6 +2461,15 @@ static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req) static inline bool io_run_task_work(void) { + /* + * PF_IO_WORKER never returns to userspace, so check here if we have + * notify work that needs processing. + */ + if (current->flags & PF_IO_WORKER && + test_thread_flag(TIF_NOTIFY_RESUME)) { + __set_current_state(TASK_RUNNING); + tracehook_notify_resume(NULL); + } if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) { __set_current_state(TASK_RUNNING); tracehook_notify_signal(); @@ -4986,7 +4996,7 @@ static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); sr->len = READ_ONCE(sqe->len); sr->bgid = READ_ONCE(sqe->buf_group); - sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL; + sr->msg_flags = READ_ONCE(sqe->msg_flags); if (sr->msg_flags & MSG_DONTWAIT) req->flags |= REQ_F_NOWAIT; @@ -5740,6 +5750,14 @@ enum { IO_APOLL_READY }; +/* + * We can't reliably detect loops where a poll trigger fires repeatedly and + * the subsequent issue attempt fails. But rather than fail these immediately, + * allow a certain number of retries before we give up. Given that this + * condition should _rarely_ trigger even once, we should be fine with a larger value.
+ */ +#define APOLL_MAX_RETRY 128 + static int io_arm_poll_handler(struct io_kiocb *req) { const struct io_op_def *def = &io_op_defs[req->opcode]; @@ -5751,8 +5769,6 @@ static int io_arm_poll_handler(struct io_kiocb *req) if (!req->file || !file_can_poll(req->file)) return IO_APOLL_ABORTED; - if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED) - return IO_APOLL_ABORTED; if (!def->pollin && !def->pollout) return IO_APOLL_ABORTED; @@ -5770,8 +5786,15 @@ static int io_arm_poll_handler(struct io_kiocb *req) if (req->flags & REQ_F_POLLED) { apoll = req->apoll; kfree(apoll->double_poll); + if (unlikely(!--apoll->poll.retries)) { + apoll->double_poll = NULL; + return IO_APOLL_ABORTED; + } } else { apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC); + if (unlikely(!apoll)) + return IO_APOLL_ABORTED; + apoll->poll.retries = APOLL_MAX_RETRY; } if (unlikely(!apoll)) return IO_APOLL_ABORTED; @@ -9050,14 +9071,17 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov, pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM, pages, vmas); if (pret == nr_pages) { + struct file *file = vmas[0]->vm_file; + /* don't support file backed memory */ for (i = 0; i < nr_pages; i++) { - struct vm_area_struct *vma = vmas[i]; - - if (vma_is_shmem(vma)) + if (vmas[i]->vm_file != file) { + ret = -EINVAL; + break; + } + if (!file) continue; - if (vma->vm_file && - !is_file_hugepages(vma->vm_file)) { + if (!vma_is_shmem(vmas[i]) && !is_file_hugepages(file)) { ret = -EOPNOTSUPP; break; } @@ -9683,6 +9707,7 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, while (!list_empty_careful(&ctx->iopoll_list)) { io_iopoll_try_reap_events(ctx); ret = true; + cond_resched(); } } diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index c6b419db68efc5595fb8c02abe3a67153ad7ec8b..1720998933f8dfcb86df570b103dc897781e7e9f 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -495,6 +495,9 @@ void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq) return; hwirq = irq_data->hwirq; + + mutex_lock(&irq_domain_mutex); + irq_set_status_flags(irq, IRQ_NOREQUEST); /* remove chip and handler */ @@ -514,10 +517,12 @@ void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq) /* Clear reverse map for this hwirq */ irq_domain_clear_mapping(domain, hwirq); + + mutex_unlock(&irq_domain_mutex); } -int irq_domain_associate(struct irq_domain *domain, unsigned int virq, - irq_hw_number_t hwirq) +static int irq_domain_associate_locked(struct irq_domain *domain, unsigned int virq, + irq_hw_number_t hwirq) { struct irq_data *irq_data = irq_get_irq_data(virq); int ret; @@ -530,7 +535,6 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq, if (WARN(irq_data->domain, "error: virq%i is already associated", virq)) return -EINVAL; - mutex_lock(&irq_domain_mutex); irq_data->hwirq = hwirq; irq_data->domain = domain; if (domain->ops->map) { @@ -547,7 +551,6 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq, } irq_data->domain = NULL; irq_data->hwirq = 0; - mutex_unlock(&irq_domain_mutex); return ret; } @@ -558,12 +561,23 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq, domain->mapcount++; irq_domain_set_mapping(domain, hwirq, irq_data); - mutex_unlock(&irq_domain_mutex); irq_clear_status_flags(virq, IRQ_NOREQUEST); return 0; } + +int irq_domain_associate(struct irq_domain *domain, unsigned int virq, + irq_hw_number_t hwirq) +{ + int ret; + + mutex_lock(&irq_domain_mutex); + ret = irq_domain_associate_locked(domain, virq, hwirq); + 
mutex_unlock(&irq_domain_mutex); + + return ret; +} EXPORT_SYMBOL_GPL(irq_domain_associate); void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base, @@ -823,13 +837,8 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec) } irq_data = irq_get_irq_data(virq); - if (!irq_data) { - if (irq_domain_is_hierarchy(domain)) - irq_domain_free_irqs(virq, 1); - else - irq_dispose_mapping(virq); + if (WARN_ON(!irq_data)) return 0; - } /* Store trigger type */ irqd_set_trigger_type(irq_data, type); diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 75150e75551809c24d8734e1e606c82cf0b9ab32..86d71c49b495711a09cc1db894ba17711fe4000e 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -447,8 +447,8 @@ static inline int kprobe_optready(struct kprobe *p) return 0; } -/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */ -static inline int kprobe_disarmed(struct kprobe *p) +/* Return true if the kprobe is disarmed. Note: p must be on hash list */ +bool kprobe_disarmed(struct kprobe *p) { struct optimized_kprobe *op; @@ -652,7 +652,7 @@ void wait_for_kprobe_optimizer(void) mutex_unlock(&kprobe_mutex); } -static bool optprobe_queued_unopt(struct optimized_kprobe *op) +bool optprobe_queued_unopt(struct optimized_kprobe *op) { struct optimized_kprobe *_op; diff --git a/kernel/resource.c b/kernel/resource.c index 817545ff80b9b245ed1ab8cd9cc7d0d1cd3ae3d7..100253d4909c987da7a05846864775bf7108b8e6 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -1293,20 +1293,6 @@ void release_mem_region_adjustable(resource_size_t start, resource_size_t size) continue; } - /* - * All memory regions added from memory-hotplug path have the - * flag IORESOURCE_SYSTEM_RAM. If the resource does not have - * this flag, we know that we are dealing with a resource coming - * from HMM/devm. HMM/devm use another mechanism to add/release - * a resource. This goes via devm_request_mem_region and - * devm_release_mem_region. - * HMM/devm take care to release their resources when they want, - * so if we are dealing with them, let us just back off here. - */ - if (!(res->flags & IORESOURCE_SYSRAM)) { - break; - } - if (!(res->flags & IORESOURCE_MEM)) break; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index cb7b0aead7096d6c7440ad06705d5c571dcb1e71..9b15760e0541a5c082ccd0724cd8157b3d55ce73 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2803,6 +2803,9 @@ void deferred_split_huge_page(struct page *page) if (PageSwapCache(page)) return; + if (!list_empty(page_deferred_list(page))) + return; + spin_lock_irqsave(&ds_queue->split_queue_lock, flags); if (list_empty(page_deferred_list(page))) { count_vm_event(THP_DEFERRED_SPLIT_PAGE); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f4a6aeb0a9a29d619dd60db6b087d77e45975acd..6a7b65ec206943be1735b2788505adfa8539251a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3959,6 +3959,10 @@ static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, { struct mem_cgroup *memcg = mem_cgroup_from_css(css); + pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. 
" + "Please report your usecase to linux-mm@kvack.org if you " + "depend on this functionality.\n"); + if (val & ~MOVE_MASK) return -EINVAL; diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 600b97677085f95c866882d9db0698302bc14a0f..dd4b28b11ebe3c9be4ab26b0f17001734ae766c8 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -378,7 +378,9 @@ static int process_measurement(struct file *file, const struct cred *cred, /** * ima_file_mmap - based on policy, collect/store measurement. * @file: pointer to the file to be measured (May be NULL) - * @prot: contains the protection that will be applied by the kernel. + * @reqprot: protection requested by the application + * @prot: protection that will be applied by the kernel + * @flags: operational flags * * Measure files being mmapped executable based on the ima_must_measure() * policy decision. @@ -386,7 +388,8 @@ static int process_measurement(struct file *file, const struct cred *cred, * On success return 0. On integrity appraisal error, assuming the file * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ -int ima_file_mmap(struct file *file, unsigned long prot) +int ima_file_mmap(struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags) { u32 secid; diff --git a/security/security.c b/security/security.c index 6c5f9a7c6b592e936d024fd62b3d2b67cca67252..60e3bee786e52e1aa35ef8dfea8d4ddc12ffd971 100644 --- a/security/security.c +++ b/security/security.c @@ -1534,12 +1534,13 @@ static inline unsigned long mmap_prot(struct file *file, unsigned long prot) int security_mmap_file(struct file *file, unsigned long prot, unsigned long flags) { + unsigned long prot_adj = mmap_prot(file, prot); int ret; - ret = call_int_hook(mmap_file, 0, file, prot, - mmap_prot(file, prot), flags); + + ret = call_int_hook(mmap_file, 0, file, prot, prot_adj, flags); if (ret) return ret; - return ima_file_mmap(file, prot); + return ima_file_mmap(file, prot, prot_adj, flags); } int security_mmap_addr(unsigned long addr) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index fffa681313b6671779f84b067f7e1be787c07cd4..f2ef75c8de42743df20b19af1c72aa7a1dc3a2c9 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -11153,6 +11153,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), + SND_PCI_QUIRK(0x103c, 0x870c, "HP", ALC897_FIXUP_HP_HSMIC_VERB), SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB), SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2), SND_PCI_QUIRK(0x103c, 0x877e, "HP 288 Pro G6", ALC671_FIXUP_HP_HEADSET_MIC2), diff --git a/sound/pci/ice1712/aureon.c b/sound/pci/ice1712/aureon.c index 9a30f6d35d1358e31282002bc07d2d276b3c65e1..40a0e0095030179c7ccb642672d34990f4ab9b02 100644 --- a/sound/pci/ice1712/aureon.c +++ b/sound/pci/ice1712/aureon.c @@ -1892,6 +1892,7 @@ static int aureon_add_controls(struct snd_ice1712 *ice) unsigned char id; snd_ice1712_save_gpio_status(ice); id = aureon_cs8415_get(ice, CS8415_ID); + snd_ice1712_restore_gpio_status(ice); if (id != 0x41) dev_info(ice->card->dev, "No CS8415 chip. 
diff --git a/sound/pci/ice1712/aureon.c b/sound/pci/ice1712/aureon.c
index 9a30f6d35d1358e31282002bc07d2d276b3c65e1..40a0e0095030179c7ccb642672d34990f4ab9b02 100644
--- a/sound/pci/ice1712/aureon.c
+++ b/sound/pci/ice1712/aureon.c
@@ -1892,6 +1892,7 @@ static int aureon_add_controls(struct snd_ice1712 *ice)
 		unsigned char id;
 		snd_ice1712_save_gpio_status(ice);
 		id = aureon_cs8415_get(ice, CS8415_ID);
+		snd_ice1712_restore_gpio_status(ice);
 		if (id != 0x41)
 			dev_info(ice->card->dev,
 				 "No CS8415 chip. Skipping CS8415 controls.\n");
@@ -1909,7 +1910,6 @@ static int aureon_add_controls(struct snd_ice1712 *ice)
 				kctl->id.device = ice->pcm->device;
 			}
 		}
-		snd_ice1712_restore_gpio_status(ice);
 	}
 
 	return 0;
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index e037826b24517c62facec338533783530b9e4434..2d41e6ab2ce4e0edf4e0b9be0aba00bbabb7bd26 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -86,7 +86,7 @@ kirkwood_dma_conf_mbus_windows(void __iomem *base, int win,
 
 	/* try to find matching cs for current dma address */
 	for (i = 0; i < dram->num_cs; i++) {
-		const struct mbus_dram_window *cs = dram->cs + i;
+		const struct mbus_dram_window *cs = &dram->cs[i];
 		if ((cs->base & 0xffff0000) < (dma & 0xffff0000)) {
 			writel(cs->base & 0xffff0000,
 				base + KIRKWOOD_AUDIO_WIN_BASE_REG(win));
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 8b1e3ae8fe50d919cea3fc396789594c6b65df3c..ea26f2b0c1bc2316d37b10501a431561f3593670 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -178,6 +178,7 @@ my $store_failures;
 my $store_successes;
 my $test_name;
 my $timeout;
+my $run_timeout;
 my $connect_timeout;
 my $config_bisect_exec;
 my $booted_timeout;
@@ -340,6 +341,7 @@ my %option_map = (
     "STORE_SUCCESSES"		=> \$store_successes,
     "TEST_NAME"			=> \$test_name,
     "TIMEOUT"			=> \$timeout,
+    "RUN_TIMEOUT"		=> \$run_timeout,
     "CONNECT_TIMEOUT"		=> \$connect_timeout,
     "CONFIG_BISECT_EXEC"	=> \$config_bisect_exec,
     "BOOTED_TIMEOUT"		=> \$booted_timeout,
@@ -1433,7 +1435,8 @@ sub reboot {
 
 	# Still need to wait for the reboot to finish
 	wait_for_monitor($time, $reboot_success_line);
-
+    }
+    if ($powercycle || $time) {
 	end_monitor;
     }
 }
@@ -1799,6 +1802,14 @@ sub run_command {
     $command =~ s/\$SSH_USER/$ssh_user/g;
     $command =~ s/\$MACHINE/$machine/g;
 
+    if (!defined($timeout)) {
+	$timeout = $run_timeout;
+    }
+
+    if (!defined($timeout)) {
+	$timeout = -1; # tell wait_for_input to wait indefinitely
+    }
+
     doprint("$command ... ");
     $start_time = time;
 
@@ -1825,13 +1836,10 @@ sub run_command {
 
     while (1) {
 	my $fp = \*CMD;
-	if (defined($timeout)) {
-	    doprint "timeout = $timeout\n";
-	}
 	my $line = wait_for_input($fp, $timeout);
 	if (!defined($line)) {
 	    my $now = time;
-	    if (defined($timeout) && (($now - $start_time) >= $timeout)) {
+	    if ($timeout >= 0 && (($now - $start_time) >= $timeout)) {
 		doprint "Hit timeout of $timeout, killing process\n";
 		$hit_timeout = 1;
 		kill 9, $pid;
@@ -2004,6 +2012,11 @@ sub wait_for_input
 	$time = $timeout;
     }
 
+    if ($time < 0) {
+	# Negative number means wait indefinitely
+	undef $time;
+    }
+
     $rin = '';
     vec($rin, fileno($fp), 1) = 1;
     vec($rin, fileno(\*STDIN), 1) = 1;
@@ -4283,6 +4296,9 @@ sub send_email {
 }
 
 sub cancel_test {
+    if ($monitor_cnt) {
+	end_monitor;
+    }
     if ($email_when_canceled) {
 	my $name = get_test_name;
 	send_email("KTEST: Your [$name] test was cancelled",
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index 5e7d1d7297529be4ca1b684af3bd5b3e87b1fcfc..65957a9803b50abc3dc706bdb10fb55b214842a6 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -809,6 +809,11 @@
 # is issued instead of a reboot.
 # CONNECT_TIMEOUT = 25
 
+# The timeout in seconds for how long to wait for any running command
+# to timeout. If not defined, it will let it go indefinitely.
+# (default undefined)
+#RUN_TIMEOUT = 600
+
 # In between tests, a reboot of the box may occur, and this
 # is the time to wait for the console after it stops producing
 # output. Some machines may not produce a large lag on reboot
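Note: the ktest.pl hunks above adopt the common "negative timeout means wait
forever" convention: run_command() falls back from RUN_TIMEOUT to -1, and
wait_for_input() maps a negative value to an undefined select() timeout. The
C equivalent passes a NULL struct timeval to select(2); a small sketch (the
helper below is invented for illustration, not ktest code):

    #include <stdio.h>
    #include <sys/select.h>

    /* Wait for fd to become readable; timeout_s < 0 blocks indefinitely. */
    static int wait_readable(int fd, int timeout_s)
    {
        fd_set rfds;
        struct timeval tv = { .tv_sec = timeout_s, .tv_usec = 0 };

        FD_ZERO(&rfds);
        FD_SET(fd, &rfds);

        /* A NULL timeout makes select() wait forever. */
        return select(fd + 1, &rfds, NULL, NULL,
                      timeout_s < 0 ? NULL : &tv);
    }

    int main(void)
    {
        int r = wait_readable(0, 5);    /* pass -1 to wait indefinitely */

        printf(r > 0 ? "readable\n" : r == 0 ? "timed out\n" : "error\n");
        return 0;
    }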
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index d5bebb37238c0bc9908d0d1f82f954310fa696ea..ce6f3b916ef9d8f9367f60b5e3ed5a75414f56d6 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -187,15 +187,17 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
 			r = kvm_io_bus_unregister_dev(kvm,
 				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
 
+			kvm_iodevice_destructor(&dev->dev);
+
 			/*
 			 * On failure, unregister destroys all devices on the
 			 * bus _except_ the target device, i.e. coalesced_zones
-			 * has been modified. No need to restart the walk as
-			 * there aren't any zones left.
+			 * has been modified. Bail after destroying the target
+			 * device, there's no need to restart the walk as there
+			 * aren't any zones left.
 			 */
 			if (r)
 				break;
-			kvm_iodevice_destructor(&dev->dev);
 		}
 	}
 
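Note: the coalesced_mmio fix above moves kvm_iodevice_destructor() ahead of
the error check, so the matched device is freed even when
kvm_io_bus_unregister_dev() fails and the walk must stop. A user-space
sketch of that ordering over a singly linked list (types and helpers
invented for illustration, not the KVM API):

    #include <stdlib.h>

    struct zone_dev {
        struct zone_dev *next;
        int id;
    };

    /* Stand-in for kvm_io_bus_unregister_dev(): a nonzero return means
     * everything except @dev has already been torn down. */
    static int bus_unregister(struct zone_dev *dev)
    {
        (void)dev;
        return 0;
    }

    static void remove_zone(struct zone_dev **head, int id)
    {
        struct zone_dev **pp = head;

        while (*pp) {
            struct zone_dev *dev = *pp;

            if (dev->id != id) {
                pp = &dev->next;
                continue;
            }

            *pp = dev->next;            /* unlink before freeing */
            int r = bus_unregister(dev);

            /* Destroy the matched device before acting on failure,
             * so an early break cannot leak it. */
            free(dev);
            if (r)
                break;
        }
    }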